@Override
public void onTrigger(final ProcessContext context, final ProcessSession session)
    throws ProcessException {
  // Pull up to 50 FlowFiles per invocation so the dictionary lookup cost is amortized.
  final List<FlowFile> batch = session.get(50);
  if (batch.isEmpty()) {
    return;
  }

  final ProcessorLog log = getLogger();

  // Re-read the dictionary if the backing file changed on disk since the last trigger.
  try {
    if (fileWatcher.checkAndReset()) {
      this.dictionaryTerms = createDictionary(context);
    }
  } catch (final IOException e) {
    log.error("Unable to reload dictionary due to {}", e);
  }

  final String criteria = context.getProperty(MATCHING_CRITERIA).getValue();
  final boolean requireAllTerms = criteria.equals(MATCH_CRITERIA_ALL);

  for (final FlowFile flowFile : batch) {
    // "Match all" requires every attribute matched by the pattern to hit a dictionary
    // term; "match any" requires just one.
    final boolean hit;
    if (requireAllTerms) {
      hit = allMatch(flowFile, attributePattern, dictionaryTerms);
    } else {
      hit = anyMatch(flowFile, attributePattern, dictionaryTerms);
    }

    final Relationship destination = hit ? REL_MATCHED : REL_UNMATCHED;
    session.getProvenanceReporter().route(flowFile, destination);
    session.transfer(flowFile, destination);
    log.info("Transferred {} to {}", new Object[] {flowFile, destination});
  }
}
@Override public void onTrigger(final ProcessContext context, final ProcessSession session) { FlowFile flowFile = session.get(); if (flowFile == null) { return; } final ProcessorLog logger = getLogger(); boolean encode = context.getProperty(MODE).getValue().equalsIgnoreCase(ENCODE_MODE); String encoding = context.getProperty(ENCODING).getValue(); StreamCallback encoder = null; // Select the encoder/decoder to use if (encode) { if (encoding.equalsIgnoreCase(BASE64_ENCODING)) { encoder = new EncodeBase64(); } else if (encoding.equalsIgnoreCase(BASE32_ENCODING)) { encoder = new EncodeBase32(); } else if (encoding.equalsIgnoreCase(HEX_ENCODING)) { encoder = new EncodeHex(); } } else { if (encoding.equalsIgnoreCase(BASE64_ENCODING)) { encoder = new DecodeBase64(); } else if (encoding.equalsIgnoreCase(BASE32_ENCODING)) { encoder = new DecodeBase32(); } else if (encoding.equalsIgnoreCase(HEX_ENCODING)) { encoder = new DecodeHex(); } } if (encoder == null) { logger.warn( "Unknown operation: {} {}", new Object[] {encode ? "encode" : "decode", encoding}); return; } try { final StopWatch stopWatch = new StopWatch(true); flowFile = session.write(flowFile, encoder); logger.info("Successfully {} {}", new Object[] {encode ? "encoded" : "decoded", flowFile}); session .getProvenanceReporter() .modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS)); session.transfer(flowFile, REL_SUCCESS); } catch (Exception e) { logger.error( "Failed to {} {} due to {}", new Object[] {encode ? "encode" : "decode", flowFile, e}); session.transfer(flowFile, REL_FAILURE); } }
private Map<String, String> sendDirectMessage( final ProcessContext context, final ProcessSession session, final FlowFile flowFile, final ProcessorLog logger, Message message, Recipient recipient) { String groupAttribute = context.getProperty(GROUP_ATTRIBUTE_NAME).getValue(); String groupSufixAttribute = context.getProperty(GROUP_SUFIX).isSet() ? context.getProperty(GROUP_SUFIX).evaluateAttributeExpressions(flowFile).getValue() : ""; String groupSubjectAttribute = context.getProperty(GROUP_SUBJECT_ATTRIBUTE_NAME).getValue(); String participantsAttribute = context.getProperty(PARTICIPANTS_ATTRIBUTE_NAME).getValue(); String groupName = message.getHeader().getRelatedConversationId() != null ? message.getHeader().getRelatedConversationId() : UUID.randomUUID().toString(); groupName += groupSufixAttribute; String groupSubject = message.getHeader().getSubject() != null ? message.getHeader().getSubject() : "Private Message"; String referenceAttributeName = context.getProperty(REFERENCE_ATTRIBUTE_NAME).getValue(); Map<String, String> generatedReferences = new HashMap<>(); // create references generatedReferences.put(recipient.getRecipientId(), UUID.randomUUID().toString()); FlowFile clone = session.clone(flowFile); session.getProvenanceReporter().clone(flowFile, clone); clone = session.putAttribute(clone, groupAttribute, groupName); clone = session.putAttribute(clone, groupSubjectAttribute, groupSubject); clone = session.putAttribute( clone, referenceAttributeName, generatedReferences.values().stream().collect(Collectors.joining(","))); clone = session.putAttribute( clone, participantsAttribute, recipient.getDeliveryAddress().getPhysicalAddress().getAddress()); session.getProvenanceReporter().modifyAttributes(clone); logger.debug("Routing message {} to {}.", new Object[] {clone, REL_DIRECT_MESSAGE}); session.transfer(clone, REL_DIRECT_MESSAGE); session.getProvenanceReporter().route(clone, REL_DIRECT_MESSAGE); return generatedReferences; }
public static JmsProcessingSummary map2FlowFile( final ProcessContext context, final ProcessSession session, final Message message, final boolean addAttributes, ProcessorLog logger) throws Exception { // Currently not very useful, because always one Message == one FlowFile final IntegerHolder msgsThisFlowFile = new IntegerHolder(1); FlowFile flowFile = session.create(); try { // MapMessage is exception, add only name-value pairs to FlowFile attributes if (message instanceof MapMessage) { MapMessage mapMessage = (MapMessage) message; flowFile = session.putAllAttributes(flowFile, createMapMessageValues(mapMessage)); } else { // all other message types, write Message body to FlowFile content flowFile = session.write( flowFile, new OutputStreamCallback() { @Override public void process(final OutputStream rawOut) throws IOException { try (final OutputStream out = new BufferedOutputStream(rawOut, 65536)) { final byte[] messageBody = JmsFactory.createByteArray(message); out.write(messageBody); } catch (final JMSException e) { throw new ProcessException("Failed to receive JMS Message due to {}", e); } } }); } if (addAttributes) { flowFile = session.putAllAttributes(flowFile, JmsFactory.createAttributeMap(message)); } session.getProvenanceReporter().receive(flowFile, context.getProperty(URL).getValue()); session.transfer(flowFile, REL_SUCCESS); logger.info( "Created {} from {} messages received from JMS Server and transferred to 'success'", new Object[] {flowFile, msgsThisFlowFile.get()}); return new JmsProcessingSummary(flowFile.getSize(), message, flowFile); } catch (Exception e) { session.remove(flowFile); throw e; } }
/**
 * (Re)loads the search dictionary from the configured file and atomically publishes a new
 * Aho-Corasick {@code Search} via {@code searchRef}.
 *
 * <p>When {@code force} is true the update lock is acquired blocking; otherwise the method
 * gives up immediately if another thread is already reloading.
 *
 * @param context provides DICTIONARY (file path) and DICTIONARY_ENCODING properties
 * @param force   block on the update lock instead of try-locking
 * @param logger  processor logger
 * @return true if the dictionary was reloaded; false if the lock could not be acquired
 * @throws IOException if the dictionary file cannot be read or parsed
 */
private boolean reloadDictionary(
    final ProcessContext context, final boolean force, final ProcessorLog logger)
    throws IOException {
  boolean obtainedLock;
  if (force) {
    dictionaryUpdateLock.lock();
    obtainedLock = true;
  } else {
    obtainedLock = dictionaryUpdateLock.tryLock();
  }

  if (!obtainedLock) {
    return false;
  }
  try {
    final Search<byte[]> search = new AhoCorasick<>();
    final Set<SearchTerm<byte[]>> terms = new HashSet<>();

    // Fixed: previously the InputStream leaked if the TermLoader constructor threw, because
    // it was opened outside any try/finally. try-with-resources closes it on every path
    // (TermLoader.close() closing it as well is a harmless double-close).
    try (final InputStream inStream =
        Files.newInputStream(
            Paths.get(context.getProperty(DICTIONARY).getValue()), StandardOpenOption.READ)) {

      final TermLoader termLoader;
      if (context.getProperty(DICTIONARY_ENCODING).getValue().equalsIgnoreCase(TEXT_ENCODING)) {
        termLoader = new TextualTermLoader(inStream);
      } else {
        termLoader = new BinaryTermLoader(inStream);
      }

      try {
        SearchTerm<byte[]> term;
        while ((term = termLoader.nextTerm()) != null) {
          terms.add(term);
        }

        search.initializeDictionary(terms);
        // Publish the fully-initialized dictionary atomically for readers.
        searchRef.set(search);
        logger.info(
            "Loaded search dictionary from {}",
            new Object[] {context.getProperty(DICTIONARY).getValue()});
        return true;
      } finally {
        termLoader.close();
      }
    }
  } finally {
    dictionaryUpdateLock.unlock();
  }
}
/**
 * Routes one clone of the incoming FlowFile to {@code REL_PERMANENT_GROUP} for each
 * pre-established group recipient, tagging each clone with the group-name and a freshly
 * generated reference. Returns the generated references keyed by recipient id.
 */
private Map<String, String> sendMessagesToPreStablishedGroups(
    final ProcessContext context,
    final ProcessSession session,
    final FlowFile flowFile,
    final ProcessorLog logger,
    List<Recipient> recipients) {

  final String groupAttrName = context.getProperty(GROUP_ATTRIBUTE_NAME).getValue();
  final String referenceAttrName = context.getProperty(REFERENCE_ATTRIBUTE_NAME).getValue();

  String suffix = "";
  if (context.getProperty(GROUP_SUFIX).isSet()) {
    suffix = context.getProperty(GROUP_SUFIX).evaluateAttributeExpressions(flowFile).getValue();
  }

  final Map<String, String> generatedReferences = new HashMap<>();

  for (final Recipient recipient : recipients) {
    // One fresh reference per recipient, attached to that recipient's clone.
    generatedReferences.put(recipient.getRecipientId(), UUID.randomUUID().toString());
    final String address = recipient.getDeliveryAddress().getPhysicalAddress().getAddress();

    FlowFile clone = session.clone(flowFile);
    session.getProvenanceReporter().clone(flowFile, clone);
    clone =
        session.putAttribute(clone, groupAttrName, convertAddressToGroupName(address, suffix));
    clone =
        session.putAttribute(
            clone, referenceAttrName, generatedReferences.get(recipient.getRecipientId()));
    session.getProvenanceReporter().modifyAttributes(clone);

    logger.debug("Routing message {} to {}.", new Object[] {clone, REL_PERMANENT_GROUP});
    session.transfer(clone, REL_PERMANENT_GROUP);
    session.getProvenanceReporter().route(clone, REL_PERMANENT_GROUP);
  }

  return generatedReferences;
}
/**
 * Routes a multi-recipient chat message to {@code REL_DYNAMIC_GROUP}: a single clone of the
 * incoming FlowFile carries the group name/subject, the comma-joined generated references,
 * and a JSON array of participant addresses. Returns the references keyed by recipient id.
 */
private Map<String, String> sendMessageToDynamicGroup(
    final ProcessContext context,
    final ProcessSession session,
    final FlowFile flowFile,
    final ProcessorLog logger,
    Message message,
    List<Recipient> recipients) {

  final String groupAttrName = context.getProperty(GROUP_ATTRIBUTE_NAME).getValue();
  final String groupSubjectAttrName =
      context.getProperty(GROUP_SUBJECT_ATTRIBUTE_NAME).getValue();
  final String participantsAttrName = context.getProperty(PARTICIPANTS_ATTRIBUTE_NAME).getValue();
  final String referenceAttrName = context.getProperty(REFERENCE_ATTRIBUTE_NAME).getValue();

  String suffix = "";
  if (context.getProperty(GROUP_SUFIX).isSet()) {
    suffix = context.getProperty(GROUP_SUFIX).evaluateAttributeExpressions(flowFile).getValue();
  }

  // Prefer the conversation id as the group name; fall back to a random UUID.
  final String conversationId = message.getHeader().getRelatedConversationId();
  final String groupName =
      (conversationId != null ? conversationId : UUID.randomUUID().toString()) + suffix;

  final String headerSubject = message.getHeader().getSubject();
  final String groupSubject = headerSubject != null ? headerSubject : "Group Message";

  // One generated reference per recipient.
  final Map<String, String> generatedReferences = new HashMap<>();
  for (final Recipient recipient : recipients) {
    generatedReferences.put(recipient.getRecipientId(), UUID.randomUUID().toString());
  }

  // Convert the recipients into a JSON array of {"participant": <address>} objects.
  final JsonArray participants = new JsonArray();
  for (final Recipient recipient : recipients) {
    final JsonObject entry = new JsonObject();
    entry.addProperty(
        "participant", recipient.getDeliveryAddress().getPhysicalAddress().getAddress());
    participants.add(entry);
  }

  FlowFile clone = session.clone(flowFile);
  session.getProvenanceReporter().clone(flowFile, clone);
  clone = session.putAttribute(clone, groupAttrName, groupName);
  clone = session.putAttribute(clone, groupSubjectAttrName, groupSubject);
  clone =
      session.putAttribute(
          clone,
          referenceAttrName,
          generatedReferences.values().stream().collect(Collectors.joining(",")));
  clone = session.putAttribute(clone, participantsAttrName, new Gson().toJson(participants));
  session.getProvenanceReporter().modifyAttributes(clone);

  logger.debug("Routing message {} to {}.", new Object[] {clone, REL_DYNAMIC_GROUP});
  session.transfer(clone, REL_DYNAMIC_GROUP);
  session.getProvenanceReporter().route(clone, REL_DYNAMIC_GROUP);

  return generatedReferences;
}
/**
 * Deserializes the FlowFile content into a MessageWrapper, resolves the sender via the UCS
 * controller service, partitions recipients into pre-established groups vs. individuals, and
 * dispatches via sendMessagesToPreStablishedGroups / sendDirectMessage /
 * sendMessageToDynamicGroup. The original FlowFile is removed after cloning; generated
 * references are persisted when the message requests a receipt notification.
 *
 * <p>NOTE(review): part of this method's source appears redacted/corrupted ("******" runs);
 * those tokens are preserved verbatim below and do not compile as-is.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
  FlowFile flowFile = session.get();
  if (flowFile == null) {
    return;
  }

  final ProcessorLog logger = getLogger();
  final ObjectHolder<Throwable> errorHolder = new ObjectHolder<>(null);
  final ObjectHolder<MessageWrapper> messageWrapperHolder = new ObjectHolder<>(null);

  // Deserialize the content; failures are captured in errorHolder rather than thrown out of
  // the read callback, so routing decisions happen after the session read completes.
  session.read(
      flowFile,
      (final InputStream rawIn) -> {
        try {
          messageWrapperHolder.set(MessageSerializer.deserializeMessageWrapper(rawIn));
        } catch (MessageSerializationException ex) {
          errorHolder.set(
              new RuntimeException(
                  "Error deserializing FlowFile content into a MessageWrapper instance. Routing to FAILURE",
                  ex));
        }
      });

  if (errorHolder.get() != null) {
    // NOTE(review): operator precedence makes the condition below evaluate
    // ("Error in message deserialization: " + cause) != null, which is always true — the
    // null-check almost certainly needs its own parentheses. Flagged only; code unchanged.
    UCSCreateException.routeFlowFileToException(
        context,
        session,
        logger,
        flowFile,
        REL_FAILURE,
        null,
        "Error in message deserialization: " + errorHolder.get().getCause() != null
            ? errorHolder.get().getCause().getMessage()
            : errorHolder.get().getMessage(),
        ExceptionType.InvalidMessage,
        null,
        null);
    return;
  }

  Message message = messageWrapperHolder.get().getMessage();

  // resolve the sender. We couldn't resolved it before because we needed
  // the specific serviceId.
  UCSController ucsService =
      context.getProperty(UCS_CONTROLLER_SERVICE).asControllerService(UCSController.class);
  UserContactInfo uci =
      ucsService.resolveUserContactInfo(
          message.getHeader().getSender().getPhysicalAddress().getAddress());

  if (uci == null) {
    // NOTE(review): the remainder of this failure branch (and apparently an additional
    // "unknown service" branch plus a comment about "GROUP:"-prefixed addresses) was
    // redacted in the original source; tokens preserved verbatim.
    UCSCreateException.routeFlowFileToException(
        context, session, logger, flowFile, REL_FAILURE, null,
        "Unknown User: "******"Unknown Service " + context.getProperty(SERVICE_ID).getValue() + " for User: "******"GROUP:").

  // Partition recipients addressed to this service: TRUE = addresses starting with "GROUP:"
  // (pre-established groups), FALSE = individual addresses.
  Map<Boolean, List<Recipient>> chatRecipients =
      message
          .getHeader()
          .getRecipientsList()
          .stream()
          .filter(
              r ->
                  r.getDeliveryAddress() != null
                      && r.getDeliveryAddress().getPhysicalAddress() != null)
          .filter(
              r ->
                  context
                      .getProperty(SERVICE_ID)
                      .getValue()
                      .equals(r.getDeliveryAddress().getPhysicalAddress().getServiceId()))
          .collect(
              Collectors.groupingBy(
                  r ->
                      r.getDeliveryAddress()
                          .getPhysicalAddress()
                          .getAddress()
                          .startsWith("GROUP:")));

  Map<String, String> generatedReferences = new HashMap<>();

  if (chatRecipients.containsKey(Boolean.TRUE)) {
    generatedReferences.putAll(
        this.sendMessagesToPreStablishedGroups(
            context, session, flowFile, logger, chatRecipients.get(Boolean.TRUE)));
  }

  if (chatRecipients.containsKey(Boolean.FALSE)) {
    // A single individual recipient is a direct message; multiple form a dynamic group.
    List<Recipient> recipients = chatRecipients.get(Boolean.FALSE);
    if (recipients.size() == 1) {
      generatedReferences.putAll(
          this.sendDirectMessage(context, session, flowFile, logger, message, recipients.get(0)));
    } else {
      generatedReferences.putAll(
          this.sendMessageToDynamicGroup(
              context, session, flowFile, logger, message, recipients));
    }
  }

  // The dispatch helpers transferred clones; the original is no longer needed.
  logger.debug("Removing original FlowFile");
  session.remove(flowFile);

  // keep track of the generated references
  // TODO: is this check correct/enough?
  if (message.getHeader().isReceiptNotification()) {
    logger.debug(
        "The message has ReceiptNotification flag enabled -> We are persisting its references.");
    generatedReferences
        .entrySet()
        .stream()
        .forEach(
            (gr) -> {
              ucsService.saveMessageReference(message, gr.getKey(), gr.getValue());
            });
  } else {
    logger.debug(
        "The message doesn't have ReceiptNotification flag enabled -> We are not persisting its references.");
  }
}
/**
 * Probes the destination with an HTTP HEAD to discover which FlowFile packaging formats
 * (V3/V2/V1), gzip support, and protocol version it advertises.
 *
 * <p>A 405 (Method Not Allowed) is treated as a legacy endpoint that accepts FlowFile V1
 * only; a 200 is parsed from its Accept / Accept-Encoding / protocol-version headers; any
 * other status yields an "accepts nothing" result with a warning.
 *
 * @param client        the HTTP client to probe with
 * @param uri           the destination URI
 * @param logger        processor logger
 * @param transactionId transaction id propagated in the HEAD request header
 * @return the destination's advertised capabilities
 * @throws IOException if the HEAD request fails at the transport level
 */
private DestinationAccepts getDestinationAcceptance(
    final HttpClient client,
    final String uri,
    final ProcessorLog logger,
    final String transactionId)
    throws IOException {
  final HttpHead head = new HttpHead(uri);
  head.addHeader(TRANSACTION_ID_HEADER, transactionId);
  final HttpResponse response = client.execute(head);

  final int statusCode = response.getStatusLine().getStatusCode();
  if (statusCode == Status.METHOD_NOT_ALLOWED.getStatusCode()) {
    // we assume that the destination can support FlowFile v1 always.
    return new DestinationAccepts(false, false, true, false, null);
  } else if (statusCode == Status.OK.getStatusCode()) {
    boolean acceptsFlowFileV3 = false;
    boolean acceptsFlowFileV2 = false;
    boolean acceptsFlowFileV1 = true;
    boolean acceptsGzip = false;
    Integer protocolVersion = null;

    Header[] headers = response.getHeaders(ACCEPT);
    if (headers != null) {
      // The Accept header value is a comma-separated list of content types.
      for (final Header header : headers) {
        for (final String accepted : header.getValue().split(",")) {
          final String trimmed = accepted.trim();
          if (trimmed.equals(APPLICATION_FLOW_FILE_V3)) {
            acceptsFlowFileV3 = true;
          } else if (trimmed.equals(APPLICATION_FLOW_FILE_V2)) {
            acceptsFlowFileV2 = true;
          } else {
            // we assume that the destination accepts FlowFile V1 because legacy versions
            // of NiFi that accepted V1 did not use an Accept header to indicate it... or
            // any other header. So the best thing we can do is just assume that V1 is
            // accepted, if we're going to send as FlowFile.
            acceptsFlowFileV1 = true;
          }
        }
      }
    }

    final Header destinationVersion = response.getFirstHeader(PROTOCOL_VERSION_HEADER);
    if (destinationVersion != null) {
      try {
        protocolVersion = Integer.valueOf(destinationVersion.getValue());
      } catch (final NumberFormatException e) {
        // nothing to do here really.... it's an invalid value, so treat the same as if not
        // specified
      }
    }

    // Log which (single, highest-priority) packaging format would be used.
    if (acceptsFlowFileV3) {
      logger.debug(
          "Connection to URI "
              + uri
              + " will be using Content Type "
              + APPLICATION_FLOW_FILE_V3
              + " if sending data as FlowFile");
    } else if (acceptsFlowFileV2) {
      logger.debug(
          "Connection to URI "
              + uri
              + " will be using Content Type "
              + APPLICATION_FLOW_FILE_V2
              + " if sending data as FlowFile");
    } else if (acceptsFlowFileV1) {
      logger.debug(
          "Connection to URI "
              + uri
              + " will be using Content Type "
              + APPLICATION_FLOW_FILE_V1
              + " if sending data as FlowFile");
    }

    headers = response.getHeaders(ACCEPT_ENCODING);
    if (headers != null) {
      for (final Header header : headers) {
        for (final String accepted : header.getValue().split(",")) {
          if (accepted.equalsIgnoreCase("gzip")) {
            acceptsGzip = true;
          }
        }
      }
    }

    if (acceptsGzip) {
      logger.debug(
          "Connection to URI " + uri + " indicates that inline GZIP compression is supported");
    } else {
      logger.debug(
          "Connection to URI "
              + uri
              + " indicates that it does NOT support inline GZIP compression");
    }

    return new DestinationAccepts(
        acceptsFlowFileV3, acceptsFlowFileV2, acceptsFlowFileV1, acceptsGzip, protocolVersion);
  } else {
    logger.warn(
        "Unable to communicate with destination; when attempting to perform an HTTP HEAD, got unexpected response code of "
            + statusCode
            + ": "
            + response.getStatusLine().getReasonPhrase());
    return new DestinationAccepts(false, false, false, false, null);
  }
}
/**
 * POSTs one or more FlowFiles to the configured URL.
 *
 * <p>Flow: (1) batch FlowFiles sharing the same (EL-evaluated) URL, lazily building the HTTP
 * client and probing the destination's FlowFile/gzip capabilities on the first one;
 * (2) stream the batch as the POST entity, optionally packaged (V3/V2/V1) and/or gzipped;
 * (3) on a 303 See-Other "hold" response, DELETE the hold URI to confirm receipt, retrying
 * until success or the processor is stopped. Failures penalize and route to REL_FAILURE;
 * successes route to REL_SUCCESS with provenance SEND events.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
  final boolean sendAsFlowFile = context.getProperty(SEND_AS_FLOWFILE).asBoolean();
  final int compressionLevel = context.getProperty(COMPRESSION_LEVEL).asInteger();
  final String userAgent = context.getProperty(USER_AGENT).getValue();

  final RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
  requestConfigBuilder.setConnectionRequestTimeout(
      context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
  requestConfigBuilder.setConnectTimeout(
      context.getProperty(CONNECTION_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
  requestConfigBuilder.setRedirectsEnabled(false);
  requestConfigBuilder.setSocketTimeout(
      context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
  final RequestConfig requestConfig = requestConfigBuilder.build();

  final StreamThrottler throttler = throttlerRef.get();
  final ProcessorLog logger = getLogger();

  final Double maxBatchBytes = context.getProperty(MAX_BATCH_SIZE).asDataSize(DataUnit.B);
  String lastUrl = null;
  long bytesToSend = 0L;

  final List<FlowFile> toSend = new ArrayList<>();
  DestinationAccepts destinationAccepts = null;
  CloseableHttpClient client = null;
  final String transactionId = UUID.randomUUID().toString();

  // Holds the remote peer's certificate DN (when TLS is used) for provenance reporting.
  final ObjectHolder<String> dnHolder = new ObjectHolder<>("none");

  // Batch-building loop: accumulate FlowFiles that target the same URL, up to the
  // configured max batch size (only when streaming FlowFile V3/V2 packaging is possible).
  while (true) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
      break;
    }

    final String url = context.getProperty(URL).evaluateAttributeExpressions(flowFile).getValue();
    try {
      // Validate the substituted URL before attempting to use it.
      new java.net.URL(url);
    } catch (final MalformedURLException e) {
      logger.error(
          "After substituting attribute values for {}, URL is {}; this is not a valid URL, so routing to failure",
          new Object[] {flowFile, url});
      flowFile = session.penalize(flowFile);
      session.transfer(flowFile, REL_FAILURE);
      continue;
    }

    // If this FlowFile doesn't have the same url, throw it back on the queue and stop grabbing
    // FlowFiles
    if (lastUrl != null && !lastUrl.equals(url)) {
      session.transfer(flowFile);
      break;
    }

    lastUrl = url;
    toSend.add(flowFile);

    // Lazily build the HTTP client and discover destination capabilities on the first
    // FlowFile of the batch.
    if (client == null || destinationAccepts == null) {
      final Config config = getConfig(url, context);
      final HttpClientConnectionManager conMan = config.getConnectionManager();

      final HttpClientBuilder clientBuilder = HttpClientBuilder.create();
      clientBuilder.setConnectionManager(conMan);
      clientBuilder.setUserAgent(userAgent);
      // Response interceptor captures the remote TLS certificate DN for provenance.
      clientBuilder.addInterceptorFirst(
          new HttpResponseInterceptor() {
            @Override
            public void process(final HttpResponse response, final HttpContext httpContext)
                throws HttpException, IOException {
              HttpCoreContext coreContext = HttpCoreContext.adapt(httpContext);
              ManagedHttpClientConnection conn =
                  coreContext.getConnection(ManagedHttpClientConnection.class);
              if (!conn.isOpen()) {
                return;
              }

              SSLSession sslSession = conn.getSSLSession();

              if (sslSession != null) {
                final X509Certificate[] certChain = sslSession.getPeerCertificateChain();
                if (certChain == null || certChain.length == 0) {
                  throw new SSLPeerUnverifiedException("No certificates found");
                }

                final X509Certificate cert = certChain[0];
                dnHolder.set(cert.getSubjectDN().getName().trim());
              }
            }
          });

      clientBuilder.disableAutomaticRetries();
      clientBuilder.disableContentCompression();

      final String username = context.getProperty(USERNAME).getValue();
      final String password = context.getProperty(PASSWORD).getValue();
      // set the credentials if appropriate
      if (username != null) {
        final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
        if (password == null) {
          credentialsProvider.setCredentials(
              AuthScope.ANY, new UsernamePasswordCredentials(username));
        } else {
          credentialsProvider.setCredentials(
              AuthScope.ANY, new UsernamePasswordCredentials(username, password));
        }
        clientBuilder.setDefaultCredentialsProvider(credentialsProvider);
      }

      client = clientBuilder.build();

      // determine whether or not destination accepts flowfile/gzip
      destinationAccepts = config.getDestinationAccepts();
      if (destinationAccepts == null) {
        try {
          if (sendAsFlowFile) {
            destinationAccepts = getDestinationAcceptance(client, url, getLogger(), transactionId);
          } else {
            destinationAccepts = new DestinationAccepts(false, false, false, false, null);
          }

          // Cache the probe result on the per-URL config so it isn't repeated every trigger.
          config.setDestinationAccepts(destinationAccepts);
        } catch (IOException e) {
          flowFile = session.penalize(flowFile);
          session.transfer(flowFile, REL_FAILURE);
          logger.error(
              "Unable to communicate with destination {} to determine whether or not it can accept "
                  + "flowfiles/gzip; routing {} to failure due to {}",
              new Object[] {url, flowFile, e});
          context.yield();
          return;
        }
      }
    }

    // if we are not sending as flowfile, or if the destination doesn't accept V3 or V2
    // (streaming) format,
    // then only use a single FlowFile
    if (!sendAsFlowFile
        || (!destinationAccepts.isFlowFileV3Accepted()
            && !destinationAccepts.isFlowFileV2Accepted())) {
      break;
    }

    bytesToSend += flowFile.getSize();
    if (bytesToSend > maxBatchBytes.longValue()) {
      break;
    }
  }

  if (toSend.isEmpty()) {
    return;
  }

  final String url = lastUrl;
  final HttpPost post = new HttpPost(url);
  final List<FlowFile> flowFileList = toSend;
  final DestinationAccepts accepts = destinationAccepts;
  // A missing protocol-version header marks a legacy NiFi destination (see attribute copy
  // of nf.file.name / nf.file.path below).
  final boolean isDestinationLegacyNiFi = accepts.getProtocolVersion() == null;

  // The entity streams each FlowFile's content (optionally packaged and/or gzipped,
  // optionally throttled) directly into the request body.
  final EntityTemplate entity =
      new EntityTemplate(
          new ContentProducer() {
            @Override
            public void writeTo(final OutputStream rawOut) throws IOException {
              final OutputStream throttled =
                  (throttler == null) ? rawOut : throttler.newThrottledOutputStream(rawOut);
              OutputStream wrappedOut = new BufferedOutputStream(throttled);
              if (compressionLevel > 0 && accepts.isGzipAccepted()) {
                wrappedOut = new GZIPOutputStream(wrappedOut, compressionLevel);
              }

              try (final OutputStream out = wrappedOut) {
                for (final FlowFile flowFile : flowFileList) {
                  session.read(
                      flowFile,
                      new InputStreamCallback() {
                        @Override
                        public void process(final InputStream rawIn) throws IOException {
                          try (final InputStream in = new BufferedInputStream(rawIn)) {

                            // Choose the newest packaging format the destination accepts.
                            FlowFilePackager packager = null;
                            if (!sendAsFlowFile) {
                              packager = null;
                            } else if (accepts.isFlowFileV3Accepted()) {
                              packager = new FlowFilePackagerV3();
                            } else if (accepts.isFlowFileV2Accepted()) {
                              packager = new FlowFilePackagerV2();
                            } else if (accepts.isFlowFileV1Accepted()) {
                              packager = new FlowFilePackagerV1();
                            }

                            // if none of the above conditions is met, we should never get here,
                            // because
                            // we will have already verified that at least 1 of the FlowFile
                            // packaging
                            // formats is acceptable if sending as FlowFile.
                            if (packager == null) {
                              StreamUtils.copy(in, out);
                            } else {
                              final Map<String, String> flowFileAttributes;
                              if (isDestinationLegacyNiFi) {
                                // Old versions of NiFi expect nf.file.name and nf.file.path to
                                // indicate filename & path;
                                // in order to maintain backward compatibility, we copy the
                                // filename & path to those attribute keys.
                                flowFileAttributes = new HashMap<>(flowFile.getAttributes());
                                flowFileAttributes.put(
                                    "nf.file.name",
                                    flowFile.getAttribute(CoreAttributes.FILENAME.key()));
                                flowFileAttributes.put(
                                    "nf.file.path",
                                    flowFile.getAttribute(CoreAttributes.PATH.key()));
                              } else {
                                flowFileAttributes = flowFile.getAttributes();
                              }

                              packager.packageFlowFile(
                                  in, out, flowFileAttributes, flowFile.getSize());
                            }
                          }
                        }
                      });
                }

                out.flush();
              }
            }
          });

  entity.setChunked(context.getProperty(CHUNKED_ENCODING).asBoolean());
  post.setEntity(entity);
  post.setConfig(requestConfig);

  // Pick the Content-Type: packaging format when sending as FlowFile, otherwise the first
  // FlowFile's mime.type attribute (or the default).
  final String contentType;
  if (sendAsFlowFile) {
    if (accepts.isFlowFileV3Accepted()) {
      contentType = APPLICATION_FLOW_FILE_V3;
    } else if (accepts.isFlowFileV2Accepted()) {
      contentType = APPLICATION_FLOW_FILE_V2;
    } else if (accepts.isFlowFileV1Accepted()) {
      contentType = APPLICATION_FLOW_FILE_V1;
    } else {
      logger.error(
          "Cannot send data to {} because the destination does not accept FlowFiles and this processor is "
              + "configured to deliver FlowFiles; rolling back session",
          new Object[] {url});
      session.rollback();
      context.yield();
      return;
    }
  } else {
    final String attributeValue = toSend.get(0).getAttribute(CoreAttributes.MIME_TYPE.key());
    contentType = (attributeValue == null) ? DEFAULT_CONTENT_TYPE : attributeValue;
  }

  // Optionally mirror matching FlowFile attributes as HTTP headers (raw, single-file sends
  // only).
  final String attributeHeaderRegex = context.getProperty(ATTRIBUTES_AS_HEADERS_REGEX).getValue();
  if (attributeHeaderRegex != null && !sendAsFlowFile && flowFileList.size() == 1) {
    final Pattern pattern = Pattern.compile(attributeHeaderRegex);

    final Map<String, String> attributes = flowFileList.get(0).getAttributes();
    for (final Map.Entry<String, String> entry : attributes.entrySet()) {
      final String key = entry.getKey();
      if (pattern.matcher(key).matches()) {
        post.setHeader(entry.getKey(), entry.getValue());
      }
    }
  }

  post.setHeader(CONTENT_TYPE, contentType);
  post.setHeader(FLOWFILE_CONFIRMATION_HEADER, "true");
  post.setHeader(PROTOCOL_VERSION_HEADER, PROTOCOL_VERSION);
  post.setHeader(TRANSACTION_ID_HEADER, transactionId);
  if (compressionLevel > 0 && accepts.isGzipAccepted()) {
    post.setHeader(GZIPPED_HEADER, "true");
  }

  // Do the actual POST
  final String flowFileDescription =
      toSend.size() <= 10 ? toSend.toString() : toSend.size() + " FlowFiles";

  final String uploadDataRate;
  final long uploadMillis;
  CloseableHttpResponse response = null;
  try {
    final StopWatch stopWatch = new StopWatch(true);
    response = client.execute(post);

    // consume input stream entirely, ignoring its contents. If we
    // don't do this, the Connection will not be returned to the pool
    EntityUtils.consume(response.getEntity());
    stopWatch.stop();
    uploadDataRate = stopWatch.calculateDataRate(bytesToSend);
    uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
  } catch (final IOException e) {
    logger.error(
        "Failed to Post {} due to {}; transferring to failure",
        new Object[] {flowFileDescription, e});
    context.yield();
    for (FlowFile flowFile : toSend) {
      flowFile = session.penalize(flowFile);
      session.transfer(flowFile, REL_FAILURE);
    }
    return;
  } finally {
    if (response != null) {
      try {
        response.close();
      } catch (IOException e) {
        getLogger().warn("Failed to close HTTP Response due to {}", new Object[] {e});
      }
    }
  }

  // If we get a 'SEE OTHER' status code and an HTTP header that indicates that the intent
  // of the Location URI is a flowfile hold, we will store this holdUri. This prevents us
  // from posting to some other webservice and then attempting to delete some resource to which
  // we are redirected
  // (Note: the status line is read after response.close(); it was materialized above.)
  final int responseCode = response.getStatusLine().getStatusCode();
  final String responseReason = response.getStatusLine().getReasonPhrase();
  String holdUri = null;
  if (responseCode == HttpServletResponse.SC_SEE_OTHER) {
    final Header locationUriHeader = response.getFirstHeader(LOCATION_URI_INTENT_NAME);
    if (locationUriHeader != null) {
      if (LOCATION_URI_INTENT_VALUE.equals(locationUriHeader.getValue())) {
        final Header holdUriHeader = response.getFirstHeader(LOCATION_HEADER_NAME);
        if (holdUriHeader != null) {
          holdUri = holdUriHeader.getValue();
        }
      }
    }

    if (holdUri == null) {
      for (FlowFile flowFile : toSend) {
        flowFile = session.penalize(flowFile);
        logger.error(
            "Failed to Post {} to {}: sent content and received status code {}:{} but no Hold URI",
            new Object[] {flowFile, url, responseCode, responseReason});
        session.transfer(flowFile, REL_FAILURE);
      }
      return;
    }
  }

  // No hold URI: the transfer either completed outright or failed outright.
  if (holdUri == null) {
    if (responseCode == HttpServletResponse.SC_SERVICE_UNAVAILABLE) {
      for (FlowFile flowFile : toSend) {
        flowFile = session.penalize(flowFile);
        logger.error(
            "Failed to Post {} to {}: response code was {}:{}; will yield processing, "
                + "since the destination is temporarily unavailable",
            new Object[] {flowFile, url, responseCode, responseReason});
        session.transfer(flowFile, REL_FAILURE);
      }
      context.yield();
      return;
    }

    if (responseCode >= 300) {
      for (FlowFile flowFile : toSend) {
        flowFile = session.penalize(flowFile);
        logger.error(
            "Failed to Post {} to {}: response code was {}:{}",
            new Object[] {flowFile, url, responseCode, responseReason});
        session.transfer(flowFile, REL_FAILURE);
      }
      return;
    }

    logger.info(
        "Successfully Posted {} to {} in {} at a rate of {}",
        new Object[] {
          flowFileDescription,
          url,
          FormatUtils.formatMinutesSeconds(uploadMillis, TimeUnit.MILLISECONDS),
          uploadDataRate
        });

    for (final FlowFile flowFile : toSend) {
      session
          .getProvenanceReporter()
          .send(flowFile, url, "Remote DN=" + dnHolder.get(), uploadMillis, true);
      session.transfer(flowFile, REL_SUCCESS);
    }

    return;
  }

  //
  // the response indicated a Hold URI; delete the Hold.
  //
  // determine the full URI of the Flow File's Hold; Unfortunately, the responses that are
  // returned have
  // changed over the past, so we have to take into account a few different possibilities.
  String fullHoldUri = holdUri;
  if (holdUri.startsWith("/contentListener")) {
    // If the Hold URI that we get starts with /contentListener, it may not really be
    // /contentListener,
    // as this really indicates that it should be whatever we posted to -- if posting directly to
    // the
    // ListenHTTP component, it will be /contentListener, but if posting to a proxy/load balancer,
    // we may
    // be posting to some other URL.
    fullHoldUri = url + holdUri.substring(16);
  } else if (holdUri.startsWith("/")) {
    // URL indicates the full path but not hostname or port; use the same hostname & port that we
    // posted
    // to but use the full path indicated by the response.
    int firstSlash = url.indexOf("/", 8);
    if (firstSlash < 0) {
      firstSlash = url.length();
    }
    final String beforeSlash = url.substring(0, firstSlash);
    fullHoldUri = beforeSlash + holdUri;
  } else if (!holdUri.startsWith("http")) {
    // Absolute URL
    fullHoldUri = url + (url.endsWith("/") ? "" : "/") + holdUri;
  }

  final HttpDelete delete = new HttpDelete(fullHoldUri);
  delete.setHeader(TRANSACTION_ID_HEADER, transactionId);

  // Retry the hold deletion until it succeeds, fails with an HTTP error, or the processor
  // is stopped.
  while (true) {
    try {
      final HttpResponse holdResponse = client.execute(delete);
      EntityUtils.consume(holdResponse.getEntity());
      final int holdStatusCode = holdResponse.getStatusLine().getStatusCode();
      final String holdReason = holdResponse.getStatusLine().getReasonPhrase();
      if (holdStatusCode >= 300) {
        logger.error(
            "Failed to delete Hold that destination placed on {}: got response code {}:{}; routing to failure",
            new Object[] {flowFileDescription, holdStatusCode, holdReason});

        for (FlowFile flowFile : toSend) {
          flowFile = session.penalize(flowFile);
          session.transfer(flowFile, REL_FAILURE);
        }
        return;
      }

      logger.info(
          "Successfully Posted {} to {} in {} milliseconds at a rate of {}",
          new Object[] {flowFileDescription, url, uploadMillis, uploadDataRate});

      for (FlowFile flowFile : toSend) {
        session.getProvenanceReporter().send(flowFile, url);
        session.transfer(flowFile, REL_SUCCESS);
      }
      return;
    } catch (final IOException e) {
      logger.warn(
          "Failed to delete Hold that destination placed on {} due to {}",
          new Object[] {flowFileDescription, e});
    }

    if (!isScheduled()) {
      context.yield();
      logger.warn(
          "Failed to delete Hold that destination placed on {}; Processor has been stopped so routing FlowFile(s) to failure",
          new Object[] {flowFileDescription});
      for (FlowFile flowFile : toSend) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
      }
      return;
    }
  }
}
/**
 * Scans the content of a single FlowFile for any term in the configured dictionary and
 * routes it to 'matched' or 'unmatched'. Reloads the dictionary when the backing file has
 * changed, and lazily builds the search structure on first use.
 *
 * @param context provides the configured dictionary properties
 * @param session used to read, annotate, and transfer the FlowFile
 * @throws ProcessException if the dictionary cannot be read/reloaded
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session)
    throws ProcessException {
  final ProcessorLog logger = getLogger();

  // Reload the dictionary if the backing file changed since the last check.
  final SynchronousFileWatcher fileWatcher = fileWatcherRef.get();
  try {
    if (fileWatcher.checkAndReset()) {
      reloadDictionary(context, true, logger);
    }
  } catch (final IOException e) {
    throw new ProcessException(e);
  }

  // Lazily build the search structure on first use. (The original nested
  // null-check is collapsed into a single short-circuit condition.)
  Search<byte[]> search = searchRef.get();
  try {
    if (search == null && reloadDictionary(context, false, logger)) {
      search = searchRef.get();
    }
  } catch (final IOException e) {
    throw new ProcessException(e);
  }

  if (search == null) {
    return;
  }

  FlowFile flowFile = session.get();
  if (flowFile == null) {
    return;
  }

  final Search<byte[]> finalSearch = search;
  // Holds the first dictionary term found in the content, if any.
  // (The redundant termRef.set(null) after construction has been removed —
  // the holder is already initialized to null.)
  final ObjectHolder<SearchTerm<byte[]>> termRef = new ObjectHolder<>(null);

  session.read(
      flowFile,
      new InputStreamCallback() {
        @Override
        public void process(final InputStream rawIn) throws IOException {
          try (final InputStream in = new BufferedInputStream(rawIn)) {
            // search(in, false): stop at the first match — we only need one term.
            final SearchState<byte[]> searchResult = finalSearch.search(in, false);
            if (searchResult.foundMatch()) {
              termRef.set(searchResult.getResults().keySet().iterator().next());
            }
          }
        }
      });

  final SearchTerm<byte[]> matchingTerm = termRef.get();
  if (matchingTerm == null) {
    logger.info("Routing {} to 'unmatched'", new Object[] {flowFile});
    session.getProvenanceReporter().route(flowFile, REL_NO_MATCH);
    session.transfer(flowFile, REL_NO_MATCH);
  } else {
    final String matchingTermString = matchingTerm.toString(UTF8);
    logger.info(
        "Routing {} to 'matched' because it matched term {}",
        new Object[] {flowFile, matchingTermString});
    // Record the matching term as an attribute before routing.
    flowFile = session.putAttribute(flowFile, MATCH_ATTRIBUTE_KEY, matchingTermString);
    session.getProvenanceReporter().route(flowFile, REL_MATCH);
    session.transfer(flowFile, REL_MATCH);
  }
}
@Override public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException { FlowFile flowFile = session.get(); if (flowFile == null) { return; } final ProcessorLog logger = getLogger(); // cache key is computed from attribute 'CACHE_ENTRY_IDENTIFIER' with expression language // support final String cacheKey = context .getProperty(CACHE_ENTRY_IDENTIFIER) .evaluateAttributeExpressions(flowFile) .getValue(); // if the computed value is null, or empty, we transfer the flow file to failure relationship if (StringUtils.isBlank(cacheKey)) { logger.error( "FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] {flowFile}); flowFile = session.penalize(flowFile); session.transfer(flowFile, REL_FAILURE); return; } // the cache client used to interact with the distributed cache final DistributedMapCacheClient cache = context .getProperty(DISTRIBUTED_CACHE_SERVICE) .asControllerService(DistributedMapCacheClient.class); try { final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue(); long flowFileSize = flowFile.getSize(); // too big flow file if (flowFileSize > maxCacheEntrySize) { logger.warn( "Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] {flowFile, flowFileSize, maxCacheEntrySize}); session.transfer(flowFile, REL_FAILURE); return; } if (flowFileSize == 0) { logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] {flowFile}); session.transfer(flowFile, REL_FAILURE); return; } // get flow file content final ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); session.exportTo(flowFile, byteStream); byte[] cacheValue = byteStream.toByteArray(); final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue(); boolean cached = false; if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) { cache.put(cacheKey, cacheValue, keySerializer, valueSerializer); cached = true; } else if 
(updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) { final byte[] oldValue = cache.getAndPutIfAbsent( cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer); if (oldValue == null) { cached = true; } } // set 'cached' attribute flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached)); if (cached) { session.transfer(flowFile, REL_SUCCESS); } else { session.transfer(flowFile, REL_FAILURE); } } catch (final IOException e) { flowFile = session.penalize(flowFile); session.transfer(flowFile, REL_FAILURE); logger.error( "Unable to communicate with cache when processing {} due to {}", new Object[] {flowFile, e}); } }
public void consume( final ProcessContext context, final ProcessSession session, final WrappedMessageConsumer wrappedConsumer) throws ProcessException { final ProcessorLog logger = getLogger(); final MessageConsumer consumer = wrappedConsumer.getConsumer(); final boolean clientAcknowledge = context.getProperty(ACKNOWLEDGEMENT_MODE).getValue().equalsIgnoreCase(ACK_MODE_CLIENT); final long timeout = context.getProperty(TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS); final boolean addAttributes = context.getProperty(JMS_PROPS_TO_ATTRIBUTES).asBoolean(); final int batchSize = context.getProperty(BATCH_SIZE).asInteger(); final JmsProcessingSummary processingSummary = new JmsProcessingSummary(); final StopWatch stopWatch = new StopWatch(true); for (int i = 0; i < batchSize; i++) { final Message message; try { // If we haven't received a message, wait until one is available. If we have already // received at least one // message, then we are not willing to wait for more to become available, but we are willing // to keep receiving // all messages that are immediately available. 
if (processingSummary.getMessagesReceived() == 0) { message = consumer.receive(timeout); } else { message = consumer.receiveNoWait(); } } catch (final JMSException e) { logger.error("Failed to receive JMS Message due to {}", e); wrappedConsumer.close(logger); break; } if (message == null) { // if no messages, we're done break; } try { processingSummary.add(map2FlowFile(context, session, message, addAttributes, logger)); } catch (Exception e) { logger.error("Failed to receive JMS Message due to {}", e); wrappedConsumer.close(logger); break; } } if (processingSummary.getFlowFilesCreated() == 0) { context.yield(); return; } session.commit(); stopWatch.stop(); if (processingSummary.getFlowFilesCreated() > 0) { final float secs = ((float) stopWatch.getDuration(TimeUnit.MILLISECONDS) / 1000F); float messagesPerSec = ((float) processingSummary.getMessagesReceived()) / secs; final String dataRate = stopWatch.calculateDataRate(processingSummary.getBytesReceived()); logger.info( "Received {} messages in {} milliseconds, at a rate of {} messages/sec or {}", new Object[] { processingSummary.getMessagesReceived(), stopWatch.getDuration(TimeUnit.MILLISECONDS), messagesPerSec, dataRate }); } // if we need to acknowledge the messages, do so now. final Message lastMessage = processingSummary.getLastMessageReceived(); if (clientAcknowledge && lastMessage != null) { try { lastMessage .acknowledge(); // acknowledge all received messages by acknowledging only the last. } catch (final JMSException e) { logger.error( "Failed to acknowledge {} JMS Message(s). This may result in duplicate messages. Reason for failure: {}", new Object[] {processingSummary.getMessagesReceived(), e}); } } }