/**
 * Fetches all of the latest heartbeats and updates the Cluster Coordinator as appropriate, based
 * on the heartbeats received.
 *
 * <p>Visible for testing.
 */
protected synchronized void monitorHeartbeats() {
    final Map<NodeIdentifier, NodeHeartbeat> latestHeartbeats = getLatestHeartbeats();
    if (latestHeartbeats == null || latestHeartbeats.isEmpty()) {
        logger.debug("Received no new heartbeats. Will not disconnect any nodes due to lack of heartbeat");
        return;
    }

    final StopWatch procStopWatch = new StopWatch(true);
    for (final NodeHeartbeat heartbeat : latestHeartbeats.values()) {
        try {
            processHeartbeat(heartbeat);
        } catch (final Exception e) {
            clusterCoordinator.reportEvent(null, Severity.ERROR,
                "Received heartbeat from " + heartbeat.getNodeIdentifier() + " but failed to process heartbeat due to " + e);
            logger.error("Failed to process heartbeat from {} due to {}", heartbeat.getNodeIdentifier(), e.toString());
            logger.error("", e);
        }
    }

    procStopWatch.stop();
    logger.info("Finished processing {} heartbeats in {}", latestHeartbeats.size(), procStopWatch.getDuration());

    // Disconnect any node that hasn't sent a heartbeat in a long time (8 times the heartbeat interval)
    final long maxMillis = heartbeatIntervalMillis * 8;
    final long threshold = System.currentTimeMillis() - maxMillis;
    for (final NodeHeartbeat heartbeat : latestHeartbeats.values()) {
        if (heartbeat.getTimestamp() < threshold) {
            clusterCoordinator.requestNodeDisconnect(heartbeat.getNodeIdentifier(), DisconnectionCode.LACK_OF_HEARTBEAT,
                "Latest heartbeat from Node has expired");

            try {
                removeHeartbeat(heartbeat.getNodeIdentifier());
            } catch (final Exception e) {
                logger.warn("Failed to remove heartbeat for {} due to {}", heartbeat.getNodeIdentifier(), e.toString());
                logger.warn("", e);
            }
        }
    }
}
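// ---------------------------------------------------------------------------------------------
// Illustrative sketch only (not part of the original class): one way monitorHeartbeats() might be
// driven on a fixed schedule. The executor field, the start/stop methods, and the choice of one
// heartbeat interval as the polling delay are assumptions made for this sketch; the original
// excerpt defines only monitorHeartbeats() itself.
// Assumed imports: java.util.concurrent.Executors, java.util.concurrent.ScheduledExecutorService,
// java.util.concurrent.TimeUnit.
// ---------------------------------------------------------------------------------------------
private final ScheduledExecutorService monitorExecutor = Executors.newSingleThreadScheduledExecutor();

public void startHeartbeatMonitoring() {
    monitorExecutor.scheduleWithFixedDelay(this::monitorHeartbeats,
        heartbeatIntervalMillis, heartbeatIntervalMillis, TimeUnit.MILLISECONDS);
}

public void stopHeartbeatMonitoring() {
    monitorExecutor.shutdownNow();
}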
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ProcessorLog logger = getLogger();

    boolean encode = context.getProperty(MODE).getValue().equalsIgnoreCase(ENCODE_MODE);
    String encoding = context.getProperty(ENCODING).getValue();
    StreamCallback encoder = null;

    // Select the encoder/decoder to use
    if (encode) {
        if (encoding.equalsIgnoreCase(BASE64_ENCODING)) {
            encoder = new EncodeBase64();
        } else if (encoding.equalsIgnoreCase(BASE32_ENCODING)) {
            encoder = new EncodeBase32();
        } else if (encoding.equalsIgnoreCase(HEX_ENCODING)) {
            encoder = new EncodeHex();
        }
    } else {
        if (encoding.equalsIgnoreCase(BASE64_ENCODING)) {
            encoder = new DecodeBase64();
        } else if (encoding.equalsIgnoreCase(BASE32_ENCODING)) {
            encoder = new DecodeBase32();
        } else if (encoding.equalsIgnoreCase(HEX_ENCODING)) {
            encoder = new DecodeHex();
        }
    }

    if (encoder == null) {
        logger.warn("Unknown operation: {} {}", new Object[] {encode ? "encode" : "decode", encoding});
        return;
    }

    try {
        final StopWatch stopWatch = new StopWatch(true);
        flowFile = session.write(flowFile, encoder);

        logger.info("Successfully {} {}", new Object[] {encode ? "encoded" : "decoded", flowFile});
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (Exception e) {
        logger.error("Failed to {} {} due to {}", new Object[] {encode ? "encode" : "decode", flowFile, e});
        session.transfer(flowFile, REL_FAILURE);
    }
}
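// ---------------------------------------------------------------------------------------------
// Illustrative sketch only: what a StreamCallback such as the EncodeBase64 referenced above could
// look like if implemented with Apache Commons Codec. The actual encoder/decoder classes are not
// shown in this excerpt, so treat this as an assumption, not the real implementation.
// Assumed imports: org.apache.commons.codec.binary.Base64OutputStream,
// org.apache.nifi.processor.io.StreamCallback, org.apache.nifi.stream.io.StreamUtils,
// java.io.InputStream, java.io.OutputStream, java.io.IOException.
// ---------------------------------------------------------------------------------------------
private static class EncodeBase64Sketch implements StreamCallback {
    @Override
    public void process(final InputStream in, final OutputStream out) throws IOException {
        // Base64OutputStream encodes by default; closing it flushes the final padded block
        try (final Base64OutputStream encoded = new Base64OutputStream(out)) {
            StreamUtils.copy(in, encoded);
        }
    }
}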
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final boolean sendAsFlowFile = context.getProperty(SEND_AS_FLOWFILE).asBoolean();
    final int compressionLevel = context.getProperty(COMPRESSION_LEVEL).asInteger();
    final String userAgent = context.getProperty(USER_AGENT).getValue();

    final RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
    requestConfigBuilder.setConnectionRequestTimeout(context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
    requestConfigBuilder.setConnectTimeout(context.getProperty(CONNECTION_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
    requestConfigBuilder.setRedirectsEnabled(false);
    requestConfigBuilder.setSocketTimeout(context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
    final RequestConfig requestConfig = requestConfigBuilder.build();

    final StreamThrottler throttler = throttlerRef.get();
    final ProcessorLog logger = getLogger();

    final Double maxBatchBytes = context.getProperty(MAX_BATCH_SIZE).asDataSize(DataUnit.B);
    String lastUrl = null;
    long bytesToSend = 0L;

    final List<FlowFile> toSend = new ArrayList<>();
    DestinationAccepts destinationAccepts = null;
    CloseableHttpClient client = null;
    final String transactionId = UUID.randomUUID().toString();

    final ObjectHolder<String> dnHolder = new ObjectHolder<>("none");
    while (true) {
        FlowFile flowFile = session.get();
        if (flowFile == null) {
            break;
        }

        final String url = context.getProperty(URL).evaluateAttributeExpressions(flowFile).getValue();
        try {
            new java.net.URL(url);
        } catch (final MalformedURLException e) {
            logger.error("After substituting attribute values for {}, URL is {}; this is not a valid URL, so routing to failure",
                new Object[] {flowFile, url});
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
            continue;
        }

        // If this FlowFile doesn't have the same url, throw it back on the queue and stop grabbing FlowFiles
        if (lastUrl != null && !lastUrl.equals(url)) {
            session.transfer(flowFile);
            break;
        }

        lastUrl = url;
        toSend.add(flowFile);

        if (client == null || destinationAccepts == null) {
            final Config config = getConfig(url, context);
            final HttpClientConnectionManager conMan = config.getConnectionManager();

            final HttpClientBuilder clientBuilder = HttpClientBuilder.create();
            clientBuilder.setConnectionManager(conMan);
            clientBuilder.setUserAgent(userAgent);
            clientBuilder.addInterceptorFirst(new HttpResponseInterceptor() {
                @Override
                public void process(final HttpResponse response, final HttpContext httpContext) throws HttpException, IOException {
                    HttpCoreContext coreContext = HttpCoreContext.adapt(httpContext);
                    ManagedHttpClientConnection conn = coreContext.getConnection(ManagedHttpClientConnection.class);
                    if (!conn.isOpen()) {
                        return;
                    }

                    SSLSession sslSession = conn.getSSLSession();

                    if (sslSession != null) {
                        final X509Certificate[] certChain = sslSession.getPeerCertificateChain();
                        if (certChain == null || certChain.length == 0) {
                            throw new SSLPeerUnverifiedException("No certificates found");
                        }

                        final X509Certificate cert = certChain[0];
                        dnHolder.set(cert.getSubjectDN().getName().trim());
                    }
                }
            });

            clientBuilder.disableAutomaticRetries();
            clientBuilder.disableContentCompression();

            final String username = context.getProperty(USERNAME).getValue();
            final String password = context.getProperty(PASSWORD).getValue();
            // set the credentials if appropriate
            if (username != null) {
                final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
                if (password == null) {
                    credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username));
                } else {
                    credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username, password));
                }
                clientBuilder.setDefaultCredentialsProvider(credentialsProvider);
            }

            client = clientBuilder.build();

            // determine whether or not destination accepts flowfile/gzip
            destinationAccepts = config.getDestinationAccepts();
            if (destinationAccepts == null) {
                try {
                    if (sendAsFlowFile) {
                        destinationAccepts = getDestinationAcceptance(client, url, getLogger(), transactionId);
                    } else {
                        destinationAccepts = new DestinationAccepts(false, false, false, false, null);
                    }

                    config.setDestinationAccepts(destinationAccepts);
                } catch (IOException e) {
                    flowFile = session.penalize(flowFile);
                    session.transfer(flowFile, REL_FAILURE);
                    logger.error("Unable to communicate with destination {} to determine whether or not it can accept "
                        + "flowfiles/gzip; routing {} to failure due to {}", new Object[] {url, flowFile, e});
                    context.yield();
                    return;
                }
            }
        }

        // if we are not sending as flowfile, or if the destination doesn't accept V3 or V2 (streaming) format,
        // then only use a single FlowFile
        if (!sendAsFlowFile || (!destinationAccepts.isFlowFileV3Accepted() && !destinationAccepts.isFlowFileV2Accepted())) {
            break;
        }

        bytesToSend += flowFile.getSize();
        if (bytesToSend > maxBatchBytes.longValue()) {
            break;
        }
    }

    if (toSend.isEmpty()) {
        return;
    }

    final String url = lastUrl;
    final HttpPost post = new HttpPost(url);
    final List<FlowFile> flowFileList = toSend;
    final DestinationAccepts accepts = destinationAccepts;
    final boolean isDestinationLegacyNiFi = accepts.getProtocolVersion() == null;

    final EntityTemplate entity = new EntityTemplate(new ContentProducer() {
        @Override
        public void writeTo(final OutputStream rawOut) throws IOException {
            final OutputStream throttled = (throttler == null) ? rawOut : throttler.newThrottledOutputStream(rawOut);
            OutputStream wrappedOut = new BufferedOutputStream(throttled);
            if (compressionLevel > 0 && accepts.isGzipAccepted()) {
                wrappedOut = new GZIPOutputStream(wrappedOut, compressionLevel);
            }

            try (final OutputStream out = wrappedOut) {
                for (final FlowFile flowFile : flowFileList) {
                    session.read(flowFile, new InputStreamCallback() {
                        @Override
                        public void process(final InputStream rawIn) throws IOException {
                            try (final InputStream in = new BufferedInputStream(rawIn)) {

                                FlowFilePackager packager = null;
                                if (!sendAsFlowFile) {
                                    packager = null;
                                } else if (accepts.isFlowFileV3Accepted()) {
                                    packager = new FlowFilePackagerV3();
                                } else if (accepts.isFlowFileV2Accepted()) {
                                    packager = new FlowFilePackagerV2();
                                } else if (accepts.isFlowFileV1Accepted()) {
                                    packager = new FlowFilePackagerV1();
                                }

                                // if none of the above conditions is met, we should never get here, because
                                // we will have already verified that at least 1 of the FlowFile packaging
                                // formats is acceptable if sending as FlowFile.
                                if (packager == null) {
                                    StreamUtils.copy(in, out);
                                } else {
                                    final Map<String, String> flowFileAttributes;
                                    if (isDestinationLegacyNiFi) {
                                        // Old versions of NiFi expect nf.file.name and nf.file.path to indicate filename & path;
                                        // in order to maintain backward compatibility, we copy the filename & path to those attribute keys.
                                        flowFileAttributes = new HashMap<>(flowFile.getAttributes());
                                        flowFileAttributes.put("nf.file.name", flowFile.getAttribute(CoreAttributes.FILENAME.key()));
                                        flowFileAttributes.put("nf.file.path", flowFile.getAttribute(CoreAttributes.PATH.key()));
                                    } else {
                                        flowFileAttributes = flowFile.getAttributes();
                                    }

                                    packager.packageFlowFile(in, out, flowFileAttributes, flowFile.getSize());
                                }
                            }
                        }
                    });
                }

                out.flush();
            }
        }
    });

    entity.setChunked(context.getProperty(CHUNKED_ENCODING).asBoolean());
    post.setEntity(entity);
    post.setConfig(requestConfig);

    final String contentType;
    if (sendAsFlowFile) {
        if (accepts.isFlowFileV3Accepted()) {
            contentType = APPLICATION_FLOW_FILE_V3;
        } else if (accepts.isFlowFileV2Accepted()) {
            contentType = APPLICATION_FLOW_FILE_V2;
        } else if (accepts.isFlowFileV1Accepted()) {
            contentType = APPLICATION_FLOW_FILE_V1;
        } else {
            logger.error("Cannot send data to {} because the destination does not accept FlowFiles and this processor is "
                + "configured to deliver FlowFiles; rolling back session", new Object[] {url});
            session.rollback();
            context.yield();
            return;
        }
    } else {
        final String attributeValue = toSend.get(0).getAttribute(CoreAttributes.MIME_TYPE.key());
        contentType = (attributeValue == null) ? DEFAULT_CONTENT_TYPE : attributeValue;
    }

    final String attributeHeaderRegex = context.getProperty(ATTRIBUTES_AS_HEADERS_REGEX).getValue();
    if (attributeHeaderRegex != null && !sendAsFlowFile && flowFileList.size() == 1) {
        final Pattern pattern = Pattern.compile(attributeHeaderRegex);

        final Map<String, String> attributes = flowFileList.get(0).getAttributes();
        for (final Map.Entry<String, String> entry : attributes.entrySet()) {
            final String key = entry.getKey();
            if (pattern.matcher(key).matches()) {
                post.setHeader(entry.getKey(), entry.getValue());
            }
        }
    }

    post.setHeader(CONTENT_TYPE, contentType);
    post.setHeader(FLOWFILE_CONFIRMATION_HEADER, "true");
    post.setHeader(PROTOCOL_VERSION_HEADER, PROTOCOL_VERSION);
    post.setHeader(TRANSACTION_ID_HEADER, transactionId);
    if (compressionLevel > 0 && accepts.isGzipAccepted()) {
        post.setHeader(GZIPPED_HEADER, "true");
    }

    // Do the actual POST
    final String flowFileDescription = toSend.size() <= 10 ? toSend.toString() : toSend.size() + " FlowFiles";

    final String uploadDataRate;
    final long uploadMillis;
    CloseableHttpResponse response = null;
    try {
        final StopWatch stopWatch = new StopWatch(true);
        response = client.execute(post);

        // consume input stream entirely, ignoring its contents. If we
        // don't do this, the Connection will not be returned to the pool
        EntityUtils.consume(response.getEntity());
        stopWatch.stop();
        uploadDataRate = stopWatch.calculateDataRate(bytesToSend);
        uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
    } catch (final IOException e) {
        logger.error("Failed to Post {} due to {}; transferring to failure", new Object[] {flowFileDescription, e});
        context.yield();
        for (FlowFile flowFile : toSend) {
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
        return;
    } finally {
        if (response != null) {
            try {
                response.close();
            } catch (IOException e) {
                getLogger().warn("Failed to close HTTP Response due to {}", new Object[] {e});
            }
        }
    }

    // If we get a 'SEE OTHER' status code and an HTTP header that indicates that the intent
    // of the Location URI is a flowfile hold, we will store this holdUri. This prevents us
    // from posting to some other webservice and then attempting to delete some resource to which
    // we are redirected
    final int responseCode = response.getStatusLine().getStatusCode();
    final String responseReason = response.getStatusLine().getReasonPhrase();
    String holdUri = null;
    if (responseCode == HttpServletResponse.SC_SEE_OTHER) {
        final Header locationUriHeader = response.getFirstHeader(LOCATION_URI_INTENT_NAME);
        if (locationUriHeader != null) {
            if (LOCATION_URI_INTENT_VALUE.equals(locationUriHeader.getValue())) {
                final Header holdUriHeader = response.getFirstHeader(LOCATION_HEADER_NAME);
                if (holdUriHeader != null) {
                    holdUri = holdUriHeader.getValue();
                }
            }
        }

        if (holdUri == null) {
            for (FlowFile flowFile : toSend) {
                flowFile = session.penalize(flowFile);
                logger.error("Failed to Post {} to {}: sent content and received status code {}:{} but no Hold URI",
                    new Object[] {flowFile, url, responseCode, responseReason});
                session.transfer(flowFile, REL_FAILURE);
            }
            return;
        }
    }

    if (holdUri == null) {
        if (responseCode == HttpServletResponse.SC_SERVICE_UNAVAILABLE) {
            for (FlowFile flowFile : toSend) {
                flowFile = session.penalize(flowFile);
                logger.error("Failed to Post {} to {}: response code was {}:{}; will yield processing, "
                    + "since the destination is temporarily unavailable", new Object[] {flowFile, url, responseCode, responseReason});
                session.transfer(flowFile, REL_FAILURE);
            }
            context.yield();
            return;
        }

        if (responseCode >= 300) {
            for (FlowFile flowFile : toSend) {
                flowFile = session.penalize(flowFile);
                logger.error("Failed to Post {} to {}: response code was {}:{}",
                    new Object[] {flowFile, url, responseCode, responseReason});
                session.transfer(flowFile, REL_FAILURE);
            }
            return;
        }

        logger.info("Successfully Posted {} to {} in {} at a rate of {}", new Object[] {
            flowFileDescription, url, FormatUtils.formatMinutesSeconds(uploadMillis, TimeUnit.MILLISECONDS), uploadDataRate});

        for (final FlowFile flowFile : toSend) {
            session.getProvenanceReporter().send(flowFile, url, "Remote DN=" + dnHolder.get(), uploadMillis, true);
            session.transfer(flowFile, REL_SUCCESS);
        }
        return;
    }

    //
    // the response indicated a Hold URI; delete the Hold.
    //

    // determine the full URI of the FlowFile's Hold; unfortunately, the responses that are returned have
    // changed over time, so we have to take into account a few different possibilities.
    String fullHoldUri = holdUri;
    if (holdUri.startsWith("/contentListener")) {
        // If the Hold URI that we get starts with /contentListener, it may not really be /contentListener,
        // as this really indicates that it should be whatever we posted to -- if posting directly to the
        // ListenHTTP component, it will be /contentListener, but if posting to a proxy/load balancer, we may
        // be posting to some other URL.
        fullHoldUri = url + holdUri.substring(16);
    } else if (holdUri.startsWith("/")) {
        // URL indicates the full path but not hostname or port; use the same hostname & port that we posted
        // to but use the full path indicated by the response.
        int firstSlash = url.indexOf("/", 8);
        if (firstSlash < 0) {
            firstSlash = url.length();
        }
        final String beforeSlash = url.substring(0, firstSlash);
        fullHoldUri = beforeSlash + holdUri;
    } else if (!holdUri.startsWith("http")) {
        // relative URL with no leading slash; resolve it against the URL that we posted to
        fullHoldUri = url + (url.endsWith("/") ? "" : "/") + holdUri;
    }
"" : "/") + holdUri; } final HttpDelete delete = new HttpDelete(fullHoldUri); delete.setHeader(TRANSACTION_ID_HEADER, transactionId); while (true) { try { final HttpResponse holdResponse = client.execute(delete); EntityUtils.consume(holdResponse.getEntity()); final int holdStatusCode = holdResponse.getStatusLine().getStatusCode(); final String holdReason = holdResponse.getStatusLine().getReasonPhrase(); if (holdStatusCode >= 300) { logger.error( "Failed to delete Hold that destination placed on {}: got response code {}:{}; routing to failure", new Object[] {flowFileDescription, holdStatusCode, holdReason}); for (FlowFile flowFile : toSend) { flowFile = session.penalize(flowFile); session.transfer(flowFile, REL_FAILURE); } return; } logger.info( "Successfully Posted {} to {} in {} milliseconds at a rate of {}", new Object[] {flowFileDescription, url, uploadMillis, uploadDataRate}); for (FlowFile flowFile : toSend) { session.getProvenanceReporter().send(flowFile, url); session.transfer(flowFile, REL_SUCCESS); } return; } catch (final IOException e) { logger.warn( "Failed to delete Hold that destination placed on {} due to {}", new Object[] {flowFileDescription, e}); } if (!isScheduled()) { context.yield(); logger.warn( "Failed to delete Hold that destination placed on {}; Processor has been stopped so routing FlowFile(s) to failure", new Object[] {flowFileDescription}); for (FlowFile flowFile : toSend) { flowFile = session.penalize(flowFile); session.transfer(flowFile, REL_FAILURE); } return; } } }
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final BlockingQueue<String> partitionIds = this.partitionNames;
    final String partitionId = partitionIds.poll();
    if (partitionId == null) {
        getLogger().debug("No partitions available");
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    try {
        final Iterable<EventData> receivedEvents = receiveEvents(context, partitionId);
        if (receivedEvents == null) {
            return;
        }

        for (final EventData eventData : receivedEvents) {
            if (null != eventData) {
                final Map<String, String> attributes = new HashMap<>();
                FlowFile flowFile = session.create();
                EventData.SystemProperties systemProperties = eventData.getSystemProperties();

                if (null != systemProperties) {
                    attributes.put("eventhub.enqueued.timestamp", String.valueOf(eventData.getSystemProperties().getEnqueuedTime()));
                    attributes.put("eventhub.offset", eventData.getSystemProperties().getOffset());
                    attributes.put("eventhub.sequence", String.valueOf(eventData.getSystemProperties().getSequenceNumber()));
                }

                attributes.put("eventhub.name", context.getProperty(EVENT_HUB_NAME).getValue());
                attributes.put("eventhub.partition", partitionId);

                flowFile = session.putAllAttributes(flowFile, attributes);
                flowFile = session.write(flowFile, out -> {
                    out.write(eventData.getBody());
                });

                session.transfer(flowFile, REL_SUCCESS);

                final String namespace = context.getProperty(NAMESPACE).getValue();
                final String eventHubName = context.getProperty(EVENT_HUB_NAME).getValue();
                final String consumerGroup = context.getProperty(CONSUMER_GROUP).getValue();
                final String transitUri = "amqps://" + namespace + ".servicebus.windows.net"
                    + "/" + eventHubName + "/ConsumerGroups/" + consumerGroup + "/Partitions/" + partitionId;
                session.getProvenanceReporter().receive(flowFile, transitUri, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
            }
        }
    } finally {
        partitionIds.offer(partitionId);
    }
}
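// ---------------------------------------------------------------------------------------------
// Illustrative sketch only (not from the original excerpt): one way the partitionNames queue used
// above might be populated when the processor is scheduled. The setupPartitions method name and
// the NUM_PARTITIONS property descriptor are assumptions made for this sketch; the excerpt only
// shows the queue being polled and re-offered.
// Assumed imports: java.util.concurrent.BlockingQueue, java.util.concurrent.LinkedBlockingQueue,
// org.apache.nifi.annotation.lifecycle.OnScheduled.
// ---------------------------------------------------------------------------------------------
private volatile BlockingQueue<String> partitionNames;

@OnScheduled
public void setupPartitions(final ProcessContext context) {
    final int partitionCount = context.getProperty(NUM_PARTITIONS).asInteger(); // hypothetical property
    final BlockingQueue<String> queue = new LinkedBlockingQueue<>(partitionCount);
    for (int i = 0; i < partitionCount; i++) {
        queue.offer(String.valueOf(i));
    }
    partitionNames = queue;
}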
public void consume(final ProcessContext context, final ProcessSession session,
        final WrappedMessageConsumer wrappedConsumer) throws ProcessException {
    final ProcessorLog logger = getLogger();

    final MessageConsumer consumer = wrappedConsumer.getConsumer();
    final boolean clientAcknowledge = context.getProperty(ACKNOWLEDGEMENT_MODE).getValue().equalsIgnoreCase(ACK_MODE_CLIENT);
    final long timeout = context.getProperty(TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS);
    final boolean addAttributes = context.getProperty(JMS_PROPS_TO_ATTRIBUTES).asBoolean();
    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();

    final JmsProcessingSummary processingSummary = new JmsProcessingSummary();

    final StopWatch stopWatch = new StopWatch(true);
    for (int i = 0; i < batchSize; i++) {

        final Message message;
        try {
            // If we haven't received a message, wait until one is available. If we have already received at least one
            // message, then we are not willing to wait for more to become available, but we are willing to keep receiving
            // all messages that are immediately available.
            if (processingSummary.getMessagesReceived() == 0) {
                message = consumer.receive(timeout);
            } else {
                message = consumer.receiveNoWait();
            }
        } catch (final JMSException e) {
            logger.error("Failed to receive JMS Message due to {}", e);
            wrappedConsumer.close(logger);
            break;
        }

        if (message == null) { // if no messages, we're done
            break;
        }

        try {
            processingSummary.add(map2FlowFile(context, session, message, addAttributes, logger));
        } catch (Exception e) {
            logger.error("Failed to process JMS Message due to {}", e);
            wrappedConsumer.close(logger);
            break;
        }
    }

    if (processingSummary.getFlowFilesCreated() == 0) {
        context.yield();
        return;
    }

    session.commit();

    stopWatch.stop();
    if (processingSummary.getFlowFilesCreated() > 0) {
        final float secs = ((float) stopWatch.getDuration(TimeUnit.MILLISECONDS) / 1000F);
        float messagesPerSec = ((float) processingSummary.getMessagesReceived()) / secs;
        final String dataRate = stopWatch.calculateDataRate(processingSummary.getBytesReceived());
        logger.info("Received {} messages in {} milliseconds, at a rate of {} messages/sec or {}",
            new Object[] {processingSummary.getMessagesReceived(), stopWatch.getDuration(TimeUnit.MILLISECONDS), messagesPerSec, dataRate});
    }

    // if we need to acknowledge the messages, do so now.
    final Message lastMessage = processingSummary.getLastMessageReceived();
    if (clientAcknowledge && lastMessage != null) {
        try {
            lastMessage.acknowledge(); // acknowledge all received messages by acknowledging only the last.
        } catch (final JMSException e) {
            logger.error("Failed to acknowledge {} JMS Message(s). This may result in duplicate messages. Reason for failure: {}",
                new Object[] {processingSummary.getMessagesReceived(), e});
        }
    }
}
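// ---------------------------------------------------------------------------------------------
// Illustrative sketch only (not from the original excerpt): one way an onTrigger() method might
// borrow a pooled WrappedMessageConsumer, pass it to consume() above, and return it to the pool
// afterwards. The consumerQueue field, the createConsumer(...) factory, and the isClosed() check
// are assumptions made for this sketch.
// Assumed imports: java.util.Queue, java.util.concurrent.ConcurrentLinkedQueue.
// ---------------------------------------------------------------------------------------------
private final Queue<WrappedMessageConsumer> consumerQueue = new ConcurrentLinkedQueue<>();

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    WrappedMessageConsumer wrappedConsumer = consumerQueue.poll();
    if (wrappedConsumer == null) {
        wrappedConsumer = createConsumer(context); // hypothetical factory method
    }

    try {
        consume(context, session, wrappedConsumer);
    } finally {
        // only return the consumer to the pool if consume() did not close it after an error
        if (!wrappedConsumer.isClosed()) {
            consumerQueue.offer(wrappedConsumer);
        }
    }
}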