@Override
public Status appendBatch(List<AvroFlumeEvent> events) {
  logger.debug("Avro source {}: Received avro event batch of {} events.",
      getName(), events.size());
  sourceCounter.incrementAppendBatchReceivedCount();
  sourceCounter.addToEventReceivedCount(events.size());

  List<Event> batch = new ArrayList<Event>();
  for (AvroFlumeEvent avroEvent : events) {
    Event event = EventBuilder.withBody(avroEvent.getBody().array(),
        toStringMap(avroEvent.getHeaders()));
    batch.add(event);
  }

  try {
    getChannelProcessor().processEventBatch(batch);
  } catch (Throwable t) {
    logger.error("Avro source " + getName() + ": Unable to process event "
        + "batch. Exception follows.", t);
    if (t instanceof Error) {
      throw (Error) t;
    }
    return Status.FAILED;
  }

  sourceCounter.incrementAppendBatchAcceptedCount();
  sourceCounter.addToEventAcceptedCount(events.size());
  return Status.OK;
}
@Override
public void stop() {
  logger.info("Avro source {} stopping: {}", getName(), this);

  server.close();
  try {
    server.join();
  } catch (InterruptedException e) {
    logger.info("Avro source " + getName() + ": Interrupted while waiting "
        + "for Avro server to stop. Exiting. Exception follows.", e);
  }

  sourceCounter.stop();
  connectionCountUpdater.shutdown();
  while (!connectionCountUpdater.isTerminated()) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ex) {
      logger.error("Interrupted while waiting for connection count executor "
          + "to terminate", ex);
      Throwables.propagate(ex);
    }
  }

  super.stop();
  logger.info("Avro source {} stopped. Metrics: {}", getName(), sourceCounter);
}
@Override
public void start() {
  logger.info("Starting {}...", this);

  Responder responder = new SpecificResponder(AvroSourceProtocol.class, this);

  if (maxThreads <= 0) {
    server = new NettyServer(responder, new InetSocketAddress(bindAddress, port));
  } else {
    server = new NettyServer(responder,
        new InetSocketAddress(bindAddress, port),
        new NioServerSocketChannelFactory(
            Executors.newCachedThreadPool(),
            Executors.newFixedThreadPool(maxThreads)));
  }

  connectionCountUpdater = Executors.newSingleThreadScheduledExecutor();
  server.start();
  sourceCounter.start();
  super.start();

  final NettyServer srv = (NettyServer) server;
  connectionCountUpdater.scheduleWithFixedDelay(new Runnable() {
    @Override
    public void run() {
      sourceCounter.setOpenConnectionCount(Long.valueOf(srv.getNumActiveConnections()));
    }
  }, 0, 60, TimeUnit.SECONDS);

  logger.info("Avro source {} started.", getName());
}
@Override
public void run() {
  logger.debug("Starting SpoolDirectoryRunnable...");
  int backoffInterval = 250;
  try {
    while (!Thread.interrupted()) {
      List<Event> events = reader.readEvents(batchSize);
      if (events.isEmpty()) {
        break;
      }
      sourceCounter.addToEventReceivedCount(events.size());
      sourceCounter.incrementAppendBatchReceivedCount();

      try {
        getChannelProcessor().processEventBatch(events);
        reader.commit();
      } catch (ChannelException ex) {
        logger.warn("The channel is full and cannot accept data now. The "
            + "source will try again after " + backoffInterval
            + " milliseconds");
        hitChannelException = true;
        if (backoff) {
          // Exponential backoff: double the wait on each failure, capped at maxBackoff.
          TimeUnit.MILLISECONDS.sleep(backoffInterval);
          backoffInterval = backoffInterval << 1;
          backoffInterval = backoffInterval >= maxBackoff ? maxBackoff : backoffInterval;
        }
        continue;
      }
      // The batch went through; reset the backoff interval.
      backoffInterval = 250;
      sourceCounter.addToEventAcceptedCount(events.size());
      sourceCounter.incrementAppendBatchAcceptedCount();
    }
  } catch (Throwable t) {
    logger.error("FATAL: " + SpoolDirectoryZipSource.this.toString() + ": "
        + "Uncaught exception in SpoolDirectorySource thread. "
        + "Restart or reconfigure Flume to continue processing.", t);
    hasFatalError = true;
    Throwables.propagate(t);
  }
}
@Override
public void start() {
  logger.info("Starting thrift source");

  ExecutorService sourceService;
  ThreadFactory threadFactory =
      new ThreadFactoryBuilder().setNameFormat("Flume Thrift IPC Thread %d").build();
  if (maxThreads == 0) {
    sourceService = Executors.newCachedThreadPool(threadFactory);
  } else {
    sourceService = Executors.newFixedThreadPool(maxThreads, threadFactory);
  }

  try {
    serverTransport = new TNonblockingServerSocket(new InetSocketAddress(bindAddress, port));
  } catch (TTransportException e) {
    throw new FlumeException("Failed to start Thrift Source.", e);
  }

  THsHaServer.Args thhsArgs = new THsHaServer.Args(serverTransport);
  thhsArgs.processor(new ThriftSourceProtocol.Processor(new ThriftSourceHandler()));
  // thhsArgs.transportFactory(new TFramedTransport.Factory());
  thhsArgs.protocolFactory(new TBinaryProtocol.Factory());
  thhsArgs.executorService(sourceService);
  server = new THsHaServer(thhsArgs); // half-sync/half-async server model

  servingExecutor = Executors.newSingleThreadExecutor(
      new ThreadFactoryBuilder().setNameFormat("Flume Thrift Source I/O Boss").build());

  // Start serving.
  servingExecutor.submit(new Runnable() {
    @Override
    public void run() {
      server.serve();
    }
  });

  // Block until the server is actually serving, failing after a 10 s timeout.
  long timeAfterStart = System.currentTimeMillis();
  while (!server.isServing()) {
    try {
      if (System.currentTimeMillis() - timeAfterStart >= 10000) {
        throw new FlumeException("Thrift server failed to start!");
      }
      TimeUnit.MILLISECONDS.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new FlumeException("Interrupted while waiting for Thrift server to start.", e);
    }
  }

  sourceCounter.start();
  logger.info("Started Thrift source.");
  super.start();
}
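For reference, a minimal sketch of a client pushing one event into this Thrift source through the flume-ng-sdk RpcClientFactory; the hostname and port are assumptions, not values from the source above:

import java.nio.charset.StandardCharsets;

import org.apache.flume.Event;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

public class ThriftClientSketch {
  public static void main(String[] args) throws Exception {
    // getThriftInstance() speaks the same ThriftSourceProtocol the server registers.
    RpcClient client = RpcClientFactory.getThriftInstance("localhost", 4141); // assumed host/port
    try {
      Event event = EventBuilder.withBody("hello thrift", StandardCharsets.UTF_8);
      client.append(event); // handled by ThriftSourceHandler on the server side
    } finally {
      client.close();
    }
  }
}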
@Override
public Status append(AvroFlumeEvent avroEvent) {
  logger.debug("Avro source {}: Received avro event: {}", getName(), avroEvent);
  sourceCounter.incrementAppendReceivedCount();
  sourceCounter.incrementEventReceivedCount();

  Event event = EventBuilder.withBody(avroEvent.getBody().array(),
      toStringMap(avroEvent.getHeaders()));

  try {
    getChannelProcessor().processEvent(event);
  } catch (ChannelException ex) {
    logger.warn("Avro source " + getName() + ": Unable to process event. "
        + "Exception follows.", ex);
    return Status.FAILED;
  }

  sourceCounter.incrementAppendAcceptedCount();
  sourceCounter.incrementEventAcceptedCount();
  return Status.OK;
}
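These append()/appendBatch() handlers are what the standard Avro RPC client drives. A minimal sketch exercising the batch path (host and port are assumptions; getDefaultInstance() returns the default Avro client):

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.apache.flume.Event;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

public class AvroClientSketch {
  public static void main(String[] args) throws Exception {
    RpcClient client = RpcClientFactory.getDefaultInstance("localhost", 41414); // assumed host/port
    try {
      List<Event> batch = new ArrayList<Event>();
      for (int i = 0; i < 10; i++) {
        batch.add(EventBuilder.withBody("event " + i, StandardCharsets.UTF_8));
      }
      client.appendBatch(batch); // lands in appendBatch() above; a single append() hits append()
    } finally {
      client.close();
    }
  }
}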
@Override
public synchronized void stop() {
  executor.shutdown();
  try {
    executor.awaitTermination(10L, TimeUnit.SECONDS);
  } catch (InterruptedException ex) {
    logger.info("Interrupted while awaiting termination", ex);
  }
  executor.shutdownNow();

  super.stop();
  sourceCounter.stop();
  logger.info("SpoolDir source {} stopped. Metrics: {}", getName(), sourceCounter);
}
@Override
public void stop() {
  if (server != null && server.isServing()) {
    server.stop();
  }
  servingExecutor.shutdown();
  try {
    if (!servingExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
      servingExecutor.shutdownNow();
    }
  } catch (InterruptedException e) {
    throw new FlumeException("Interrupted while waiting for server to be shutdown.", e);
  }
  sourceCounter.stop();
  // Thrift will shut down the executor passed to it.
  super.stop();
}
@Override
public synchronized void start() {
  logger.info("SpoolDirectorySource source starting with directory: {}", spoolDirectory);

  executor = Executors.newSingleThreadScheduledExecutor();

  File directory = new File(spoolDirectory);
  try {
    reader = new ReliableSpoolingZipFileEventReader.Builder()
        .spoolDirectory(directory)
        .completedSuffix(completedSuffix)
        .ignorePattern(ignorePattern)
        .trackerDirPath(trackerDirPath)
        .annotateFileName(fileHeader)
        .fileNameHeader(fileHeaderKey)
        .annotateBaseName(basenameHeader)
        .baseNameHeader(basenameHeaderKey)
        .deserializerType(deserializerType)
        .deserializerContext(deserializerContext)
        .deletePolicy(deletePolicy)
        .inputCharset(inputCharset)
        .decodeErrorPolicy(decodeErrorPolicy)
        .consumeOrder(consumeOrder)
        .build();
  } catch (IOException ioe) {
    throw new FlumeException("Error instantiating spooling event parser", ioe);
  }

  Runnable runner = new SpoolDirectoryRunnable(reader, sourceCounter);
  executor.scheduleWithFixedDelay(runner, 0, POLL_DELAY_MS, TimeUnit.MILLISECONDS);

  super.start();
  logger.debug("SpoolDirectoryZipSource source started");
  sourceCounter.start();
}
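A minimal sketch of wiring this source to a channel for a local test, following the harness pattern used in Flume's own unit tests. It assumes SpoolDirectoryZipSource extends AbstractSource, implements Configurable, and accepts the stock spooldir configuration keys; the directory path is hypothetical:

import java.util.ArrayList;
import java.util.List;

import org.apache.flume.Channel;
import org.apache.flume.ChannelSelector;
import org.apache.flume.Context;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.ReplicatingChannelSelector;
import org.apache.flume.conf.Configurables;

public class SpoolSourceWiringSketch {
  public static void main(String[] args) {
    // In-memory channel to receive the events.
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());

    List<Channel> channels = new ArrayList<Channel>();
    channels.add(channel);
    ChannelSelector selector = new ReplicatingChannelSelector();
    selector.setChannels(channels);

    SpoolDirectoryZipSource source = new SpoolDirectoryZipSource();
    source.setChannelProcessor(new ChannelProcessor(selector));

    Context context = new Context();
    context.put("spoolDir", "/var/log/flume-spool"); // hypothetical directory
    Configurables.configure(source, context);

    source.start(); // kicks off the SpoolDirectoryRunnable shown above
  }
}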