/** This also implements the Apache Commons Daemon interface's start */ public synchronized void start() { FlumeConfiguration conf = FlumeConfiguration.get(); // the simple report interface simpleReportManager.add(vmInfo); simpleReportManager.add(sysInfo); simpleReportManager.add( new Reportable() { @Override public String getName() { return FlumeNode.this.getName(); } @Override public ReportEvent getMetrics() { return FlumeNode.this.getReport(); } @Override public Map<String, Reportable> getSubMetrics() { return ReportUtil.noChildren(); } }); // the full report interface ReportManager.get().add(vmInfo); ReportManager.get().add(sysInfo); ReportManager.get().add(this); if (startHttp) { int nodePort = conf.getNodeStatusPort(); String bindAddress = "0.0.0.0"; ContextCreator cc = new ContextCreator() { @Override public void addContexts(ContextHandlerCollection handlers) { handlers.addHandler(InternalHttpServer.createLogAppContext()); handlers.addHandler(InternalHttpServer.createStackSevletContext()); String webAppRoot = FlumeConfiguration.get().getNodeWebappRoot(); InternalHttpServer.addHandlersFromPaths(handlers, new File(webAppRoot)); } }; http = InternalHttpServer.startFindPortHttpServer(cc, bindAddress, nodePort); } if (reportPusher != null) { reportPusher.start(); } if (liveMan != null) { liveMan.start(); } if (chokeMan != null) { // JVM exits if only daemons threads remain. chokeMan.setDaemon(true); chokeMan.start(); } }
/**
 * Tests to make sure the report sink receives data.
 *
 * @throws InterruptedException
 */
@Test
public void testReportSink() throws FlumeSpecException, IOException, InterruptedException {
  String spec =
      "{benchinject(\"foo\") => {benchreport(\"report\", \"[ console , counter(\\\"test\\\") ]\") => null } }";
  EventSink sink = FlumeBuilder.buildSink(new ReportTestingContext(), spec);

  sink.open();
  for (int i = 0; i < 2; i++) {
    sink.append(new EventImpl(new byte[0]));
  }
  sink.close();

  CounterSink counter = (CounterSink) ReportManager.get().getReportable("test");
  Assert.assertEquals(1, counter.getCount());
}
@Test public void testAutoRoll() throws IOException, InterruptedException { RollSink snk = new RollSink(new ReportTestingContext(), "counter(\"foo\")", 2000, 10000); // two // second sleeper, but check period is really long Event e = new EventImpl("this is a test message".getBytes()); snk.open(); snk.append(e); CounterSink cnt = (CounterSink) ReportManager.get().getReportable("foo"); Clock.sleep(3000); // sleep 3s // the roller automatically flushed! assertEquals(1, cnt.getCount()); snk.close(); }
/** * This tests the new failover builder that uses specs strings as arguments and instantiates them! * * @throws InterruptedException */ @Test public void testBackoffFailoverBuilder() throws IOException, InterruptedException { SinkBuilder bld = BackOffFailOverSink.builder(); EventSink snk = bld.build( new ReportTestingContext(), "{intervalFlakeyAppend(2) => counter(\"pri\") } ", "counter(\"sec\")"); snk.open(); Event e = new EventImpl("foo".getBytes()); snk.append(e); snk.append(e); snk.append(e); snk.append(e); snk.append(e); snk.close(); CounterSink priCnt = (CounterSink) ReportManager.get().getReportable("pri"); CounterSink secCnt = (CounterSink) ReportManager.get().getReportable("sec"); // these are timing based, may fail. Assert.assertEquals(1, priCnt.getCount()); Assert.assertEquals(4, secCnt.getCount()); }
public void doTestLogicalNodesConcurrentDFOMans(final int threads, final int events, int timeout) throws IOException, InterruptedException, FlumeSpecException { FlumeTestHarness.setupLocalWriteDir(); FlumeMaster master = new FlumeMaster(); FlumeNode node = new FlumeNode(new DirectMasterRPC(master), false, false); final Reportable[] dfos = new Reportable[threads]; for (int i = 0; i < threads; i++) { String name = "test." + i; String report = "report." + i; int count = events + i; String src = "asciisynth(" + count + ",100)"; String snk = "{ diskFailover => counter(\"" + report + "\") } "; node.getLogicalNodeManager().testingSpawn(name, src, snk); dfos[i] = node.getLogicalNodeManager().get(name); } // TODO (jon) using sleep is cheating to give all threads a chance to start. // Test seems flakey without this due to a race condition. Thread.sleep(500); // wait for all to be done. waitForEmptyDFOs(node, timeout); // check to make sure everyone got the right number of events boolean success = true; for (int i = 0; i < threads; i++) { LOG.info(dfos[i].getMetrics()); } for (int i = 0; i < threads; i++) { CounterSink cnt = (CounterSink) ReportManager.get().getReportable("report." + i); LOG.info(i + " expected " + (events + i) + " and got " + cnt.getCount()); success &= ((events + i) == cnt.getCount()); assertEquals(events + i, cnt.getCount()); } assertTrue("Counts did not line up", success); FlumeTestHarness.cleanupLocalWriteDir(); }
/**
 * Bang on the wal mechanism as hard as you want with number of concurrent threads, number of
 * events per thread. Timeout in millis.
 */
public void doTestConcurrentDFOMans(final int threads, final int events, int timeout)
    throws IOException, InterruptedException {
  // "started" releases once every worker has opened its source/sink pair;
  // "done" releases once every worker has finished (success or failure).
  final CountDownLatch started = new CountDownLatch(threads);
  final CountDownLatch done = new CountDownLatch(threads);
  // One failover manager per worker, saved so the main thread can inspect
  // each manager's metrics after the workers complete.
  final DiskFailoverManager[] dfos = new DiskFailoverManager[threads];

  for (int i = 0; i < threads; i++) {
    final int idx = i; // effectively-final copy for the anonymous thread
    new Thread("Concurrent-" + i) {
      @Override
      public void run() {
        try {
          File f1 = FileUtil.mktempdir();
          AccumulatorSink cnt1 = new AccumulatorSink("count." + idx);
          DiskFailoverManager dfoMan = new NaiveFileFailoverManager(f1);
          dfos[idx] = dfoMan; // save for checking.

          // short trigger causes lots of rolls
          EventSink snk =
              new DiskFailoverDeco(
                  cnt1, LogicalNodeContext.testingContext(), dfoMan, new TimeTrigger(100), 50);
          ReportManager.get().add(cnt1);

          // make each parallel instance send a slightly different number of
          // messages.
          EventSource src = new NoNlASCIISynthSource(events + idx, 100);

          src.open();
          snk.open();
          // Signal readiness only after both ends are open, so the main
          // thread's started.await() means all workers are live.
          started.countDown();

          EventUtil.dumpAll(src, snk);
          src.close();
          snk.close(); // this triggers a flush of current file!?
          FileUtil.rmr(f1);
        } catch (Exception e) {
          // NOTE(review): failure here is only logged; the per-worker count
          // assertion in the main thread is what actually fails the test.
          LOG.error(e, e);
        } finally {
          done.countDown();
        }
      }
    }.start();
  }

  started.await();
  boolean ok = done.await(timeout, TimeUnit.MILLISECONDS);
  assertTrue("Test timed out", ok);

  for (int i = 0; i < threads; i++) {
    AccumulatorSink cnt = (AccumulatorSink) ReportManager.get().getReportable("count." + i);
    // check for the slightly different counts based on thread.
    int exp = events + i;
    LOG.info("count." + i + " expected " + exp + " and got " + cnt.getCount());
    assertEquals(exp, (int) cnt.getCount());

    // check dfo reports to see if they are sane.
    ReportEvent rpt = dfos[i].getMetrics();
    LOG.info(rpt);
    long failovered = rpt.getLongMetric(DiskFailoverManager.A_MSG_WRITING);
    assertEquals(events + i, failovered);
  }
}
/**
 * Starts the master's services: optional ZooKeeper config store, optional
 * status HTTP server, the control/admin RPC servers, one report server
 * (Avro or Thrift, per configuration), the command/ack/spec managers, and
 * the lost-node reaper thread.
 *
 * @throws IOException if ZooKeeper startup is interrupted or an RPC server
 *     fails to start.
 */
public void serve() throws IOException {
  // Bring up ZooKeeper first when it is the configured master store.
  if (cfg.getMasterStore().equals(ZK_CFG_STORE)) {
    try {
      ZooKeeperService.getAndInit(cfg);
    } catch (InterruptedException e) {
      throw new IOException("Unexpected interrupt when starting ZooKeeper", e);
    }
  }
  ReportManager.get().add(vmInfo);
  ReportManager.get().add(sysInfo);

  if (doHttp) {
    String webPath = FlumeNode.getWebPath(cfg);
    this.http =
        new StatusHttpServer("flumeconfig", webPath, "0.0.0.0", cfg.getMasterHttpPort(), false);
    http.addServlet(jerseyMasterServlet(), "/master/*");
    http.start();
  }

  controlServer = new MasterClientServer(this, FlumeConfiguration.get());
  configServer = new MasterAdminServer(this, FlumeConfiguration.get());
  /*
   * We instantiate both kinds of report servers below, but no resources are
   * allocated till we call serve() on them.
   */
  avroReportServer = new AvroReportServer(FlumeConfiguration.get().getReportServerPort());
  thriftReportServer = new ThriftReportServer(FlumeConfiguration.get().getReportServerPort());

  ReportManager.get().add(this);
  try {
    controlServer.serve();
    configServer.serve();
    /*
     * Start the Avro/Thrift ReportServer based on the flag set in the
     * configuration file.
     */
    if (cfg.getReportServerRPC() == cfg.RPC_TYPE_AVRO) {
      avroReportServer.serve();
    } else {
      thriftReportServer.serve();
    }
  } catch (TTransportException e1) {
    throw new IOException("Error starting control or config server", e1);
  }
  cmdman.start();
  ackman.start();
  specman.start();

  // TODO (jon) clean shutdown
  // Background thread that periodically asks the status manager to check
  // for nodes whose heartbeats have gone stale.
  reaper =
      new Thread("Lost node reaper") {
        @Override
        public void run() {
          try {
            while (true) {
              Thread.sleep(FlumeConfiguration.get().getConfigHeartbeatPeriod());
              statman.checkup();
            }
          } catch (InterruptedException e) {
            // NOTE(review): interrupt appears to be the only way this loop
            // exits — confirm whether it is the intended shutdown signal;
            // the interrupt status is not restored here.
            LOG.error("Reaper thread unexpectedly interrupted:" + e.getMessage());
            LOG.debug("Lost node reaper unexpectedly interrupted", e);
          }
        }
      };
  reaper.start();
}