Example #1
  @Test
  public void testNullSink() throws IOException, InterruptedException {
    Benchmark b = new Benchmark("nullsink");
    b.mark("begin");

    // Preload the Hadoop log data into memory so disk I/O is excluded
    // from the sink measurement.
    TextFileSource txt = new TextFileSource(HADOOP_DATA[0]);
    txt.open();
    MemorySinkSource mem = new MemorySinkSource();
    mem.open();
    EventUtil.dumpAll(txt, mem);

    b.mark("disk_loaded");

    // Replay the in-memory events into a NullSink to measure the raw
    // per-event overhead of the pipeline.
    EventSink nullsnk = new NullSink();
    EventUtil.dumpAll(mem, nullsnk);
    b.mark("nullsink done");

    b.done();
  }
Example #2
  @Test
  public void testCountSink() throws IOException, InterruptedException {
    Benchmark b = new Benchmark("countsink");
    b.mark("begin");

    // Preload the Hadoop log data into memory, as in the NullSink test.
    TextFileSource txt = new TextFileSource(HADOOP_DATA[0]);
    txt.open();
    MemorySinkSource mem = new MemorySinkSource();
    mem.open();
    EventUtil.dumpAll(txt, mem);

    b.mark("disk_loaded");

    // Replay the events into a CounterSink and record the delivered
    // event count alongside the benchmark mark.
    CounterSink snk = new CounterSink("counter");
    EventUtil.dumpAll(mem, snk);
    b.mark(snk.getName() + " done", snk.getCount());

    b.done();
  }
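
For reference, here is a minimal self-contained sketch of the same CounterSink benchmark that generates synthetic events instead of reading HADOOP_DATA from disk. It only uses APIs shown above; the event count (1000), the benchmark name, and the test method name are illustrative choices, not part of the original suite.

  // Hypothetical variant: benchmark a CounterSink against synthetic in-memory events.
  @Test
  public void testCountSinkSynthetic() throws IOException, InterruptedException {
    Benchmark b = new Benchmark("synthetic_countsink");
    b.mark("begin");

    // Fill the in-memory buffer with synthetic events instead of loading a log file.
    MemorySinkSource mem = new MemorySinkSource();
    mem.open();
    for (int i = 0; i < 1000; i++) {
      mem.append(new EventImpl(("event " + i).getBytes()));
    }
    b.mark("mem_loaded");

    // Drain the buffer into a CounterSink and record the delivered count.
    CounterSink snk = new CounterSink("synthetic_counter");
    snk.open();
    EventUtil.dumpAll(mem, snk);
    b.mark(snk.getName() + " done", snk.getCount());

    b.done();
  }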
Example #3
  @Test
  public void testHadoopRegexes() throws IOException, InterruptedException {
    Benchmark b = new Benchmark("hadoop_regexes");
    b.mark("begin");

    // Preload the Hadoop log data into memory so only the regex
    // processing is measured.
    TextFileSource txt = new TextFileSource(HADOOP_DATA[0]);
    txt.open();
    MemorySinkSource mem = new MemorySinkSource();
    mem.open();
    EventUtil.dumpAll(txt, mem);

    b.mark("disk_loaded");

    // Build one RegexGroupHistogramSink per pattern in the HADOOP_REGEXES
    // spec file and fan events out to all of them through a MultiReporter.
    SimpleRegexReporterBuilder bld = new SimpleRegexReporterBuilder(HADOOP_REGEXES);

    Collection<RegexGroupHistogramSink> sinks = bld.load();
    MultiReporter snk = new MultiReporter("hadoop_regex_sinks", sinks);
    snk.open();
    b.mark("filters_loaded", new File(HADOOP_REGEXES).getName(), sinks.size());

    EventUtil.dumpAll(mem, snk);
    b.mark(snk.getName() + " done");

    b.done();
  }
  /**
   * This test builds a disk failover and then attempts to roll its output. The diskFailover
   * is set to retry every 1s (1000 ms). We then poll for up to 3s (30 x 100 ms) to check
   * whether the number of events delivered has gone up.
   */
  @Test
  public void testAgentDFOCollector() throws IOException, FlumeSpecException, InterruptedException {
    // Disk-failover sink that retries every 1000 ms, wrapped in a roll sink.
    String agentCollector = "{diskFailover(1000) => roll (100000) { null } }";
    Event e = new EventImpl("foo".getBytes());
    EventSink agent = FlumeBuilder.buildSink(new Context(), agentCollector);
    agent.open();
    agent.append(e);

    // Poll for up to 3s; 'mem' and LOG are fields defined elsewhere in the test class.
    for (int i = 0; i < 30; i++) {
      Clock.sleep(100);
      ReportEvent r = mem.getReport();
      LOG.info(r);
      if (r.getLongMetric("number of events") > 0) {
        return;
      }
    }
    fail("Test timed out, event didn't make it");
  }
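
Since the 'mem' reporting field used above is not shown in this excerpt, the delivery check can also be expressed against a named CounterSink placed inside the roll sink. The following is a hedged sketch under that assumption: it relies on the legacy Flume 0.9.x ReportManager.get().getReportable(...) lookup and the counter("...") sink spec, and the sink name "dfo_count" and method name are purely illustrative.

  // Sketch only: assumes the counter(...) sink registers itself with ReportManager on open().
  @Test
  public void testAgentDFOCollectorWithCounter() throws IOException, FlumeSpecException,
      InterruptedException {
    String spec = "{diskFailover(1000) => roll (100000) { counter(\"dfo_count\") } }";
    EventSink agent = FlumeBuilder.buildSink(new Context(), spec);
    agent.open();
    agent.append(new EventImpl("foo".getBytes()));

    // Poll for up to 3s and check the counter sink for the delivered event.
    for (int i = 0; i < 30; i++) {
      Clock.sleep(100);
      CounterSink cnt = (CounterSink) ReportManager.get().getReportable("dfo_count");
      if (cnt != null && cnt.getCount() > 0) {
        agent.close();
        return;
      }
    }
    agent.close();
    fail("Test timed out, event didn't make it");
  }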