Example #1
  public void sequenceFileRenameRetryCoreTest(int numberOfRetriesRequired, boolean closeSucceed)
      throws Exception {
    String hdfsPath =
        "file:///tmp/flume-test."
            + Calendar.getInstance().getTimeInMillis()
            + "."
            + Thread.currentThread().getId();

    Context context = new Context();
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(hdfsPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);
    context.put("hdfs.path", hdfsPath);
    context.put("hdfs.closeTries", String.valueOf(numberOfRetriesRequired));
    context.put("hdfs.rollCount", "1");
    context.put("hdfs.retryInterval", "1");
    context.put("hdfs.callTimeout", Long.toString(1000));
    MockFileSystem mockFs = new MockFileSystem(fs, numberOfRetriesRequired, closeSucceed);
    BucketWriter bucketWriter =
        new BucketWriter(
            0,
            0,
            1,
            1,
            context,
            hdfsPath,
            hdfsPath,
            "singleBucket",
            ".tmp",
            null,
            null,
            null,
            new MockDataStream(mockFs),
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            null,
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            1,
            numberOfRetriesRequired);

    bucketWriter.setFileSystem(mockFs);
    // At this point we have already checked whether isFileClosed is available
    // in this JVM, so let's make it check again.
    Event event = EventBuilder.withBody("test", Charsets.UTF_8);
    bucketWriter.append(event);
    // This is what triggers the close, so a second append is required :/
    bucketWriter.append(event);

    TimeUnit.SECONDS.sleep(numberOfRetriesRequired + 2);

    Assert.assertEquals(
        "rename tries", numberOfRetriesRequired, bucketWriter.renameTries.get());
  }
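
All of these examples reference shared fixtures (ctx, timedRollerPool, proxy, logger) and test doubles (MockHDFSWriter, MockFileSystem, MockDataStream) defined elsewhere in the test class. The following is a minimal sketch of what those fields might look like, assuming JUnit 4, SLF4J, and Flume's PrivilegedExecutor plumbing; it is an illustration, not the declarations from the real suite. (A sketch of MockHDFSWriter follows Example #7.)

  // Hypothetical shared fixtures assumed by every example in this section.
  // Field names match the references above; the actual declarations in the
  // real test class may differ.
  private static final Logger logger = LoggerFactory.getLogger(TestBucketWriter.class);

  private static Context ctx = new Context();
  private static ScheduledExecutorService timedRollerPool;
  private static PrivilegedExecutor proxy;

  @BeforeClass
  public static void setup() {
    timedRollerPool = Executors.newScheduledThreadPool(1);
    // Assumption: an unauthenticated pass-through executor is enough for
    // these local-filesystem tests.
    proxy = FlumeAuthenticationUtil.getAuthenticator(null, null).proxyAs(null);
  }

  // Hypothetical driver for the parameterized helper in Example #1; the
  // argument values are illustrative only.
  @Test
  public void testSequenceFileRenameRetries() throws Exception {
    sequenceFileRenameRetryCoreTest(3, true);
  }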
Example #2
  @Test
  public void testFileSuffixGiven() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
    final String suffix = ".avro";

    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter =
        new BucketWriter(
            ROLL_INTERVAL,
            0,
            0,
            0,
            ctx,
            "/tmp",
            "file",
            "",
            ".tmp",
            suffix,
            null,
            SequenceFile.CompressionType.NONE,
            hdfsWriter,
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            null,
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            0,
            0);

    // Override the system time used by the test so we know what to expect.
    final long testTime = System.currentTimeMillis();

    Clock testClock =
        new Clock() {
          @Override
          public long currentTimeMillis() {
            return testTime;
          }
        };
    bucketWriter.setClock(testClock);

    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    bucketWriter.append(e);
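    // The opened file path should now end with "<testTime + 1><suffix>.tmp":
    // BucketWriter (presumably) seeds its file-name counter from the injected
    // clock and increments it once when opening the first file.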

    Assert.assertTrue(
        "Incorrect suffix",
        hdfsWriter.getOpenedFilePath().endsWith(Long.toString(testTime + 1) + suffix + ".tmp"));
  }
Example #3
  @Test
  public void testCallbackOnClose() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
    final String SUFFIX = "WELCOME_TO_THE_EREBOR";
    final AtomicBoolean callbackCalled = new AtomicBoolean(false);

    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter =
        new BucketWriter(
            ROLL_INTERVAL,
            0,
            0,
            0,
            ctx,
            "/tmp",
            "file",
            "",
            SUFFIX,
            null,
            null,
            SequenceFile.CompressionType.NONE,
            hdfsWriter,
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            new HDFSEventSink.WriterCallback() {
              @Override
              public void run(String filePath) {
                callbackCalled.set(true);
              }
            },
            "blah",
            30000,
            Executors.newSingleThreadExecutor(),
            0,
            0);

    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    bucketWriter.append(e);
    bucketWriter.close(true);

    Assert.assertTrue(callbackCalled.get());
  }
Example #4
  @Test
  public void testEventCountingRoller() throws IOException, InterruptedException {
    int maxEvents = 100;
    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter =
        new BucketWriter(
            0,
            0,
            maxEvents,
            0,
            ctx,
            "/tmp",
            "file",
            "",
            ".tmp",
            null,
            null,
            SequenceFile.CompressionType.NONE,
            hdfsWriter,
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            null,
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            0,
            0);

    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    for (int i = 0; i < 1000; i++) {
      bucketWriter.append(e);
    }

    logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
    logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
    logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());

    Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
    Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
    Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
  }
Example #5
  @Test
  public void testInUseSuffix() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1000; // seconds; make sure it doesn't change in the course of the test
    final String SUFFIX = "WELCOME_TO_THE_HELLMOUNTH";

    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter =
        new BucketWriter(
            ROLL_INTERVAL,
            0,
            0,
            0,
            ctx,
            "/tmp",
            "file",
            "",
            SUFFIX,
            null,
            null,
            SequenceFile.CompressionType.NONE,
            hdfsWriter,
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            null,
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            0,
            0);

    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    bucketWriter.append(e);

    Assert.assertTrue("Incorrect in use suffix", hdfsWriter.getOpenedFilePath().contains(SUFFIX));
  }
Example #6
  @Test
  public void testIntervalRollerBug() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1; // seconds
    final int NUM_EVENTS = 10;

    HDFSWriter hdfsWriter =
        new HDFSWriter() {
          private volatile boolean open = false;

          @Override
          public void configure(Context context) {}

          @Override
          public void sync() throws IOException {
            if (!open) {
              throw new IOException("closed");
            }
          }

          @Override
          public void open(String filePath, CompressionCodec codec, CompressionType cType)
              throws IOException {
            open = true;
          }

          @Override
          public void open(String filePath) throws IOException {
            open = true;
          }

          @Override
          public void close() throws IOException {
            open = false;
          }

          @Override
          public boolean isUnderReplicated() {
            return false;
          }

          @Override
          public void append(Event e) throws IOException {
            // we just re-open in append if closed
            open = true;
          }
        };
    File tmpFile = File.createTempFile("flume", "test");
    tmpFile.deleteOnExit();
    String path = tmpFile.getParent();
    String name = tmpFile.getName();

    BucketWriter bucketWriter =
        new BucketWriter(
            ROLL_INTERVAL,
            0,
            0,
            0,
            ctx,
            path,
            name,
            "",
            ".tmp",
            null,
            null,
            SequenceFile.CompressionType.NONE,
            hdfsWriter,
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            null,
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            0,
            0);

    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    for (int i = 0; i < NUM_EVENTS - 1; i++) {
      bucketWriter.append(e);
    }

    // sleep to force a roll... wait 2x interval just to be sure
    Thread.sleep(2 * ROLL_INTERVAL * 1000L);

    bucketWriter.flush(); // throws closed exception
  }
Example #7
  @Test
  public void testIntervalRoller() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1; // seconds
    final int NUM_EVENTS = 10;
    final AtomicBoolean calledBack = new AtomicBoolean(false);

    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter =
        new BucketWriter(
            ROLL_INTERVAL,
            0,
            0,
            0,
            ctx,
            "/tmp",
            "file",
            "",
            ".tmp",
            null,
            null,
            SequenceFile.CompressionType.NONE,
            hdfsWriter,
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            new HDFSEventSink.WriterCallback() {
              @Override
              public void run(String filePath) {
                calledBack.set(true);
              }
            },
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            0,
            0);

    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    long startNanos = System.nanoTime();
    for (int i = 0; i < NUM_EVENTS - 1; i++) {
      bucketWriter.append(e);
    }

    // sleep to force a roll... wait 2x interval just to be sure
    Thread.sleep(2 * ROLL_INTERVAL * 1000L);

    Assert.assertTrue(bucketWriter.closed);
    Assert.assertTrue(calledBack.get());
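    // Reuse the same mock writer below; its counters keep accumulating across
    // both BucketWriter instances, so the totals asserted later cover the
    // whole test.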

    bucketWriter =
        new BucketWriter(
            ROLL_INTERVAL,
            0,
            0,
            0,
            ctx,
            "/tmp",
            "file",
            "",
            ".tmp",
            null,
            null,
            SequenceFile.CompressionType.NONE,
            hdfsWriter,
            timedRollerPool,
            proxy,
            new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
            0,
            null,
            null,
            30000,
            Executors.newSingleThreadExecutor(),
            0,
            0);
    // write one more event (to reopen a new file so we will roll again later)
    bucketWriter.append(e);

    long elapsedMillis =
        TimeUnit.MILLISECONDS.convert(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS);
    long elapsedSeconds = elapsedMillis / 1000L;

    logger.info("Time elapsed: {} milliseconds", elapsedMillis);
    logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
    logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
    logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());
    logger.info("Number of files closed: {}", hdfsWriter.getFilesClosed());

    Assert.assertEquals("events written", NUM_EVENTS, hdfsWriter.getEventsWritten());
    Assert.assertEquals(
        "bytes written", e.getBody().length * NUM_EVENTS, hdfsWriter.getBytesWritten());
    Assert.assertEquals("files opened", 2, hdfsWriter.getFilesOpened());

    // before auto-roll
    Assert.assertEquals("files closed", 1, hdfsWriter.getFilesClosed());

    logger.info("Waiting for roll...");
    Thread.sleep(2 * ROLL_INTERVAL * 1000L);

    logger.info("Number of files closed: {}", hdfsWriter.getFilesClosed());
    Assert.assertEquals("files closed", 2, hdfsWriter.getFilesClosed());
  }
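
The MockHDFSWriter used by Examples #2 through #7 is not shown either. Below is a minimal counting sketch that would satisfy every call the examples make on it (getFilesOpened, getFilesClosed, getBytesWritten, getEventsWritten, getOpenedFilePath), assuming the HDFSWriter interface shape visible in Example #6; the real mock in the Flume test suite may track more state.

  // Hypothetical stand-in for the MockHDFSWriter referenced above: it counts
  // opens/closes/appends instead of touching HDFS.
  static class MockHDFSWriter implements HDFSWriter {
    private int filesOpened = 0;
    private int filesClosed = 0;
    private int bytesWritten = 0;
    private int eventsWritten = 0;
    private String openedFilePath;

    public int getFilesOpened() { return filesOpened; }
    public int getFilesClosed() { return filesClosed; }
    public int getBytesWritten() { return bytesWritten; }
    public int getEventsWritten() { return eventsWritten; }
    public String getOpenedFilePath() { return openedFilePath; }

    @Override
    public void configure(Context context) {}

    @Override
    public void open(String filePath) throws IOException {
      filesOpened++;
      openedFilePath = filePath;
    }

    @Override
    public void open(String filePath, CompressionCodec codec, CompressionType cType)
        throws IOException {
      filesOpened++;
      openedFilePath = filePath;
    }

    @Override
    public void append(Event e) throws IOException {
      eventsWritten++;
      bytesWritten += e.getBody().length;
    }

    @Override
    public void sync() throws IOException {}

    @Override
    public void close() throws IOException {
      filesClosed++;
    }

    @Override
    public boolean isUnderReplicated() {
      return false;
    }
  }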