@Test
  public void laterCreatedLogsDontBlockExecution() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");

    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

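    // A fake WAL path, paired with a Status that is not yet fully replicated
    // (finite end offset, not closed)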
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

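    // Record the pending status for the file in the replication table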
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);
    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();

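    // Mirror the entry in the metadata table's replication section, which drain() also reads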
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

    System.out.println("Reading metadata first time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

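    // drain() blocks until the files needing replication at call time are fully replicated,
    // so run it in a background thread and track its outcome with flags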
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // Wait for the drain thread to scan the tables at least once before mutating them
    Thread.sleep(2000);

    // Write an entry for a new file, and delete the original file's metadata entry
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m =
        new Mutation(
            ReplicationSection.getRowPrefix() + "/accumulo/wals/tserver+port/" + UUID.randomUUID());
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId1);
    bw.addMutation(m);
    bw.close();

    System.out.println("Reading metadata second time");
    for (Entry<Key, Value> e : conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      System.out.println(e.getKey());
    }

    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId1);
    bw.addMutation(m);
    bw.close();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperatiotns.drain did not complete");
    }

    // drain() should return: it only waits on the files present when it was called (file1,
    // which we deleted), not on the file added afterwards
    Assert.assertTrue(done.get());
    Assert.assertFalse(exception.get());
  }

  @Test
  public void waitsUntilEntriesAreReplicated() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");
    Text tableId = new Text(conn.tableOperations().tableIdMap().get("foo"));

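    // Two fake WALs, both starting with the same pending (unreplicated) status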
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID(),
        file2 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

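    // Mark both files as needing replication in the replication table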
    BatchWriter bw = ReplicationTable.getBatchWriter(conn);

    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    m = new Mutation(file2);
    StatusSection.add(m, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

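    // Mirror both entries in the metadata table's replication section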
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));

    bw.addMutation(m);

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.put(ReplicationSection.COLF, tableId, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    bw.close();

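    // Start drain() in the background; it should stay blocked until every entry is resolved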
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // While the entries are still present, drain() must not have completed
    Assert.assertFalse(done.get());

    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();

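    // Only one file's metadata entry has been removed; the drain should still be blocked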
    Assert.assertFalse(done.get());

    m = new Mutation(ReplicationSection.getRowPrefix() + file2);
    m.putDelete(ReplicationSection.COLF, tableId);
    bw.addMutation(m);
    bw.flush();
    bw.close();

    // Removing only the metadata entries isn't enough; the replication table entries remain
    Assert.assertFalse(done.get());

    // Remove the replication entries too
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();

    Assert.assertFalse(done.get());

    m = new Mutation(file2);
    m.putDelete(StatusSection.NAME, tableId);
    bw.addMutation(m);
    bw.flush();
    bw.close();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // Once both the metadata and replication entries are gone, drain() completes
    Assert.assertTrue(done.get());
    Assert.assertFalse(exception.get());
  }

  @Test
  public void inprogressReplicationRecordsBlockExecution() throws Exception {
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    conn.tableOperations().create("foo");

    Text tableId1 = new Text(conn.tableOperations().tableIdMap().get("foo"));

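    // A single fake WAL with a pending (unreplicated) status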
    String file1 = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    Status stat =
        Status.newBuilder()
            .setBegin(0)
            .setEnd(10000)
            .setInfiniteEnd(false)
            .setClosed(false)
            .build();

    BatchWriter bw = ReplicationTable.getBatchWriter(conn);

    Mutation m = new Mutation(file1);
    StatusSection.add(m, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);
    bw.close();

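    // A log entry marking the WAL as still in use by the table's tablet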
    LogEntry logEntry = new LogEntry();
    logEntry.extent = new KeyExtent(new Text(tableId1), null, null);
    logEntry.server = "tserver";
    logEntry.filename = file1;
    logEntry.tabletId = 1;
    logEntry.logSet = Arrays.asList(file1);
    logEntry.timestamp = System.currentTimeMillis();

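    // Write both the replication status and the current-log entry to the metadata table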
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(stat));
    bw.addMutation(m);

    m = new Mutation(logEntry.getRow());
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    bw.addMutation(m);

    bw.close();

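    // Start drain() in the background; the in-progress record should keep it blocked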
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean exception = new AtomicBoolean(false);
    ClientContext context =
        new ClientContext(
            inst, new Credentials("root", new PasswordToken("")), new ClientConfiguration());
    final ReplicationOperationsImpl roi = new ReplicationOperationsImpl(context);
    Thread t =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                try {
                  roi.drain("foo");
                } catch (Exception e) {
                  log.error("Got error", e);
                  exception.set(true);
                }
                done.set(true);
              }
            });

    t.start();

    // While the entry is still present, drain() must not have completed
    Assert.assertFalse(done.get());

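    // An updated status: closed, but begin < end, so bytes in [1000, 2000) still await replication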
    Status newStatus =
        Status.newBuilder()
            .setBegin(1000)
            .setEnd(2000)
            .setInfiniteEnd(false)
            .setClosed(true)
            .build();
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(ReplicationSection.getRowPrefix() + file1);
    m.put(ReplicationSection.COLF, tableId1, ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();

    // Updating the metadata entry's status doesn't unblock the drain
    Assert.assertFalse(done.get());

    // Update the replication table entry as well
    bw = ReplicationTable.getBatchWriter(conn);
    m = new Mutation(file1);
    m.put(StatusSection.NAME, tableId1, ProtobufUtil.toValue(newStatus));
    bw.addMutation(m);
    bw.flush();
    bw.close();

    try {
      t.join(5000);
    } catch (InterruptedException e) {
      Assert.fail("ReplicationOperations.drain did not complete");
    }

    // The updated, but still incomplete, status should not allow drain() to finish
    Assert.assertFalse(done.get());
    Assert.assertFalse(exception.get());
  }