public MiniAccumuloClusterImpl create(
      String testClassName,
      String testMethodName,
      AuthenticationToken token,
      MiniClusterConfigurationCallback configCallback,
      TestingKdc kdc)
      throws Exception {
    requireNonNull(token);
    checkArgument(
        token instanceof PasswordToken || token instanceof KerberosToken,
        "A PasswordToken or KerberosToken is required");

    String rootPasswd;
    if (token instanceof PasswordToken) {
      rootPasswd = new String(((PasswordToken) token).getPassword(), UTF_8);
    } else {
      rootPasswd = UUID.randomUUID().toString();
    }

    File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName);
    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd);

    // Enable native maps by default
    cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());

    Configuration coreSite = new Configuration(false);

    // Setup SSL and credential providers if the properties request such
    configureForEnvironment(
        cfg, getClass(), AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc);

    // Invoke the callback for tests to configure MAC before it starts
    configCallback.configureMiniCluster(cfg, coreSite);

    MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg);

    // Write out any configuration items to a file so HDFS will pick them up automatically (from the
    // classpath)
    if (coreSite.size() > 0) {
      File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml");
      if (csFile.exists()) {
        throw new RuntimeException(csFile + " already exists");
      }

      try (OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile))) {
        coreSite.writeXml(out);
      }
    }

    return miniCluster;
  }
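  // A minimal usage sketch, not part of the original source: one way a test might drive the
  // factory method above. The method name, callback body, and literal credentials are assumptions;
  // only create(), start(), getConnector(), and stop() are taken from this section.
  public void exampleUsage() throws Exception {
    MiniAccumuloClusterImpl cluster =
        create(
            "ExampleIT",
            "exampleUsage",
            new PasswordToken("secret"),
            new MiniClusterConfigurationCallback() {
              @Override
              public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
                cfg.setNumTservers(1); // hypothetical tweak applied before the cluster starts
              }
            },
            null); // no KDC: the PasswordToken branch above supplies the root password
    cluster.start();
    try {
      Connector conn = cluster.getConnector("root", new PasswordToken("secret"));
      // ... exercise the cluster through conn ...
    } finally {
      cluster.stop();
    }
  }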
Example 2
  @Override
  public void execute() throws MojoExecutionException {
    if (shouldSkip()) {
      return;
    }

    for (MiniAccumuloClusterImpl mac : StartMojo.runningClusters) {
      getLog().info("Stopping MiniAccumuloCluster: " + mac.getInstanceName());
      try {
        mac.stop();
        for (LogWriter log : mac.getLogWriters()) {
          log.flush();
        }
      } catch (Exception e) {
        throw new MojoExecutionException(
            "Unable to stop " + MiniAccumuloCluster.class.getSimpleName(), e);
      }
    }
  }
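  // shouldSkip() is referenced above but not shown. A minimal sketch, assuming the mojo exposes a
  // conventional Maven "skip" parameter (org.apache.maven.plugins.annotations.Parameter); the real
  // plugin's property name and field may differ.
  @Parameter(property = "accumulo.skip", defaultValue = "false")
  private boolean skip;

  protected boolean shouldSkip() {
    if (skip) {
      getLog().info("Skipping execution because the skip parameter is set");
    }
    return skip;
  }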
Example 3
 public static void deleteTest(Connector c, MiniAccumuloClusterImpl cluster) throws Exception {
   VerifyIngest.Opts vopts = new VerifyIngest.Opts();
   TestIngest.Opts opts = new TestIngest.Opts();
   vopts.rows = opts.rows = 1000;
   vopts.cols = opts.cols = 1;
   vopts.random = opts.random = 56;
   TestIngest.ingest(c, opts, BWOPTS);
   assertEquals(
       0,
       cluster
           .exec(
               TestRandomDeletes.class,
               "-u",
               "root",
               "-p",
               ROOT_PASSWORD,
               "-i",
               cluster.getInstanceName(),
               "-z",
               cluster.getZooKeepers())
           .waitFor());
   TestIngest.ingest(c, opts, BWOPTS);
   VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
 }
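 // BWOPTS and ROOT_PASSWORD are constants defined elsewhere in the test class or its base class. A
 // minimal sketch of the assumed declarations (BatchWriterOpts is the standard Accumulo CLI options
 // holder; the password value is a placeholder, not taken from the original class):
 private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
 private static final String ROOT_PASSWORD = "testRootPassword";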
Example 4
  static void runTest(Connector c, MiniAccumuloClusterImpl cluster)
      throws AccumuloException, AccumuloSecurityException, TableExistsException,
          TableNotFoundException, MutationsRejectedException, IOException, InterruptedException,
          NoSuchAlgorithmException {
    c.tableOperations().create(tablename);
    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
    for (int i = 0; i < 10; i++) {
      Mutation m = new Mutation("" + i);
      m.put(input_cf, input_cq, "row" + i);
      bw.addMutation(m);
    }
    bw.close();
    Process hash =
        cluster.exec(
            RowHash.class,
            Collections.singletonList(hadoopTmpDirArg),
            "-i",
            c.getInstance().getInstanceName(),
            "-z",
            c.getInstance().getZooKeepers(),
            "-u",
            "root",
            "-p",
            ROOT_PASSWORD,
            "-t",
            tablename,
            "--column",
            input_cfcq);
    assertEquals(0, hash.waitFor());

    Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
    s.fetchColumn(new Text(input_cf), new Text(output_cq));
    int i = 0;
    for (Entry<Key, Value> entry : s) {
      MessageDigest md = MessageDigest.getInstance("MD5");
      byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes(StandardCharsets.UTF_8)));
      assertEquals(new String(check, StandardCharsets.UTF_8), entry.getValue().toString());
      i++;
    }
  }
Example 5
 @Before
 public void checkCluster() {
   Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
   MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) getCluster();
   rootPath = mac.getConfig().getDir().getAbsolutePath();
 }
  @Test
  public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);

    peer1Cluster.start();

    try {
      Connector connMaster = getConnector();
      Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

      String peerClusterName = "peer";

      String peerUserName = "******";
      String peerPassword = "******";

      // Create a user on the peer for replication to use
      connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

      // Configure the credentials we should use to authenticate ourselves to the peer for
      // replication
      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

      // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
      connMaster
          .instanceOperations()
          .setProperty(
              Property.REPLICATION_PEERS.getKey() + peerClusterName,
              ReplicaSystemFactory.getPeerConfigurationValue(
                  AccumuloReplicaSystem.class,
                  AccumuloReplicaSystem.buildConfiguration(
                      peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));

      String masterTable1 = "master1",
          peerTable1 = "peer1",
          masterTable2 = "master2",
          peerTable2 = "peer2";

      connMaster.tableOperations().create(masterTable1);
      String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
      Assert.assertNotNull(masterTableId1);

      connMaster.tableOperations().create(masterTable2);
      String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
      Assert.assertNotNull(masterTableId2);

      connPeer.tableOperations().create(peerTable1);
      String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
      Assert.assertNotNull(peerTableId1);

      connPeer.tableOperations().create(peerTable2);
      String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
      Assert.assertNotNull(peerTableId2);

      // Give our replication user the ability to write to the tables
      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);

      // Replicate this table to the peerClusterName in a table with the peerTableId table id
      connMaster
          .tableOperations()
          .setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable1,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId1);

      connMaster
          .tableOperations()
          .setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable2,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId2);

      // Wait for zookeeper updates (configuration) to propagate
      sleepUninterruptibly(3, TimeUnit.SECONDS);

      // Write some data to table1
      BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable1 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
        }
        bw.addMutation(m);
      }

      bw.close();

      // Write some data to table2
      bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable2 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
        }
        bw.addMutation(m);
      }

      bw.close();

      log.info("Wrote all data to master cluster");

      while (!ReplicationTable.isOnline(connMaster)) {
        Thread.sleep(500);
      }

      for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
        cluster.killProcess(ServerType.TABLET_SERVER, proc);
      }

      cluster.exec(TabletServer.class);

      // Wait until we fully replicated something
      boolean fullyReplicated = false;
      for (int i = 0; i < 10 && !fullyReplicated; i++) {
        sleepUninterruptibly(timeoutFactor * 2, TimeUnit.SECONDS);

        Scanner s = ReplicationTable.getScanner(connMaster);
        WorkSection.limit(s);
        for (Entry<Key, Value> entry : s) {
          Status status = Status.parseFrom(entry.getValue().get());
          if (StatusUtil.isFullyReplicated(status)) {
            fullyReplicated = true;
          }
        }
      }

      Assert.assertTrue("Did not find any fully replicated status entries", fullyReplicated);

      long countTable = 0L;

      // Check a few times
      for (int i = 0; i < 10; i++) {
        countTable = 0L;
        for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
          countTable++;
          Assert.assertTrue(
              "Found unexpected key-value"
                  + entry.getKey().toStringNoTruncate()
                  + " "
                  + entry.getValue(),
              entry.getKey().getRow().toString().startsWith(masterTable1));
        }
        log.info("Found {} records in {}", countTable, peerTable1);
        if (0 < countTable) {
          break;
        }
        Thread.sleep(2000);
      }

      Assert.assertTrue("Did not find any records in " + peerTable1 + " on peer", countTable > 0);

      for (int i = 0; i < 10; i++) {
        countTable = 0L;
        for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
          countTable++;
          Assert.assertTrue(
              "Found unexpected key-value"
                  + entry.getKey().toStringNoTruncate()
                  + " "
                  + entry.getValue(),
              entry.getKey().getRow().toString().startsWith(masterTable2));
        }

        log.info("Found {} records in {}", countTable, peerTable2);
        if (0 < countTable) {
          break;
        }
        Thread.sleep(2000);
      }
      Assert.assertTrue("Did not find any records in " + peerTable2 + " on peer", countTable > 0);

    } finally {
      peer1Cluster.stop();
    }
  }
  @Test
  public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);

    peerCluster.start();

    Connector connMaster = getConnector();
    Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

    String peerUserName = "******";
    String peerPassword = "******";

    // Create a user on the peer for replication to use
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

    String peerClusterName = "peer";

    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster
        .instanceOperations()
        .setProperty(
            Property.REPLICATION_PEERS.getKey() + peerClusterName,
            ReplicaSystemFactory.getPeerConfigurationValue(
                AccumuloReplicaSystem.class,
                AccumuloReplicaSystem.buildConfiguration(
                    peerCluster.getInstanceName(), peerCluster.getZooKeepers())));

    // Configure the credentials we should use to authenticate ourselves to the peer for replication
    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster
        .instanceOperations()
        .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

    String masterTable = "master", peerTable = "peer";

    connMaster.tableOperations().create(masterTable);
    String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
    Assert.assertNotNull(masterTableId);

    connPeer.tableOperations().create(peerTable);
    String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
    Assert.assertNotNull(peerTableId);

    // Give our replication user the ability to write to the table
    connPeer
        .securityOperations()
        .grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);

    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster
        .tableOperations()
        .setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster
        .tableOperations()
        .setProperty(
            masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);

    // Write some data to the master table
    BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
    for (int rows = 0; rows < 5000; rows++) {
      Mutation m = new Mutation(Integer.toString(rows));
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }

    bw.close();

    log.info("Wrote all data to master cluster");

    Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
    for (String s : files) {
      log.info("Found referenced file for " + masterTable + ": " + s);
    }

    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }

    cluster.exec(TabletServer.class);

    Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());

    for (Entry<Key, Value> kv :
        connMaster.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) {
      log.debug(
          kv.getKey().toStringNoTruncate()
              + " "
              + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
    }

    connMaster.replicationOperations().drain(masterTable, files);

    Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY),
        peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
    Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
    Assert.assertTrue("No data in master table", masterIter.hasNext());
    Assert.assertTrue("No data in peer table", peerIter.hasNext());
    while (masterIter.hasNext() && peerIter.hasNext()) {
      Entry<Key, Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
      Assert.assertEquals(
          peerEntry.getKey() + " was not equal to " + peerEntry.getKey(),
          0,
          masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
      Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
    }

    Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
    Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());

    peerCluster.stop();
  }
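  // The tests in this class repeat the same sequence to bounce the tablet server and force the
  // write-ahead log to close. A hypothetical helper that could factor out that sequence; the body
  // is copied from the loops above, only the method itself is new.
  private void restartTabletServer() throws Exception {
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    cluster.exec(TabletServer.class);
  }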
  @Test
  public void dataReplicatedToCorrectTable() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);

    peer1Cluster.start();

    try {
      Connector connMaster = getConnector();
      Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

      String peerClusterName = "peer";
      String peerUserName = "******", peerPassword = "******";

      // Create local user
      connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

      // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
      connMaster
          .instanceOperations()
          .setProperty(
              Property.REPLICATION_PEERS.getKey() + peerClusterName,
              ReplicaSystemFactory.getPeerConfigurationValue(
                  AccumuloReplicaSystem.class,
                  AccumuloReplicaSystem.buildConfiguration(
                      peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));

      String masterTable1 = "master1",
          peerTable1 = "peer1",
          masterTable2 = "master2",
          peerTable2 = "peer2";

      // Create tables
      connMaster.tableOperations().create(masterTable1);
      String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
      Assert.assertNotNull(masterTableId1);

      connMaster.tableOperations().create(masterTable2);
      String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
      Assert.assertNotNull(masterTableId2);

      connPeer.tableOperations().create(peerTable1);
      String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
      Assert.assertNotNull(peerTableId1);

      connPeer.tableOperations().create(peerTable2);
      String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
      Assert.assertNotNull(peerTableId2);

      // Grant write permission
      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);

      // Replicate this table to the peerClusterName in a table with the peerTableId table id
      connMaster
          .tableOperations()
          .setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable1,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId1);

      connMaster
          .tableOperations()
          .setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable2,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId2);

      // Wait for zookeeper updates (configuration) to propagate
      sleepUninterruptibly(3, TimeUnit.SECONDS);

      // Write some data to table1
      BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
      long masterTable1Records = 0L;
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable1 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
          masterTable1Records++;
        }
        bw.addMutation(m);
      }

      bw.close();

      // Write some data to table2
      bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
      long masterTable2Records = 0L;
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable2 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
          masterTable2Records++;
        }
        bw.addMutation(m);
      }

      bw.close();

      log.info("Wrote all data to master cluster");

      Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1),
          filesFor2 = connMaster.replicationOperations().referencedFiles(masterTable2);

      while (!ReplicationTable.isOnline(connMaster)) {
        Thread.sleep(500);
      }

      // Restart the tserver to force a close on the WAL
      for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
        cluster.killProcess(ServerType.TABLET_SERVER, proc);
      }
      cluster.exec(TabletServer.class);

      log.info("Restarted the tserver");

      // Read the data -- the tserver is back up and running
      Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());

      // Wait for both tables to be replicated
      log.info("Waiting for {} for {}", filesFor1, masterTable1);
      connMaster.replicationOperations().drain(masterTable1, filesFor1);

      log.info("Waiting for {} for {}", filesFor2, masterTable2);
      connMaster.replicationOperations().drain(masterTable2, filesFor2);

      long countTable = 0L;
      for (int i = 0; i < 5; i++) {
        countTable = 0L;
        for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
          countTable++;
          Assert.assertTrue(
              "Found unexpected key-value"
                  + entry.getKey().toStringNoTruncate()
                  + " "
                  + entry.getValue(),
              entry.getKey().getRow().toString().startsWith(masterTable1));
        }

        log.info("Found {} records in {}", countTable, peerTable1);

        if (masterTable1Records != countTable) {
          log.warn(
              "Did not find {} expected records in {}, only found {}",
              masterTable1Records,
              peerTable1,
              countTable);
        }
      }

      Assert.assertEquals(masterTable1Records, countTable);

      for (int i = 0; i < 5; i++) {
        countTable = 0L;
        for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
          countTable++;
          Assert.assertTrue(
              "Found unexpected key-value"
                  + entry.getKey().toStringNoTruncate()
                  + " "
                  + entry.getValue(),
              entry.getKey().getRow().toString().startsWith(masterTable2));
        }

        log.info("Found {} records in {}", countTable, peerTable2);

        if (masterTable2Records != countTable) {
          log.warn(
              "Did not find {} expected records in {}, only found {}",
              masterTable2Records,
              peerTable2,
              countTable);
        }
      }

      Assert.assertEquals(masterTable2Records, countTable);

    } finally {
      peer1Cluster.stop();
    }
  }
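  // The verification loops above repeat the same scan-and-count check against the peer. A
  // hypothetical helper capturing that check; the scanner usage and assertion message mirror the
  // loops above, only the method itself is new.
  private long countRowsWithPrefix(Connector conn, String table, String prefix) throws Exception {
    long count = 0;
    for (Entry<Key, Value> entry : conn.createScanner(table, Authorizations.EMPTY)) {
      Assert.assertTrue(
          "Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(),
          entry.getKey().getRow().toString().startsWith(prefix));
      count++;
    }
    return count;
  }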
  @Test
  public void dataWasReplicatedToThePeer() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);

    peerCluster.start();

    try {
      final Connector connMaster = getConnector();
      final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

      ReplicationTable.setOnline(connMaster);

      String peerUserName = "******", peerPassword = "******";

      String peerClusterName = "peer";

      connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
      connMaster
          .instanceOperations()
          .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

      // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
      connMaster
          .instanceOperations()
          .setProperty(
              Property.REPLICATION_PEERS.getKey() + peerClusterName,
              ReplicaSystemFactory.getPeerConfigurationValue(
                  AccumuloReplicaSystem.class,
                  AccumuloReplicaSystem.buildConfiguration(
                      peerCluster.getInstanceName(), peerCluster.getZooKeepers())));

      final String masterTable = "master", peerTable = "peer";

      connMaster.tableOperations().create(masterTable);
      String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
      Assert.assertNotNull(masterTableId);

      connPeer.tableOperations().create(peerTable);
      String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
      Assert.assertNotNull(peerTableId);

      connPeer
          .securityOperations()
          .grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);

      // Replicate this table to the peerClusterName in a table with the peerTableId table id
      connMaster
          .tableOperations()
          .setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
      connMaster
          .tableOperations()
          .setProperty(
              masterTable,
              Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName,
              peerTableId);

      // Wait for zookeeper updates (configuration) to propagate
      sleepUninterruptibly(3, TimeUnit.SECONDS);

      // Write some data to the master table
      BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
      for (int rows = 0; rows < 5000; rows++) {
        Mutation m = new Mutation(Integer.toString(rows));
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
        }
        bw.addMutation(m);
      }

      bw.close();

      log.info("Wrote all data to master cluster");

      final Set<String> filesNeedingReplication =
          connMaster.replicationOperations().referencedFiles(masterTable);

      for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
        cluster.killProcess(ServerType.TABLET_SERVER, proc);
      }
      cluster.exec(TabletServer.class);

      log.info("TabletServer restarted");
      Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
      log.info("TabletServer is online");

      log.info("");
      log.info("Fetching metadata records:");
      for (Entry<Key, Value> kv :
          connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
          log.info(
              kv.getKey().toStringNoTruncate()
                  + " "
                  + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        } else {
          log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
        }
      }

      log.info("");
      log.info("Fetching replication records:");
      for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
        log.info(
            kv.getKey().toStringNoTruncate()
                + " "
                + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      }

      Future<Boolean> future =
          executor.submit(
              new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                  connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
                  log.info("Drain completed");
                  return true;
                }
              });

      long timeoutSeconds = timeoutFactor * 30;
      try {
        future.get(timeoutSeconds, TimeUnit.SECONDS);
      } catch (TimeoutException e) {
        future.cancel(true);
        Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds");
      }

      log.info("drain completed");

      log.info("");
      log.info("Fetching metadata records:");
      for (Entry<Key, Value> kv :
          connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
          log.info(
              kv.getKey().toStringNoTruncate()
                  + " "
                  + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        } else {
          log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
        }
      }

      log.info("");
      log.info("Fetching replication records:");
      for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
        log.info(
            kv.getKey().toStringNoTruncate()
                + " "
                + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      }

      Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY),
          peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
      Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
      Entry<Key, Value> masterEntry = null, peerEntry = null;
      while (masterIter.hasNext() && peerIter.hasNext()) {
        masterEntry = masterIter.next();
        peerEntry = peerIter.next();
        Assert.assertEquals(
            masterEntry.getKey() + " was not equal to " + peerEntry.getKey(),
            0,
            masterEntry
                .getKey()
                .compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
        Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
      }

      log.info("Last master entry: " + masterEntry);
      log.info("Last peer entry: " + peerEntry);

      Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
      Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
    } finally {
      peerCluster.stop();
    }
  }
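  // Each test above builds and starts its peer cluster with the same boilerplate. A hypothetical
  // helper the class could use instead; every call is taken from the setup repeated in the tests
  // above.
  private MiniAccumuloClusterImpl startPeerCluster() throws Exception {
    MiniAccumuloConfigImpl peerCfg =
        new MiniAccumuloConfigImpl(
            createTestDir(
                this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
            ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
    peerCluster.start();
    return peerCluster;
  }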