@Test(timeout = 300000)
  public void testCyclicReplication() throws Exception {
    LOG.info("testCyclicReplication");
    utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);
    ReplicationAdmin admin3 = new ReplicationAdmin(conf3);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    admin1.addPeer("1", clusterKey2);
    admin2.addPeer("1", clusterKey3);
    admin3.addPeer("1", clusterKey1);

    // put "row" and wait 'til it got around
    putAndWait(row, famName, htable1, htable3);
    // it should have passed through table2
    check(row, famName, htable2);

    putAndWait(row1, famName, htable2, htable1);
    // it should have passed through table3
    check(row1, famName, htable3);
    putAndWait(row2, famName, htable3, htable2);
    // it should have passed through table1
    check(row2, famName, htable1);

    deleteAndWait(row, htable1, htable3);
    deleteAndWait(row1, htable2, htable1);
    deleteAndWait(row2, htable3, htable2);

    assertEquals("Puts were replicated back ", 3, getCount(htable1, put));
    assertEquals("Puts were replicated back ", 3, getCount(htable2, put));
    assertEquals("Puts were replicated back ", 3, getCount(htable3, put));
    assertEquals("Deletes were replicated back ", 3, getCount(htable1, delete));
    assertEquals("Deletes were replicated back ", 3, getCount(htable2, delete));
    assertEquals("Deletes were replicated back ", 3, getCount(htable3, delete));
    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
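
  // putAndWait, deleteAndWait and check are helpers of this test class that
  // are not shown in this snippet. The sketches below are plausible minimal
  // versions against the old HTable client API (source table first, then the
  // target tables to poll), not necessarily the actual implementations.
  private void putAndWait(byte[] row, byte[] fam, HTable source, HTable... targets)
      throws Exception {
    Put put = new Put(row);
    put.add(fam, row, row);
    source.put(put);
    // Poll each target until the row shows up or we give up
    for (HTable target : targets) {
      for (int i = 0; target.get(new Get(row)).size() == 0; i++) {
        if (i >= 100) fail("Waited too long for replication of " + Bytes.toString(row));
        Thread.sleep(500);
      }
    }
  }

  private void deleteAndWait(byte[] row, HTable source, HTable... targets) throws Exception {
    Delete del = new Delete(row);
    source.delete(del);
    // Poll each target until the row is gone
    for (HTable target : targets) {
      for (int i = 0; target.get(new Get(row)).size() > 0; i++) {
        if (i >= 100) fail("Waited too long for delete replication of " + Bytes.toString(row));
        Thread.sleep(500);
      }
    }
  }

  private void check(byte[] row, byte[] fam, HTable table) throws IOException {
    Result res = table.get(new Get(row));
    // The tests write the row key as both qualifier and value under fam
    assertTrue("Row " + Bytes.toString(row) + " is missing", res.getValue(fam, row) != null);
  }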
    protected void batchUpdate(DeleteBuffer kvBuff, boolean flushCommits) throws HiveException {
      try {
        HTable htable = HTableFactory.getHTable(configMap);
        // Disable auto flush when specified so in the config map
        if (disableAutoFlush) htable.setAutoFlushTo(false);

        // Override the write buffer size when the config map specifies one
        if (writeBufferSizeBytes > 0) htable.setWriteBufferSize(writeBufferSizeBytes);

        numDeleteRecords += kvBuff.deleteList.size();
        if (kvBuff.deleteList.size() > 0) {
          LOG.info(
              " Doing Batch Delete "
                  + kvBuff.deleteList.size()
                  + " records; Total delete records = "
                  + numDeleteRecords
                  + " ; Start = "
                  + (new String(kvBuff.deleteList.get(0).getRow()))
                  + " ; End = "
                  + (new String(kvBuff.deleteList.get(kvBuff.deleteList.size() - 1).getRow())));
        } else {
          LOG.info(" Doing Batch Delete with ZERO records");
        }

        getReporter()
            .getCounter(BatchDeleteUDAFCounter.NUMBER_OF_SUCCESSFUL_DELETES)
            .increment(kvBuff.deleteList.size());
        getReporter().getCounter(BatchDeleteUDAFCounter.NUMBER_OF_BATCH_OPERATIONS).increment(1);

        htable.delete(kvBuff.deleteList);
        // Flush after the deletes have been issued; flushing beforehand would
        // not push this batch's mutations to the region servers
        if (flushCommits) htable.flushCommits();
        kvBuff.deleteList.clear();
      } catch (IOException e) {
        throw new HiveException(e);
      }
    }
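
    // DeleteBuffer is referenced above but not shown in this snippet. Below is
    // a minimal sketch of the shape batchUpdate relies on -- a mutable holder
    // around a List<Delete> -- assuming java.util imports; it is a stand-in,
    // not the actual Hive UDAF buffer class.
    static class DeleteBuffer {
      // Pending Delete mutations accumulated until the next batch flush
      final List<Delete> deleteList = new ArrayList<Delete>();

      void addKey(byte[] rowKey) {
        deleteList.add(new Delete(rowKey));
      }
    }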
 @SuppressWarnings("resource")
 private HTable[] getHTablesOnClusters(byte[] tableName) throws Exception {
   int numClusters = utilities.length;
   HTable[] htables = new HTable[numClusters];
   for (int i = 0; i < numClusters; i++) {
     HTable htable = new HTable(configurations[i], tableName);
     htable.setWriteBufferSize(1024);
     htables[i] = htable;
   }
   return htables;
 }
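
 // A plausible caller-side pattern for the method above (hypothetical helper,
 // not from the original class): the @SuppressWarnings("resource") suggests
 // the caller owns the returned tables and must close them when done.
 private void withTablesClosed(byte[] tableName) throws Exception {
   HTable[] htables = getHTablesOnClusters(tableName);
   try {
     // ... exercise replication across the clusters here ...
   } finally {
     for (HTable htable : htables) {
       htable.close();
     }
   }
 }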
  /**
   * Add a row to a table in each cluster, check it's replicated, delete it, and check it's gone.
   * Also check that the puts and deletes are not replicated back to the originating cluster.
   */
  @Test(timeout = 300000)
  public void testSimplePutDelete() throws Exception {
    LOG.info("testSimplePutDelete");
    utility1.startMiniCluster();
    utility2.startMiniCluster();

    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);

    // set M-M
    admin1.addPeer("1", clusterKey2);
    admin2.addPeer("1", clusterKey1);

    // add rows to both clusters,
    // make sure they are both replicated
    putAndWait(row, famName, htable1, htable2);
    putAndWait(row1, famName, htable2, htable1);

    // make sure "row" did not get replicated back.
    assertEquals("Puts were replicated back ", 2, getCount(htable1, put));

    // delete "row" and wait
    deleteAndWait(row, htable1, htable2);

    // make sure puts did not get replicated back to the 2nd cluster
    assertEquals("Puts were replicated back ", 2, getCount(htable2, put));

    deleteAndWait(row1, htable2, htable1);

    assertEquals("Deletes were replicated back ", 2, getCount(htable1, delete));
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
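
  // getCount(HTable, byte[]) is used by the assertions above but not shown in
  // this snippet, and how the real helper counts is not visible here. The
  // stand-in below approximates it client-side with a raw, all-versions scan
  // that counts put vs. delete KeyValues -- an assumption about the behavior,
  // not the original helper.
  private int getCountRawScan(HTable table, boolean countDeleteMarkers) throws IOException {
    Scan scan = new Scan();
    scan.setRaw(true);      // raw scans also return delete markers
    scan.setMaxVersions();  // return every stored version, not just the latest
    int count = 0;
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result res : scanner) {
        for (KeyValue kv : res.raw()) {
          if (kv.isDelete() == countDeleteMarkers) {
            count++;
          }
        }
      }
    } finally {
      scanner.close();
    }
    return count;
  }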
  @Test(timeout = 300000)
  public void testMultiSlaveReplication() throws Exception {
    LOG.info("testCyclicReplication");
    MiniHBaseCluster master = utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    admin1.addPeer("1", utility2.getClusterKey());

    // put "row" and wait 'til it got around, then delete
    putAndWait(row, famName, htable1, htable2);
    deleteAndWait(row, htable1, htable2);
    // check it wasn't replicated to cluster 3
    checkRow(row, 0, htable3);

    putAndWait(row2, famName, htable1, htable2);

    // now roll the region server's logs
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());
    // after the log was rolled put a new row
    putAndWait(row3, famName, htable1, htable2);

    admin1.addPeer("2", utility3.getClusterKey());

    // put a row, check it was replicated to all clusters
    putAndWait(row1, famName, htable1, htable2, htable3);
    // delete and verify
    deleteAndWait(row1, htable1, htable2, htable3);

    // make sure row2 did not get replicated after
    // cluster 3 was added
    checkRow(row2, 0, htable3);

    // row3 will get replicated, because it was in the
    // latest log
    checkRow(row3, 1, htable3);

    Put p = new Put(row);
    p.add(famName, row, row);
    htable1.put(p);
    // now roll the logs again
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());

    // cleanup "row2", also conveniently use this to wait replication
    // to finish
    deleteAndWait(row2, htable1, htable2, htable3);
    // Even if the log was rolled in the middle of the replication,
    // "row" is still replicated.
    checkRow(row, 1, htable2, htable3);

    // cleanup the rest
    deleteAndWait(row, htable1, htable2, htable3);
    deleteAndWait(row3, htable1, htable2, htable3);

    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
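
  // checkRow(row, expectedCount, tables...) is used above but not shown in
  // this snippet; a minimal sketch consistent with how the test calls it
  // (assert the row holds exactly expectedCount cells on each table) -- an
  // assumption, not necessarily the original helper.
  private void checkRow(byte[] row, int expectedCount, HTable... tables) throws IOException {
    Get get = new Get(row);
    for (HTable table : tables) {
      Result res = table.get(get);
      assertEquals("Wrong number of cells for " + Bytes.toString(row), expectedCount, res.size());
    }
  }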
  /** @throws java.lang.Exception */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // smaller log roll size to trigger more events
    conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location is
    // different from the default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");

    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);

    admin.addPeer("2", utility2.getClusterKey());

    LOG.info("Setup second Zk");
    CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);

    HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setMaxVersions(3);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
    HBaseAdmin admin1 = new HBaseAdmin(conf1);
    HBaseAdmin admin2 = new HBaseAdmin(conf2);
    admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    htable2 = new HTable(conf2, tableName);
  }
 /** Pass-through override; delegates to the parent table's implementation. */
 @Override
 public void setWriteBufferSize(long writeBufferSize) throws IOException {
   super.setWriteBufferSize(writeBufferSize);
 }