@Test(timeout = 30000)
  public void testCreateDeleteTable() throws IOException {
    // Create table then get the single region for our new table.
    HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
    hdt.setRegionReplication(NB_SERVERS);
    hdt.addCoprocessor(SlowMeCopro.class.getName());
    Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

    Put p = new Put(row);
    p.addColumn(f, row, row);
    table.put(p);

    Get g = new Get(row);
    Result r = table.get(g);
    Assert.assertFalse(r.isStale());

    try {
      // But if we ask for stale we will get it
      SlowMeCopro.cdl.set(new CountDownLatch(1));
      g = new Get(row);
      g.setConsistency(Consistency.TIMELINE);
      r = table.get(g);
      Assert.assertTrue(r.isStale());
      SlowMeCopro.cdl.get().countDown();
    } finally {
      SlowMeCopro.cdl.get().countDown();
      SlowMeCopro.sleepTime.set(0);
    }

    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
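SlowMeCopro, referenced above but defined elsewhere in the suite, stalls reads on the primary replica so that a TIMELINE read falls back to a secondary and comes back stale. A minimal sketch of the idea, assuming the static cdl/sleepTime fields the tests use (not the verbatim coprocessor; needs java.util.concurrent.* and the HBase coprocessor imports):

public static class SlowMeCopro extends BaseRegionObserver {
  static final AtomicLong sleepTime = new AtomicLong(0);
  static final AtomicReference<CountDownLatch> cdl =
      new AtomicReference<CountDownLatch>(new CountDownLatch(0));

  @Override
  public void preGetOp(
      final ObserverContext<RegionCoprocessorEnvironment> e, final Get get,
      final List<Cell> results) throws IOException {
    // Only stall the primary replica; secondaries answer immediately,
    // which is what makes the TIMELINE get return a stale result.
    if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
      try {
        if (sleepTime.get() > 0) {
          Thread.sleep(sleepTime.get());
        }
        cdl.get().await(1, TimeUnit.MINUTES);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }
  }
}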
 @Test
 public void testDeleteTable() throws Exception {
   String namespace = prefix + "_dummy";
   NamespaceDescriptor nspDesc =
       NamespaceDescriptor.create(namespace)
           .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "100")
           .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "3")
           .build();
   ADMIN.createNamespace(nspDesc);
   assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(namespace));
   NamespaceTableAndRegionInfo stateInfo = getNamespaceState(nspDesc.getName());
   assertNotNull("Namespace state found null for " + namespace, stateInfo);
   HTableDescriptor tableDescOne =
       new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1"));
   HTableDescriptor tableDescTwo =
       new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2"));
   ADMIN.createTable(tableDescOne);
   ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5);
   stateInfo = getNamespaceState(nspDesc.getName());
   assertNotNull("Namespace state found to be null.", stateInfo);
   assertEquals(2, stateInfo.getTables().size());
   assertEquals(5, stateInfo.getRegionCountOfTable(tableDescTwo.getTableName()));
   assertEquals(6, stateInfo.getRegionCount());
   ADMIN.disableTable(tableDescOne.getTableName());
   deleteTable(tableDescOne.getTableName());
   stateInfo = getNamespaceState(nspDesc.getName());
   assertNotNull("Namespace state found to be null.", stateInfo);
   assertEquals(5, stateInfo.getRegionCount());
   assertEquals(1, stateInfo.getTables().size());
   ADMIN.disableTable(tableDescTwo.getTableName());
   deleteTable(tableDescTwo.getTableName());
   ADMIN.deleteNamespace(namespace);
   stateInfo = getNamespaceState(namespace);
   assertNull("Namespace state not found to be null.", stateInfo);
 }
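As a sanity check right after the createNamespace call above, the stored quota settings can be read back off the descriptor; a small sketch (illustrative, not part of the original test):

   NamespaceDescriptor stored = ADMIN.getNamespaceDescriptor(namespace);
   assertEquals("100", stored.getConfigurationValue(TableNamespaceManager.KEY_MAX_REGIONS));
   assertEquals("3", stored.getConfigurationValue(TableNamespaceManager.KEY_MAX_TABLES));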
 @After
 public void tearDown() throws Exception {
   ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
   for (HTableDescriptor htd : UTIL.getHBaseAdmin().listTables()) {
     LOG.info("Tear down, remove table=" + htd.getTableName());
     UTIL.deleteTable(htd.getTableName());
   }
 }
 @After
 public void cleanup() throws Exception {
   for (HTableDescriptor table : ADMIN.listTables()) {
     ADMIN.disableTable(table.getTableName());
     deleteTable(table.getTableName());
   }
   for (NamespaceDescriptor ns : ADMIN.listNamespaceDescriptors()) {
     if (ns.getName().startsWith(prefix)) {
       ADMIN.deleteNamespace(ns.getName());
     }
   }
   assertTrue(
       "Quota manager not enabled",
       UTIL.getHBaseCluster().getMaster().getMasterQuotaManager().isQuotaEnabled());
 }
 /**
  * Before each test, use a different HRI, so the different tests don't interfere with each other.
  * This allows us to use just a single ZK cluster for the whole suite.
  */
 @Before
 public void setupHRI() {
   TEST_HRI =
       new HRegionInfo(
           TEST_HTD.getTableName(), Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
   testIndex++;
 }
 /**
  * Test that if we fail a flush, abort gets set on close.
  *
  * @see <a href="https://issues.apache.org/jira/browse/HBASE-4270">HBASE-4270</a>
  * @throws IOException
  * @throws NodeExistsException
  * @throws KeeperException
  */
 @Test
 public void testFailedFlushAborts() throws IOException, NodeExistsException, KeeperException {
   final Server server = new MockServer(HTU, false);
   final RegionServerServices rss = HTU.createMockRegionServerService();
   HTableDescriptor htd = TEST_HTD;
   final HRegionInfo hri =
        new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
   HRegion region = HTU.createLocalHRegion(hri, htd);
   try {
     assertNotNull(region);
      // Spy on the region so we can throw an exception when close is called.
      HRegion spy = Mockito.spy(region);
      final boolean abort = false;
      // Use the do-family: Mockito.when(spy.close(abort)) would invoke the real close()
      // on the spy while setting up the stub.
      Mockito.doThrow(new RuntimeException("Mocked failed close!")).when(spy).close(abort);
     // The CloseRegionHandler will try to get an HRegion that corresponds
     // to the passed hri -- so insert the region into the online region Set.
     rss.addToOnlineRegions(spy);
     // Assert the Server is NOT stopped before we call close region.
     assertFalse(server.isStopped());
     CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, false, -1);
     boolean throwable = false;
     try {
       handler.process();
     } catch (Throwable t) {
       throwable = true;
     } finally {
       assertTrue(throwable);
       // Abort calls stop so stopped flag should be set.
       assertTrue(server.isStopped());
     }
   } finally {
     HRegion.closeHRegion(region);
   }
 }
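Stubbing a spy is a recurring source of subtle test bugs, which is why the test above uses the do-family. A minimal, self-contained illustration (the List types here are just for demonstration):

  List<String> spyList = Mockito.spy(new ArrayList<String>());
  // when(spyList.get(0)).thenReturn("x") would invoke the real get(0) during
  // stubbing and throw IndexOutOfBoundsException on the empty list.
  // The do-family stubs without calling the real method:
  Mockito.doReturn("x").when(spyList).get(0);
  Mockito.doThrow(new RuntimeException("boom")).when(spyList).clear();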
 private HRegion openRegion(
     final FileSystem fs, final Path dir, final HTableDescriptor htd, final HLog hlog)
     throws IOException {
   // Initialize HRegion
   HRegionInfo regionInfo = new HRegionInfo(htd.getTableName());
   return HRegion.createHRegion(regionInfo, dir, getConf(), htd, hlog);
 }
  private static void createTable() throws Exception {
    try {
      // HBaseConfiguration.create() already loads hbase-site.xml from the classpath.
      Configuration configuration = HBaseConfiguration.create();
      HBaseAdmin.checkHBaseAvailable(configuration);

      // Close the Connection and Admin deterministically when done.
      try (Connection connection = ConnectionFactory.createConnection(configuration);
          Admin admin = connection.getAdmin()) {

        // Build the table descriptor and add its column family.
        HTableDescriptor stockTableDesc =
            new HTableDescriptor(TableName.valueOf(Constants.STOCK_DATES_TABLE));
        HColumnDescriptor stock_0414 = new HColumnDescriptor(Constants.STOCK_DATES_CF);
        stockTableDesc.addFamily(stock_0414);

        // Create the table through the admin if it does not exist yet.
        if (!admin.tableExists(stockTableDesc.getTableName())) {
          admin.createTable(stockTableDesc);
          System.out.println("Stock table created !!!");
        }
      }
    } catch (ServiceException e) {
      log.error("Error occurred while creating HBase tables", e);
      throw new Exception("Error occurred while creating HBase tables", e);
    }
  }
  private void runTest(
      String testName,
      HTableDescriptor htd,
      BloomType bloomType,
      boolean preCreateTable,
      byte[][] tableSplitKeys,
      byte[][][] hfileRanges)
      throws Exception {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(
          util.getConfiguration(),
          fs,
          new Path(familyDir, "hfile_" + hfileIdx++),
          FAMILY,
          QUALIFIER,
          from,
          to,
          1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
    String[] args = {dir.toString(), tableName.toString()};
    loader.run(args);

    Table table = new HTable(util.getConfiguration(), tableName);
    try {
      assertEquals(expectedRows, util.countRows(table));
    } finally {
      table.close();
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue(
            "Folder=" + file.getPath() + " is not cleaned up.",
            !file.getPath().getName().equals("DONOTERASE"));
      }
    }

    util.deleteTable(tableName);
  }
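For reference, hfileRanges is a list of [from, to] key pairs, one HFile per pair. A hypothetical invocation of the method above (table name and keys are made up for illustration):

   byte[][][] hfileRanges =
       new byte[][][] {
         {Bytes.toBytes("aaaa"), Bytes.toBytes("cccc")},
         {Bytes.toBytes("dddd"), Bytes.toBytes("ffff")},
       };
   byte[][] tableSplitKeys = new byte[][] {Bytes.toBytes("ddd")};
   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRunTest"));
   htd.addFamily(new HColumnDescriptor(FAMILY));
   runTest("testRunTest", htd, BloomType.NONE, true, tableSplitKeys, hfileRanges);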
  @Test(timeout = 120000)
  public void testChangeTable() throws Exception {
    HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable");
    hdt.setRegionReplication(NB_SERVERS);
    hdt.addCoprocessor(SlowMeCopro.class.getName());
    Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

    // basic test: it should work.
    Put p = new Put(row);
    p.addColumn(f, row, row);
    table.put(p);

    Get g = new Get(row);
    Result r = table.get(g);
    Assert.assertFalse(r.isStale());

    // Add a CF, it should work.
    HTableDescriptor bHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
    HColumnDescriptor hcd = new HColumnDescriptor(row);
    hdt.addFamily(hcd);
    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.getHBaseAdmin().modifyTable(hdt.getTableName(), hdt);
    HTU.getHBaseAdmin().enableTable(hdt.getTableName());
    HTableDescriptor nHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
    Assert.assertEquals(
        "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
        bHdt.getColumnFamilies().length + 1,
        nHdt.getColumnFamilies().length);

    p = new Put(row);
    p.addColumn(row, row, row);
    table.put(p);

    g = new Get(row);
    r = table.get(g);
    Assert.assertFalse(r.isStale());

    try {
      SlowMeCopro.cdl.set(new CountDownLatch(1));
      g = new Get(row);
      g.setConsistency(Consistency.TIMELINE);
      r = table.get(g);
      Assert.assertTrue(r.isStale());
    } finally {
      SlowMeCopro.cdl.get().countDown();
      SlowMeCopro.sleepTime.set(0);
    }

    Admin admin = HTU.getHBaseAdmin();
    nHdt = admin.getTableDescriptor(hdt.getTableName());
    Assert.assertEquals(
        "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
        bHdt.getColumnFamilies().length + 1,
        nHdt.getColumnFamilies().length);

    admin.disableTable(hdt.getTableName());
    admin.deleteTable(hdt.getTableName());
    admin.close();
  }
 @Test(timeout = 60000)
 public void testTableOperations() throws Exception {
   String nsp = prefix + "_np2";
   NamespaceDescriptor nspDesc =
       NamespaceDescriptor.create(nsp)
           .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
           .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2")
           .build();
   ADMIN.createNamespace(nspDesc);
   assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
   assertEquals(3, ADMIN.listNamespaceDescriptors().length);
   HTableDescriptor tableDescOne =
       new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1"));
   HTableDescriptor tableDescTwo =
       new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2"));
   HTableDescriptor tableDescThree =
       new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3"));
   ADMIN.createTable(tableDescOne);
   boolean constraintViolated = false;
   try {
     ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5);
   } catch (Exception exp) {
     assertTrue(exp instanceof IOException);
     constraintViolated = true;
   } finally {
     assertTrue(
         "Constraint not violated for table " + tableDescTwo.getTableName(), constraintViolated);
   }
   ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
   NamespaceTableAndRegionInfo nspState = getQuotaManager().getState(nsp);
   assertNotNull(nspState);
   assertEquals(2, nspState.getTables().size());
   assertEquals(5, nspState.getRegionCount());
   constraintViolated = false;
   try {
     ADMIN.createTable(tableDescThree);
   } catch (Exception exp) {
     assertTrue(exp instanceof IOException);
     constraintViolated = true;
   } finally {
     assertTrue(
         "Constraint not violated for table " + tableDescThree.getTableName(), constraintViolated);
   }
 }
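The IOException asserted above is typically a QuotaExceededException raised by the master's namespace auditor. If your HBase version surfaces that type directly to the client, the check can be tightened; a sketch:

   try {
     ADMIN.createTable(tableDescThree);
     fail("Table creation should have exceeded the namespace table quota");
   } catch (QuotaExceededException expected) {
     // The max-tables quota (2) for the namespace was enforced.
   }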
  protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey)
      throws IOException {

    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);

    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      regions = rl.getAllRegionLocations();
    }

    for (HRegionLocation e : regions) {
      HRegionInfo hri = e.getRegionInfo();
      ServerName hsa = e.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
        fs.delete(p, true);

        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + hsa.toString());
    }

    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
  }
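dumpMeta, called above but not shown, is assumed to log the hbase:meta rows for the table; a sketch of such a helper:

   private void dumpMeta(HTableDescriptor htd) throws IOException {
     // Log each meta row key belonging to the table.
     for (byte[] metaRow : TEST_UTIL.getMetaTableRows(htd.getTableName())) {
       LOG.info(Bytes.toString(metaRow));
     }
   }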
 @Override
 public void preModifyTableHandler(
     ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName, HTableDescriptor htd)
     throws IOException {
   HTableDescriptor oldDesc =
       ctx.getEnvironment().getMasterServices().getTableDescriptors().get(tableName);
   if (oldDesc.getValue(IndexLoadBalancer.PARENT_TABLE_KEY) == null
       && htd.getValue(IndexLoadBalancer.PARENT_TABLE_KEY) != null) {
     TableName userTableName = TableName.valueOf(htd.getValue(IndexLoadBalancer.PARENT_TABLE_KEY));
     balancer.addTablesToColocate(userTableName, htd.getTableName());
   }
   super.preModifyTableHandler(ctx, tableName, htd);
 }
 /**
  * Create a Mob table.
  *
  * @param util
  * @param tableName
  * @param families
  * @return An HTable instance for the created table.
  * @throws IOException
  */
 public static Table createMobTable(
     final HBaseTestingUtility util, final TableName tableName, final byte[]... families)
     throws IOException {
   HTableDescriptor htd = new HTableDescriptor(tableName);
   for (byte[] family : families) {
     HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Disable blooms (they are on by default as of 0.95) because tests have
      // hard-coded counts of what to expect in the block cache, etc., and
      // having blooms on interferes with those counts.
     hcd.setBloomFilterType(BloomType.NONE);
     hcd.setMobEnabled(true);
     hcd.setMobThreshold(0L);
     htd.addFamily(hcd);
   }
   util.getHBaseAdmin().createTable(htd);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
    // until they are assigned.
   util.waitUntilAllRegionsAssigned(htd.getTableName());
   return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName());
 }
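With mobThreshold set to 0, any non-empty value is written as a MOB file, while the read path stays transparent. A usage sketch (names are illustrative, assuming an HBaseTestingUtility named util):

  Table mobTable = createMobTable(util, TableName.valueOf("mobTable"), Bytes.toBytes("f"));
  try {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("mob-value"));
    mobTable.put(put);
    // Reads look like any other get; MOB resolution happens server-side.
    Result result = mobTable.get(new Get(Bytes.toBytes("row1")));
    assertArrayEquals(
        Bytes.toBytes("mob-value"), result.getValue(Bytes.toBytes("f"), Bytes.toBytes("q")));
  } finally {
    mobTable.close();
  }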
 @Override
 public void preCreateTableHandler(
     ObserverContext<MasterCoprocessorEnvironment> ctx,
     HTableDescriptor desc,
     HRegionInfo[] regions)
     throws IOException {
   TableName userTableName = null;
   if (balancer != null && desc.getValue(IndexLoadBalancer.PARENT_TABLE_KEY) != null) {
     userTableName = TableName.valueOf(desc.getValue(IndexLoadBalancer.PARENT_TABLE_KEY));
     balancer.addTablesToColocate(userTableName, desc.getTableName());
   }
   if (userTableName != null) balancer.populateRegionLocations(userTableName);
   super.preCreateTableHandler(ctx, desc, regions);
 }
  private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
      throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());

    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);

    HRegionFileSystem fs =
        new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
    final Configuration walConf = new Configuration(conf);
    FSUtils.setRootDir(walConf, tableDir);
    final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
    HRegion region =
        new HRegion(
            fs,
            wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
            conf,
            htd,
            null);

    region.initialize();

    return region;
  }
 @Test(timeout = 300000)
 public void testGetTableDescriptor() throws IOException {
   HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
   HColumnDescriptor fam2 = new HColumnDescriptor("fam2");
   HColumnDescriptor fam3 = new HColumnDescriptor("fam3");
   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("myTestTable"));
   htd.addFamily(fam1);
   htd.addFamily(fam2);
   htd.addFamily(fam3);
   this.admin.createTable(htd);
   Table table = new HTable(TEST_UTIL.getConfiguration(), htd.getTableName());
   HTableDescriptor confirmedHtd = table.getTableDescriptor();
    assertEquals(0, htd.compareTo(confirmedHtd));
   table.close();
 }
 // lifted from TestAtomicOperation
 private HRegion createHRegion(
     byte[] tableName, String callingMethod, WAL log, Durability durability) throws IOException {
   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
   htd.setDurability(durability);
   HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
   htd.addFamily(hcd);
   HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
   Path path = new Path(DIR + callingMethod);
   if (FS.exists(path)) {
     if (!FS.delete(path, true)) {
       throw new IOException("Failed delete of " + path);
     }
   }
   return HRegion.createHRegion(info, path, CONF, htd, log);
 }
  private HRegion openRegion(
      final FileSystem fs,
      final Path dir,
      final HTableDescriptor htd,
      final WALFactory wals,
      final long whenToRoll,
      final LogRoller roller)
      throws IOException {
    // Initialize HRegion
    HRegionInfo regionInfo = new HRegionInfo(htd.getTableName());
    // Initialize WAL
    final WAL wal = wals.getWAL(regionInfo.getEncodedNameAsBytes());
    // If we haven't already, attach a listener to this wal to handle rolls and metrics.
    if (walsListenedTo.add(wal)) {
      roller.addWAL(wal);
      wal.registerWALActionsListener(
          new WALActionsListener.Base() {
            private int appends = 0;

            @Override
            public void visitLogEntryBeforeWrite(
                HTableDescriptor htd, WALKey logKey, WALEdit logEdit) {
              this.appends++;
              if (this.appends % whenToRoll == 0) {
                LOG.info("Rolling after " + appends + " edits");
                // We used to do explicit call to rollWriter but changed it to a request
                // to avoid dead lock (there are less threads going on in this class than
                // in the regionserver -- regionserver does not have the issue).
                DefaultWALProvider.requestLogRoll(wal);
              }
            }

            @Override
            public void postSync(final long timeInNanos, final int handlerSyncs) {
              syncMeter.mark();
              syncHistogram.update(timeInNanos);
              syncCountHistogram.update(handlerSyncs);
            }

            @Override
            public void postAppend(final long size, final long elapsedTime) {
              appendMeter.mark(size);
            }
          });
    }

    return HRegion.createHRegion(regionInfo, dir, getConf(), htd, wal);
  }
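The syncMeter, syncHistogram, syncCountHistogram and appendMeter fields used by the listener are assumed to be Dropwizard metrics (com.codahale.metrics), along these lines; the field names come from the usage above, the registry wiring is an assumption:

   private final MetricRegistry metrics = new MetricRegistry();
   private final Meter syncMeter = metrics.meter("syncMeter");
   private final Histogram syncHistogram = metrics.histogram("syncHistogram");
   private final Histogram syncCountHistogram = metrics.histogram("syncCountHistogram");
   private final Meter appendMeter = metrics.meter("appendMeter");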
 HRegion createRegion(final Path testdir, final WALFactory wals) throws IOException {
   // Make a region with start and end keys. Use 'aaa', to 'AAA'.  The load
   // region utility will add rows between 'aaa' and 'zzz'.
   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
   HColumnDescriptor hcd = new HColumnDescriptor(CF);
   htd.addFamily(hcd);
   HRegionInfo hri = new HRegionInfo(htd.getTableName(), STARTROW, ENDROW);
   HRegion r =
       HBaseTestingUtility.createRegionAndWAL(hri, testdir, TEST_UTIL.getConfiguration(), htd);
   HBaseTestingUtility.closeRegionAndWAL(r);
   return HRegion.openHRegion(
       testdir,
       hri,
       htd,
       wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()),
       TEST_UTIL.getConfiguration());
 }
  public static void configureIncrementalLoadMap(Job job, HTableDescriptor tableDescriptor)
      throws IOException {
    Configuration conf = job.getConfiguration();

    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);
    job.setOutputFormatClass(HFileOutputFormat2.class);

    // Set compression algorithms based on column families
    configureCompression(conf, tableDescriptor);
    configureBloomType(tableDescriptor, conf);
    configureBlockSize(tableDescriptor, conf);
    configureDataBlockEncoding(tableDescriptor, conf);

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.initCredentials(job);
    LOG.info("Incremental table " + tableDescriptor.getTableName() + " output configured.");
  }
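A hypothetical caller wiring the method above into a bulk-load job; the driver class, output path, and table name are made up for illustration:

   Configuration conf = HBaseConfiguration.create();
   Job job = Job.getInstance(conf, "incremental-load");
   job.setJarByClass(MyBulkLoadDriver.class); // hypothetical driver class
   FileOutputFormat.setOutputPath(job, new Path("/tmp/hfiles"));
   try (Connection connection = ConnectionFactory.createConnection(conf);
       Admin admin = connection.getAdmin()) {
     HTableDescriptor tableDescriptor = admin.getTableDescriptor(TableName.valueOf("myTable"));
     configureIncrementalLoadMap(job, tableDescriptor);
   }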
 HRegion initHRegion(
     byte[] tableName, String callingMethod, Configuration conf, byte[]... families)
     throws IOException {
   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
   for (byte[] family : families) {
     htd.addFamily(new HColumnDescriptor(family));
   }
   HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
   Path path = new Path(DIR + callingMethod);
   HRegion r = HRegion.createHRegion(info, path, conf, htd);
    // The following is a hack: a coprocessor host is normally loaded inside
    // OpenRegionHandler, but we don't start a real region server here, so
    // manually create the coprocessor host and set it on the region.
   RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
   r.setCoprocessorHost(host);
   return r;
 }
  @Before
  public void setUp() throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME_BYTES));
    htd.addFamily(new HColumnDescriptor(FAMILY_NAME_BYTES));
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    this.region =
        HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

    Put put = new Put(ROW_BYTES);
    for (int i = 0; i < 10; i += 2) {
      // puts 0, 2, 4, 6 and 8
      put.addColumn(
          FAMILY_NAME_BYTES,
          Bytes.toBytes(QUALIFIER_PREFIX + i),
          i,
          Bytes.toBytes(VALUE_PREFIX + i));
    }
    this.region.put(put);
    this.region.flushcache();
  }
  /**
   * Restore the specified snapshot. The restore will fail if the destination table has a snapshot
   * or restore in progress.
   *
   * @param snapshot Snapshot Descriptor
   * @param hTableDescriptor Table Descriptor
   * @param nonceGroup unique value to prevent duplicated RPC
   * @param nonce unique value to prevent duplicated RPC
   * @return procId the ID of the restore snapshot procedure
   */
  private synchronized long restoreSnapshot(
      final SnapshotDescription snapshot,
      final HTableDescriptor hTableDescriptor,
      final long nonceGroup,
      final long nonce)
      throws HBaseSnapshotException {
    TableName tableName = hTableDescriptor.getTableName();

    // make sure we aren't running a snapshot on the same table
    if (isTakingSnapshot(tableName)) {
      throw new RestoreSnapshotException("Snapshot in progress on the restore table=" + tableName);
    }

    // make sure we aren't running a restore on the same table
    if (isRestoringTable(tableName)) {
      throw new RestoreSnapshotException("Restore already in progress on the table=" + tableName);
    }

    try {
      long procId =
          master
              .getMasterProcedureExecutor()
              .submitProcedure(
                  new RestoreSnapshotProcedure(
                      master.getMasterProcedureExecutor().getEnvironment(),
                      hTableDescriptor,
                      snapshot),
                  nonceGroup,
                  nonce);
      this.restoreTableToProcIdMap.put(tableName, procId);
      return procId;
    } catch (Exception e) {
      String msg =
          "Couldn't restore the snapshot="
              + ClientSnapshotDescriptionUtils.toString(snapshot)
              + " on table="
              + tableName;
      LOG.error(msg, e);
      throw new RestoreSnapshotException(msg, e);
    }
  }
  @Override
  public void deserializeStateData(final InputStream stream) throws IOException {
    super.deserializeStateData(stream);

    MasterProcedureProtos.TruncateTableStateData state =
        MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
    user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
    if (state.hasTableSchema()) {
      hTableDescriptor = HTableDescriptor.convert(state.getTableSchema());
      tableName = hTableDescriptor.getTableName();
    } else {
      tableName = ProtobufUtil.toTableName(state.getTableName());
    }
    preserveSplits = state.getPreserveSplits();
    if (state.getRegionInfoCount() == 0) {
      regions = null;
    } else {
      regions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
      for (HBaseProtos.RegionInfo hri : state.getRegionInfoList()) {
        regions.add(HRegionInfo.convert(hri));
      }
    }
  }
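The write-side counterpart presumably mirrors this field-for-field; a sketch along the lines of TruncateTableProcedure's serializer (an assumption, not verbatim source):

  @Override
  public void serializeStateData(final OutputStream stream) throws IOException {
    super.serializeStateData(stream);

    MasterProcedureProtos.TruncateTableStateData.Builder state =
        MasterProcedureProtos.TruncateTableStateData.newBuilder()
            .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
            .setPreserveSplits(preserveSplits);
    if (hTableDescriptor != null) {
      state.setTableSchema(hTableDescriptor.convert());
    } else {
      state.setTableName(ProtobufUtil.toProtoTableName(tableName));
    }
    if (regions != null) {
      for (HRegionInfo hri : regions) {
        state.addRegionInfo(HRegionInfo.convert(hri));
      }
    }
    state.build().writeDelimitedTo(stream);
  }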
  /**
   * Reproduce the lockup that happens when we get an inopportune sync during setup for a
   * zigzaglatch wait. See HBASE-14317. If the code below is broken, this test will time out
   * because it locks up.
   *
   * <p>First I need to set up some mocks for Server and RegionServerServices. I also need to set up
   * a dodgy WAL that will throw an exception when we go to append to it.
   */
  @Test(timeout = 20000)
  public void testLockupWhenSyncInMiddleOfZigZagSetup() throws IOException {
    // A WAL that we can have throw exceptions when a flag is set.
    class DodgyFSLog extends FSHLog {
      // Set this when want the WAL to start throwing exceptions.
      volatile boolean throwException = false;

      // Latch to hold up processing until after another operation has had time to run.
      CountDownLatch latch = new CountDownLatch(1);

      public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf)
          throws IOException {
        super(fs, root, logDir, conf);
      }

      @Override
      protected void afterCreatingZigZagLatch() {
        // If throwException set, then append will throw an exception causing the WAL to be
        // rolled. We'll come in here. Hold up processing until a sync can get in before
        // the zigzag has time to complete its setup and get its own sync in. This is what causes
        // the lock up we've seen in production.
        if (throwException) {
          try {
            LOG.info("LATCHED");
            // So, timing can have it that the test can run and the bad flush below happens
            // before we get here. In this case, we'll be stuck waiting on this latch but there
            // is nothing in the WAL pipeline to get us to the below beforeWaitOnSafePoint...
            // because all WALs have rolled. In this case, just give up on test.
            if (!this.latch.await(5, TimeUnit.SECONDS)) {
              LOG.warn("GIVE UP! Failed waiting on latch...Test is ABORTED!");
            }
          } catch (InterruptedException e) {
            // Restore the interrupt status rather than swallowing it.
            Thread.currentThread().interrupt();
            LOG.warn("Interrupted while waiting on the latch", e);
          }
        }
      }

      @Override
      protected void beforeWaitOnSafePoint() {
        if (throwException) {
          LOG.info("COUNTDOWN");
          // Don't countdown latch until someone waiting on it otherwise, the above
          // afterCreatingZigZagLatch will get to the latch and no one will ever free it and we'll
          // be stuck; test won't go down
          while (this.latch.getCount() <= 0) Threads.sleep(1);
          this.latch.countDown();
        }
      }

      @Override
      protected Writer createWriterInstance(Path path) throws IOException {
        final Writer w = super.createWriterInstance(path);
        return new Writer() {
          @Override
          public void close() throws IOException {
            w.close();
          }

          @Override
          public void sync() throws IOException {
            if (throwException) {
              throw new IOException("FAKE! Failed to replace a bad datanode...SYNC");
            }
            w.sync();
          }

          @Override
          public void append(Entry entry) throws IOException {
            if (throwException) {
              throw new IOException("FAKE! Failed to replace a bad datanode...APPEND");
            }
            w.append(entry);
          }

          @Override
          public long getLength() {
            return w.getLength();
          }
        };
      }
    }

    // Mocked up server and regionserver services. Needed below.
    Server server = Mockito.mock(Server.class);
    Mockito.when(server.getConfiguration()).thenReturn(CONF);
    Mockito.when(server.isStopped()).thenReturn(false);
    Mockito.when(server.isAborted()).thenReturn(false);
    RegionServerServices services = Mockito.mock(RegionServerServices.class);

    // OK. Now I have my mocked up Server & RegionServerServices and dodgy WAL, go ahead with test.
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + getName());
    DodgyFSLog dodgyWAL = new DodgyFSLog(fs, rootDir, getName(), CONF);
    Path originalWAL = dodgyWAL.getCurrentFileName();
    // I need a log roller running.
    LogRoller logRoller = new LogRoller(server, services);
    logRoller.addWAL(dodgyWAL);
    // There is no 'stop' once a logRoller is running... it just dies.
    logRoller.start();
    // Now get a region and start adding in edits.
    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
    final HRegion region = initHRegion(tableName, null, null, dodgyWAL);
    byte[] bytes = Bytes.toBytes(getName());
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    scopes.put(COLUMN_FAMILY_BYTES, 0);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    try {
      // First get something into memstore. Make a Put and then pull the Cell out of it. Will
      // manage append and sync carefully in below to manufacture hang. We keep adding same
      // edit. WAL subsystem doesn't care.
      Put put = new Put(bytes);
      put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes);
      WALKey key =
          new WALKey(
              region.getRegionInfo().getEncodedNameAsBytes(),
              htd.getTableName(),
              System.currentTimeMillis(),
              mvcc,
              scopes);
      WALEdit edit = new WALEdit();
      CellScanner cellScanner = put.cellScanner();
      assertTrue(cellScanner.advance());
      edit.add(cellScanner.current());
      // Put something in memstore and out in the WAL. Do a big number of appends so we push
      // out other side of the ringbuffer. If small numbers, stuff doesn't make it to WAL
      for (int i = 0; i < 1000; i++) {
        region.put(put);
      }
      // Set it so we start throwing exceptions.
      LOG.info("SET throwing of exception on append");
      dodgyWAL.throwException = true;
      // This append provokes a WAL roll request
      dodgyWAL.append(region.getRegionInfo(), key, edit, true);
      boolean exception = false;
      try {
        dodgyWAL.sync();
      } catch (Exception e) {
        exception = true;
      }
      assertTrue("Did not get sync exception", exception);

      // Get a memstore flush going too so we have same hung profile as up in the issue over
      // in HBASE-14317. Flush hangs trying to get sequenceid because the ringbuffer is held up
      // by the zigzaglatch waiting on syncs to come home.
      Thread t =
          new Thread("Flusher") {
            @Override
            public void run() {
              try {
                if (region.getMemstoreSize() <= 0) {
                  throw new IOException("memstore size=" + region.getMemstoreSize());
                }
                region.flush(false);
              } catch (IOException e) {
                // Can fail trying to flush in middle of a roll. Not a failure. Will succeed later
                // when roll completes.
                LOG.info("In flush", e);
              }
              LOG.info("Exiting");
            }
          };
      t.setDaemon(true);
      t.start();
      // Wait until beforeWaitOnSafePoint has counted the latch down.
      while (dodgyWAL.latch.getCount() > 0) Threads.sleep(1);
      // Now assert a new WAL file was put in place even though loads of errors occurred above.
      // Compare with equals(): getCurrentFileName() computes a fresh Path on each call, so a
      // reference check (!=) would pass vacuously.
      assertTrue(!originalWAL.equals(dodgyWAL.getCurrentFileName()));
      // Can I append to it?
      dodgyWAL.throwException = false;
      try {
        region.put(put);
      } catch (Exception e) {
        LOG.info("In the put", e);
      }
    } finally {
      // To stop logRoller, its server has to say it is stopped.
      Mockito.when(server.isStopped()).thenReturn(true);
      if (logRoller != null) logRoller.close();
      try {
        if (region != null) region.close();
        if (dodgyWAL != null) dodgyWAL.close();
      } catch (Exception e) {
        LOG.info("On way out", e);
      }
    }
  }
  /**
   * Reproduce the lockup that happens when there are no further syncs after an append fails,
   * causing an isolated sync and then an infinite wait. See HBASE-16960. If the code below is
   * broken, this test will time out because it locks up.
   *
   * <p>Steps to reproduce:<br>
   * 1. Trigger a server abort through dodgyWAL1<br>
   * 2. Add a {@link DummyWALActionsListener} to dodgyWAL2 to make the ringbuffer event handler
   * thread sleep for a while, thus keeping {@code endOfBatch} false<br>
   * 3. Publish a sync, then an append which will throw an exception; check whether the sync
   * returns
   */
  @Test(timeout = 20000)
  public void testLockup16960() throws IOException {
    // A WAL that we can have throw exceptions when a flag is set.
    class DodgyFSLog extends FSHLog {
      // Set this when want the WAL to start throwing exceptions.
      volatile boolean throwException = false;

      public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf)
          throws IOException {
        super(fs, root, logDir, conf);
      }

      @Override
      protected Writer createWriterInstance(Path path) throws IOException {
        final Writer w = super.createWriterInstance(path);
        return new Writer() {
          @Override
          public void close() throws IOException {
            w.close();
          }

          @Override
          public void sync() throws IOException {
            if (throwException) {
              throw new IOException("FAKE! Failed to replace a bad datanode...SYNC");
            }
            w.sync();
          }

          @Override
          public void append(Entry entry) throws IOException {
            if (throwException) {
              throw new IOException("FAKE! Failed to replace a bad datanode...APPEND");
            }
            w.append(entry);
          }

          @Override
          public long getLength() {
            return w.getLength();
          }
        };
      }

      @Override
      protected long doReplaceWriter(Path oldPath, Path newPath, Writer nextWriter)
          throws IOException {
        if (throwException) {
          throw new FailedLogCloseException("oldPath=" + oldPath + ", newPath=" + newPath);
        }
        long oldFileLen = 0L;
        oldFileLen = super.doReplaceWriter(oldPath, newPath, nextWriter);
        return oldFileLen;
      }
    }

    // Mocked up server and regionserver services. Needed below.
    Server server =
        new DummyServer(CONF, ServerName.valueOf("hostname1.example.org", 1234, 1L).toString());
    RegionServerServices services = Mockito.mock(RegionServerServices.class);

    CONF.setLong("hbase.regionserver.hlog.sync.timeout", 10000);

    // OK. Now I have my mocked up Server & RegionServerServices and dodgy WAL,
    // go ahead with test.
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + getName());
    DodgyFSLog dodgyWAL1 = new DodgyFSLog(fs, rootDir, getName(), CONF);

    Path rootDir2 = new Path(dir + getName() + "2");
    final DodgyFSLog dodgyWAL2 = new DodgyFSLog(fs, rootDir2, getName() + "2", CONF);
    // Add a listener to force ringbuffer event handler sleep for a while
    dodgyWAL2.registerWALActionsListener(new DummyWALActionsListener());

    // I need a log roller running.
    LogRoller logRoller = new LogRoller(server, services);
    logRoller.addWAL(dodgyWAL1);
    logRoller.addWAL(dodgyWAL2);
    // There is no 'stop' once a logRoller is running... it just dies.
    logRoller.start();
    // Now get a region and start adding in edits.
    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
    final HRegion region = initHRegion(tableName, null, null, dodgyWAL1);
    byte[] bytes = Bytes.toBytes(getName());
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    scopes.put(COLUMN_FAMILY_BYTES, 0);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    try {
      Put put = new Put(bytes);
      put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes);
      WALKey key =
          new WALKey(
              region.getRegionInfo().getEncodedNameAsBytes(),
              htd.getTableName(),
              System.currentTimeMillis(),
              mvcc,
              scopes);
      WALEdit edit = new WALEdit();
      CellScanner cellScanner = put.cellScanner();
      assertTrue(cellScanner.advance());
      edit.add(cellScanner.current());

      LOG.info("SET throwing of exception on append");
      dodgyWAL1.throwException = true;
      // This append provokes a WAL roll request
      dodgyWAL1.append(region.getRegionInfo(), key, edit, true);
      boolean exception = false;
      try {
        dodgyWAL1.sync();
      } catch (Exception e) {
        exception = true;
      }
      assertTrue("Did not get sync exception", exception);

      // The LogRoller calls dodgyWAL1.rollWriter(), gets a FailedLogCloseException,
      // and causes the server to abort.
      try {
        // Wait for the LogRoller to exit.
        Thread.sleep(50);
      } catch (InterruptedException e) {
        e.printStackTrace();
      }

      final CountDownLatch latch = new CountDownLatch(1);

      // Make the RingBufferEventHandler sleep for 1s, so that for the following sync,
      // endOfBatch stays false.
      key =
          new WALKey(
              region.getRegionInfo().getEncodedNameAsBytes(),
              TableName.valueOf("sleep"),
              System.currentTimeMillis(),
              mvcc,
              scopes);
      dodgyWAL2.append(region.getRegionInfo(), key, edit, true);

      Thread t =
          new Thread("Sync") {
            @Override
            public void run() {
              try {
                dodgyWAL2.sync();
              } catch (IOException e) {
                LOG.info("In sync", e);
              }
              latch.countDown();
              LOG.info("Sync exiting");
            }
          };
      t.setDaemon(true);
      t.start();
      try {
        // Make sure the sync has been published.
        Thread.sleep(100);
      } catch (InterruptedException e1) {
        e1.printStackTrace();
      }
      // Make the append throw a DamagedWALException.
      key =
          new WALKey(
              region.getRegionInfo().getEncodedNameAsBytes(),
              TableName.valueOf("DamagedWALException"),
              System.currentTimeMillis(),
              mvcc,
              scopes);
      dodgyWAL2.append(region.getRegionInfo(), key, edit, true);

      while (latch.getCount() > 0) {
        Threads.sleep(100);
      }
      assertTrue(server.isAborted());
    } finally {
      if (logRoller != null) {
        logRoller.close();
      }
      try {
        if (region != null) {
          region.close();
        }
        if (dodgyWAL1 != null) {
          dodgyWAL1.close();
        }
        if (dodgyWAL2 != null) {
          dodgyWAL2.close();
        }
      } catch (Exception e) {
        LOG.info("On way out", e);
      }
    }
  }
  @Test(timeout = 300000)
  public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException {
    // Test we get a TableNotFoundException if we operate against a nonexistent table.
    final TableName nonexistentTable = TableName.valueOf("nonexistent");
    final byte[] nonexistentColumn = Bytes.toBytes("nonexistent");
    HColumnDescriptor nonexistentHcd = new HColumnDescriptor(nonexistentColumn);
    Exception exception = null;
    try {
      this.admin.addColumn(nonexistentTable, nonexistentHcd);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.deleteTable(nonexistentTable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.deleteColumn(nonexistentTable, nonexistentColumn);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.disableTable(nonexistentTable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.enableTable(nonexistentTable);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      this.admin.modifyColumn(nonexistentTable, nonexistentHcd);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    exception = null;
    try {
      HTableDescriptor htd = new HTableDescriptor(nonexistentTable);
      htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
      this.admin.modifyTable(htd.getTableName(), htd);
    } catch (IOException e) {
      exception = e;
    }
    assertTrue(exception instanceof TableNotFoundException);

    // Now make it so at least the table exists and then do tests against a
    // nonexistent column family -- see if we get right exceptions.
    final TableName tableName =
        TableName.valueOf(
            "testDeleteEditUnknownColumnFamilyAndOrTable" + System.currentTimeMillis());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("cf"));
    this.admin.createTable(htd);
    try {
      exception = null;
      try {
        this.admin.deleteColumn(htd.getTableName(), nonexistentHcd.getName());
      } catch (IOException e) {
        exception = e;
      }
      assertTrue(
          "found=" + exception.getClass().getName(),
          exception instanceof InvalidFamilyOperationException);

      exception = null;
      try {
        this.admin.modifyColumn(htd.getTableName(), nonexistentHcd);
      } catch (IOException e) {
        exception = e;
      }
      assertTrue(
          "found=" + exception.getClass().getName(),
          exception instanceof InvalidFamilyOperationException);
    } finally {
      this.admin.disableTable(tableName);
      this.admin.deleteTable(tableName);
    }
  }
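The repeated try/catch/assert blocks above could be collapsed with a small helper (a sketch; on JUnit 4.13+, Assert.assertThrows does the same job):

  private static void assertThrowsIO(
      Class<? extends IOException> expected, Callable<?> operation) throws Exception {
    try {
      operation.call();
    } catch (IOException e) {
      assertTrue("found=" + e.getClass().getName(), expected.isInstance(e));
      return;
    }
    fail("Expected " + expected.getName());
  }

  // Usage (Java 8+):
  // assertThrowsIO(TableNotFoundException.class, () -> admin.deleteTable(nonexistentTable));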
  @Test(timeout = 30000)
  public void testBulkLoad() throws IOException {
    // Create table then get the single region for our new table.
    LOG.debug("Creating test table");
    HTableDescriptor hdt = HTU.createTableDescriptor("testBulkLoad");
    hdt.setRegionReplication(NB_SERVERS);
    hdt.addCoprocessor(SlowMeCopro.class.getName());
    Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

    // create hfiles to load.
    LOG.debug("Creating test data");
    Path dir = HTU.getDataTestDirOnTestFS("testBulkLoad");
    final int numRows = 10;
    final byte[] qual = Bytes.toBytes("qual");
    final byte[] val = Bytes.toBytes("val");
    final List<Pair<byte[], String>> famPaths = new ArrayList<Pair<byte[], String>>();
    for (HColumnDescriptor col : hdt.getColumnFamilies()) {
      Path hfile = new Path(dir, col.getNameAsString());
      TestHRegionServerBulkLoad.createHFile(
          HTU.getTestFileSystem(), hfile, col.getName(), qual, val, numRows);
      famPaths.add(new Pair<byte[], String>(col.getName(), hfile.toString()));
    }

    // bulk load HFiles
    LOG.debug("Loading test data");
    @SuppressWarnings("deprecation")
    final HConnection conn = HTU.getHBaseAdmin().getConnection();
    RegionServerCallable<Void> callable =
        new RegionServerCallable<Void>(
            conn, hdt.getTableName(), TestHRegionServerBulkLoad.rowkey(0)) {
          @Override
          public Void call(int timeout) throws Exception {
            LOG.debug(
                "Going to connect to server "
                    + getLocation()
                    + " for row "
                    + Bytes.toStringBinary(getRow()));
            byte[] regionName = getLocation().getRegionInfo().getRegionName();
            BulkLoadHFileRequest request =
                RequestConverter.buildBulkLoadHFileRequest(famPaths, regionName, true);
            getStub().bulkLoadHFile(null, request);
            return null;
          }
        };
    RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(HTU.getConfiguration());
    RpcRetryingCaller<Void> caller = factory.<Void>newCaller();
    caller.callWithRetries(callable, 10000);

    // verify we can read them from the primary
    LOG.debug("Verifying data load");
    for (int i = 0; i < numRows; i++) {
      byte[] row = TestHRegionServerBulkLoad.rowkey(i);
      Get g = new Get(row);
      Result r = table.get(g);
      Assert.assertFalse(r.isStale());
    }

    // verify we can read them from the replica
    LOG.debug("Verifying replica queries");
    try {
      SlowMeCopro.cdl.set(new CountDownLatch(1));
      for (int i = 0; i < numRows; i++) {
        byte[] row = TestHRegionServerBulkLoad.rowkey(i);
        Get g = new Get(row);
        g.setConsistency(Consistency.TIMELINE);
        Result r = table.get(g);
        Assert.assertTrue(r.isStale());
      }
      SlowMeCopro.cdl.get().countDown();
    } finally {
      SlowMeCopro.cdl.get().countDown();
      SlowMeCopro.sleepTime.set(0);
    }

    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
  @SuppressWarnings("deprecation")
  @Test(timeout = 300000)
  public void testReplicaAndReplication() throws Exception {
    HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaAndReplication");
    hdt.setRegionReplication(NB_SERVERS);

    HColumnDescriptor fam = new HColumnDescriptor(row);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    hdt.addFamily(fam);

    hdt.addCoprocessor(SlowMeCopro.class.getName());
    HTU.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

    Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration());
    conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    MiniZooKeeperCluster miniZK = HTU.getZkCluster();

    HTU2 = new HBaseTestingUtility(conf2);
    HTU2.setZkCluster(miniZK);
    HTU2.startMiniCluster(NB_SERVERS);
    LOG.info("Setup second Zk");
    HTU2.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
    admin.addPeer("2", HTU2.getClusterKey());
    admin.close();

    Put p = new Put(row);
    p.addColumn(row, row, row);
    final Table table = HTU.getConnection().getTable(hdt.getTableName());
    table.put(p);

    HTU.getHBaseAdmin().flush(table.getName());
    LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");

    Waiter.waitFor(
        HTU.getConfiguration(),
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            try {
              SlowMeCopro.cdl.set(new CountDownLatch(1));
              Get g = new Get(row);
              g.setConsistency(Consistency.TIMELINE);
              Result r = table.get(g);
              Assert.assertTrue(r.isStale());
              return !r.isEmpty();
            } finally {
              SlowMeCopro.cdl.get().countDown();
              SlowMeCopro.sleepTime.set(0);
            }
          }
        });
    table.close();
    LOG.info("stale get on the first cluster done. Now for the second.");

    final Table table2 = HTU.getConnection().getTable(hdt.getTableName());
    Waiter.waitFor(
        HTU.getConfiguration(),
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            try {
              SlowMeCopro.cdl.set(new CountDownLatch(1));
              Get g = new Get(row);
              g.setConsistency(Consistency.TIMELINE);
              Result r = table2.get(g);
              Assert.assertTrue(r.isStale());
              return !r.isEmpty();
            } finally {
              SlowMeCopro.cdl.get().countDown();
              SlowMeCopro.sleepTime.set(0);
            }
          }
        });
    table2.close();

    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());

    HTU2.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU2.deleteTable(hdt.getTableName());

    // We shut down the HTU2 minicluster later, in afterClass(), as shutting down
    // the minicluster has the negative impact of deleting all HConnections in the JVM.
  }