@Test(timeout = 60000)
 public void testExceptionDuringInitialization() throws Exception {
   Configuration conf = TEST_UTIL.getConfiguration();
   conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
   conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
   conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
   TEST_UTIL.startMiniCluster(2);
   try {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     // Trigger one regionserver to fail as if it came up with a coprocessor
     // that fails during initialization
     final HRegionServer regionServer = cluster.getRegionServer(0);
     conf.set(
         CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         FailedInitializationObserver.class.getName());
     regionServer
         .getRegionServerCoprocessorHost()
         .loadSystemCoprocessors(conf, CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
     TEST_UTIL.waitFor(
         10000,
         1000,
         new Predicate<Exception>() {
           @Override
           public boolean evaluate() throws Exception {
             return regionServer.isAborted();
           }
         });
   } finally {
     TEST_UTIL.shutdownMiniCluster();
   }
 }
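FailedInitializationObserver is referenced above but its body is not shown. A minimal sketch of what such an observer could look like, assuming it only needs to throw from its start hook (the class body below is an assumption, not the original):

public static class FailedInitializationObserver extends SimpleRegionObserver {
  @Override
  public void start(CoprocessorEnvironment e) throws IOException {
    // Fail deliberately while the coprocessor host loads us; with
    // ABORT_ON_ERROR_KEY set to true this should abort the regionserver.
    throw new IOException("Failing deliberately during initialization");
  }
}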
Example #2
 public HBaseTcpServer(HRegionServer regionServer) {
   ZooKeeperAdmin.createBaseZNodes();
   initConf(regionServer.getConfiguration());
   tcpPort = getRegionServerTcpPort(regionServer.getConfiguration());
   serverName = regionServer.getServerName();
   this.regionServer = regionServer;
   init(regionServer.getConfiguration());
 }
 /**
  * Flush the given region and log flush stats.
  *
  * @param r the region to flush
  * @param server the regionserver hosting the region
  * @throws IOException if the flush fails
  */
 private void flush(final Region r, final HRegionServer server) throws IOException {
   LOG.info(
       "Flush "
           + r.toString()
           + " on "
           + server.getServerName()
           + ", "
           + r.flush(true)
           + ", size="
           + server.getRegionServerAccounting().getGlobalMemstoreSize());
 }
 private static HRegionServer setDrainingServer(final HRegionServer hrs) throws KeeperException {
   LOG.info(
       "Making "
           + hrs.getServerName()
           + " the draining server; "
           + "it has "
           + hrs.getNumberOfOnlineRegions()
           + " online regions");
   ZooKeeperWatcher zkw = hrs.getZooKeeper();
   String hrsDrainingZnode = ZKUtil.joinZNode(zkw.drainingZNode, hrs.getServerName().toString());
   ZKUtil.createWithParents(zkw, hrsDrainingZnode);
   return hrs;
 }
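For illustration, a companion check could confirm that the draining znode actually landed. This helper is an assumption, not part of the original test; it only uses ZKUtil calls of the kind that appear elsewhere in this class:

 private static boolean isDraining(final HRegionServer hrs) throws KeeperException {
   ZooKeeperWatcher zkw = hrs.getZooKeeper();
   // List the children of the draining znode and look for this server's entry.
   List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.drainingZNode);
   return children != null && children.contains(hrs.getServerName().toString());
 }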
  @Test(timeout = 60000)
  public void testExceptionFromCoprocessorDuringPut() throws Exception {
    // set configuration to indicate which coprocessor should be loaded
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BuggyRegionObserver.class.getName());
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
    TEST_UTIL.startMiniCluster(2);
    try {
      // When we try to write to TEST_TABLE, the buggy coprocessor will
      // cause a NullPointerException, which will cause the regionserver (which
      // hosts the region we attempted to write to) to abort.
      final byte[] TEST_FAMILY = Bytes.toBytes("aaa");

      Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, TEST_FAMILY);
      TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);

      // Note which regionServer will abort (after put is attempted).
      final HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);

      try {
        final byte[] ROW = Bytes.toBytes("aaa");
        Put put = new Put(ROW);
        put.add(TEST_FAMILY, ROW, ROW);
        table.put(put);
      } catch (IOException e) {
        // The region server is going to be aborted.
        // We may get an exception if we retry,
        // which is not guaranteed.
      }

      // Wait up to 10 seconds for the regionserver to abort.
      boolean aborted = false;
      for (int i = 0; i < 10; i++) {
        aborted = regionServer.isAborted();
        if (aborted) {
          break;
        }
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          fail("InterruptedException while waiting for regionserver " + "zk node to be deleted.");
        }
      }
      Assert.assertTrue("The region server should have aborted", aborted);
      table.close();
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
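The hand-rolled 10-second polling loop above can be expressed more compactly with the Waiter utility that the first example in this section already uses; a sketch of the equivalent wait:

      TEST_UTIL.waitFor(10000, 1000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return regionServer.isAborted();
        }
      });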
 @Test
 public void test() throws IOException, InterruptedException {
   testUtil
       .getHBaseAdmin()
       .createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
   Table table = testUtil.createTable(tableName, families);
   table.put(
       new Put(Bytes.toBytes("k")).addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
   MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
   List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
   Region region = null;
    for (JVMClusterUtil.RegionServerThread rst : rsts) {
      for (Region r : rst.getRegionServer().getOnlineRegions(tableName)) {
        region = r;
        break;
      }
      if (region != null) {
        break;
      }
    }
   assertNotNull(region);
   Thread.sleep(2000);
   RegionStoreSequenceIds ids =
       testUtil
           .getHBaseCluster()
           .getMaster()
           .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
   assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
   // This will be the sequenceid just before that of the earliest edit in memstore.
   long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
   assertTrue(storeSequenceId > 0);
   testUtil.getHBaseAdmin().flush(tableName);
   Thread.sleep(2000);
   ids =
       testUtil
           .getHBaseCluster()
           .getMaster()
           .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
   assertTrue(
       ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
       ids.getLastFlushedSequenceId() > storeSequenceId);
   assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
   table.close();
 }
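The fixed Thread.sleep(2000) calls above leave room for flakiness; the second sleep, for example, could be replaced by polling the master until it reports a flushed sequence id. A sketch, reusing the test's own fields:

    final byte[] encodedName = region.getRegionInfo().getEncodedNameAsBytes();
    testUtil.waitFor(10000, 200, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return testUtil.getHBaseCluster().getMaster()
            .getLastSequenceId(encodedName).getLastFlushedSequenceId() != HConstants.NO_SEQNUM;
      }
    });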
Example #7
  @Test
  public void testCloseRegionWhenServerNameIsEmpty() throws Exception {
    String tbName = "TestHBACloseRegionWhenServerNameIsEmpty";
    byte[] TABLENAME = Bytes.toBytes(tbName);
    HBaseAdmin admin = createTable(TABLENAME);

    HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);

    try {
      List<HRegionInfo> onlineRegions = rs.getOnlineRegions();
      for (HRegionInfo regionInfo : onlineRegions) {
        if (!regionInfo.isMetaRegion() && !regionInfo.isRootRegion()) {
          if (regionInfo.getRegionNameAsString().contains(tbName)) {
            admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), " ");
          }
        }
      }
      fail("The test should throw exception if the servername passed is empty.");
    } catch (IllegalArgumentException e) {
    }
  }
Example #8
  @Test
  public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception {
    String tbName = "TestCloseRegionIfInvalidRegionName";
    byte[] TABLENAME = Bytes.toBytes(tbName);
    HBaseAdmin admin = createTable(TABLENAME);

    HRegionInfo info = null;
    HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);
    List<HRegionInfo> onlineRegions = rs.getOnlineRegions();
    for (HRegionInfo regionInfo : onlineRegions) {
      if (!regionInfo.isMetaRegion() && !regionInfo.isRootRegion()) {
        if (regionInfo.getRegionNameAsString().contains(tbName)) {
          info = regionInfo;
          admin.closeRegionWithEncodedRegionName("sample", rs.getServerInfo().getHostnamePort());
        }
      }
    }
    onlineRegions = rs.getOnlineRegions();
    assertTrue(
        "The region should be present in online regions list.", onlineRegions.contains(info));
  }
Example #9
  @Test
  public void testShouldCloseTheRegionBasedOnTheEncodedRegionName() throws Exception {
    String tbName = "TestHBACloseRegion";
    byte[] TABLENAME = Bytes.toBytes(tbName);
    HBaseAdmin admin = createTable(TABLENAME);

    HRegionInfo info = null;
    HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME);
    List<HRegionInfo> onlineRegions = rs.getOnlineRegions();
    for (HRegionInfo regionInfo : onlineRegions) {
      if (!regionInfo.isMetaRegion() && !regionInfo.isRootRegion()) {
        info = regionInfo;
        admin.closeRegionWithEncodedRegionName(
            regionInfo.getEncodedName(), rs.getServerInfo().getHostnamePort());
      }
    }
    Thread.sleep(1000);
    onlineRegions = rs.getOnlineRegions();
    assertFalse(
        "The region should not be present in online regions list.", onlineRegions.contains(info));
  }
 /**
  * Test adding a server to the draining servers list and then moving regions off it. Make sure
  * that no regions are moved back to the draining server.
  *
  * @throws IOException if moving a region fails
  * @throws KeeperException if updating the draining znode fails
  */
 @Test // (timeout=30000)
 public void testDrainingServerOffloading() throws Exception {
   // I need master in the below.
   HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
   HRegionInfo hriToMoveBack = null;
   // Set first server as draining server.
   HRegionServer drainingServer =
       setDrainingServer(TEST_UTIL.getMiniHBaseCluster().getRegionServer(0));
   try {
     final int regionsOnDrainingServer = drainingServer.getNumberOfOnlineRegions();
     Assert.assertTrue(regionsOnDrainingServer > 0);
     List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(drainingServer);
     for (HRegionInfo hri : hris) {
        // Pass null and the AssignmentManager will choose a random server, but it
        // should exclude draining servers.
       master.moveRegion(
           null, RequestConverter.buildMoveRegionRequest(hri.getEncodedNameAsBytes(), null));
       // Save off region to move back.
       hriToMoveBack = hri;
     }
      // Wait for regions to come back online.
     waitForAllRegionsOnline();
     Assert.assertEquals(0, drainingServer.getNumberOfOnlineRegions());
   } finally {
     unsetDrainingServer(drainingServer);
   }
    // Now that we've unset the draining server, we should be able to move a region
    // back to what was the draining server.
   master.moveRegion(
       null,
       RequestConverter.buildMoveRegionRequest(
           hriToMoveBack.getEncodedNameAsBytes(),
           Bytes.toBytes(drainingServer.getServerName().toString())));
    // Wait for regions to come back online.
   waitForAllRegionsOnline();
   Assert.assertEquals(1, drainingServer.getNumberOfOnlineRegions());
 }
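waitForAllRegionsOnline() is used throughout these draining-server tests but not shown. A plausible sketch, assuming it simply polls the AssignmentManager state that the tests already query elsewhere:

  private static void waitForAllRegionsOnline() throws Exception {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    // Poll until no region is in transition; the exact condition is an assumption.
    while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
      Thread.sleep(100);
    }
  }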
Example #11
  @Override
  public void run() {
    HBaseTcpServer server = new HBaseTcpServer(this);
    server.start();

    HBasePgServer pgServer = null;
    if (HBasePgServer.isPgServerEnable(getConfiguration())) {
      pgServer = new HBasePgServer(this);
      pgServer.start();
    }

    try {
      super.run();
    } finally {
      server.stop();
      if (pgServer != null) pgServer.stop();
    }
  }
  /**
   * Test that draining servers are ignored even after killing regionserver(s). Verify that the
   * draining server is not given any of the dead servers' regions.
   *
   * @throws KeeperException if updating the draining znode fails
   * @throws IOException if cluster operations fail
   */
  @Test(timeout = 30000)
  public void testDrainingServerWithAbort() throws Exception {
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();

    waitForAllRegionsOnline();

    final long regionCount = TEST_UTIL.getMiniHBaseCluster().countServedRegions();

    // Let's get a copy of the regions today.
    Collection<HRegion> regions = new ArrayList<HRegion>();
    for (int i = 0; i < NB_SLAVES; i++) {
      HRegionServer hrs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i);
      regions.addAll(hrs.getCopyOfOnlineRegionsSortedBySize().values());
    }

    // Choose the draining server
    HRegionServer drainingServer = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
    final int regionsOnDrainingServer = drainingServer.getNumberOfOnlineRegions();
    Assert.assertTrue(regionsOnDrainingServer > 0);

    ServerManager sm = master.getServerManager();

    Collection<HRegion> regionsBefore =
        drainingServer.getCopyOfOnlineRegionsSortedBySize().values();
    LOG.info("Regions of drained server are: " + regionsBefore);

    try {
      // Add first server to draining servers up in zk.
      setDrainingServer(drainingServer);

      // wait for the master to receive and manage the event
      while (sm.createDestinationServersList().contains(drainingServer.getServerName())) {
        Thread.sleep(1);
      }

      LOG.info("The available servers are: " + sm.createDestinationServersList());

      Assert.assertEquals(
          "Nothing should have happened here.",
          regionsOnDrainingServer,
          drainingServer.getNumberOfOnlineRegions());
      Assert.assertFalse(
          "We should not have regions in transition here. List is: "
              + master.getAssignmentManager().getRegionStates().getRegionsInTransition(),
          master.getAssignmentManager().getRegionStates().isRegionsInTransition());

      // Kill a few regionservers.
      for (int aborted = 0; aborted <= 2; aborted++) {
        HRegionServer hrs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(aborted + 1);
        hrs.abort("Aborting");
      }

      // Wait for regions to come back online again.
      waitForAllRegionsOnline();

      Collection<HRegion> regionsAfter =
          drainingServer.getCopyOfOnlineRegionsSortedBySize().values();
      LOG.info("Regions of drained server are: " + regionsAfter);

      Assert.assertEquals(
          "Test conditions are not met: regions were" + " created/deleted during the test. ",
          regionCount,
          TEST_UTIL.getMiniHBaseCluster().countServedRegions());

      // Assert the draining server still has the same regions.
      StringBuilder result = new StringBuilder();
      for (HRegion r : regionsAfter) {
        if (!regionsBefore.contains(r)) {
          result.append(r).append(" was added after the drain");
          if (regions.contains(r)) {
            result.append("(existing region");
          } else {
            result.append("(new region)");
          }
          result.append("; ");
        }
      }
      for (HRegion r : regionsBefore) {
        if (!regionsAfter.contains(r)) {
          result.append(r).append(" was removed after the drain; ");
        }
      }
      Assert.assertTrue("Errors are: " + result.toString(), result.length() == 0);

    } finally {
      unsetDrainingServer(drainingServer);
    }
  }
 private static HRegionServer unsetDrainingServer(final HRegionServer hrs) throws KeeperException {
   ZooKeeperWatcher zkw = hrs.getZooKeeper();
   String hrsDrainingZnode = ZKUtil.joinZNode(zkw.drainingZNode, hrs.getServerName().toString());
   ZKUtil.deleteNode(zkw, hrsDrainingZnode);
   return hrs;
 }
Example #14
  public void doTest(Class<?> regionClass, boolean distributedLogReplay) throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay);
    // Insert our custom region
    c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
    c.setBoolean("dfs.support.append", true);
    // Encourage plenty of flushes
    c.setLong("hbase.hregion.memstore.flush.size", 200000);
    c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());
    // Only run compaction when we tell it to
    c.setInt("hbase.hstore.compactionThreshold", 1000);
    c.setLong("hbase.hstore.blockingStoreFiles", 1000);
    // Compact quickly after we tell it to!
    c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
    LOG.info("Starting mini cluster");
    TEST_UTIL.startMiniCluster(1);
    CompactionBlockerRegion compactingRegion = null;
    Admin admin = null;
    try {
      LOG.info("Creating admin");
      admin = TEST_UTIL.getConnection().getAdmin();
      LOG.info("Creating table");
      TEST_UTIL.createTable(TABLE_NAME, FAMILY);
      Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
      LOG.info("Loading test table");
      // Find the region
      List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME);
      assertEquals(1, testRegions.size());
      compactingRegion = (CompactionBlockerRegion) testRegions.get(0);
      LOG.info("Blocking compactions");
      compactingRegion.stopCompactions();
      long lastFlushTime = compactingRegion.getLastFlushTime();
      // Load some rows
      TEST_UTIL.loadNumericRows(table, FAMILY, 0, FIRST_BATCH_COUNT);

      // add a compaction from an older (non-existing) region to see whether we successfully skip
      // those entries
      HRegionInfo oldHri =
          new HRegionInfo(table.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      CompactionDescriptor compactionDescriptor =
          ProtobufUtil.toCompactionDescriptor(
              oldHri,
              FAMILY,
              Lists.newArrayList(new Path("/a")),
              Lists.newArrayList(new Path("/b")),
              new Path("store_dir"));
      WALUtil.writeCompactionMarker(
          compactingRegion.getWAL(),
          table.getTableDescriptor(),
          oldHri,
          compactionDescriptor,
          new AtomicLong(Long.MAX_VALUE - 100));

      // Wait till flush has happened, otherwise there won't be multiple store files
      long startWaitTime = System.currentTimeMillis();
      while (compactingRegion.getLastFlushTime() <= lastFlushTime
          || compactingRegion.countStoreFiles() <= 1) {
        LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString());
        Thread.sleep(1000);
        assertTrue(
            "Timed out waiting for the region to flush",
            System.currentTimeMillis() - startWaitTime < 30000);
      }
      assertTrue(compactingRegion.countStoreFiles() > 1);
      final byte[] REGION_NAME = compactingRegion.getRegionName();
      LOG.info("Asking for compaction");
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      LOG.info("Waiting for compaction to be about to start");
      compactingRegion.waitForCompactionToBlock();
      LOG.info("Starting a new server");
      RegionServerThread newServerThread = TEST_UTIL.getMiniHBaseCluster().startRegionServer();
      final HRegionServer newServer = newServerThread.getRegionServer();
      LOG.info("Killing region server ZK lease");
      TEST_UTIL.expireRegionServerSession(0);
      CompactionBlockerRegion newRegion = null;
      startWaitTime = System.currentTimeMillis();
      LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME));

      // wait for region to be assigned and to go out of log replay if applicable
      Waiter.waitFor(
          c,
          60000,
          new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
              HRegion newRegion = newServer.getOnlineRegion(REGION_NAME);
              return newRegion != null && !newRegion.isRecovering();
            }
          });

      newRegion = (CompactionBlockerRegion) newServer.getOnlineRegion(REGION_NAME);

      LOG.info("Allowing compaction to proceed");
      compactingRegion.allowCompactions();
      while (compactingRegion.compactCount == 0) {
        Thread.sleep(1000);
      }
      // The server we killed stays up until the compaction that was started before it was
      // killed completes. In the logs you should see the old regionserver now going down.
      LOG.info("Compaction finished");
      // After compaction of old region finishes on the server that was going down, make sure that
      // all the files we expect are still working when region is up in new location.
      FileSystem fs = newRegion.getFilesystem();
      for (String f : newRegion.getStoreFileList(new byte[][] {FAMILY})) {
        assertTrue("After compaction, does not exist: " + f, fs.exists(new Path(f)));
      }
      // If we survive the split keep going...
      // Now we make sure that the region isn't totally confused.  Load up more rows.
      TEST_UTIL.loadNumericRows(
          table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      startWaitTime = System.currentTimeMillis();
      while (newRegion.compactCount == 0) {
        Thread.sleep(1000);
        assertTrue(
            "New region never compacted", System.currentTimeMillis() - startWaitTime < 180000);
      }
      assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table));
    } finally {
      if (compactingRegion != null) {
        compactingRegion.allowCompactions();
      }
      if (admin != null) {
        admin.close();
      }
      TEST_UTIL.shutdownMiniCluster();
    }
  }
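CompactionBlockerRegion is a custom HRegion subclass defined outside this snippet. Its stopCompactions()/waitForCompactionToBlock()/allowCompactions() trio suggests a latch-based gate; a generic, self-contained sketch of that handshake (the wiring into the region itself is an assumption):

import java.util.concurrent.CountDownLatch;

class CompactionGate {
  private final CountDownLatch arrived = new CountDownLatch(1); // a compaction reached the gate
  private final CountDownLatch allowed = new CountDownLatch(1); // the test opened the gate

  // Called from the compaction thread before any real work starts.
  void enter() throws InterruptedException {
    arrived.countDown();
    allowed.await();
  }

  // Test side: block until some compaction is parked at the gate.
  void waitForCompactionToBlock() throws InterruptedException {
    arrived.await();
  }

  // Test side: release the parked compaction.
  void allowCompactions() {
    allowed.countDown();
  }
}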
Example #15
  @Test
  public void testRegionMerge() throws Exception {
    String nsp1 = prefix + "_regiontest";
    NamespaceDescriptor nspDesc =
        NamespaceDescriptor.create(nsp1)
            .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3")
            .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2")
            .build();
    ADMIN.createNamespace(nspDesc);
    final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
    byte[] columnFamily = Bytes.toBytes("info");
    HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo);
    tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
    final int initialRegions = 3;
    ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions);
    Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    try (Table table = connection.getTable(tableTwo)) {
      UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999);
    }
    ADMIN.flush(tableTwo);
    List<HRegionInfo> hris = ADMIN.getTableRegions(tableTwo);
    Collections.sort(hris);
    // merge the two regions
    final Set<String> encodedRegionNamesToMerge =
        Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName());
    ADMIN.mergeRegions(
        hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), false);
    UTIL.waitFor(
        10000,
        100,
        new Waiter.ExplainingPredicate<Exception>() {

          @Override
          public boolean evaluate() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                return false;
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return false;
              }
            }
            return true;
          }

          @Override
          public String explainFailure() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                return hri + " which is expected to be merged is still online";
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return hri + " is still in not opened";
              }
            }
            return "Unknown";
          }
        });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions - 1, hris.size());
    Collections.sort(hris);

    final HRegionInfo hriToSplit = hris.get(1);
    ADMIN.split(tableTwo, Bytes.toBytes("500"));

    UTIL.waitFor(
        10000,
        100,
        new Waiter.ExplainingPredicate<Exception>() {

          @Override
          public boolean evaluate() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                return false;
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return false;
              }
            }
            return true;
          }

          @Override
          public String explainFailure() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                return hriToSplit + " which is expected to be split is still online";
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return hri + " is still in not opened";
              }
            }
            return "Unknown";
          }
        });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);

    // fail region merge through Coprocessor hook
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HRegionServer regionServer = cluster.getRegionServer(0);
    RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost();
    Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName());
    CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor;
    regionServerObserver.failMerge(true);
    regionServerObserver.triggered = false;

    ADMIN.mergeRegions(
        hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), false);
    regionServerObserver.waitUtilTriggered();
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);
    // verify that we cannot split
    HRegionInfo hriToSplit2 = hris.get(1);
    ADMIN.split(
        tableTwo,
        TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true));
    Thread.sleep(2000);
    assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size());
  }
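CPRegionServerObserver is also defined outside this snippet. Its failMerge/triggered/waitUtilTriggered surface suggests a monitor-based handshake; a sketch of that shape (everything beyond the names used above is an assumption):

import java.io.IOException;

class MergeTrigger {
  private volatile boolean failMerge = false;
  volatile boolean triggered = false;

  void failMerge(boolean fail) {
    this.failMerge = fail;
  }

  // Would be invoked from a merge-related coprocessor hook.
  synchronized void onMergeAttempt() throws IOException {
    triggered = true;
    notifyAll();
    if (failMerge) {
      throw new IOException("Failing merge on purpose"); // vetoes the merge
    }
  }

  synchronized void waitUtilTriggered() throws InterruptedException {
    while (!triggered) {
      wait();
    }
  }
}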
  /**
   * Test that the global memstore size for a region server equals the sum of each of its regions'
   * memstore sizes.
   *
   * @throws Exception if the cluster cannot be started or the sizes do not match
   */
  @Test
  public void testGlobalMemStore() throws Exception {
    // Start the cluster
    LOG.info("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(1, regionServerNum);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();

    // Create a table with regions
    TableName table = TableName.valueOf("TestGlobalMemStoreSize");
    byte[] family = Bytes.toBytes("family");
    LOG.info("Creating table with " + regionNum + " regions");
    Table ht = TEST_UTIL.createMultiRegionTable(table, family, regionNum);
    int numRegions = -1;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
      numRegions = r.getStartKeys().length;
    }
    assertEquals(regionNum, numRegions);
    waitForAllRegionsAssigned();

    for (HRegionServer server : getOnlineRegionServers()) {
      long globalMemStoreSize = 0;
      for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
        globalMemStoreSize +=
            server.getFromOnlineRegions(regionInfo.getEncodedName()).getMemstoreSize();
      }
      assertEquals(server.getRegionServerAccounting().getGlobalMemstoreSize(), globalMemStoreSize);
    }

    // check the global memstore size after flush
    int i = 0;
    for (HRegionServer server : getOnlineRegionServers()) {
      LOG.info(
          "Starting flushes on "
              + server.getServerName()
              + ", size="
              + server.getRegionServerAccounting().getGlobalMemstoreSize());

      for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
        Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
        flush(r, server);
      }
      LOG.info("Post flush on " + server.getServerName());
      long now = System.currentTimeMillis();
      long timeout = now + 1000;
      while (server.getRegionServerAccounting().getGlobalMemstoreSize() != 0
          && System.currentTimeMillis() < timeout) {
        Threads.sleep(10);
      }
      long size = server.getRegionServerAccounting().getGlobalMemstoreSize();
      if (size > 0) {
        // If size > 0, see if it's because the meta region got edits while
        // our test was running.
        for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
          Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
          long l = r.getMemstoreSize();
          if (l > 0) {
            // Only meta could have edits at this stage. Give it another flush
            // to clear them.
            assertTrue(regionInfo.isMetaRegion());
            LOG.info(r.toString() + " " + l + ", reflushing");
            r.flush(true);
          }
        }
      }
      size = server.getRegionServerAccounting().getGlobalMemstoreSize();
      assertEquals("Server=" + server.getServerName() + ", i=" + i++, 0, size);
    }

    ht.close();
    TEST_UTIL.shutdownMiniCluster();
  }
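getOnlineRegionServers() is another helper not shown here; a plausible sketch over the mini cluster's regionserver threads, using the same cluster field the test assigns above:

  private List<HRegionServer> getOnlineRegionServers() {
    List<HRegionServer> list = new ArrayList<HRegionServer>();
    for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) {
      if (rst.getRegionServer().isOnline()) {
        list.add(rst.getRegionServer());
      }
    }
    return list;
  }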
  /**
   * Unfortunately, the easiest way to test this is to spin up a mini-cluster since we want to do
   * the usual compaction mechanism on the region, rather than going through the backdoor to the
   * region
   */
  @Test
  public void testRegionObserverCompactionTimeStacking() throws Exception {
    // setup a mini cluster so we can do a real compaction on a region
    Configuration conf = UTIL.getConfiguration();
    conf.setClass(HConstants.REGION_IMPL, CompactionCompletionNotifyingRegion.class, HRegion.class);
    conf.setInt("hbase.hstore.compaction.min", 2);
    UTIL.startMiniCluster();
    String tableName = "testRegionObserverCompactionTimeStacking";
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] A = Bytes.toBytes("A");
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor(A));
    desc.addCoprocessor(EmptyRegionObsever.class.getName(), null, Coprocessor.PRIORITY_USER, null);
    desc.addCoprocessor(
        NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST, null);

    Admin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    Table table = UTIL.getConnection().getTable(desc.getTableName());

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flushRegion(region.getRegionName());
    CountDownLatch latch =
        ((CompactionCompletionNotifyingRegion) region).getCompactionStateChangeLatch();

    // put another row and flush that too
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    admin.flushRegion(region.getRegionName());

    // run a compaction, which normally should get rid of the data
    // wait for the compaction checker to complete
    latch.await();
    // check both rows to ensure that they aren't there
    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
        "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
            + r,
        r.listCells());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
        "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
            + r,
        r.listCells());

    table.close();
    UTIL.shutdownMiniCluster();
  }
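Neither EmptyRegionObsever (the spelling is the class's own) nor NoDataFromCompaction is shown here. The assertions imply the latter discards everything during compaction; a rough sketch of that idea, with the hook name and signatures assumed against a 1.x-era RegionObserver API:

public static class NoDataFromCompaction extends BaseRegionObserver {
  @Override
  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
      Store store, InternalScanner scanner, ScanType scanType) throws IOException {
    // Hand back a scanner that yields nothing, so the compacted output is empty.
    return new InternalScanner() {
      @Override
      public boolean next(List<Cell> results) throws IOException {
        return false; // no cells, no more rows
      }

      @Override
      public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
        return false;
      }

      @Override
      public void close() throws IOException {}
    };
  }
}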