Example #1
 /**
  * Set the table's replication switch if it is not already in the requested state.
  *
  * @param tableName name of the table
  * @param isRepEnabled whether replication should be enabled or disabled
  * @throws IOException if a remote or network exception occurs
  */
 private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException {
   Admin admin = null;
   try {
     admin = this.connection.getAdmin();
     HTableDescriptor htd = admin.getTableDescriptor(tableName);
     if (isTableRepEnabled(htd) ^ isRepEnabled) {
       boolean isOnlineSchemaUpdateEnabled =
           this.connection
               .getConfiguration()
               .getBoolean("hbase.online.schema.update.enable", true);
       if (!isOnlineSchemaUpdateEnabled) {
         admin.disableTable(tableName);
       }
       for (HColumnDescriptor hcd : htd.getFamilies()) {
         hcd.setScope(
             isRepEnabled
                 ? HConstants.REPLICATION_SCOPE_GLOBAL
                 : HConstants.REPLICATION_SCOPE_LOCAL);
       }
       admin.modifyTable(tableName, htd);
       if (!isOnlineSchemaUpdateEnabled) {
         admin.enableTable(tableName);
       }
     }
   } finally {
     if (admin != null) {
       try {
         admin.close();
       } catch (IOException e) {
         LOG.warn("Failed to close admin connection.");
         LOG.debug("Details on failure to close admin connection.", e);
       }
     }
   }
 }
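The guard isTableRepEnabled(htd) ^ isRepEnabled is the interesting part: XOR of two booleans is true exactly when they differ, so the descriptor is only rewritten when the current state and the requested state disagree. A standalone sketch of the idiom (the names here are illustrative, not part of the example above):

 public class XorGuardDemo {
   public static void main(String[] args) {
     boolean currentlyEnabled = true;   // state read from the table descriptor
     boolean requestedEnabled = false;  // state the caller asked for
     // XOR is true only when the two flags differ, i.e. when a change is needed.
     if (currentlyEnabled ^ requestedEnabled) {
       System.out.println("States differ - apply the schema change");
     } else {
       System.out.println("Already in the requested state - skip the update");
     }
   }
 }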
Example #2
  /**
   * Find all column families that are replicated from this cluster
   *
   * @return the full list of replicated column families of this cluster, each entry carrying:
   *     tableName, family name, replicationType
   *     <p>Currently replicationType is always Global. More replication types may be added here in
   *     the future, for example: 1) replication that applies only to selected peers instead of all
   *     peers; 2) a replicationType indicating that this cluster's servers act as Slave for the
   *     table:columnFam.
   * @throws IOException if a remote or network exception occurs
   */
  public List<HashMap<String, String>> listReplicated() throws IOException {
    List<HashMap<String, String>> replicationColFams = new ArrayList<HashMap<String, String>>();

    Admin admin = connection.getAdmin();
    HTableDescriptor[] tables;
    try {
      tables = admin.listTables();
    } finally {
      admin.close();
    }

    for (HTableDescriptor table : tables) {
      HColumnDescriptor[] columns = table.getColumnFamilies();
      String tableName = table.getNameAsString();
      for (HColumnDescriptor column : columns) {
        if (column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) {
          // At this point, the column family is replicated to all peers
          HashMap<String, String> replicationEntry = new HashMap<String, String>();
          replicationEntry.put(TNAME, tableName);
          replicationEntry.put(CFNAME, column.getNameAsString());
          replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL);
          replicationColFams.add(replicationEntry);
        }
      }
    }

    return replicationColFams;
  }
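Because the return value is a list of maps keyed by the TNAME, CFNAME and REPLICATIONTYPE constants, a caller simply iterates the entries. A minimal sketch, assuming it lives in the same class as listReplicated() so those constants are in scope:

  void printReplicatedFamilies() throws IOException {
    // Each map describes one replicated column family.
    for (HashMap<String, String> entry : listReplicated()) {
      System.out.println(
          entry.get(TNAME) + ":" + entry.get(CFNAME) + " -> " + entry.get(REPLICATIONTYPE));
    }
  }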
Example #3
    @Override
    void perform() throws IOException {
      NamespaceDescriptor selected = selectNamespace(namespaceMap);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        String namespaceName = selected.getName();
        LOG.info("Modifying namespace :" + selected);
        NamespaceDescriptor modifiedNsd = NamespaceDescriptor.create(namespaceName).build();
        String nsValueNew;
        do {
          nsValueNew = String.format("%010d", RandomUtils.nextInt(Integer.MAX_VALUE));
          // Compare in this direction: getConfigurationValue may return null for an unset key.
        } while (nsValueNew.equals(selected.getConfigurationValue(nsTestConfigKey)));
        modifiedNsd.setConfiguration(nsTestConfigKey, nsValueNew);
        admin.modifyNamespace(modifiedNsd);
        NamespaceDescriptor freshNamespaceDesc = admin.getNamespaceDescriptor(namespaceName);
        Assert.assertEquals(
            "Namespace: " + selected + " was not modified",
            nsValueNew,
            freshNamespaceDesc.getConfigurationValue(nsTestConfigKey));
        LOG.info("Modified namespace :" + freshNamespaceDesc);
        namespaceMap.put(namespaceName, freshNamespaceDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyNamespaces();
    }
Example #4
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }
      // Select the family only after the null check: selectFamily(null) would throw an NPE.
      HColumnDescriptor cfd = selectFamily(selected);
      if (cfd == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        if (selected.getColumnFamilies().length < 2) {
          LOG.info("No enough column families to delete in table " + selected.getTableName());
          return;
        }
        TableName tableName = selected.getTableName();
        LOG.info("Deleting column family: " + cfd + " from table: " + tableName);
        admin.deleteColumnFamily(tableName, cfd.getName());
        // assertion
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        Assert.assertFalse(
            "Column family: " + cfd + " was not deleted", freshTableDesc.hasFamily(cfd.getName()));
        LOG.info("Deleted column family: " + cfd + " from table: " + tableName);
        disabledTables.put(tableName, freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #5
    @Override
    void perform() throws IOException {
      NamespaceDescriptor selected = selectNamespace(namespaceMap);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        String namespaceName = selected.getName();
        LOG.info("Deleting namespace :" + selected);
        admin.deleteNamespace(namespaceName);
        try {
          if (admin.getNamespaceDescriptor(namespaceName) != null) {
            // the namespace still exists
            Assert.fail("Namespace: " + selected + " was not deleted");
          } else {
            LOG.info("Deleted namespace :" + selected);
          }
        } catch (NamespaceNotFoundException nsnfe) {
          // This is the expected result
          LOG.info("Deleted namespace :" + selected);
        }
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyNamespaces();
    }
Example #6
 @Override
 void perform() throws IOException {
   Admin admin = connection.getAdmin();
   try {
     HTableDescriptor htd = createTableDesc();
     TableName tableName = htd.getTableName();
     if (admin.tableExists(tableName)) {
       return;
     }
     String numRegionKey = String.format(NUM_REGIONS_KEY, this.getClass().getSimpleName());
     numRegions = getConf().getInt(numRegionKey, DEFAULT_NUM_REGIONS);
     byte[] startKey = Bytes.toBytes("row-0000000000");
     byte[] endKey = Bytes.toBytes("row-" + Integer.MAX_VALUE);
     LOG.info("Creating table:" + htd);
     admin.createTable(htd, startKey, endKey, numRegions);
     Assert.assertTrue("Table: " + htd + " was not created", admin.tableExists(tableName));
     HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
     enabledTables.put(tableName, freshTableDesc);
     LOG.info("Created table:" + freshTableDesc);
   } catch (Exception e) {
     LOG.warn("Caught exception in action: " + this.getClass());
     throw e;
   } finally {
     admin.close();
   }
   verifyTables();
 }
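The four-argument createTable call above pre-splits the table: HBase derives numRegions - 1 region boundaries spread byte-wise between startKey and endKey (the overload requires numRegions >= 3). A sketch of the equivalent explicit form, assuming the 1.x API where Bytes.split(a, b, n) returns a, n intermediate keys, and b:

      // numRegions - 1 boundary keys, including startKey and endKey themselves.
      byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
      admin.createTable(htd, splitKeys);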
Example #7
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        HColumnDescriptor cfd = createFamilyDesc();
        if (selected.hasFamily(cfd.getName())) {
          LOG.info(
              Bytes.toString(cfd.getName()) + " already exists in table " + selected.getTableName());
          return;
        }
        TableName tableName = selected.getTableName();
        LOG.info("Adding column family: " + cfd + " to table: " + tableName);
        admin.addColumn(tableName, cfd);
        // assertion
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        Assert.assertTrue(
            "Column family: " + cfd + " was not added", freshTableDesc.hasFamily(cfd.getName()));
        LOG.info("Added column family: " + cfd + " to table: " + tableName);
        disabledTables.put(tableName, freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #8
 /**
  * Sweeps the mob files on one column family. It deletes the unused mob files and merges the small
  * mob files into bigger ones.
  *
  * @param tableName The current table name in string format.
  * @param familyName The column family name.
   * @return 0 on success, 2 if the job aborted with an exception, 3 if unable to start because
   *     another compaction is in progress, 4 if the MapReduce job was unsuccessful
  * @throws IOException
  * @throws InterruptedException
  * @throws ClassNotFoundException
  * @throws KeeperException
  * @throws ServiceException
  */
 int sweepFamily(String tableName, String familyName)
     throws IOException, InterruptedException, ClassNotFoundException, KeeperException,
         ServiceException {
   Configuration conf = getConf();
   // make sure the target HBase exists.
   HBaseAdmin.checkHBaseAvailable(conf);
   Connection connection = ConnectionFactory.createConnection(getConf());
   Admin admin = connection.getAdmin();
   try {
     FileSystem fs = FileSystem.get(conf);
     TableName tn = TableName.valueOf(tableName);
     HTableDescriptor htd = admin.getTableDescriptor(tn);
     HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
     if (family == null || !family.isMobEnabled()) {
       throw new IOException("Column family " + familyName + " is not a MOB column family");
     }
     SweepJob job = new SweepJob(conf, fs);
     // Run the sweeping
     return job.sweep(tn, family);
   } catch (Exception e) {
     System.err.println("Job aborted due to exception " + e);
     return 2; // job failed
   } finally {
      try {
        admin.close();
      } catch (IOException e) {
        System.err.println("Failed to close the HBaseAdmin: " + e.getMessage());
      }
      try {
        connection.close();
      } catch (IOException e) {
        System.err.println("Failed to close the connection: " + e.getMessage());
      }
   }
 }
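Since sweepFamily reports its outcome through the documented return codes rather than exceptions, a caller typically maps them to messages or a process exit status. A hedged sketch of such a caller fragment, with hypothetical table and family names:

  int code = sweepFamily("someTable", "someMobFamily"); // hypothetical names
  switch (code) {
    case 0: System.out.println("Sweep succeeded"); break;
    case 2: System.err.println("Job aborted with an exception"); break;
    case 3: System.err.println("Unable to start: another compaction is in progress"); break;
    case 4: System.err.println("MapReduce job was unsuccessful"); break;
    default: System.err.println("Unexpected return code: " + code);
  }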
Example #9
 @Override
 protected int doWork() throws Exception {
   Connection connection = null;
   Admin admin = null;
   try {
     connection = ConnectionFactory.createConnection(getConf());
     admin = connection.getAdmin();
     HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
      if (snapshotType != null) {
        // map the requested snapshot type string to the protobuf enum
        type = ProtobufUtil.createProtosSnapShotDescType(snapshotType);
      }
     admin.snapshot(
         new SnapshotDescription(snapshotName, tableName, ProtobufUtil.createSnapshotType(type)));
   } catch (Exception e) {
      return -1; // any failure is signaled to the caller through the exit code
   } finally {
     if (admin != null) {
       admin.close();
     }
     if (connection != null) {
       connection.close();
     }
   }
   return 0;
 }
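Connection and Admin are both Closeable in this API, so the same doWork body can be written with try-with-resources, which removes the null checks and still closes admin before connection. A sketch of that alternative, not the original code:

  @Override
  protected int doWork() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(getConf());
        Admin admin = connection.getAdmin()) {
      HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
      if (snapshotType != null) {
        type = ProtobufUtil.createProtosSnapShotDescType(snapshotType);
      }
      admin.snapshot(
          new SnapshotDescription(snapshotName, tableName, ProtobufUtil.createSnapshotType(type)));
      return 0;
    } catch (Exception e) {
      return -1; // same contract as above: any failure maps to -1
    }
  }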
Example #10
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }
      HColumnDescriptor columnDesc = selectFamily(selected);
      if (columnDesc == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      int versions = RandomUtils.nextInt(10) + 3;
      try {
        TableName tableName = selected.getTableName();
        LOG.info(
            "Altering versions of column family: "
                + columnDesc
                + " to: "
                + versions
                + " in table: "
                + tableName);
        columnDesc.setMinVersions(versions);
        columnDesc.setMaxVersions(versions);
        admin.modifyTable(tableName, selected);
        // assertion
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        HColumnDescriptor freshColumnDesc = freshTableDesc.getFamily(columnDesc.getName());
        Assert.assertEquals(
            "Column family: " + columnDesc + " was not altered",
            versions,
            freshColumnDesc.getMaxVersions());
        Assert.assertEquals(
            "Column family: " + freshColumnDesc + " was not altered",
            versions,
            freshColumnDesc.getMinVersions());
        LOG.info(
            "Altered versions of column family: "
                + columnDesc
                + " to: "
                + versions
                + " in table: "
                + tableName);
        disabledTables.put(tableName, freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #11
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }
      HColumnDescriptor columnDesc = selectFamily(selected);
      if (columnDesc == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        TableName tableName = selected.getTableName();
        // possible DataBlockEncoding ids
        int[] possibleIds = {0, 2, 3, 4, 6};
        short id = (short) possibleIds[RandomUtils.nextInt(possibleIds.length)];
        LOG.info(
            "Altering encoding of column family: "
                + columnDesc
                + " to: "
                + id
                + " in table: "
                + tableName);
        columnDesc.setDataBlockEncoding(DataBlockEncoding.getEncodingById(id));
        admin.modifyTable(tableName, selected);
        // assertion
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        HColumnDescriptor freshColumnDesc = freshTableDesc.getFamily(columnDesc.getName());
        Assert.assertEquals(
            "Encoding of column family: " + columnDesc + " was not altered",
            id,
            freshColumnDesc.getDataBlockEncoding().getId());
        LOG.info(
            "Altered encoding of column family: "
                + freshColumnDesc
                + " to: "
                + id
                + " in table: "
                + tableName);
        disabledTables.put(tableName, freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #12
 protected void verifyNamespaces() throws IOException {
   Connection connection = getConnection();
   Admin admin = connection.getAdmin();
   // iterating concurrent map
   for (String nsName : namespaceMap.keySet()) {
     try {
       Assert.assertTrue(
           "Namespace: " + nsName + " in namespaceMap does not exist",
           admin.getNamespaceDescriptor(nsName) != null);
     } catch (NamespaceNotFoundException nsnfe) {
       Assert.fail(
           "Namespace: " + nsName + " in namespaceMap does not exist: " + nsnfe.getMessage());
     }
   }
   admin.close();
 }
Example #13
    // populate tables
    @Override
    void perform() throws IOException {
      HTableDescriptor selected = selectTable(enabledTables);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      TableName tableName = selected.getTableName();
      try (Table table = connection.getTable(tableName)) {
        ArrayList<HRegionInfo> regionInfos =
            new ArrayList<HRegionInfo>(admin.getTableRegions(selected.getTableName()));
        int numRegions = regionInfos.size();
        // average number of rows to be added per action to each region
        int averageRows = 1;
        int numRows = averageRows * numRegions;
        LOG.info("Adding " + numRows + " rows to table: " + selected);
        for (int i = 0; i < numRows; i++) {
          // nextInt(Integer.MAX_VALUE) to return positive numbers only
          byte[] rowKey =
              Bytes.toBytes(
                  "row-" + String.format("%010d", RandomUtils.nextInt(Integer.MAX_VALUE)));
          HColumnDescriptor cfd = selectFamily(selected);
          if (cfd == null) {
            return;
          }
          byte[] family = cfd.getName();
          byte[] qualifier = Bytes.toBytes("col-" + RandomUtils.nextInt(Integer.MAX_VALUE) % 10);
          byte[] value = Bytes.toBytes("val-" + RandomStringUtils.randomAlphanumeric(10));
          Put put = new Put(rowKey);
          put.addColumn(family, qualifier, value);
          table.put(put);
        }
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        enabledTables.put(tableName, freshTableDesc);
        LOG.info("Added " + numRows + " rows to table: " + selected);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #14
    @Override
    void perform() throws IOException {

      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        TableName tableName = selected.getTableName();
        LOG.info("Enabling table :" + selected);
        admin.enableTable(tableName);
        Assert.assertTrue(
            "Table: " + selected + " was not enabled", admin.isTableEnabled(tableName));
        HTableDescriptor freshTableDesc = admin.getTableDescriptor(tableName);
        enabledTables.put(tableName, freshTableDesc);
        LOG.info("Enabled table :" + freshTableDesc);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        // TODO workaround
        // Loosen the restriction for TableNotDisabledException/TableNotEnabledException thrown in
        // sync operations: 1) when enable/disable starts, the table state is changed to
        // ENABLING/DISABLING (a ZK node in 1.x), which is further changed to ENABLED/DISABLED
        // once the operation completes; 2) if a master failover happens in the middle of the
        // enable/disable operation, the new master tries to recover tables in the
        // ENABLING/DISABLING state, as programmed in
        // AssignmentManager#recoverTableInEnablingState() and
        // AssignmentManager#recoverTableInDisablingState();
        // 3) after the new master's initialization completes, the procedure tries to re-do the
        // enable/disable operation, which was already done. Ignore these exceptions until the
        // behavior of AssignmentManager changes in the presence of PV2.
        if (e instanceof TableNotDisabledException) {
          LOG.warn("Caught TableNotDisabledException in action: " + this.getClass(), e);
        } else {
          throw e;
        }
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #15
 protected void verifyTables() throws IOException {
   Connection connection = getConnection();
   Admin admin = connection.getAdmin();
   // iterating concurrent map
   for (TableName tableName : enabledTables.keySet()) {
     Assert.assertTrue(
         "Table: " + tableName + " in enabledTables is not enabled",
         admin.isTableEnabled(tableName));
   }
   for (TableName tableName : disabledTables.keySet()) {
     Assert.assertTrue(
         "Table: " + tableName + " in disabledTables is not disabled",
         admin.isTableDisabled(tableName));
   }
   for (TableName tableName : deletedTables.keySet()) {
     Assert.assertFalse(
         "Table: " + tableName + " in deletedTables is not deleted", admin.tableExists(tableName));
   }
   admin.close();
 }
Example #16
 @Override
 void perform() throws IOException {
   Admin admin = connection.getAdmin();
   try {
     NamespaceDescriptor nsd;
     while (true) {
       nsd = createNamespaceDesc();
       try {
         if (admin.getNamespaceDescriptor(nsd.getName()) != null) {
            // the namespace already exists
            continue;
          } else {
            // Currently the code never returns null - it always throws an exception when the
            // namespace is not found - so this branch is just defensive programming in case the
            // method's behavior changes in the future.
            break;
          }
        } catch (NamespaceNotFoundException nsnfe) {
          // This is expected for a randomly generated NamespaceDescriptor
         break;
       }
     }
     LOG.info("Creating namespace:" + nsd);
     admin.createNamespace(nsd);
     NamespaceDescriptor freshNamespaceDesc = admin.getNamespaceDescriptor(nsd.getName());
     Assert.assertTrue("Namespace: " + nsd + " was not created", freshNamespaceDesc != null);
     LOG.info("Created namespace:" + freshNamespaceDesc);
     namespaceMap.put(nsd.getName(), freshNamespaceDesc);
   } catch (Exception e) {
     LOG.warn("Caught exception in action: " + this.getClass());
     throw e;
   } finally {
     admin.close();
   }
   verifyNamespaces();
 }
Example #17
    @Override
    void perform() throws IOException {

      HTableDescriptor selected = selectTable(disabledTables);
      if (selected == null) {
        return;
      }

      Admin admin = connection.getAdmin();
      try {
        TableName tableName = selected.getTableName();
        LOG.info("Deleting table :" + selected);
        admin.deleteTable(tableName);
        Assert.assertFalse("Table: " + selected + " was not deleted", admin.tableExists(tableName));
        deletedTables.put(tableName, selected);
        LOG.info("Deleted table :" + selected);
      } catch (Exception e) {
        LOG.warn("Caught exception in action: " + this.getClass());
        throw e;
      } finally {
        admin.close();
      }
      verifyTables();
    }
Example #18
 @After
 public void close() throws IOException {
   deleteTable();
   admin.close();
   conn.close();
 }
Example #19
 @After
 public void tearDown() throws Exception {
   admin.disableTable(TableName.valueOf(tableName));
   admin.deleteTable(TableName.valueOf(tableName));
   admin.close();
 }
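deleteTable requires the table to be disabled first, which is why the two calls appear in this order; the teardown fails, though, if the table was never created or is already disabled. A slightly more defensive sketch using the same API:

  @After
  public void tearDown() throws Exception {
    TableName tn = TableName.valueOf(tableName);
    try {
      if (admin.tableExists(tn)) {
        if (!admin.isTableDisabled(tn)) {
          admin.disableTable(tn); // deleteTable only works on a disabled table
        }
        admin.deleteTable(tn);
      }
    } finally {
      admin.close(); // always release the Admin, even if cleanup fails
    }
  }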
Example #20
  public void doTest(Class<?> regionClass, boolean distributedLogReplay) throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay);
    // Insert our custom region
    c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
    c.setBoolean("dfs.support.append", true);
    // Encourage plenty of flushes
    c.setLong("hbase.hregion.memstore.flush.size", 200000);
    c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());
    // Only run compaction when we tell it to
    c.setInt("hbase.hstore.compactionThreshold", 1000);
    c.setLong("hbase.hstore.blockingStoreFiles", 1000);
    // Compact quickly after we tell it to!
    c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
    LOG.info("Starting mini cluster");
    TEST_UTIL.startMiniCluster(1);
    CompactionBlockerRegion compactingRegion = null;
    Admin admin = null;
    try {
      LOG.info("Creating admin");
      admin = TEST_UTIL.getConnection().getAdmin();
      LOG.info("Creating table");
      TEST_UTIL.createTable(TABLE_NAME, FAMILY);
      Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
      LOG.info("Loading test table");
      // Find the region
      List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME);
      assertEquals(1, testRegions.size());
      compactingRegion = (CompactionBlockerRegion) testRegions.get(0);
      LOG.info("Blocking compactions");
      compactingRegion.stopCompactions();
      long lastFlushTime = compactingRegion.getLastFlushTime();
      // Load some rows
      TEST_UTIL.loadNumericRows(table, FAMILY, 0, FIRST_BATCH_COUNT);

      // add a compaction from an older (non-existing) region to see whether we successfully skip
      // those entries
      HRegionInfo oldHri =
          new HRegionInfo(table.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      CompactionDescriptor compactionDescriptor =
          ProtobufUtil.toCompactionDescriptor(
              oldHri,
              FAMILY,
              Lists.newArrayList(new Path("/a")),
              Lists.newArrayList(new Path("/b")),
              new Path("store_dir"));
      WALUtil.writeCompactionMarker(
          compactingRegion.getWAL(),
          table.getTableDescriptor(),
          oldHri,
          compactionDescriptor,
          new AtomicLong(Long.MAX_VALUE - 100));

      // Wait till flush has happened, otherwise there won't be multiple store files
      long startWaitTime = System.currentTimeMillis();
      while (compactingRegion.getLastFlushTime() <= lastFlushTime
          || compactingRegion.countStoreFiles() <= 1) {
        LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString());
        Thread.sleep(1000);
        assertTrue(
            "Timed out waiting for the region to flush",
            System.currentTimeMillis() - startWaitTime < 30000);
      }
      assertTrue(compactingRegion.countStoreFiles() > 1);
      final byte[] REGION_NAME = compactingRegion.getRegionName();
      LOG.info("Asking for compaction");
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      LOG.info("Waiting for compaction to be about to start");
      compactingRegion.waitForCompactionToBlock();
      LOG.info("Starting a new server");
      RegionServerThread newServerThread = TEST_UTIL.getMiniHBaseCluster().startRegionServer();
      final HRegionServer newServer = newServerThread.getRegionServer();
      LOG.info("Killing region server ZK lease");
      TEST_UTIL.expireRegionServerSession(0);
      CompactionBlockerRegion newRegion = null;
      startWaitTime = System.currentTimeMillis();
      LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME));

      // wait for region to be assigned and to go out of log replay if applicable
      Waiter.waitFor(
          c,
          60000,
          new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
              HRegion newRegion = newServer.getOnlineRegion(REGION_NAME);
              return newRegion != null && !newRegion.isRecovering();
            }
          });

      newRegion = (CompactionBlockerRegion) newServer.getOnlineRegion(REGION_NAME);

      LOG.info("Allowing compaction to proceed");
      compactingRegion.allowCompactions();
      while (compactingRegion.compactCount == 0) {
        Thread.sleep(1000);
      }
      // The server we killed stays up until the compaction that was started before it was killed
      // completes. In the logs you should see the old regionserver now going down.
      LOG.info("Compaction finished");
      // After compaction of old region finishes on the server that was going down, make sure that
      // all the files we expect are still working when region is up in new location.
      FileSystem fs = newRegion.getFilesystem();
      for (String f : newRegion.getStoreFileList(new byte[][] {FAMILY})) {
        assertTrue("After compaction, does not exist: " + f, fs.exists(new Path(f)));
      }
      // If we survive the split keep going...
      // Now we make sure that the region isn't totally confused.  Load up more rows.
      TEST_UTIL.loadNumericRows(
          table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      startWaitTime = System.currentTimeMillis();
      while (newRegion.compactCount == 0) {
        Thread.sleep(1000);
        assertTrue(
            "New region never compacted", System.currentTimeMillis() - startWaitTime < 180000);
      }
      assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table));
    } finally {
      if (compactingRegion != null) {
        compactingRegion.allowCompactions();
      }
      if (admin != null) {
        admin.close();
      }
      TEST_UTIL.shutdownMiniCluster();
    }
  }