private Table createTableAndLoadData(
      HMaster master, TableName tablename, int numRegions, int replication) throws Exception {
    assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions);
    byte[][] splitRows = new byte[numRegions - 1][];
    for (int i = 0; i < splitRows.length; i++) {
      splitRows[i] = ROWS[(i + 1) * ROWSIZE / numRegions];
    }

    Table table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
    if (replication > 1) {
      HBaseTestingUtility.setReplicas(admin, tablename, replication);
    }
    loadData(table);
    verifyRowCount(table, ROWSIZE);

    // poll hbase:meta until all region replicas are reported; the bounded sleep loop is a
    // crude but sufficient way to let region transitions finish before asserting the count
    long timeout = System.currentTimeMillis() + waitTime;
    List<Pair<HRegionInfo, ServerName>> tableRegions;
    while (System.currentTimeMillis() < timeout) {
      tableRegions =
          MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
      if (tableRegions.size() == numRegions * replication) break;
      Thread.sleep(250);
    }

    tableRegions = MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
    assertEquals(numRegions * replication, tableRegions.size());
    return table;
  }
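  // Hedged sketches of the loadData and verifyRowCount helpers used above; the real test
  // helpers may differ. ROWS, ROWSIZE and FAMILYNAME are the class-level fixtures referenced
  // throughout this listing, QUALIFIER is an assumed column qualifier, and the usual HBase
  // client types (Put, Scan, ResultScanner, Bytes) are assumed to be imported.
  private void loadData(Table table) throws IOException {
    for (int i = 0; i < ROWSIZE; i++) {
      Put put = new Put(ROWS[i]);
      put.addColumn(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
      table.put(put);
    }
  }

  private void verifyRowCount(Table table, int expectedRowCount) throws IOException {
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      int rowCount = 0;
      while (scanner.next() != null) {
        rowCount++;
      }
      assertEquals(expectedRowCount, rowCount);
    } finally {
      scanner.close();
    }
  }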
 @Test
 public void testMergeWithReplicas() throws Exception {
   final TableName tableName = TableName.valueOf("testMergeWithReplicas");
   // Create table and load data.
   createTableAndLoadData(master, tableName, 5, 2);
   List<Pair<HRegionInfo, ServerName>> initialRegionToServers =
       MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
   // Merge 1st and 2nd region
   PairOfSameType<HRegionInfo> mergedRegions =
       mergeRegionsAndVerifyRegionNum(master, tableName, 0, 2, 5 * 2 - 2);
   List<Pair<HRegionInfo, ServerName>> currentRegionToServers =
       MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
   List<HRegionInfo> initialRegions = new ArrayList<HRegionInfo>();
   for (Pair<HRegionInfo, ServerName> p : initialRegionToServers) {
     initialRegions.add(p.getFirst());
   }
   List<HRegionInfo> currentRegions = new ArrayList<HRegionInfo>();
   for (Pair<HRegionInfo, ServerName> p : currentRegionToServers) {
     currentRegions.add(p.getFirst());
   }
   assertTrue(initialRegions.contains(mergedRegions.getFirst())); // this is the first region
   assertTrue(
       initialRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getFirst(), 1))); // this is the replica of the first region
   assertTrue(initialRegions.contains(mergedRegions.getSecond())); // this is the second region
   assertTrue(
       initialRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getSecond(), 1))); // this is the replica of the second region
   assertFalse(initialRegions.contains(currentRegions.get(0))); // this is the new region
   assertFalse(
       initialRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               currentRegions.get(0), 1))); // replica of the new region
   assertTrue(
       currentRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               currentRegions.get(0), 1))); // replica of the new region
   assertFalse(
       currentRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getFirst(), 1))); // replica of the merged region
   assertFalse(
       currentRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getSecond(), 1))); // replica of the merged region
 }
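 // A hedged illustration, not part of the original test: an HRegionInfo produced by
 // RegionReplicaUtil.getRegionInfoForReplica shares the primary's table and boundaries but
 // carries a different replica id, which is why the contains() checks above can match
 // replica entries in the lists read from hbase:meta. The helper name is hypothetical.
 private static void assertIsReplicaOf(HRegionInfo primary, HRegionInfo candidate, int replicaId) {
   assertEquals(replicaId, candidate.getReplicaId());
   assertTrue(RegionReplicaUtil.isReplicasForSameRegion(primary, candidate));
 }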
 private PairOfSameType<HRegionInfo> requestMergeRegion(
     HMaster master, TableName tablename, int regionAnum, int regionBnum) throws Exception {
   List<Pair<HRegionInfo, ServerName>> tableRegions =
       MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
   HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
   HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
   TEST_UTIL
       .getHBaseAdmin()
       .mergeRegions(regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), false);
   return new PairOfSameType<HRegionInfo>(regionA, regionB);
 }
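 // A hedged sketch of the mergeRegionsAndVerifyRegionNum helper that the tests in this
 // listing call but do not show; plausibly it just composes requestMergeRegion (above) with
 // waitAndVerifyRegionNum (shown further down), though the real helper may differ.
 private PairOfSameType<HRegionInfo> mergeRegionsAndVerifyRegionNum(
     HMaster master, TableName tablename, int regionAnum, int regionBnum, int expectedRegionNum)
     throws Exception {
   PairOfSameType<HRegionInfo> mergedRegions =
       requestMergeRegion(master, tablename, regionAnum, regionBnum);
   waitAndVerifyRegionNum(master, tablename, expectedRegionNum);
   return mergedRegions;
 }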
Example #4
  @Test
  @SuppressWarnings("deprecation")
  public void testMasterOpsWhileSplitting() throws Exception {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();

    try (Table ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
      assertTrue(m.getTableStateManager().isTableState(TABLENAME, TableState.State.ENABLED));
      TEST_UTIL.loadTable(ht, FAMILYNAME, false);
    }

    List<Pair<HRegionInfo, ServerName>> tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME);
    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
    assertEquals(1, tableRegions.size());
    assertArrayEquals(HConstants.EMPTY_START_ROW, tableRegions.get(0).getFirst().getStartKey());
    assertArrayEquals(HConstants.EMPTY_END_ROW, tableRegions.get(0).getFirst().getEndKey());

    // Now trigger a split and stop when the split is in progress
    LOG.info("Splitting table");
    TEST_UTIL.getAdmin().split(TABLENAME);
    LOG.info("Waiting for split result to be about to open");
    RegionStates regionStates = m.getAssignmentManager().getRegionStates();
    while (regionStates.getRegionsOfTable(TABLENAME).size() <= 1) {
      Thread.sleep(100);
    }
    LOG.info("Making sure we can call getTableRegions while opening");
    tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME, false);

    LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
    // Three regions are listed because the split is in progress: the offline parent is
    // still present in hbase:meta alongside its two daughters
    assertEquals(3, tableRegions.size());
    LOG.info("Making sure we can call getTableRegionClosest while opening");
    Pair<HRegionInfo, ServerName> pair = m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
    LOG.info("Result is: " + pair);
    Pair<HRegionInfo, ServerName> tableRegionFromName =
        MetaTableAccessor.getRegion(m.getConnection(), pair.getFirst().getRegionName());
    assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
  }
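  // A hedged debugging helper, not part of the original test: it logs each region's encoded
  // name and key range so the parent/daughter layout during the split is visible; uses
  // Bytes.toStringBinary for printable keys. LOG is assumed to be the class logger.
  private void logRegionBoundaries(List<Pair<HRegionInfo, ServerName>> regions) {
    for (Pair<HRegionInfo, ServerName> p : regions) {
      HRegionInfo hri = p.getFirst();
      LOG.info("Region " + hri.getEncodedName()
          + " [" + Bytes.toStringBinary(hri.getStartKey())
          + ", " + Bytes.toStringBinary(hri.getEndKey()) + ") on " + p.getSecond());
    }
  }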
  private void waitAndVerifyRegionNum(HMaster master, TableName tablename, int expectedRegionNum)
      throws Exception {
    List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta;
    List<HRegionInfo> tableRegionsInMaster;
    long timeout = System.currentTimeMillis() + waitTime;
    while (System.currentTimeMillis() < timeout) {
      tableRegionsInMeta =
          MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
      tableRegionsInMaster =
          master.getAssignmentManager().getRegionStates().getRegionsOfTable(tablename);
      if (tableRegionsInMeta.size() == expectedRegionNum
          && tableRegionsInMaster.size() == expectedRegionNum) {
        break;
      }
      Thread.sleep(250);
    }

    tableRegionsInMeta =
        MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
    LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
    assertEquals(expectedRegionNum, tableRegionsInMeta.size());
  }
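  // A hedged alternative to the sleep/poll loops in createTableAndLoadData and
  // waitAndVerifyRegionNum, expressed with HBaseTestingUtility.waitFor and a Waiter.Predicate.
  // TEST_UTIL and waitTime are the class fixtures used throughout this listing; the helper
  // name is an addition, and the original tests use the explicit loops shown above.
  private void waitForRegionCountInMeta(final HMaster master, final TableName tablename,
      final int expectedRegionNum) throws Exception {
    TEST_UTIL.waitFor(waitTime, 250, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return MetaTableAccessor
            .getTableRegionsAndLocations(master.getConnection(), tablename)
            .size() == expectedRegionNum;
      }
    });
  }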
  @SuppressWarnings("deprecation")
  @Test
  public void testCleanMergeReference() throws Exception {
    LOG.info("Starting testCleanMergeReference");
    admin.enableCatalogJanitor(false);
    try {
      final TableName tableName = TableName.valueOf("testCleanMergeReference");
      // Create table and load data.
      Table table = createTableAndLoadData(master, tableName);
      // Merge 1st and 2nd region
      mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);
      verifyRowCount(table, ROWSIZE);
      table.close();

      List<Pair<HRegionInfo, ServerName>> tableRegions =
          MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
      HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
      HTableDescriptor tableDescriptor = master.getTableDescriptors().get(tableName);
      Result mergedRegionResult =
          MetaTableAccessor.getRegionResult(
              master.getConnection(), mergedRegionInfo.getRegionName());

      // the merged region's row in hbase:meta still carries the merge references
      assertNotNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER));
      assertNotNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER));

      // the merging regions' directories are still present in the file system
      PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult);
      HRegionInfo regionA = p.getFirst();
      HRegionInfo regionB = p.getSecond();
      FileSystem fs = master.getMasterFileSystem().getFileSystem();
      Path rootDir = master.getMasterFileSystem().getRootDir();

      Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable());
      Path regionAdir = new Path(tabledir, regionA.getEncodedName());
      Path regionBdir = new Path(tabledir, regionB.getEncodedName());
      assertTrue(fs.exists(regionAdir));
      assertTrue(fs.exists(regionBdir));

      admin.compactRegion(mergedRegionInfo.getRegionName());
      // wait until the merged region no longer has reference files
      long timeout = System.currentTimeMillis() + waitTime;
      HRegionFileSystem hrfs =
          new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
      while (System.currentTimeMillis() < timeout) {
        if (!hrfs.hasReferences(tableDescriptor)) {
          break;
        }
        Thread.sleep(50);
      }
      assertFalse(hrfs.hasReferences(tableDescriptor));

      // run CatalogJanitor to clean merge references in hbase:meta and archive the
      // files of merging regions
      int cleaned = admin.runCatalogScan();
      assertTrue(cleaned > 0);
      assertFalse(fs.exists(regionAdir));
      assertFalse(fs.exists(regionBdir));

      mergedRegionResult =
          MetaTableAccessor.getRegionResult(
              master.getConnection(), mergedRegionInfo.getRegionName());
      assertNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER));
      assertNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER));

    } finally {
      admin.enableCatalogJanitor(true);
    }
  }
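  // A hedged helper sketch: the MERGEA/MERGEB checks in testCleanMergeReference could be
  // factored like this. It returns true while the given hbase:meta row still carries merge
  // references; the helper name is hypothetical.
  private static boolean hasMergeReferences(Result regionResult) {
    return regionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null
        || regionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER) != null;
  }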
Example #7
  /**
   * Execute the core common portions of taking a snapshot. The {@link #snapshotRegions(List)}
   * call should be implemented by each snapshot flavor.
   */
  @Override
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value = "REC_CATCH_EXCEPTION",
      justification = "Intentional")
  public void process() {
    String msg =
        "Running "
            + snapshot.getType()
            + " table snapshot "
            + snapshot.getName()
            + " "
            + eventType
            + " on table "
            + snapshotTable;
    LOG.info(msg);
    status.setStatus(msg);
    try {
      // If regions move after this meta scan, the region specific snapshot should fail, triggering
      // an external exception that gets captured here.

      // write down the snapshot info in the working directory
      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
      snapshotManifest.addTableDescriptor(this.htd);
      monitor.rethrowException();

      List<Pair<HRegionInfo, ServerName>> regionsAndLocations;
      if (TableName.META_TABLE_NAME.equals(snapshotTable)) {
        regionsAndLocations =
            new MetaTableLocator().getMetaRegionsAndLocations(server.getZooKeeper());
      } else {
        regionsAndLocations =
            MetaTableAccessor.getTableRegionsAndLocations(
                server.getConnection(), snapshotTable, false);
      }

      // run the snapshot
      snapshotRegions(regionsAndLocations);
      monitor.rethrowException();

      // collect the names of the servers hosting online regions of the table
      Set<String> serverNames = new HashSet<String>();
      for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
        if (p != null && p.getFirst() != null && p.getSecond() != null) {
          HRegionInfo hri = p.getFirst();
          if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) continue;
          serverNames.add(p.getSecond().toString());
        }
      }

      // flush the in-memory state, and write the single manifest
      status.setStatus("Consolidate snapshot: " + snapshot.getName());
      snapshotManifest.consolidate();

      // verify the snapshot is valid
      status.setStatus("Verifying snapshot: " + snapshot.getName());
      verifier.verifySnapshot(this.workingDir, serverNames);

      // complete the snapshot, atomically moving from tmp to .snapshot dir.
      completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
      msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
      status.markComplete(msg);
      LOG.info(msg);
      metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
    } catch (Exception e) { // FindBugs: REC_CATCH_EXCEPTION
      status.abort(
          "Failed to complete snapshot "
              + snapshot.getName()
              + " on table "
              + snapshotTable
              + " because "
              + e.getMessage());
      String reason =
          "Failed taking snapshot "
              + ClientSnapshotDescriptionUtils.toString(snapshot)
              + " due to exception:"
              + e.getMessage();
      LOG.error(reason, e);
      ForeignException ee = new ForeignException(reason, e);
      monitor.receive(ee);
      // need to mark this completed to close off and allow cleanup to happen.
      cancel(reason);
    } finally {
      LOG.debug("Launching cleanup of working dir:" + workingDir);
      try {
        // if the working dir is still present, the snapshot has failed; delete it so no
        // partial snapshot state is left behind.
        if (fs.exists(workingDir) && !this.fs.delete(workingDir, true)) {
          LOG.error("Couldn't delete snapshot working directory:" + workingDir);
        }
      } catch (IOException e) {
        LOG.error("Couldn't delete snapshot working directory:" + workingDir);
      }
      releaseTableLock();
    }
  }
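  // A hedged refactoring sketch, not part of the original handler: the server-name collection
  // in process() could be pulled into a helper like this. Pairs whose region is an offline
  // split parent are skipped, exactly as in the inline loop above.
  private static Set<String> serverNamesOfOnlineRegions(
      List<Pair<HRegionInfo, ServerName>> regionsAndLocations) {
    Set<String> serverNames = new HashSet<String>();
    for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
      if (p == null || p.getFirst() == null || p.getSecond() == null) continue;
      HRegionInfo hri = p.getFirst();
      if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) continue;
      serverNames.add(p.getSecond().toString());
    }
    return serverNames;
  }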