private static Put addEmptyLocation(final Put p, int replicaId) {
   p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId), null);
   p.addImmutable(
       HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId), null);
   p.addImmutable(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId), null);
   return p;
 }
  public void prepareMutationsForMerge(
      HRegionInfo mergedRegion,
      HRegionInfo regionA,
      HRegionInfo regionB,
      ServerName serverName,
      List<Mutation> mutations,
      int regionReplication)
      throws IOException {
    HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);

    // Put for parent
    Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged);
    putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, regionA.toByteArray());
    putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, regionB.toByteArray());
    mutations.add(putOfMerged);
    // Deletes for merging regions
    Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA);
    Delete deleteB = MetaTableAccessor.makeDeleteFromRegionInfo(regionB);
    mutations.add(deleteA);
    mutations.add(deleteB);

     // Add empty locations for region replicas of the merged region so that the number of
     // replicas can be cached whenever the primary region is looked up from meta
    for (int i = 1; i < regionReplication; i++) {
      addEmptyLocation(putOfMerged, i);
    }

     // The merged region is a new region, so openSeqNum = 1 is fine.
    addLocation(putOfMerged, serverName, 1);
  }
  private Table createTableAndLoadData(
      HMaster master, TableName tablename, int numRegions, int replication) throws Exception {
    assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions);
    byte[][] splitRows = new byte[numRegions - 1][];
    for (int i = 0; i < splitRows.length; i++) {
      splitRows[i] = ROWS[(i + 1) * ROWSIZE / numRegions];
    }

    Table table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
    if (replication > 1) {
      HBaseTestingUtility.setReplicas(admin, tablename, replication);
    }
    loadData(table);
    verifyRowCount(table, ROWSIZE);

    // sleep here is an ugly hack to allow region transitions to finish
    long timeout = System.currentTimeMillis() + waitTime;
    List<Pair<HRegionInfo, ServerName>> tableRegions;
    while (System.currentTimeMillis() < timeout) {
      tableRegions =
          MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
      if (tableRegions.size() == numRegions * replication) break;
      Thread.sleep(250);
    }

    tableRegions = MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
    assertEquals(numRegions * replication, tableRegions.size());
    return table;
  }
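  // Alternative sketch (not part of the original source): the sleep/poll loop above could also be
  // written with HBaseTestingUtility#waitFor and a Waiter.Predicate (org.apache.hadoop.hbase.Waiter).
  // Method and parameter names here are assumptions for illustration.
  private void waitForRegionCount(
      final HMaster master, final TableName tablename, final int expectedCount) throws Exception {
    TEST_UTIL.waitFor(waitTime, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename)
                .size() == expectedCount;
      }
    });
  }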
 @Test
 public void testMergeWithReplicas() throws Exception {
   final TableName tableName = TableName.valueOf("testMergeWithReplicas");
   // Create table and load data.
   createTableAndLoadData(master, tableName, 5, 2);
   List<Pair<HRegionInfo, ServerName>> initialRegionToServers =
       MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
   // Merge 1st and 2nd region
   PairOfSameType<HRegionInfo> mergedRegions =
       mergeRegionsAndVerifyRegionNum(master, tableName, 0, 2, 5 * 2 - 2);
   List<Pair<HRegionInfo, ServerName>> currentRegionToServers =
       MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
   List<HRegionInfo> initialRegions = new ArrayList<HRegionInfo>();
   for (Pair<HRegionInfo, ServerName> p : initialRegionToServers) {
     initialRegions.add(p.getFirst());
   }
   List<HRegionInfo> currentRegions = new ArrayList<HRegionInfo>();
   for (Pair<HRegionInfo, ServerName> p : currentRegionToServers) {
     currentRegions.add(p.getFirst());
   }
   assertTrue(initialRegions.contains(mergedRegions.getFirst())); // this is the first region
   assertTrue(
       initialRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getFirst(), 1))); // this is the replica of the first region
   assertTrue(initialRegions.contains(mergedRegions.getSecond())); // this is the second region
   assertTrue(
       initialRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getSecond(), 1))); // this is the replica of the second region
   assertTrue(!initialRegions.contains(currentRegions.get(0))); // this is the new region
   assertTrue(
       !initialRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               currentRegions.get(0), 1))); // replica of the new region
   assertTrue(
       currentRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               currentRegions.get(0), 1))); // replica of the new region
   assertTrue(
       !currentRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getFirst(), 1))); // replica of the merged region
   assertTrue(
       !currentRegions.contains(
           RegionReplicaUtil.getRegionInfoForReplica(
               mergedRegions.getSecond(), 1))); // replica of the merged region
 }
Example #5
  /**
   * Get the HRegionInfo from cache, if not there, from the hbase:meta table. Be careful. Does RPC.
   * Do not hold a lock or synchronize when you call this method.
   *
    * @param regionName the region name, as bytes
   * @return HRegionInfo for the region
   */
  @SuppressWarnings("deprecation")
  protected HRegionInfo getRegionInfo(final byte[] regionName) {
    String encodedName = HRegionInfo.encodeRegionName(regionName);
    RegionState regionState = getRegionState(encodedName);
    if (regionState != null) {
      return regionState.getRegion();
    }

    try {
      Pair<HRegionInfo, ServerName> p =
          MetaTableAccessor.getRegion(server.getConnection(), regionName);
      HRegionInfo hri = p == null ? null : p.getFirst();
      if (hri != null) {
        createRegionState(hri);
      }
      return hri;
    } catch (IOException e) {
      server.abort(
          "Aborting because error occoured while reading "
              + Bytes.toStringBinary(regionName)
              + " from hbase:meta",
          e);
      return null;
    }
  }
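  // Usage sketch (not part of the original source): callers should resolve a region without
  // holding the RegionStates lock, since getRegionInfo() may RPC to hbase:meta. The method
  // name below is an assumption for illustration.
  protected HRegionInfo resolveRegion(final byte[] regionName) {
    HRegionInfo hri = getRegionInfo(regionName);
    if (hri == null) {
      LOG.warn("Region " + Bytes.toStringBinary(regionName)
          + " not found in the cache or in hbase:meta");
    }
    return hri;
  }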
 private PairOfSameType<HRegionInfo> requestMergeRegion(
     HMaster master, TableName tablename, int regionAnum, int regionBnum) throws Exception {
   List<Pair<HRegionInfo, ServerName>> tableRegions =
       MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
   HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
   HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
   TEST_UTIL
       .getHBaseAdmin()
       .mergeRegions(regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), false);
   return new PairOfSameType<HRegionInfo>(regionA, regionB);
 }
  /**
   * Initialize the region assignment snapshot by scanning the hbase:meta table
   *
   * @throws IOException
   */
  public void initialize() throws IOException {
    LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot");
    // TODO: at some point this code could live in the MetaTableAccessor
    Visitor v =
        new Visitor() {
          @Override
          public boolean visit(Result result) throws IOException {
            try {
              if (result == null || result.isEmpty()) return true;
              RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
              if (rl == null) return true;
              HRegionInfo hri = rl.getRegionLocation(0).getRegionInfo();
              if (hri == null) return true;
              if (hri.getTable() == null) return true;
              if (disabledTables.contains(hri.getTable())) {
                return true;
              }
              // Are we to include split parents in the list?
              if (excludeOfflinedSplitParents && hri.isSplit()) return true;
              HRegionLocation[] hrls = rl.getRegionLocations();

              // Add the current assignment to the snapshot for all replicas
              for (int i = 0; i < hrls.length; i++) {
                if (hrls[i] == null) continue;
                hri = hrls[i].getRegionInfo();
                if (hri == null) continue;
                addAssignment(hri, hrls[i].getServerName());
                addRegion(hri);
              }

              // the code below is to handle favored nodes
              byte[] favoredNodes =
                  result.getValue(
                      HConstants.CATALOG_FAMILY,
                      FavoredNodeAssignmentHelper.FAVOREDNODES_QUALIFIER);
              if (favoredNodes == null) return true;
              // Parse the favored nodes list from the serialized bytes
              ServerName[] favoredServerList =
                  FavoredNodeAssignmentHelper.getFavoredNodesList(favoredNodes);
              // Add the favored nodes into assignment plan
              existingAssignmentPlan.updateFavoredNodesMap(hri, Arrays.asList(favoredServerList));
              return true;
            } catch (RuntimeException e) {
              LOG.error("Catche remote exception " + e.getMessage() + " when processing" + result);
              throw e;
            }
          }
        };
    // Scan hbase:meta to pick up user regions
    MetaTableAccessor.fullScan(hConnection, v);
    // regionToRegionServerMap = regions;
    LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot");
  }
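  // Minimal Visitor sketch (not part of the original source): count region rows during a full
  // hbase:meta scan with MetaTableAccessor.fullScan. The method name and the use of
  // java.util.concurrent.atomic.AtomicInteger are assumptions for illustration.
  public int countMetaRegionRows() throws IOException {
    final AtomicInteger rows = new AtomicInteger(0);
    MetaTableAccessor.fullScan(hConnection, new Visitor() {
      @Override
      public boolean visit(Result result) throws IOException {
        if (result != null && !result.isEmpty()
            && MetaTableAccessor.getHRegionInfo(result) != null) {
          rows.incrementAndGet();
        }
        return true; // keep scanning
      }
    });
    return rows.get();
  }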
Example #8
  @Test
  @SuppressWarnings("deprecation")
  public void testMasterOpsWhileSplitting() throws Exception {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();

    try (Table ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
      assertTrue(m.getTableStateManager().isTableState(TABLENAME, TableState.State.ENABLED));
      TEST_UTIL.loadTable(ht, FAMILYNAME, false);
    }

    List<Pair<HRegionInfo, ServerName>> tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME);
    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
    assertEquals(1, tableRegions.size());
    assertArrayEquals(HConstants.EMPTY_START_ROW, tableRegions.get(0).getFirst().getStartKey());
    assertArrayEquals(HConstants.EMPTY_END_ROW, tableRegions.get(0).getFirst().getEndKey());

    // Now trigger a split and stop when the split is in progress
    LOG.info("Splitting table");
    TEST_UTIL.getAdmin().split(TABLENAME);
    LOG.info("Waiting for split result to be about to open");
    RegionStates regionStates = m.getAssignmentManager().getRegionStates();
    while (regionStates.getRegionsOfTable(TABLENAME).size() <= 1) {
      Thread.sleep(100);
    }
    LOG.info("Making sure we can call getTableRegions while opening");
    tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME, false);

    LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
    // We have three regions because one is split-in-progress
    assertEquals(3, tableRegions.size());
    LOG.info("Making sure we can call getTableRegionClosest while opening");
    Pair<HRegionInfo, ServerName> pair = m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
    LOG.info("Result is: " + pair);
    Pair<HRegionInfo, ServerName> tableRegionFromName =
        MetaTableAccessor.getRegion(m.getConnection(), pair.getFirst().getRegionName());
    assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
  }
  private void waitAndVerifyRegionNum(HMaster master, TableName tablename, int expectedRegionNum)
      throws Exception {
    List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta;
    List<HRegionInfo> tableRegionsInMaster;
    long timeout = System.currentTimeMillis() + waitTime;
    while (System.currentTimeMillis() < timeout) {
      tableRegionsInMeta =
          MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
      tableRegionsInMaster =
          master.getAssignmentManager().getRegionStates().getRegionsOfTable(tablename);
      if (tableRegionsInMeta.size() == expectedRegionNum
          && tableRegionsInMaster.size() == expectedRegionNum) {
        break;
      }
      Thread.sleep(250);
    }

    tableRegionsInMeta =
        MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
    LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
    assertEquals(expectedRegionNum, tableRegionsInMeta.size());
  }
 private void mergeRegionsAndPutMetaEntries(
     HConnection hConnection,
     HRegionInfo mergedRegion,
     HRegionInfo regionA,
     HRegionInfo regionB,
     ServerName serverName,
     List<Mutation> metaEntries,
     int regionReplication)
     throws IOException {
   prepareMutationsForMerge(
       mergedRegion, regionA, regionB, serverName, metaEntries, regionReplication);
   MetaTableAccessor.mutateMetaTable(hConnection, metaEntries);
 }
 /**
  * Checks if the given region has a merge qualifier in hbase:meta.
  *
  * @param services region server services, used to obtain a connection for the meta lookup
  * @param regionName name of the specified region
  * @return true if the given region has a merge qualifier in hbase:meta (it will be cleaned up by
  *     the CatalogJanitor)
  * @throws IOException
  */
 boolean hasMergeQualifierInMeta(final RegionServerServices services, final byte[] regionName)
     throws IOException {
   if (services == null) return false;
   // Get merge regions if it is a merged region and already has merge
   // qualifier
   Pair<HRegionInfo, HRegionInfo> mergeRegions =
       MetaTableAccessor.getRegionsFromMergeQualifier(services.getConnection(), regionName);
   if (mergeRegions != null
       && (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) {
     // It has merge qualifier
     return true;
   }
   return false;
 }
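 // Usage sketch (not part of the original source): a region that still carries a merge
 // qualifier in hbase:meta has not been cleaned by the CatalogJanitor yet, so a caller may
 // defer further merges/splits of it. Method and parameter names are assumptions.
 boolean canScheduleNewMerge(final RegionServerServices services, final HRegionInfo hri)
     throws IOException {
   if (hasMergeQualifierInMeta(services, hri.getRegionName())) {
     LOG.info("Deferring; " + hri.getRegionNameAsString()
         + " still has a merge qualifier in hbase:meta");
     return false;
   }
   return true;
 }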
Example #12
 /**
  * This method does an RPC to hbase:meta. Do not call this method with a lock/synchronize held.
  *
  * @param hris The hris to check if empty in hbase:meta and if so, clean them up.
  */
 private void cleanIfNoMetaEntry(Set<HRegionInfo> hris) {
   if (hris.isEmpty()) return;
   for (HRegionInfo hri : hris) {
     try {
       // This is RPC to meta table. It is done while we have a synchronize on
       // regionstates. No progress will be made if meta is not available at this time.
       // This is a cleanup task. Not critical.
       if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes())
           == null) {
         regionOffline(hri);
         FSUtils.deleteRegionDir(server.getConfiguration(), hri);
       }
     } catch (IOException e) {
       LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);
     }
   }
 }
Example #13
      @Override
      public void run() {
        while (!isStopped()) {
          try {
            List<HRegionInfo> regions =
                MetaScanner.listAllRegions(TEST_UTIL.getConfiguration(), connection, false);

            // select a random region
            HRegionInfo parent = regions.get(random.nextInt(regions.size()));
            if (parent == null || !TABLENAME.equals(parent.getTable())) {
              continue;
            }

            long startKey = 0, endKey = Long.MAX_VALUE;
            byte[] start = parent.getStartKey();
            byte[] end = parent.getEndKey();
            if (!Bytes.equals(HConstants.EMPTY_START_ROW, parent.getStartKey())) {
              startKey = Bytes.toLong(parent.getStartKey());
            }
            if (!Bytes.equals(HConstants.EMPTY_END_ROW, parent.getEndKey())) {
              endKey = Bytes.toLong(parent.getEndKey());
            }
            if (startKey == endKey) {
              continue;
            }

            long midKey =
                BigDecimal.valueOf(startKey)
                    .add(BigDecimal.valueOf(endKey))
                    .divideToIntegralValue(BigDecimal.valueOf(2))
                    .longValue();

            HRegionInfo splita = new HRegionInfo(TABLENAME, start, Bytes.toBytes(midKey));
            HRegionInfo splitb = new HRegionInfo(TABLENAME, Bytes.toBytes(midKey), end);

            MetaTableAccessor.splitRegion(
                connection, parent, splita, splitb, ServerName.valueOf("fooserver", 1, 0));

            Threads.sleep(random.nextInt(200));
          } catch (Throwable e) {
            ex = e;
            Assert.fail(StringUtils.stringifyException(e));
          }
        }
      }
  protected HRegionInfo createRegion(
      Configuration conf, final Table htbl, byte[] startKey, byte[] endKey) throws IOException {
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HTableDescriptor htd = htbl.getTableDescriptor();
    HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    FSDataOutputStream out = fs.create(riPath);
    out.write(hri.toDelimitedByteArray());
    out.close();

    // add to meta.
    MetaTableAccessor.addRegionToMeta(meta, hri);
    meta.close();
    return hri;
  }
Example #15
  /**
   * Restore or Clone the specified snapshot
   *
   * @param reqSnapshot the requested snapshot, carrying the snapshot name and the table to
   *     restore or clone
   * @param nonceGroup unique value to prevent duplicated RPC
   * @param nonce unique value to prevent duplicated RPC
   * @throws IOException
   */
  public long restoreOrCloneSnapshot(
      SnapshotDescription reqSnapshot, final long nonceGroup, final long nonce) throws IOException {
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);

    // check if the snapshot exists
    if (!fs.exists(snapshotDir)) {
      LOG.error("A Snapshot named '" + reqSnapshot.getName() + "' does not exist.");
      throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(reqSnapshot));
    }

    // Get snapshot info from file system. The reqSnapshot is a "fake" snapshotInfo with
    // just the snapshot "name" and table name to restore. It does not contain the "real" snapshot
    // information.
    SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    SnapshotManifest manifest =
        SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot);
    HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
    TableName tableName = TableName.valueOf(reqSnapshot.getTable());

    // stop tracking "abandoned" handlers
    cleanupSentinels();

    // Verify snapshot validity
    SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);

    // Execute the restore/clone operation
    long procId;
    if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
      procId =
          restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceGroup, nonce);
    } else {
      procId =
          cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceGroup, nonce);
    }
    return procId;
  }
Example #16
  /** Removes the table from hbase:meta and archives the HDFS files. */
  protected void removeTableData(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    try {
      // 1. Remove regions from META
      LOG.debug("Deleting regions from META");
      MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);

      // -----------------------------------------------------------------------
      // NOTE: At this point we still have data on disk, but nothing in hbase:meta.
      //       If the rename below fails, hbck will report an inconsistency.
      // -----------------------------------------------------------------------

      // 2. Move the table to /hbase/.tmp
      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
      Path tempTableDir = mfs.moveTableToTemp(tableName);

      // 3. Archive regions from FS (temp directory)
      FileSystem fs = mfs.getFileSystem();
      for (HRegionInfo hri : regions) {
        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
        HFileArchiver.archiveRegion(
            fs,
            mfs.getRootDir(),
            tempTableDir,
            HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
      }

      // 4. Delete table directory from FS (temp directory)
      if (!fs.delete(tempTableDir, true)) {
        LOG.error("Couldn't delete " + tempTableDir);
      }

      LOG.debug("Table '" + tableName + "' archived!");
    } finally {
      cleanupTableState();
    }
  }
Example #17
  /**
   * Execute the core common portions of taking a snapshot. The {@link #snapshotRegions(List)} call
   * should be implemented for each snapshot flavor.
   */
  @Override
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value = "REC_CATCH_EXCEPTION",
      justification = "Intentional")
  public void process() {
    String msg =
        "Running "
            + snapshot.getType()
            + " table snapshot "
            + snapshot.getName()
            + " "
            + eventType
            + " on table "
            + snapshotTable;
    LOG.info(msg);
    status.setStatus(msg);
    try {
      // If regions move after this meta scan, the region specific snapshot should fail, triggering
      // an external exception that gets captured here.

      // write down the snapshot info in the working directory
      SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
      snapshotManifest.addTableDescriptor(this.htd);
      monitor.rethrowException();

      List<Pair<HRegionInfo, ServerName>> regionsAndLocations;
      if (TableName.META_TABLE_NAME.equals(snapshotTable)) {
        regionsAndLocations =
            new MetaTableLocator().getMetaRegionsAndLocations(server.getZooKeeper());
      } else {
        regionsAndLocations =
            MetaTableAccessor.getTableRegionsAndLocations(
                server.getConnection(), snapshotTable, false);
      }

      // run the snapshot
      snapshotRegions(regionsAndLocations);
      monitor.rethrowException();

      // extract each pair to separate lists
      Set<String> serverNames = new HashSet<String>();
      for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
        if (p != null && p.getFirst() != null && p.getSecond() != null) {
          HRegionInfo hri = p.getFirst();
          if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) continue;
          serverNames.add(p.getSecond().toString());
        }
      }

      // flush the in-memory state, and write the single manifest
      status.setStatus("Consolidate snapshot: " + snapshot.getName());
      snapshotManifest.consolidate();

      // verify the snapshot is valid
      status.setStatus("Verifying snapshot: " + snapshot.getName());
      verifier.verifySnapshot(this.workingDir, serverNames);

      // complete the snapshot, atomically moving from tmp to .snapshot dir.
      completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
      msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
      status.markComplete(msg);
      LOG.info(msg);
      metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
    } catch (Exception e) { // FindBugs: REC_CATCH_EXCEPTION
      status.abort(
          "Failed to complete snapshot "
              + snapshot.getName()
              + " on table "
              + snapshotTable
              + " because "
              + e.getMessage());
      String reason =
          "Failed taking snapshot "
              + ClientSnapshotDescriptionUtils.toString(snapshot)
              + " due to exception:"
              + e.getMessage();
      LOG.error(reason, e);
      ForeignException ee = new ForeignException(reason, e);
      monitor.receive(ee);
      // need to mark this completed to close off and allow cleanup to happen.
      cancel(reason);
    } finally {
      LOG.debug("Launching cleanup of working dir:" + workingDir);
      try {
        // If the working dir is still present, the snapshot has failed and we delete it.
        if (fs.exists(workingDir) && !this.fs.delete(workingDir, true)) {
          LOG.error("Couldn't delete snapshot working directory:" + workingDir);
        }
      } catch (IOException e) {
        LOG.error("Couldn't delete snapshot working directory:" + workingDir);
      }
      releaseTableLock();
    }
  }
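  // Sketch (not part of the original source): process() above is a template method and each
  // snapshot flavor supplies its own snapshotRegions(...). The signature is inferred from the
  // javadoc's {@link #snapshotRegions(List)}; the body below is only illustrative.
  @Override
  protected void snapshotRegions(List<Pair<HRegionInfo, ServerName>> regions) throws IOException {
    for (Pair<HRegionInfo, ServerName> regionAndLocation : regions) {
      // flavor-specific work per region goes here, e.g. adding a region manifest
      LOG.info("Snapshotting region " + regionAndLocation.getFirst().getRegionNameAsString());
    }
  }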
 /**
  * Dumps hbase:meta table info
  *
  * @return # of entries in meta.
  */
 protected int scanMeta() throws IOException {
   LOG.info("Scanning META");
   MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
   return MetaTableAccessor.fullScanRegions(TEST_UTIL.getConnection()).size();
 }
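 // Usage sketch (not part of the original source): tests can assert on the number of region rows
 // scanMeta() reports; "expectedRegions" is an assumed value supplied by the test fixture.
 protected void assertMetaEntries(int expectedRegions) throws IOException {
   assertEquals("Unexpected number of rows in hbase:meta", expectedRegions, scanMeta());
 }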
  @SuppressWarnings("deprecation")
  @Test
  public void testCleanMergeReference() throws Exception {
    LOG.info("Starting testCleanMergeReference");
    admin.enableCatalogJanitor(false);
    try {
      final TableName tableName = TableName.valueOf("testCleanMergeReference");
      // Create table and load data.
      Table table = createTableAndLoadData(master, tableName);
      // Merge 1st and 2nd region
      mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);
      verifyRowCount(table, ROWSIZE);
      table.close();

      List<Pair<HRegionInfo, ServerName>> tableRegions =
          MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
      HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
      HTableDescriptor tableDescriptor = master.getTableDescriptors().get(tableName);
      Result mergedRegionResult =
          MetaTableAccessor.getRegionResult(
              master.getConnection(), mergedRegionInfo.getRegionName());

      // contains merge reference in META
      assertTrue(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER)
              != null);
      assertTrue(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER)
              != null);

      // the merging regions' directories still exist in the file system
      PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult);
      HRegionInfo regionA = p.getFirst();
      HRegionInfo regionB = p.getSecond();
      FileSystem fs = master.getMasterFileSystem().getFileSystem();
      Path rootDir = master.getMasterFileSystem().getRootDir();

      Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable());
      Path regionAdir = new Path(tabledir, regionA.getEncodedName());
      Path regionBdir = new Path(tabledir, regionB.getEncodedName());
      assertTrue(fs.exists(regionAdir));
      assertTrue(fs.exists(regionBdir));

      admin.compactRegion(mergedRegionInfo.getRegionName());
      // wait until the merged region no longer has reference files
      long timeout = System.currentTimeMillis() + waitTime;
      HRegionFileSystem hrfs =
          new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
      while (System.currentTimeMillis() < timeout) {
        if (!hrfs.hasReferences(tableDescriptor)) {
          break;
        }
        Thread.sleep(50);
      }
      assertFalse(hrfs.hasReferences(tableDescriptor));

      // run CatalogJanitor to clean merge references in hbase:meta and archive the
      // files of merging regions
      int cleaned = admin.runCatalogScan();
      assertTrue(cleaned > 0);
      assertFalse(fs.exists(regionAdir));
      assertFalse(fs.exists(regionBdir));

      mergedRegionResult =
          MetaTableAccessor.getRegionResult(
              master.getConnection(), mergedRegionInfo.getRegionName());
      assertFalse(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER)
              != null);
      assertFalse(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER)
              != null);

    } finally {
      admin.enableCatalogJanitor(true);
    }
  }
  /**
   * Prepare the merged region and region files.
   *
   * @param server Hosting server instance. Can be null when testing
   * @param services Used to online/offline regions.
   * @return merged region
   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server,
   *     RegionServerServices)}
   */
  HRegion createMergedRegion(final Server server, final RegionServerServices services)
      throws IOException {
    LOG.info(
        "Starting merge of "
            + region_a
            + " and "
            + region_b.getRegionNameAsString()
            + ", forcible="
            + forcible);
    if ((server != null && server.isStopped()) || (services != null && services.isStopping())) {
      throw new IOException("Server is stopped or stopping");
    }

    if (rsCoprocessorHost != null) {
      if (rsCoprocessorHost.preMerge(this.region_a, this.region_b)) {
        throw new IOException(
            "Coprocessor bypassing regions " + this.region_a + " " + this.region_b + " merge.");
      }
    }

    // If true, there is no cluster to write meta edits to or to use for coordination.
    boolean testing =
        server == null
            ? true
            : server.getConfiguration().getBoolean("hbase.testing.nocluster", false);

    HRegion mergedRegion = stepsBeforePONR(server, services, testing);

    @MetaMutationAnnotation List<Mutation> metaEntries = new ArrayList<Mutation>();
    if (rsCoprocessorHost != null) {
      if (rsCoprocessorHost.preMergeCommit(this.region_a, this.region_b, metaEntries)) {
        throw new IOException(
            "Coprocessor bypassing regions " + this.region_a + " " + this.region_b + " merge.");
      }
      try {
        for (Mutation p : metaEntries) {
          HRegionInfo.parseRegionName(p.getRow());
        }
      } catch (IOException e) {
        LOG.error(
            "Row key of mutation from coprocessor is not parsable as region name."
                + "Mutations from coprocessor should only be for hbase:meta table.",
            e);
        throw e;
      }
    }

    // This is the point of no return. Similar to SplitTransaction.
    // If we reach the PONR then subsequent failures need to crash out this
    // regionserver.
    this.journal.add(JournalEntry.PONR);

    // Add merged region and delete region_a and region_b as an atomic update.
    // See HBASE-7721. This update to hbase:meta determines whether the region is
    // considered merged in case of failures. If it is successful, the master will
    // roll forward; if not, the master will roll back.
    if (!testing && useCoordinationForAssignment) {
      if (metaEntries.isEmpty()) {
        MetaTableAccessor.mergeRegions(
            server.getConnection(),
            mergedRegion.getRegionInfo(),
            region_a.getRegionInfo(),
            region_b.getRegionInfo(),
            server.getServerName(),
            region_a.getTableDesc().getRegionReplication());
      } else {
        mergeRegionsAndPutMetaEntries(
            server.getConnection(),
            mergedRegion.getRegionInfo(),
            region_a.getRegionInfo(),
            region_b.getRegionInfo(),
            server.getServerName(),
            metaEntries,
            region_a.getTableDesc().getRegionReplication());
      }
    } else if (services != null && !useCoordinationForAssignment) {
      if (!services.reportRegionStateTransition(
          TransitionCode.MERGE_PONR,
          mergedRegionInfo,
          region_a.getRegionInfo(),
          region_b.getRegionInfo())) {
        // Passed PONR, let SSH clean it up
        throw new IOException(
            "Failed to notify master that merge passed PONR: "
                + region_a.getRegionInfo().getRegionNameAsString()
                + " and "
                + region_b.getRegionInfo().getRegionNameAsString());
      }
    }
    return mergedRegion;
  }