Example #1
  /**
   * Write out an info file under the region directory. Useful for recovering mangled regions.
   *
   * @param regionInfoContent serialized version of the {@link HRegionInfo}
   * @param useTempDir whether to use the region's .tmp dir for safer file creation
   */
  private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final boolean useTempDir)
      throws IOException {
    Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
    if (useTempDir) {
      // Create in the tmp dir and then move into place, in case we crash after
      // create but before close. If we don't successfully close the file,
      // subsequent region reopens will fail below because the create is still
      // registered in the NameNode.
      Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

      // If a datanode crashes, or the RS goes down just before close is called on
      // the regioninfo file created in the .tmp directory, the next creation
      // attempt will get an AlreadyBeingCreatedException.
      // Hence delete the file first if it exists.
      if (FSUtils.isExists(fs, tmpPath)) {
        FSUtils.delete(fs, tmpPath, true);
      }

      // Write HRI to a file in case we need to recover hbase:meta
      writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

      // Move the created file to the original path
      if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
      }
    } else {
      // Write HRI to a file in case we need to recover hbase:meta
      writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
    }
  }
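The create-in-tmp-then-rename sequence above is a general pattern for publishing files on HDFS without readers ever seeing a half-written result. A minimal standalone sketch of the pattern, assuming only the stock Hadoop FileSystem API (the helper name is illustrative, not HBase code):

  import java.io.IOException;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  final class AtomicFilePublisher {
    /** Writes content to tmpPath, then renames it into place at finalPath. */
    static void publish(FileSystem fs, Path tmpPath, Path finalPath, byte[] content)
        throws IOException {
      // Remove a stale temp file left behind by an earlier crash before close.
      if (fs.exists(tmpPath)) {
        fs.delete(tmpPath, true);
      }
      FSDataOutputStream out = fs.create(tmpPath);
      try {
        out.write(content);
      } finally {
        out.close();
      }
      // The rename is the commit point; readers only ever see the complete file.
      if (!fs.rename(tmpPath, finalPath)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + finalPath);
      }
    }
  }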
Example #2
 /**
  * Gets the directory in which a store's files should be archived.
  *
  * @param conf {@link Configuration} to read for the archive directory name
  * @param region parent region information under which the store currently lives
  * @param tabledir directory for the table under which the store currently lives
  * @param family name of the family in the store
  * @return {@link Path} to the directory in which to archive the given store
  */
 public static Path getStoreArchivePath(
     Configuration conf, HRegionInfo region, Path tabledir, byte[] family) throws IOException {
   TableName tableName = FSUtils.getTableName(tabledir);
   Path rootDir = FSUtils.getRootDir(conf);
   Path tableArchiveDir = getTableArchivePath(rootDir, tableName);
   return HStore.getStoreHomedir(tableArchiveDir, region, family);
 }
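For orientation, a hedged usage sketch of the method above; the table name, family, and the `region` variable are hypothetical stand-ins:

  // Hypothetical usage: where would the hfiles of family "cf" be archived?
  Configuration conf = HBaseConfiguration.create();
  Path tabledir = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf("t1"));
  // 'region' is assumed to be an HRegionInfo already in scope.
  Path archiveDir = getStoreArchivePath(conf, region, tabledir, Bytes.toBytes("cf"));
  // The result mirrors the live store layout, rooted under the archive directory.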
 static boolean shouldShowAppendWarning(Configuration conf) {
   try {
     return !FSUtils.isAppendSupported(conf) && FSUtils.isHDFS(conf);
   } catch (IOException e) {
     LOG.warn("Unable to determine if append is supported", e);
     return false;
   }
 }
 /** Deletes a table's descriptor file from the file system if it exists. Used in unit tests. */
 public static void deleteTableDescriptorIfExists(String tableName, Configuration conf)
     throws IOException {
   FileSystem fs = FSUtils.getCurrentFileSystem(conf);
   FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
   // The below deleteDirectory works for either file or directory.
   if (status != null && fs.exists(status.getPath())) {
     FSUtils.deleteDirectory(fs, status.getPath());
   }
 }
Example #5
 @Test
 public void testFsUriSetProperly() throws Exception {
   HMaster master = UTIL.getMiniHBaseCluster().getMaster();
   MasterFileSystem fs = master.getMasterFileSystem();
   Path masterRoot = FSUtils.getRootDir(fs.conf);
   Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf());
   // make sure the fs and the found root dir have the same scheme
   LOG.debug("from fs uri:" + FileSystem.getDefaultUri(fs.getFileSystem().getConf()));
   LOG.debug("from configuration uri:" + FileSystem.getDefaultUri(fs.conf));
   // make sure the set uri matches by forcing it.
   assertEquals(masterRoot, rootDir);
 }
  /**
   * Checks to see if the file system is still accessible. If not, aborts the
   * master and marks the file system as unavailable.
   *
   * @return false if the file system is not available
   */
 public boolean checkFileSystem() {
   if (this.fsOk) {
     try {
       FSUtils.checkFileSystemAvailable(this.fs);
       FSUtils.checkDfsSafeMode(this.conf);
     } catch (IOException e) {
       master.abort("Shutting down HBase cluster: file system not available", e);
       this.fsOk = false;
     }
   }
   return this.fsOk;
 }
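FSUtils.checkFileSystemAvailable is not shown here. As a rough sketch of the kind of probe such a check might perform (an assumption, not the actual HBase implementation):

  // Illustrative only: surface filesystem outages as an IOException.
  static void checkAvailable(FileSystem fs) throws IOException {
    try {
      fs.exists(new Path("/")); // any cheap metadata operation will do
    } catch (IOException e) {
      throw new IOException("File system is not available", e);
    }
  }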
  /**
   * @param fs filesystem to write the descriptor to
   * @param hTableDescriptor descriptor to serialize and write
   * @param tableDir table directory under which the tableinfo file is written
   * @param status status of the current tableinfo file, or null if there is none yet
   * @return Descriptor file path, or null if we failed to write it.
   * @throws IOException if filesystem access fails
   */
 private static Path writeTableDescriptor(
     final FileSystem fs,
     final HTableDescriptor hTableDescriptor,
     final Path tableDir,
     final FileStatus status)
     throws IOException {
    // Get the temporary dir into which we'll first write the file, to avoid the
    // half-written file phenomenon.
    Path tmpTableDir = new Path(tableDir, ".tmp");
    // What is the current sequence id? We read it from the current file. After we
    // read it, another thread could come in and compete with us writing out the
    // next version of the file. The retries below help some, but it's hard to
    // make guarantees in the face of concurrent schema edits.
   int currentSequenceid = status == null ? 0 : getTableInfoSequenceid(status.getPath());
   int sequenceid = currentSequenceid;
    // Put an arbitrary upper bound on how often we retry
   int retries = 10;
   int retrymax = currentSequenceid + retries;
   Path tableInfoPath = null;
   do {
     sequenceid += 1;
     Path p = getTableInfoFileName(tmpTableDir, sequenceid);
     if (fs.exists(p)) {
       LOG.debug(p + " exists; retrying up to " + retries + " times");
       continue;
     }
     try {
       writeHTD(fs, p, hTableDescriptor);
       tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
       if (!fs.rename(p, tableInfoPath)) {
         throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
       }
     } catch (IOException ioe) {
       // Presume clash of names or something; go around again.
       LOG.debug("Failed write and/or rename; retrying", ioe);
       if (!FSUtils.deleteDirectory(fs, p)) {
         LOG.warn("Failed cleanup of " + p);
       }
       tableInfoPath = null;
       continue;
     }
     // Cleanup old schema file.
     if (status != null) {
       if (!FSUtils.deleteDirectory(fs, status.getPath())) {
         LOG.warn("Failed delete of " + status.getPath() + "; continuing");
       }
     }
     break;
   } while (sequenceid < retrymax);
   return tableInfoPath;
 }
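getTableInfoFileName and getTableInfoSequenceid are not shown above. A minimal sketch of how such sequence-suffixed names could work, assuming a ".tableinfo.<seqid>" convention with zero padding (the exact format and width in HBase may differ):

  // Hypothetical helpers for sequence-suffixed tableinfo names, e.g. ".tableinfo.0000000003".
  // Zero padding keeps lexicographic order aligned with numeric order, which is
  // what the "latest wins" lookup in getTableInfoPath relies on.
  static Path tableInfoFileName(Path dir, int sequenceid) {
    return new Path(dir, String.format(".tableinfo.%010d", sequenceid));
  }

  static int tableInfoSequenceid(Path p) {
    String name = p.getName();
    return Integer.parseInt(name.substring(name.lastIndexOf('.') + 1));
  }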
Example #8
  /**
   * Remove the region from the table directory, archiving the region's hfiles.
   *
   * @param conf the {@link Configuration} to use
   * @param fs {@link FileSystem} from which to remove the region
   * @param tableDir {@link Path} to where the table is being stored
   * @param regionInfo {@link HRegionInfo} for region to be deleted
   * @throws IOException if the request cannot be completed
   */
  public static void deleteRegionFromFileSystem(
      final Configuration conf,
      final FileSystem fs,
      final Path tableDir,
      final HRegionInfo regionInfo)
      throws IOException {
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
    Path regionDir = regionFs.getRegionDir();

    if (!fs.exists(regionDir)) {
      LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
      return;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETING region " + regionDir);
    }

    // Archive region
    Path rootDir = FSUtils.getRootDir(conf);
    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

    // Delete empty region dir
    if (!fs.delete(regionDir, true)) {
      LOG.warn("Failed delete of " + regionDir);
    }
  }
 /**
  * Looks under the table directory in the filesystem for files with a {@link #TABLEINFO_NAME}
  * prefix. Returns a reference to the 'latest' instance.
  *
  * @param fs filesystem the table directory lives on
  * @param tabledir table directory to search under
  * @return The 'current' tableinfo file, or null if none is found.
  * @throws IOException if listing the directory fails
  */
 public static FileStatus getTableInfoPath(final FileSystem fs, final Path tabledir)
     throws IOException {
   FileStatus[] status =
       FSUtils.listStatus(
           fs,
           tabledir,
           new PathFilter() {
             @Override
             public boolean accept(Path p) {
               // Accept any file that starts with TABLEINFO_NAME
               return p.getName().startsWith(TABLEINFO_NAME);
             }
           });
   if (status == null || status.length < 1) return null;
   Arrays.sort(status, new FileStatusFileNameComparator());
   if (status.length > 1) {
     // Clean away old versions of .tableinfo
     for (int i = 1; i < status.length; i++) {
       Path p = status[i].getPath();
       // Clean up old versions
       if (!fs.delete(p, false)) {
         LOG.warn("Failed cleanup of " + status);
       } else {
         LOG.debug("Cleaned up old tableinfo file " + p);
       }
     }
   }
   return status[0];
 }
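FileStatusFileNameComparator is not shown above. For the 'latest first' selection to work, it presumably orders newer sequence suffixes ahead of older ones; a sketch under that assumption:

  // Assumed behavior: the newest tableinfo file sorts first. With zero-padded
  // sequence suffixes, reverse lexicographic order of the file name suffices.
  static class FileStatusFileNameComparator implements java.util.Comparator<FileStatus> {
    @Override
    public int compare(FileStatus left, FileStatus right) {
      return right.getPath().getName().compareTo(left.getPath().getName());
    }
  }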
   /**
    * Verify that the region placement is consistent with the assignment plan.
    *
    * @param isDetailMode whether to print each report in detail
    * @return the verification reports, one per table checked
    * @throws IOException if the assignment snapshot cannot be read
    */
  public List<AssignmentVerificationReport> verifyRegionPlacement(boolean isDetailMode)
      throws IOException {
     System.out.println(
         "Start to verify the region assignment and generate the verification report");
    // Get the region assignment snapshot
    SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();

    // Get all the tables
    Set<TableName> tables = snapshot.getTableSet();

    // Get the region locality map
    Map<String, Map<String, Float>> regionLocalityMap = null;
     if (this.enforceLocality) {
      regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
    }
    List<AssignmentVerificationReport> reports = new ArrayList<AssignmentVerificationReport>();
    // Iterate all the tables to fill up the verification report
    for (TableName table : tables) {
      if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) {
        continue;
      }
      AssignmentVerificationReport report = new AssignmentVerificationReport();
      report.fillUp(table, snapshot, regionLocalityMap);
      report.print(isDetailMode);
      reports.add(report);
    }
    return reports;
  }
   /**
    * Spin up a bunch of threads and have them all append to a WAL. Roll the WAL frequently to try
    * to trigger an NPE.
    *
    * @throws IOException
    * @throws InterruptedException
    */
  @Test
  public void testContendedLogRolling() throws IOException, InterruptedException {
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    Path dir = TEST_UTIL.getDataTestDir();
    // The implementation needs to know the 'handler' count.
    TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, THREAD_COUNT);
    final Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    FSUtils.setRootDir(conf, dir);
    final WALFactory wals = new WALFactory(conf, null, TestLogRollingNoCluster.class.getName());
    final WAL wal = wals.getWAL(new byte[] {});

    final int count = THREAD_COUNT;
    final Appender[] appenders = new Appender[count];
    try {
      for (int i = 0; i < count; i++) {
        // Have each appending thread write 'count' entries
        appenders[i] = new Appender(wal, i, count);
      }
      for (int i = 0; i < count; i++) {
        appenders[i].start();
      }
      for (int i = 0; i < count; i++) {
        // ensure that all threads are joined before closing the wal
        appenders[i].join();
      }
    } finally {
      wals.close();
    }
    for (int i = 0; i < count; i++) {
      assertFalse(appenders[i].isException());
    }
  }
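The Appender class is referenced but not shown. A minimal sketch of what such a test appender might look like, assuming it extends Thread, issues `count` appends against the shared WAL, and records any failure for the post-join assertions (the field and method names here are guesses, not the actual test code):

  static class Appender extends Thread {
    private final WAL wal;
    private final int count;
    private volatile Exception exception;

    Appender(WAL wal, int index, int count) {
      super("Appender-" + index);
      this.wal = wal;
      this.count = count;
    }

    boolean isException() {
      return this.exception != null;
    }

    @Override
    public void run() {
      try {
        for (int i = 0; i < count; i++) {
          // Each iteration would append a small edit and sync; the real test
          // races these appends against frequent log rolls. Elided here.
        }
      } catch (Exception e) {
        this.exception = e;
      }
    }
  }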
Example #12
  /**
   * Assert that getSplitEditFilesSorted returns files in expected order and that it skips
   * moved-aside files.
   *
   * @throws IOException
   */
  @Test
  public void testGetSplitEditFilesSorted() throws IOException {
    FileSystem fs = FileSystem.get(util.getConfiguration());
    Path regiondir = util.getDataTestDir("regiondir");
    fs.delete(regiondir, true);
    fs.mkdirs(regiondir);
    Path recoverededits = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    String first = WALSplitter.formatRecoveredEditsFileName(-1);
    createFile(fs, recoverededits, first);
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(0));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(1));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(11));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(2));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(50));
    String last = WALSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
    createFile(fs, recoverededits, last);
    createFile(
        fs, recoverededits, Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());

    final Configuration walConf = new Configuration(util.getConfiguration());
    FSUtils.setRootDir(walConf, regiondir);
    (new WALFactory(walConf, null, "dummyLogName")).getWAL(new byte[] {}, null);

    NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
    assertEquals(7, files.size());
    assertEquals(first, files.pollFirst().getName());
    assertEquals(last, files.pollLast().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(0), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(1), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(2), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(11), files.pollFirst().getName());
  }
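The ordering assertions above depend on recovered-edits file names sorting numerically. A sketch of the zero padding that makes lexicographic order match numeric order (the width is illustrative; it must cover Long.MAX_VALUE's 19 digits):

  // Illustrative: "0000000000000000011" sorts after "0000000000000000002",
  // matching 11 > 2, so a plain string sort yields numeric order.
  static String formatSeqId(long seqid) {
    return String.format("%019d", seqid);
  }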
Example #13
 /** Write the .regioninfo file on-disk. */
 private static void writeRegionInfoFileContent(
     final Configuration conf,
     final FileSystem fs,
     final Path regionInfoFile,
     final byte[] content)
     throws IOException {
   // First check to get the permissions
   FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
   // Write the RegionInfo file content
   FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
   try {
     out.write(content);
   } finally {
     out.close();
   }
 }
   /**
    * Inspect the log directory and split any log files that do not belong to an
    * active region server.
    */
  void splitLogAfterStartup() {
    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
        HLog.SPLIT_SKIP_ERRORS_DEFAULT);
    Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
    do {
      if (master.isStopped()) {
        LOG.warn("Master stopped while splitting logs");
        break;
      }
      List<ServerName> serverNames = new ArrayList<ServerName>();
      try {
        if (!this.fs.exists(logsDirPath)) return;
        FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
        // Get online servers after getting log folders, to avoid deleting log folders of newly
        // checked-in region servers; see HBASE-5916.
        Set<ServerName> onlineServers = ((HMaster) master).getServerManager().getOnlineServers()
            .keySet();

        if (logFolders == null || logFolders.length == 0) {
          LOG.debug("No log files to split, proceeding...");
          return;
        }
        for (FileStatus status : logFolders) {
          String sn = status.getPath().getName();
          // truncate splitting suffix if present (for ServerName parsing)
          if (sn.endsWith(HLog.SPLITTING_EXT)) {
            sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
          }
          ServerName serverName = ServerName.parseServerName(sn);
          if (!onlineServers.contains(serverName)) {
            LOG.info("Log folder " + status.getPath() + " doesn't belong "
                + "to a known region server, splitting");
            serverNames.add(serverName);
          } else {
            LOG.info("Log folder " + status.getPath()
                + " belongs to an existing region server");
          }
        }
        splitLog(serverNames);
        retrySplitting = false;
      } catch (IOException ioe) {
        LOG.warn("Failed splitting of " + serverNames, ioe);
        if (!checkFileSystem()) {
          LOG.warn("Bad Filesystem, exiting");
          Runtime.getRuntime().halt(1);
        }
        try {
          if (retrySplitting) {
            Thread.sleep(conf.getInt(
              "hbase.hlog.split.failure.retry.interval", 30 * 1000));
          }
        } catch (InterruptedException e) {
          LOG.warn("Interrupted, aborting since cannot return w/o splitting");
          Thread.currentThread().interrupt();
          retrySplitting = false;
          Runtime.getRuntime().halt(1);
        }
      }
    } while (retrySplitting);
  }
Example #15
 @VisibleForTesting
  void assertReferenceFileCount(int expectedReferenceFileCount, Path dir) throws IOException {
    if (expectedReferenceFileCount != 0) {
      int actualReferenceFileCount =
          FSUtils.getRegionReferenceFileCount(parent.getFilesystem(), dir);
      if (expectedReferenceFileCount != actualReferenceFileCount) {
        throw new IOException(
            "Failing split. Expected " + expectedReferenceFileCount + " reference files in "
                + dir + " but found " + actualReferenceFileCount);
      }
    }
  }
Example #16
 /** Returns the HDFS size of the given region in bytes. */
 public long getHdfsSize(HRegionInfo info) throws IOException {
   Path tableDir =
       HTableDescriptor.getTableDir(
           FSUtils.getRootDir(hbaseConf_), Bytes.toBytes(hbaseTableName_));
   FileSystem fs = tableDir.getFileSystem(hbaseConf_);
   Path regionDir = tableDir.suffix("/" + info.getEncodedName());
   return fs.getContentSummary(regionDir).getLength();
 }
  /**
   * Updates the table descriptor on the filesystem.
   *
   * @param fs filesystem to write to
   * @param rootdir hbase root directory under which the table lives
   * @param hTableDescriptor descriptor to write
   * @return Path of the new tableinfo file.
   * @throws IOException Thrown if the update failed.
   */
 static Path updateHTableDescriptor(FileSystem fs, Path rootdir, HTableDescriptor hTableDescriptor)
     throws IOException {
   Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
   Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir, getTableInfoPath(fs, tableDir));
   if (p == null) throw new IOException("Failed update");
   LOG.info("Updated tableinfo=" + p);
   return p;
 }
 static TableDescriptorModtime getTableDescriptorModtime(
     FileSystem fs, Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
   // ignore both -ROOT- and .META. tables
   if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
       || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
     return null;
   }
   return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
 }
 private Map<String, Integer> getFragmentationInfo(HMaster master, Configuration conf)
     throws IOException {
   boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
   if (showFragmentation) {
     return FSUtils.getTableFragmentation(master);
   } else {
     return null;
   }
 }
 /** Take a snapshot of the specified disabled region */
 protected void snapshotDisabledRegion(final HRegionInfo regionInfo) throws IOException {
   snapshotManifest.addRegion(FSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
   monitor.rethrowException();
   status.setStatus(
       "Completed referencing HFiles for offline region "
           + regionInfo.toString()
           + " of table: "
           + snapshotTable);
 }
Example #21
  private List<StoreFileInfo> getStoreFiles(Path storeDir) throws IOException {
    FileStatus[] stats = FSUtils.listStatus(fs, storeDir);
    if (stats == null) return null;

    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(stats.length);
    for (int i = 0; i < stats.length; ++i) {
      storeFiles.add(new StoreFileInfo(conf, fs, stats[i]));
    }
    return storeFiles;
  }
Example #22
  /** Creates, flushes, and closes a new region. */
  public static HRegion createHDFSRegionDir(
      Configuration conf, HRegionInfo hri, HTableDescriptor htd) throws IOException {
    // Create HRegion
    Path root = FSUtils.getRootDir(conf);
    HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

    // Close the new region to flush to disk. Close log file too.
    region.close();
    return region;
  }
Example #23
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    CONF = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniDFSCluster(1);

    CLUSTER = TEST_UTIL.getDFSCluster();
    FS = CLUSTER.getFileSystem();
    DIR = TEST_UTIL.getDataTestDirOnTestFS("TestDurability");
    FSUtils.setRootDir(CONF, DIR);
  }
Example #24
   /**
    * @return the set of families present on disk, or null if the region directory cannot be listed
    * @throws IOException
    */
  public Collection<String> getFamilies() throws IOException {
    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
    if (fds == null) return null;

    ArrayList<String> families = new ArrayList<String>(fds.length);
    for (FileStatus status : fds) {
      families.add(status.getPath().getName());
    }

    return families;
  }
Example #25
 /**
  * Returns true if the specified family has reference files
  *
  * @param familyName Column Family Name
  * @return true if family contains reference files
  * @throws IOException
  */
 public boolean hasReferences(final String familyName) throws IOException {
   FileStatus[] files =
       FSUtils.listStatus(
           fs,
           getStoreDir(familyName),
            new PathFilter() {
              @Override
              public boolean accept(Path path) {
                return StoreFileInfo.isReference(path);
              }
            });
   return files != null && files.length > 0;
 }
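If the codebase targets Java 8 or later, PathFilter is a single-method interface, so the anonymous class above could shrink to a lambda; a sketch:

  // Equivalent Java 8+ form of the reference-file filter above.
  FileStatus[] files =
      FSUtils.listStatus(fs, getStoreDir(familyName), p -> StoreFileInfo.isReference(p));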
  protected HRegionInfo createRegion(
      Configuration conf, final Table htbl, byte[] startKey, byte[] endKey) throws IOException {
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HTableDescriptor htd = htbl.getTableDescriptor();
    HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    FSDataOutputStream out = fs.create(riPath);
    out.write(hri.toDelimitedByteArray());
    out.close();

    // add to meta.
    MetaTableAccessor.addRegionToMeta(meta, hri);
    meta.close();
    return hri;
  }
Example #27
 public void consolidate() throws IOException {
   if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
     Path rootDir = FSUtils.getRootDir(conf);
     LOG.info("Using old Snapshot Format");
     // write a copy of descriptor to the snapshot directory
     new FSTableDescriptors(conf, fs, rootDir)
         .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(htd), false);
   } else {
     LOG.debug("Convert to Single Snapshot Manifest");
     convertToV2SingleManifest();
   }
 }
  /**
   * Make sure the hbase temp directory exists and is empty.
   * NOTE that this method is only executed once just after the master becomes the active one.
   */
  private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
      throws IOException {
     // If the temp directory exists, clear out its content (left over from a previous run)
     if (fs.exists(tmpdir)) {
       // Archive any tables found in temp; they may be left over from a failed delete.
       // Otherwise the cleaner will take care of them.
      for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
          HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
        }
      }
      if (!fs.delete(tmpdir, true)) {
        throw new IOException("Unable to clean the temp directory: " + tmpdir);
      }
    }

    // Create the temp directory
    if (!fs.mkdirs(tmpdir)) {
      throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
    }
  }
  protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey)
      throws IOException {

    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);

    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      regions = rl.getAllRegionLocations();
    }

    for (HRegionLocation e : regions) {
      HRegionInfo hri = e.getRegionInfo();
      ServerName hsa = e.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
        fs.delete(p, true);

        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + " " + hsa.toString());
    }

    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
  }
 @Test
 public void testReadingArchiveDirectoryFromFS() throws IOException {
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   try {
     new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
         .get(HConstants.HFILE_ARCHIVE_DIRECTORY);
     fail("Shouldn't be able to read a table descriptor for the archive directory.");
   } catch (IOException e) {
     LOG.debug(
         "Correctly got error when reading a table descriptor from the archive directory: "
             + e.getMessage());
   }
 }