Example #1
 /** Take a snapshot of the specified disabled region */
 protected void snapshotDisabledRegion(final HRegionInfo regionInfo) throws IOException {
   snapshotManifest.addRegion(FSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
   monitor.rethrowException();
   status.setStatus(
       "Completed referencing HFiles for offline region "
           + regionInfo.toString()
           + " of table: "
           + snapshotTable);
 }
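A minimal sketch of how a snapshot handler might drive this method over every region of the disabled table; the Connection handle and the MetaTableAccessor lookup are assumptions, not part of the original example:

   // Hypothetical driver loop (assumes the handler holds a Connection in
   // `connection`): list the regions of the disabled snapshot table and
   // reference each one. Error checking happens inside
   // snapshotDisabledRegion itself via the monitor.
   List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(connection, snapshotTable);
   for (HRegionInfo regionInfo : regions) {
     snapshotDisabledRegion(regionInfo);
   }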
Example #2
  protected HRegionInfo createRegion(
      Configuration conf, final Table htbl, byte[] startKey, byte[] endKey) throws IOException {
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    try (FSDataOutputStream out = fs.create(riPath)) {
      out.write(hri.toDelimitedByteArray());
    }

    // add to meta.
    MetaTableAccessor.addRegionToMeta(meta, hri);
    meta.close();
    return hri;
  }
Example #3
  protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey)
      throws IOException {

    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);

    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      regions = rl.getAllRegionLocations();
    }

    for (HRegionLocation e : regions) {
      HRegionInfo hri = e.getRegionInfo();
      ServerName hsa = e.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
        fs.delete(p, true);

        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + " " + hsa.toString());
    }

    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
  }
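A minimal sketch of how deleteRegion might be combined with createRegion from Example #2 in a test: remove a region to punch a hole in the table, then manually re-create it. The table handle and key boundaries are illustrative assumptions:

    // Hypothetical test flow (assumes a table `table` split on these boundaries).
    byte[] startKey = Bytes.toBytes("B");
    byte[] endKey = Bytes.toBytes("C");
    deleteRegion(TEST_UTIL.getConfiguration(), table, startKey, endKey);
    HRegionInfo rebuilt = createRegion(TEST_UTIL.getConfiguration(), table, startKey, endKey);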
Example #4
  private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
      throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());

    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);

    HRegionFileSystem fs =
        new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
    final Configuration walConf = new Configuration(conf);
    FSUtils.setRootDir(walConf, tableDir);
    final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
    HRegion region =
        new HRegion(
            fs,
            wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
            conf,
            htd,
            null);

    region.initialize();

    return region;
  }
  @SuppressWarnings("deprecation")
  @Test
  public void testCleanMergeReference() throws Exception {
    LOG.info("Starting testCleanMergeReference");
    admin.enableCatalogJanitor(false);
    try {
      final TableName tableName = TableName.valueOf("testCleanMergeReference");
      // Create table and load data.
      Table table = createTableAndLoadData(master, tableName);
      // Merge 1st and 2nd region
      mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);
      verifyRowCount(table, ROWSIZE);
      table.close();

      List<Pair<HRegionInfo, ServerName>> tableRegions =
          MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tableName);
      HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
      HTableDescriptor tableDescriptor = master.getTableDescriptors().get(tableName);
      Result mergedRegionResult =
          MetaTableAccessor.getRegionResult(
              master.getConnection(), mergedRegionInfo.getRegionName());

      // hbase:meta still contains the merge references
      assertNotNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER));
      assertNotNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER));

      // the merging regions' directories still exist in the file system
      PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult);
      HRegionInfo regionA = p.getFirst();
      HRegionInfo regionB = p.getSecond();
      FileSystem fs = master.getMasterFileSystem().getFileSystem();
      Path rootDir = master.getMasterFileSystem().getRootDir();

      Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable());
      Path regionAdir = new Path(tabledir, regionA.getEncodedName());
      Path regionBdir = new Path(tabledir, regionB.getEncodedName());
      assertTrue(fs.exists(regionAdir));
      assertTrue(fs.exists(regionBdir));

      admin.compactRegion(mergedRegionInfo.getRegionName());
      // wait until the merged region no longer has reference files
      long timeout = System.currentTimeMillis() + waitTime;
      HRegionFileSystem hrfs =
          new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
      while (System.currentTimeMillis() < timeout) {
        if (!hrfs.hasReferences(tableDescriptor)) {
          break;
        }
        Thread.sleep(50);
      }
      assertFalse(hrfs.hasReferences(tableDescriptor));

      // run CatalogJanitor to clean merge references in hbase:meta and archive the
      // files of merging regions
      int cleaned = admin.runCatalogScan();
      assertTrue(cleaned > 0);
      assertFalse(fs.exists(regionAdir));
      assertFalse(fs.exists(regionBdir));

      mergedRegionResult =
          MetaTableAccessor.getRegionResult(
              master.getConnection(), mergedRegionInfo.getRegionName());
      assertNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER));
      assertNull(
          mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER));

    } finally {
      admin.enableCatalogJanitor(true);
    }
  }
Example #6
  /**
   * Tests the scenario from HBASE-6901: all files are bulk loaded and excluded from minor
   * compaction. Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException is thrown.
   */
  @Ignore("Flakey: See HBASE-9051")
  @Test
  public void testExcludeAllFromMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    util.startMiniCluster();
    try (Connection conn = ConnectionFactory.createConnection();
        Admin admin = conn.getAdmin();
        Table table = util.createTable(TABLE_NAME, FAMILIES);
        RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath =
          new Path(
              FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
              new Path(
                  admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
                  Bytes.toString(FAMILIES[0])));
      assertEquals(0, fs.listStatus(storePath).length);

      // Generate two bulk load files
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);

      for (int i = 0; i < 2; i++) {
        Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
        runIncrementalPELoad(
            conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME), testDir);
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
      }

      // Ensure data shows up
      int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME);
      try {
        quickPoll(
            new Callable<Boolean>() {
              @Override
              public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
              }
            },
            5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME);
      quickPoll(
          new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

    } finally {
      util.shutdownMiniCluster();
    }
  }
Example #7
  @Test
  public void testExcludeMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      Admin admin = util.getHBaseAdmin();
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath =
          HStore.getStoreHomedir(
              FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
              admin.getTableRegions(TABLE_NAME).get(0),
              FAMILIES[0]);
      assertEquals(0, fs.listStatus(storePath).length);

      // put some data in it and flush to create a storefile
      Put p = new Put(Bytes.toBytes("test"));
      p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
      table.put(p);
      admin.flush(TABLE_NAME);
      assertEquals(1, util.countRows(table));
      quickPoll(
          new Callable<Boolean>() {
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

      // Generate a bulk load file with more rows
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows + 1,
          util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME);
      try {
        quickPoll(
            new Callable<Boolean>() {
              public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
              }
            },
            5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME);
      quickPoll(
          new Callable<Boolean>() {
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
Example #8
 /**
  * Get the path to the table archive directory based on the configured archive directory.
  *
  * <p>It is assumed that the table has already been archived.
  *
  * @param conf {@link Configuration} to read the archive directory property. Can be null
  * @param tableName Name of the table to be archived. Cannot be null.
  * @return {@link Path} to the archive directory for the table
  */
 public static Path getTableArchivePath(final Configuration conf, final TableName tableName)
     throws IOException {
   return FSUtils.getTableDir(getArchivePath(conf), tableName);
 }
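A minimal usage sketch for this overload; the table name is a hypothetical placeholder, and it assumes the archive directory property is available in the configuration:

 Configuration conf = HBaseConfiguration.create();
 TableName tableName = TableName.valueOf("my_table"); // hypothetical table
 Path archiveDir = getTableArchivePath(conf, tableName);
 // The method assumes the table has already been archived, so a caller
 // would typically verify that the directory actually exists.
 FileSystem fs = archiveDir.getFileSystem(conf);
 boolean archived = fs.exists(archiveDir);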
Example #9
 /**
  * Get the path to the table's archive directory under the given root directory.
  *
  * <p>Generally of the form: /hbase/.archive/[tablename]
  *
  * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
  *     the archive path)
  * @param tableName Name of the table to be archived. Cannot be null.
  * @return {@link Path} to the archive directory for the table
  */
 public static Path getTableArchivePath(final Path rootdir, final TableName tableName) {
   return FSUtils.getTableDir(getArchivePath(rootdir), tableName);
 }
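A minimal sketch of the rootdir-based overload; the root path and table name are illustrative, and the result follows the "Generally of the form" note in the Javadoc above:

 Path rootDir = new Path("/hbase"); // illustrative root directory
 TableName tableName = TableName.valueOf("my_table"); // hypothetical table
 Path archiveDir = getTableArchivePath(rootDir, tableName);
 // archiveDir now points at the table's directory under the archive root,
 // e.g. a path of the form /hbase/.archive/... per the Javadoc above.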