public Map<String, Long> getRegionSizes(String tableName) {
  Map<String, Long> regions = new HashMap<>();
  try (Table table = connection.getTable(TableName.valueOf(tableName));
      RegionLocator regionLocator = connection.getRegionLocator(table.getName());
      Admin admin = connection.getAdmin()) {
    // Collect the region names that belong to this table.
    List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
    Set<byte[]> tableRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    for (HRegionLocation regionInfo : tableRegionInfos) {
      tableRegions.add(regionInfo.getRegionInfo().getRegionName());
    }

    // Walk the per-server load reports and keep the region load entries for this table.
    ClusterStatus clusterStatus = admin.getClusterStatus();
    Collection<ServerName> servers = clusterStatus.getServers();
    final long megaByte = 1024L * 1024L;
    for (ServerName serverName : servers) {
      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
      for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
        byte[] regionId = regionLoad.getName();
        if (tableRegions.contains(regionId)) {
          // Store file size is reported in MB; convert to bytes.
          long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
          regions.put(regionLoad.getNameAsString(), regionSizeBytes);
        }
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
  return regions;
}
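// A minimal usage sketch, not part of the original source: assuming an instance of the
// enclosing helper class (called "regionSizeHelper" here, a hypothetical name) holds the
// open Connection used above, the per-region sizes could be read and summed like this.
Map<String, Long> regionSizes = regionSizeHelper.getRegionSizes("my_table");
long totalBytes = 0L;
for (Map.Entry<String, Long> entry : regionSizes.entrySet()) {
  System.out.println(entry.getKey() + " -> " + entry.getValue() + " bytes");
  totalBytes += entry.getValue();
}
System.out.println("total store file size: " + totalBytes + " bytes");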
/**
 * Get the split row keys of a table.
 *
 * @param tableName table name
 * @return array of split row keys, or null if the table has only one region
 * @throws IOException if the region locations cannot be read
 */
private byte[][] getTableSplitRowKeys(TableName tableName) throws IOException {
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    byte[][] startKeys = locator.getStartKeys();
    // A single start key means the table has one region and therefore no split points.
    if (startKeys.length == 1) {
      return null;
    }
    // The first start key is the empty start of the first region; the remaining
    // start keys are the table's split points.
    byte[][] splits = new byte[startKeys.length - 1][];
    for (int i = 1; i < startKeys.length; i++) {
      splits[i - 1] = startKeys[i];
    }
    return splits;
  }
}
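// A hedged usage sketch (not from the original source): one common reason to read a table's
// split row keys is to pre-split a copy of the table the same way. "sourceTable",
// "targetDescriptor", and "admin" are hypothetical names used only for illustration;
// Admin.createTable accepts an explicit array of split points.
byte[][] splits = getTableSplitRowKeys(TableName.valueOf("sourceTable"));
if (splits == null) {
  admin.createTable(targetDescriptor);          // single-region table, no split points
} else {
  admin.createTable(targetDescriptor, splits);  // recreate the same region boundaries
}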
public void manualTest(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  util = new HBaseTestingUtility(conf);
  if ("newtable".equals(args[0])) {
    TableName tname = TableName.valueOf(args[1]);
    byte[][] splitKeys = generateRandomSplitKeys(4);
    try (Table table = util.createTable(tname, FAMILIES, splitKeys)) {
    }
  } else if ("incremental".equals(args[0])) {
    TableName tname = TableName.valueOf(args[1]);
    try (Connection c = ConnectionFactory.createConnection(conf);
        Admin admin = c.getAdmin();
        RegionLocator regionLocator = c.getRegionLocator(tname)) {
      Path outDir = new Path("incremental-out");
      runIncrementalPELoad(conf, admin.getTableDescriptor(tname), regionLocator, outDir);
    }
  } else {
    throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental");
  }
}
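// generateRandomSplitKeys(...) is called above but not shown in this excerpt. A plausible
// sketch, purely an assumption rather than the original implementation, is to produce the
// requested number of random fixed-length byte arrays to use as split points
// (requires java.util.Random):
private byte[][] generateRandomSplitKeys(int numKeys) {
  Random random = new Random();
  byte[][] ret = new byte[numKeys][];
  for (int i = 0; i < numKeys; i++) {
    ret[i] = new byte[16];  // arbitrary key length chosen for this sketch
    random.nextBytes(ret[i]);
  }
  return ret;
}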
protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey)
    throws IOException {
  LOG.info("Before delete:");
  HTableDescriptor htd = tbl.getTableDescriptor();
  dumpMeta(htd);

  List<HRegionLocation> regions;
  try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
    regions = rl.getAllRegionLocations();
  }
  for (HRegionLocation e : regions) {
    HRegionInfo hri = e.getRegionInfo();
    ServerName hsa = e.getServerName();
    if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
        && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
      LOG.info("RegionName: " + hri.getRegionNameAsString());
      byte[] deleteRow = hri.getRegionName();

      // Unassign the region so no region server is serving it while we remove its data.
      TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);

      // Delete the region's directory from the filesystem.
      LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
      Path rootDir = FSUtils.getRootDir(conf);
      FileSystem fs = rootDir.getFileSystem(conf);
      Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
      fs.delete(p, true);

      // Remove the region's row from hbase:meta.
      try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
        Delete delete = new Delete(deleteRow);
        meta.delete(delete);
      }
    }
    LOG.info(hri.toString() + hsa.toString());
  }

  TEST_UTIL.getMetaTableRows(htd.getTableName());
  LOG.info("After delete:");
  dumpMeta(htd);
}
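// dumpMeta(...) is called above but its body is not part of this excerpt. A rough sketch of
// what it might do, assumed rather than taken from the original, is to log every hbase:meta
// row that belongs to the table, using HBaseTestingUtility.getMetaTableRows(TableName):
private void dumpMeta(HTableDescriptor htd) throws IOException {
  List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName());
  for (byte[] row : metaRows) {
    LOG.info(Bytes.toStringBinary(row));
  }
}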
/**
 * This test exercises the scenario from HBASE-6901: all files are bulk loaded and
 * excluded from minor compaction. Without the fix for HBASE-6901, an
 * ArrayIndexOutOfBoundsException is thrown.
 */
@Ignore("Flakey: See HBASE-9051")
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  util.startMiniCluster();
  try (Connection conn = ConnectionFactory.createConnection();
      Admin admin = conn.getAdmin();
      Table table = util.createTable(TABLE_NAME, FAMILIES);
      RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
            Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME),
          testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
        util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);
  } finally {
    util.shutdownMiniCluster();
  }
}
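// quickPoll(...) is used above but not defined in this excerpt. The test relies on it throwing
// an AssertionError when the condition never becomes true within the timeout, so a plausible
// sketch (an assumption, not the original) polls the Callable and calls Assert.fail() on timeout:
private void quickPoll(Callable<Boolean> c, int waitMs) throws Exception {
  int sleepMs = 10;
  int retries = waitMs / sleepMs;
  while (retries-- > 0) {
    if (c.call().booleanValue()) {
      return;
    }
    Thread.sleep(sleepMs);
  }
  fail("condition did not become true within " + waitMs + " ms");
}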