private HTable createTable(byte[] tableName, byte[][] columnFamilies) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    desc.addFamily(columnDesc);
  }
  desc.addCoprocessor(TransactionProcessor.class.getName());
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return new HTable(testUtil.getConfiguration(), tableName);
}
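// Hypothetical usage sketch (not from the original source): how the createTable helper above
// might be exercised from a JUnit test. The table/family names are illustrative, and the
// surrounding test-class fields (hBaseAdmin, testUtil) are assumed to exist as in the helper.
@Test
public void testCreateTableHelper() throws Exception {
  byte[] tableName = Bytes.toBytes("txn_test_table");
  byte[][] families = new byte[][] { Bytes.toBytes("f1"), Bytes.toBytes("f2") };
  HTable table = createTable(tableName, families);
  try {
    // createTable waits for availability, so the table should be immediately visible.
    assertTrue(hBaseAdmin.tableExists(tableName));
  } finally {
    table.close();
  }
}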
@Test(timeout = 300000)
public void testClusterRestart() throws Exception {
  UTIL.startMiniCluster(3);
  while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
    Threads.sleep(1);
  }
  LOG.info("\n\nCreating tables");
  for (byte[] TABLE : TABLES) {
    UTIL.createTable(TABLE, FAMILY);
  }
  for (byte[] TABLE : TABLES) {
    UTIL.waitTableEnabled(TABLE);
  }

  List<HRegionInfo> allRegions = MetaScanner.listAllRegions(UTIL.getConfiguration(), true);
  assertEquals(4, allRegions.size());

  LOG.info("\n\nShutting down cluster");
  UTIL.shutdownMiniHBaseCluster();

  LOG.info("\n\nSleeping a bit");
  Thread.sleep(2000);

  LOG.info("\n\nStarting cluster the second time");
  UTIL.restartHBaseCluster(3);

  // Need to use a new Configuration so we make a new HConnection.
  // Otherwise we would reuse an HConnection that has gone stale, because
  // shutting down the cluster also closed that connection.
  allRegions = MetaScanner.listAllRegions(new Configuration(UTIL.getConfiguration()), true);
  assertEquals(4, allRegions.size());

  LOG.info("\n\nWaiting for tables to be available");
  for (byte[] TABLE : TABLES) {
    try {
      UTIL.createTable(TABLE, FAMILY);
      assertTrue("Able to create table that should already exist", false);
    } catch (TableExistsException tee) {
      LOG.info("Table already exists as expected");
    }
    UTIL.waitTableAvailable(TABLE);
  }
}
private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality)
    throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
  int hostCount = 1;
  int regionNum = 5;
  if (shouldKeepLocality) {
    // We should raise the host count above the HDFS replica count once MiniHBaseCluster
    // supports an explicit hostnames parameter, just like MiniDFSCluster does.
    hostCount = 3;
    regionNum = 20;
  }

  byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
  String[] hostnames = new String[hostCount];
  for (int i = 0; i < hostCount; ++i) {
    hostnames[i] = "datanode_" + i;
  }
  util.startMiniCluster(1, hostCount, hostnames);

  Table table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
  Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
  try (RegionLocator r = util.getConnection().getRegionLocator(TABLE_NAME);
      Admin admin = util.getConnection().getAdmin()) {
    assertEquals("Should start with empty table", 0, util.countRows(table));
    int numRegions = r.getStartKeys().length;
    assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

    // Generate the bulk load files
    runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir);
    // This doesn't write into the table, just makes files
    assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

    // Make sure that a directory was created for every CF
    int dir = 0;
    for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
      for (byte[] family : FAMILIES) {
        if (Bytes.toString(family).equals(f.getPath().getName())) {
          ++dir;
        }
      }
    }
    assertEquals("Column family not found in FS.", FAMILIES.length, dir);

    // handle the split case
    if (shouldChangeRegions) {
      LOG.info("Changing regions in table");
      admin.disableTable(table.getName());
      while (util.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
          .isRegionsInTransition()) {
        Threads.sleep(200);
        LOG.info("Waiting on table to finish disabling");
      }
      util.deleteTable(table.getName());
      byte[][] newSplitKeys = generateRandomSplitKeys(14);
      table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);

      while (util.getConnection().getRegionLocator(TABLE_NAME).getAllRegionLocations().size() != 15
          || !admin.isTableAvailable(table.getName())) {
        Thread.sleep(200);
        LOG.info("Waiting for new region assignment to happen");
      }
    }

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
        util.countRows(table));
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    for (Result res : results) {
      assertEquals(FAMILIES.length, res.rawCells().length);
      Cell first = res.rawCells()[0];
      for (Cell kv : res.rawCells()) {
        assertTrue(CellUtil.matchingRow(first, kv));
        assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
      }
    }
    results.close();
    String tableDigestBefore = util.checksumRows(table);

    // Check region locality
    HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
    for (HRegion region : util.getHBaseCluster().getRegions(TABLE_NAME)) {
      hbd.add(region.getHDFSBlocksDistribution());
    }
    for (String hostname : hostnames) {
      float locality = hbd.getBlockLocalityIndex(hostname);
      LOG.info("locality of [" + hostname + "]: " + locality);
      assertEquals(100, (int) (locality * 100));
    }

    // Cause regions to reopen
    admin.disableTable(TABLE_NAME);
    while (!admin.isTableDisabled(TABLE_NAME)) {
      Thread.sleep(200);
      LOG.info("Waiting for table to disable");
    }
    admin.enableTable(TABLE_NAME);
    util.waitTableAvailable(TABLE_NAME);
    assertEquals("Data should remain after reopening of regions", tableDigestBefore,
        util.checksumRows(table));
  } finally {
    testDir.getFileSystem(conf).delete(testDir, true);
    util.deleteTable(TABLE_NAME);
    util.shutdownMiniCluster();
  }
}
private void doIncrementalLoadTest(boolean shouldChangeRegions) throws Exception {
  util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  byte[][] splitKeys = generateRandomSplitKeys(4);
  util.startMiniCluster();
  try {
    HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
    Admin admin = table.getConnection().getAdmin();
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    assertEquals("Should start with empty table", 0, util.countRows(table));
    int numRegions = -1;
    try (RegionLocator r = table.getRegionLocator()) {
      numRegions = r.getStartKeys().length;
    }
    assertEquals("Should make 5 regions", numRegions, 5);

    // Generate the bulk load files
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table.getTableDescriptor(), table.getRegionLocator(), testDir);
    // This doesn't write into the table, just makes files
    assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

    // Make sure that a directory was created for every CF
    int dir = 0;
    for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
      for (byte[] family : FAMILIES) {
        if (Bytes.toString(family).equals(f.getPath().getName())) {
          ++dir;
        }
      }
    }
    assertEquals("Column family not found in FS.", FAMILIES.length, dir);

    // handle the split case
    if (shouldChangeRegions) {
      LOG.info("Changing regions in table");
      admin.disableTable(table.getName());
      while (util.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
          .isRegionsInTransition()) {
        Threads.sleep(200);
        LOG.info("Waiting on table to finish disabling");
      }
      util.deleteTable(table.getName());
      byte[][] newSplitKeys = generateRandomSplitKeys(14);
      table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);
      while (table.getRegionLocator().getAllRegionLocations().size() != 15
          || !admin.isTableAvailable(table.getName())) {
        Thread.sleep(200);
        LOG.info("Waiting for new region assignment to happen");
      }
    }

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows,
        util.countRows(table));
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    for (Result res : results) {
      assertEquals(FAMILIES.length, res.rawCells().length);
      Cell first = res.rawCells()[0];
      for (Cell kv : res.rawCells()) {
        assertTrue(CellUtil.matchingRow(first, kv));
        assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
      }
    }
    results.close();
    String tableDigestBefore = util.checksumRows(table);

    // Cause regions to reopen
    admin.disableTable(TABLE_NAME);
    while (!admin.isTableDisabled(TABLE_NAME)) {
      Thread.sleep(200);
      LOG.info("Waiting for table to disable");
    }
    admin.enableTable(TABLE_NAME);
    util.waitTableAvailable(TABLE_NAME);
    assertEquals("Data should remain after reopening of regions", tableDigestBefore,
        util.checksumRows(table));
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
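// Hedged sketch (not from the original source): plausible JUnit entry points that would drive
// the parameterized doIncrementalLoadTest helpers above. The test method names and the timeout
// value are illustrative assumptions, not taken from the original test class.
@Test(timeout = 300000)
public void testMRIncrementalLoad() throws Exception {
  doIncrementalLoadTest(false);
}

@Test(timeout = 300000)
public void testMRIncrementalLoadWithSplit() throws Exception {
  // Exercises the path that deletes and recreates the table with new split keys.
  doIncrementalLoadTest(true);
}

@Test(timeout = 300000)
public void testMRIncrementalLoadWithLocality() throws Exception {
  // Exercises the two-argument overload with locality checks enabled.
  doIncrementalLoadTest(false, true);
}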
@BeforeClass
public static void beforeClass() throws Exception {
  conf = UTIL.getConfiguration();
  enableSecurity(conf);

  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] {"supergroup"});
  // Users with global permissions
  USER_GLOBAL_ADMIN = User.createUserForTesting(conf, "global_admin", new String[0]);
  USER_GLOBAL_CREATE = User.createUserForTesting(conf, "global_create", new String[0]);
  USER_GLOBAL_WRITE = User.createUserForTesting(conf, "global_write", new String[0]);
  USER_GLOBAL_READ = User.createUserForTesting(conf, "global_read", new String[0]);
  USER_GLOBAL_EXEC = User.createUserForTesting(conf, "global_exec", new String[0]);

  USER_NS_ADMIN = User.createUserForTesting(conf, "namespace_admin", new String[0]);
  USER_NS_CREATE = User.createUserForTesting(conf, "namespace_create", new String[0]);
  USER_NS_WRITE = User.createUserForTesting(conf, "namespace_write", new String[0]);
  USER_NS_READ = User.createUserForTesting(conf, "namespace_read", new String[0]);
  USER_NS_EXEC = User.createUserForTesting(conf, "namespace_exec", new String[0]);

  USER_TABLE_CREATE = User.createUserForTesting(conf, "table_create", new String[0]);
  USER_TABLE_WRITE = User.createUserForTesting(conf, "table_write", new String[0]);

  USER_GROUP_ADMIN =
      User.createUserForTesting(conf, "user_group_admin", new String[] {GROUP_ADMIN});
  USER_GROUP_NS_ADMIN =
      User.createUserForTesting(conf, "user_group_ns_admin", new String[] {GROUP_NS_ADMIN});
  USER_GROUP_CREATE =
      User.createUserForTesting(conf, "user_group_create", new String[] {GROUP_CREATE});
  USER_GROUP_READ =
      User.createUserForTesting(conf, "user_group_read", new String[] {GROUP_READ});
  USER_GROUP_WRITE =
      User.createUserForTesting(conf, "user_group_write", new String[] {GROUP_WRITE});
  // TODO: other table perms

  UTIL.startMiniCluster();
  // Wait for the ACL table to become available
  UTIL.waitTableAvailable(AccessControlLists.ACL_TABLE_NAME.getName(), 30 * 1000);

  ACCESS_CONTROLLER = (AccessController) UTIL.getMiniHBaseCluster().getMaster()
      .getRegionServerCoprocessorHost().findCoprocessor(AccessController.class.getName());

  UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(TEST_NAMESPACE).build());
  UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(TEST_NAMESPACE2).build());

  // grants on global
  grantGlobal(UTIL, USER_GLOBAL_ADMIN.getShortName(), Permission.Action.ADMIN);
  grantGlobal(UTIL, USER_GLOBAL_CREATE.getShortName(), Permission.Action.CREATE);
  grantGlobal(UTIL, USER_GLOBAL_WRITE.getShortName(), Permission.Action.WRITE);
  grantGlobal(UTIL, USER_GLOBAL_READ.getShortName(), Permission.Action.READ);
  grantGlobal(UTIL, USER_GLOBAL_EXEC.getShortName(), Permission.Action.EXEC);

  // grants on namespace
  grantOnNamespace(UTIL, USER_NS_ADMIN.getShortName(), TEST_NAMESPACE, Permission.Action.ADMIN);
  grantOnNamespace(UTIL, USER_NS_CREATE.getShortName(), TEST_NAMESPACE, Permission.Action.CREATE);
  grantOnNamespace(UTIL, USER_NS_WRITE.getShortName(), TEST_NAMESPACE, Permission.Action.WRITE);
  grantOnNamespace(UTIL, USER_NS_READ.getShortName(), TEST_NAMESPACE, Permission.Action.READ);
  grantOnNamespace(UTIL, USER_NS_EXEC.getShortName(), TEST_NAMESPACE, Permission.Action.EXEC);
  grantOnNamespace(UTIL, toGroupEntry(GROUP_NS_ADMIN), TEST_NAMESPACE, Permission.Action.ADMIN);

  grantOnNamespace(UTIL, USER_NS_ADMIN.getShortName(), TEST_NAMESPACE2, Permission.Action.ADMIN);

  grantGlobal(UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN);
  grantGlobal(UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE);
  grantGlobal(UTIL, toGroupEntry(GROUP_READ), Permission.Action.READ);
  grantGlobal(UTIL, toGroupEntry(GROUP_WRITE), Permission.Action.WRITE);
}
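// Hedged sketch (not part of the original source): a typical test built on this setup might run
// a namespace operation as one of the granted users. TEST_NAMESPACE, conf, and USER_NS_ADMIN
// come from the fields above; the test name and the specific assertion are illustrative only.
@Test
public void testNamespaceAdminCanDescribeNamespace() throws Exception {
  USER_NS_ADMIN.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
        // USER_NS_ADMIN was granted ADMIN on TEST_NAMESPACE above, so describing the namespace
        // is expected to succeed rather than fail with an AccessDeniedException.
        NamespaceDescriptor desc = admin.getNamespaceDescriptor(TEST_NAMESPACE);
        assertEquals(TEST_NAMESPACE, desc.getName());
      }
      return null;
    }
  });
}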