@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
  TEST_UTIL.startMiniCluster(3);
}
/**
 * Test snapshotting a table that is online, without flushing.
 * @throws Exception
 */
@Test
public void testSkipFlushTableSnapshot() throws Exception {
  // Make sure we don't fail on listing snapshots.
  SnapshotTestingUtils.assertNoSnapshots(admin);

  // Put some data in the table.
  Table table = UTIL.getConnection().getTable(TABLE_NAME);
  UTIL.loadTable(table, TEST_FAM);
  UTIL.flush(TABLE_NAME);

  LOG.debug("FS state before snapshot:");
  UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

  // Take a snapshot of the enabled table.
  String snapshotString = "skipFlushTableSnapshot";
  byte[] snapshot = Bytes.toBytes(snapshotString);
  admin.snapshot(snapshotString, TABLE_NAME, SnapshotType.SKIPFLUSH);
  LOG.debug("Snapshot completed.");

  // Make sure we have the snapshot.
  List<SnapshotDescription> snapshots =
      SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);

  // Make sure it's a valid snapshot.
  LOG.debug("FS state after snapshot:");
  UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

  SnapshotTestingUtils.confirmSnapshotValid(
      UTIL, ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM);

  admin.deleteSnapshot(snapshot);
  snapshots = admin.listSnapshots();
  SnapshotTestingUtils.assertNoSnapshots(admin);
}
@Test(timeout = 30000)
public void testInfo() {
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master);
  assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0);
  assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0);
  assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0);
  assertEquals(master.getClusterId(), info.getClusterId());
  assertEquals(master.getMasterActiveTime(), info.getActiveTime());
  assertEquals(master.getMasterStartTime(), info.getStartTime());
  assertEquals(master.getMasterCoprocessors().length, info.getCoprocessors().length);
  assertEquals(master.getServerManager().getOnlineServersList().size(),
      info.getNumRegionServers());
  assertEquals(5, info.getNumRegionServers());
  String zkServers = info.getZookeeperQuorum();
  assertEquals(zkServers.split(",").length, TEST_UTIL.getZkCluster().getZooKeeperServerNum());

  final int index = 3;
  LOG.info("Stopping " + TEST_UTIL.getMiniHBaseCluster().getRegionServer(index));
  TEST_UTIL.getMiniHBaseCluster().stopRegionServer(index, false);
  TEST_UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
  // We stopped the regionserver, but it can take a while for the master to notice,
  // so hang here until it does... then check whether the metrics wrapper notices too.
  while (TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size()
      != 4) {
    Threads.sleep(10);
  }
  assertEquals(4, info.getNumRegionServers());
  assertEquals(1, info.getNumDeadRegionServers());
  assertEquals(1, info.getNumWALFiles());
}
@Test
public void testAppendHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Append app = new Append(Bytes.toBytes(0));
    app.add(A, A, A);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false});

    table.append(app);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
private void runIncrementalPELoad(
    Configuration conf, HTableDescriptor tableDescriptor, RegionLocator regionLocator, Path outDir)
    throws IOException, UnsupportedEncodingException, InterruptedException,
        ClassNotFoundException {
  Job job = new Job(conf, "testLocalMRIncrementalLoad");
  job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
  job.getConfiguration()
      .setStrings(
          "io.serializations",
          conf.get("io.serializations"),
          MutationSerialization.class.getName(),
          ResultSerialization.class.getName(),
          KeyValueSerialization.class.getName());
  setupRandomGeneratorMapper(job);
  HFileOutputFormat2.configureIncrementalLoad(job, tableDescriptor, regionLocator);
  FileOutputFormat.setOutputPath(job, outDir);

  assertFalse(util.getTestFileSystem().exists(outDir));
  assertEquals(regionLocator.getAllRegionLocations().size(), job.getNumReduceTasks());
  assertTrue(job.waitForCompletion(true));
}
@Before
public void setup() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
  logFile = new Path(TEST_UTIL.getDataTestDir(), "test.log");
  writeTestLog(logFile);
}
@Test
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false});

    table.increment(inc);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
@Test(timeout = 30000)
public void testCreateDeleteTable() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

  Put p = new Put(row);
  p.add(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    // But if we ask for stale we will get it.
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
    SlowMeCopro.cdl.get().countDown();
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
}
private void verify(TableName tableName) throws IOException {
  Table table = UTIL.getConnection().getTable(tableName);
  boolean verified = false;
  long pause = UTIL.getConfiguration().getLong("hbase.client.pause", 5 * 1000);
  int numRetries = UTIL.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
  for (int i = 0; i < numRetries; i++) {
    try {
      LOG.info("Verification attempt #" + i);
      verifyAttempt(table);
      verified = true;
      break;
    } catch (NullPointerException e) {
      // If here, a cell was empty. Presume it's because updates came in
      // after the scanner had been opened. Wait a while and retry.
      LOG.debug("Verification attempt failed: " + e.getMessage());
    }
    try {
      Thread.sleep(pause);
    } catch (InterruptedException e) {
      // continue
    }
  }
  assertTrue(verified);
  table.close();
}
@Test(timeout = 300000)
public void testEnableDisableAddColumnDeleteColumn() throws Exception {
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  TableName tableName = TableName.valueOf("testMasterAdmin");
  TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
  while (!ZKTableStateClientSideReader.isEnabledTable(zkw, tableName)) {
    Thread.sleep(10);
  }
  this.admin.disableTable(tableName);
  try {
    // Opening an HTable against a disabled table should fail.
    new HTable(TEST_UTIL.getConfiguration(), tableName);
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
    // expected
  }

  this.admin.addColumn(tableName, new HColumnDescriptor("col2"));
  this.admin.enableTable(tableName);
  try {
    this.admin.deleteColumn(tableName, Bytes.toBytes("col2"));
  } catch (TableNotDisabledException e) {
    LOG.info(e);
  }
  this.admin.disableTable(tableName);
  this.admin.deleteTable(tableName);
}
/**
 * Add a value to each of the regions in .META. The key is the start row of the
 * region (except it's 'aaa' for the first region). The actual value is the row name.
 * @param expected expected number of rows to add
 * @return number of rows added
 * @throws IOException
 */
private static int addToEachStartKey(final int expected) throws IOException {
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null; ) {
    byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    if (b == null || b.length <= 0) break;
    HRegionInfo hri = Writables.getHRegionInfo(b);
    // If the start key is empty (first region), 'aaa' is used instead.
    byte[] row = getStartKey(hri);
    Put p = new Put(row);
    p.setWriteToWAL(false);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
@Test
public void testCountReferencesFailsSplit() throws IOException {
  final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
  assertTrue(rowcount > 0);
  int parentRowCount = TEST_UTIL.countRows(this.parent);
  assertEquals(rowcount, parentRowCount);

  // Start transaction.
  HRegion spiedRegion = spy(this.parent);
  SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion);
  SplitTransactionImpl spiedUponSt = spy(st);
  doThrow(new IOException("Failing split. Expected reference file count isn't equal."))
      .when(spiedUponSt)
      .assertReferenceFileCount(
          anyInt(),
          eq(new Path(
              this.parent.getRegionFileSystem().getTableDir(),
              st.getSecondDaughter().getEncodedName())));

  // Run the execute. Look at what it returns.
  boolean expectedException = false;
  Server mockServer = Mockito.mock(Server.class);
  when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
  try {
    spiedUponSt.execute(mockServer, null);
  } catch (IOException e) {
    expectedException = true;
  }
  assertTrue(expectedException);
}
@Test(timeout = 60000)
public void testExceptionDuringInitialization() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
  conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
  TEST_UTIL.startMiniCluster(2);
  try {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    // Trigger one regionserver to fail as if it came up with a coprocessor
    // that fails during initialization.
    final HRegionServer regionServer = cluster.getRegionServer(0);
    conf.set(
        CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        FailedInitializationObserver.class.getName());
    regionServer
        .getRegionServerCoprocessorHost()
        .loadSystemCoprocessors(conf, CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
    TEST_UTIL.waitFor(
        10000,
        1000,
        new Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            return regionServer.isAborted();
          }
        });
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
@Test
public void testInvalidColumnFamily() throws IOException, InterruptedException {
  byte[] table = Bytes.toBytes("testInvalidColumnFamily");
  byte[] family = Bytes.toBytes("family");
  byte[] fakecf = Bytes.toBytes("fakecf");
  boolean caughtMinorCompact = false;
  boolean caughtMajorCompact = false;
  Table ht = null;
  try {
    ht = TEST_UTIL.createTable(table, family);
    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
    // Compaction requests against a nonexistent column family should throw.
    try {
      admin.compact(table, fakecf);
    } catch (IOException ioe) {
      caughtMinorCompact = true;
    }
    try {
      admin.majorCompact(table, fakecf);
    } catch (IOException ioe) {
      caughtMajorCompact = true;
    }
  } finally {
    if (ht != null) {
      TEST_UTIL.deleteTable(table);
    }
    assertTrue(caughtMinorCompact);
    assertTrue(caughtMajorCompact);
  }
}
/**
 * Assert that getSplitEditFilesSorted returns files in expected order and that it skips
 * moved-aside files.
 * @throws IOException
 */
@Test
public void testGetSplitEditFilesSorted() throws IOException {
  FileSystem fs = FileSystem.get(util.getConfiguration());
  Path regiondir = util.getDataTestDir("regiondir");
  fs.delete(regiondir, true);
  fs.mkdirs(regiondir);
  Path recoverededits = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
  String first = WALSplitter.formatRecoveredEditsFileName(-1);
  createFile(fs, recoverededits, first);
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(0));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(1));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(11));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(2));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(50));
  String last = WALSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
  createFile(fs, recoverededits, last);
  // A moved-aside file; getSplitEditFilesSorted should skip it.
  createFile(
      fs, recoverededits, Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());

  final Configuration walConf = new Configuration(util.getConfiguration());
  FSUtils.setRootDir(walConf, regiondir);
  (new WALFactory(walConf, null, "dummyLogName")).getWAL(new byte[] {}, null);

  NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
  assertEquals(7, files.size());
  assertEquals(files.pollFirst().getName(), first);
  assertEquals(files.pollLast().getName(), last);
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(0));
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(1));
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(2));
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(11));
}
/**
 * Test that if we fail a flush, abort gets set on close.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-4270">HBASE-4270</a>
 * @throws IOException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test
public void testFailedFlushAborts() throws IOException, NodeExistsException, KeeperException {
  final Server server = new MockServer(HTU, false);
  final RegionServerServices rss = HTU.createMockRegionServerService();
  HTableDescriptor htd = TEST_HTD;
  final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW);
  HRegion region = HTU.createLocalHRegion(hri, htd);
  try {
    assertNotNull(region);
    // Spy on the region so we can throw an exception when close is called.
    HRegion spy = Mockito.spy(region);
    final boolean abort = false;
    Mockito.when(spy.close(abort)).thenThrow(new RuntimeException("Mocked failed close!"));
    // The CloseRegionHandler will try to get an HRegion that corresponds
    // to the passed hri -- so insert the region into the online region Set.
    rss.addToOnlineRegions(spy);
    // Assert the Server is NOT stopped before we call close region.
    assertFalse(server.isStopped());
    CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, false, -1);
    boolean throwable = false;
    try {
      handler.process();
    } catch (Throwable t) {
      throwable = true;
    } finally {
      assertTrue(throwable);
      // Abort calls stop, so the stopped flag should be set.
      assertTrue(server.isStopped());
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
/** @throws java.lang.Exception */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  admin = new ReplicationAdmin(conf);
}
@Before
public void setup() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  conf = TEST_UTIL.getConfiguration();
  // Use a different ZK wrapper instance for each test.
  zkw =
      new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);
  LOG.debug(zkw.baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.splitLogZNode) != -1);
  LOG.debug(zkw.splitLogZNode + " created");

  stopped = false;
  resetCounters();

  // By default, we let the test manage the error as before, so the server does not
  // appear as dead from the master's point of view, only from the split log's.
  Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
  Mockito.when(master.getServerManager()).thenReturn(sm);

  to = 4000;
  conf.setInt("hbase.splitlog.manager.timeout", to);
  conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
  conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
  // Pad the timeout with a few monitor periods so checks don't race the timeout monitor.
  to = to + 4 * 100;
}
@Test
public void testRowMutation() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
        tableName,
        new Boolean[] {false, false, false, false, false});

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);

    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);

    RowMutations arm = new RowMutations(ROW);
    arm.add(put);
    arm.add(delete);
    table.mutateRow(arm);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
        tableName,
        new Boolean[] {false, false, true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
@Test
public void testMetaMigration() throws Exception {
  LOG.info("Starting testMetaMigration");
  final byte[] FAMILY = Bytes.toBytes("family");
  HTableDescriptor htd = new HTableDescriptor("testMetaMigration");
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  htd.addFamily(hcd);
  Configuration conf = TEST_UTIL.getConfiguration();
  byte[][] regionNames =
      new byte[][] {
        HConstants.EMPTY_START_ROW, Bytes.toBytes("region_a"), Bytes.toBytes("region_b")
      };
  createMultiRegionsWithWritableSerialization(conf, htd.getName(), regionNames);
  CatalogTracker ct = TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
  // Erase the current version of root meta for this test.
  undoVersionInRoot(ct);
  MetaReader.fullScanMetaAndPrint(ct);
  LOG.info("Meta Print completed.testMetaMigration");

  long numMigratedRows =
      MetaMigrationConvertingToPB.updateMeta(TEST_UTIL.getHBaseCluster().getMaster());
  MetaReader.fullScanMetaAndPrint(ct);

  // Every region we created should have been migrated, all for the table we just added.
  assertEquals(regionNames.length, numMigratedRows);

  // Assert that the flag in ROOT is updated to reflect the correct status.
  boolean metaUpdated =
      MetaMigrationConvertingToPB.isMetaTableUpdated(
          TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
  assertEquals(true, metaUpdated);
  verifyMetaRowsAreUpdated(ct);
}
@Test
public void testCheckAndDeleteHooks() throws IOException {
  TableName tableName =
      TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Put p = new Put(Bytes.toBytes(0));
    p.add(A, A, A);
    table.put(p);
    table.flushCommits();

    Delete d = new Delete(Bytes.toBytes(0));
    table.delete(d);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {
          "hadPreCheckAndDelete", "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"
        },
        tableName,
        new Boolean[] {false, false, false});

    table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {
          "hadPreCheckAndDelete", "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"
        },
        tableName,
        new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
/**
 * This test assumes a master crash/failure during the meta migration process and attempts to
 * continue the meta migration process when a new master takes over. When a master dies during
 * the meta migration, we will have some rows of META.CatalogFamily updated with PB
 * serialization and some still hanging with writable serialization. When the backup master (or
 * a fresh start of the master) attempts the migration, it will encounter some rows of META
 * already updated with the new HRI and some still legacy. This test simulates this scenario
 * and validates that the migration process can safely skip the updated rows and migrate any
 * pending rows at startup.
 * @throws Exception
 */
@Test
public void testMasterCrashDuringMetaMigration() throws Exception {
  final byte[] FAMILY = Bytes.toBytes("family");
  HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration");
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
  htd.addFamily(hcd);
  Configuration conf = TEST_UTIL.getConfiguration();
  // Create 10 new regions.
  createMultiRegionsWithPBSerialization(conf, htd.getName(), 10);
  // Create 10 legacy regions.
  createMultiRegionsWithWritableSerialization(conf, htd.getName(), 10);
  CatalogTracker ct = TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
  // Erase the current version of root meta for this test.
  undoVersionInRoot(ct);
  MetaReader.fullScanMetaAndPrint(ct);
  LOG.info("Meta Print completed.testMasterCrashDuringMetaMigration");

  long numMigratedRows =
      MetaMigrationConvertingToPB.updateMetaIfNecessary(TEST_UTIL.getHBaseCluster().getMaster());
  // Only the 10 legacy rows should have needed migration.
  assertEquals(numMigratedRows, 10);

  // Assert that the flag in ROOT is updated to reflect the correct status.
  boolean metaUpdated =
      MetaMigrationConvertingToPB.isMetaTableUpdated(
          TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
  assertEquals(true, metaUpdated);
  verifyMetaRowsAreUpdated(ct);
  LOG.info("END testMasterCrashDuringMetaMigration");
}
@Test
public void testPreWALRestoreSkip() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
  TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});

  JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
  ServerName sn2 = rs1.getRegionServer().getServerName();
  String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

  util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
  while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
    Thread.sleep(100);
  }

  Put put = new Put(ROW);
  put.add(A, A, A);
  put.add(B, B, B);
  put.add(C, C, C);
  table.put(put);
  table.flushCommits();

  cluster.killRegionServer(rs1.getRegionServer().getServerName());
  Threads.sleep(20000); // Just to be sure that the kill has fully started.
  util.waitUntilAllRegionsAssigned(tableName);

  verifyMethodResult(
      SimpleRegionObserver.class,
      new String[] {"getCtPreWALRestore", "getCtPostWALRestore"},
      tableName,
      new Integer[] {0, 0});

  util.deleteTable(tableName);
  table.close();
}
@Test(timeout = 60000)
public void testDeleteDeletedTable() throws Exception {
  final TableName tableName = TableName.valueOf("testDeleteDeletedTable");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  HRegionInfo[] regions =
      MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
  UTIL.getHBaseAdmin().disableTable(tableName);

  // Delete the table (that exists).
  long procId1 =
      procExec.submitProcedure(
          new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce);
  // Delete the table again (it will no longer exist).
  long procId2 =
      procExec.submitProcedure(
          new DeleteTableProcedure(procExec.getEnvironment(), tableName),
          nonceGroup + 1,
          nonce + 1);

  // Wait for completion.
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.waitProcedure(procExec, procId2);

  // First delete should succeed.
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
  MasterProcedureTestingUtility.validateTableDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");

  // Second delete should fail with TableNotFound.
  ProcedureResult result = procExec.getResult(procId2);
  assertTrue(result.isFailed());
  LOG.debug("Delete failed with exception: " + result.getException());
  assertTrue(result.getException().getCause() instanceof TableNotFoundException);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TestHelper.setupLogging();
  TEST_UTIL.startMiniCluster(1);
  IndexManager.createIndexMetaTable(TEST_UTIL.getConfiguration());
}
@Test(timeout = 60000)
public void testDoubleDeletedTableWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testDoubleDeletedTableWithSameNonce");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  HRegionInfo[] regions =
      MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f");
  UTIL.getHBaseAdmin().disableTable(tableName);

  // Delete the table (that exists).
  long procId1 =
      procExec.submitProcedure(
          new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce);
  // Submit the same delete again, with the same nonce group and nonce.
  long procId2 =
      procExec.submitProcedure(
          new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce);

  // Wait for completion.
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.waitProcedure(procExec, procId2);

  // First delete should succeed.
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
  MasterProcedureTestingUtility.validateTableDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");

  // Second delete should not fail, because it is the same delete.
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
  assertTrue(procId1 == procId2);
}
/**
 * Test simple flush snapshotting a table that is online.
 * @throws Exception
 */
@Test
public void testFlushTableSnapshotWithProcedure() throws Exception {
  // Make sure we don't fail on listing snapshots.
  SnapshotTestingUtils.assertNoSnapshots(admin);

  // Put some data in the table.
  SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);

  LOG.debug("FS state before snapshot:");
  UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

  // Take a snapshot of the enabled table.
  String snapshotString = "offlineTableSnapshot";
  byte[] snapshot = Bytes.toBytes(snapshotString);
  Map<String, String> props = new HashMap<String, String>();
  props.put("table", TABLE_NAME.getNameAsString());
  admin.execProcedure(
      SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION, snapshotString, props);
  LOG.debug("Snapshot completed.");

  // Make sure we have the snapshot.
  List<SnapshotDescription> snapshots =
      SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);

  // Make sure it's a valid snapshot.
  LOG.debug("FS state after snapshot:");
  UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

  SnapshotTestingUtils.confirmSnapshotValid(
      UTIL, ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM);
}
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");

  // Create the table.
  byte[][] splitKeys = null;
  HRegionInfo[] regions =
      MasterProcedureTestingUtility.createTable(
          getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
  UTIL.getHBaseAdmin().disableTable(tableName);

  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Start the Delete procedure && kill the executor.
  long procId =
      procExec.submitProcedure(
          new DeleteTableProcedure(procExec.getEnvironment(), tableName), nonceGroup, nonce);

  // Restart the executor and execute the step twice.
  // NOTE: the 6 (number of DeleteTableState steps) is hardcoded,
  // so you have to look at this test at least once when you add a new step.
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
      procExec, procId, 6, DeleteTableState.values());

  MasterProcedureTestingUtility.validateTableDeletion(
      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
private Table createTableAndLoadData(
    HMaster master, TableName tablename, int numRegions, int replication) throws Exception {
  assertTrue("ROWSIZE must be > numRegions: " + numRegions, ROWSIZE > numRegions);
  byte[][] splitRows = new byte[numRegions - 1][];
  for (int i = 0; i < splitRows.length; i++) {
    splitRows[i] = ROWS[(i + 1) * ROWSIZE / numRegions];
  }

  Table table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
  if (replication > 1) {
    HBaseTestingUtility.setReplicas(admin, tablename, replication);
  }
  loadData(table);
  verifyRowCount(table, ROWSIZE);

  // Sleeping here is an ugly hack to let region transitions finish.
  long timeout = System.currentTimeMillis() + waitTime;
  List<Pair<HRegionInfo, ServerName>> tableRegions;
  while (System.currentTimeMillis() < timeout) {
    tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
    if (tableRegions.size() == numRegions * replication) break;
    Thread.sleep(250);
  }
  tableRegions =
      MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
  LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
  assertEquals(numRegions * replication, tableRegions.size());
  return table;
}
@After
public void after() throws IOException {
  hTable = null;
  testingUtility.shutdownMiniCluster();
  testingUtility.shutdownMiniZKCluster();
  testingUtility = null;
}