/**
 * Verifies that requesting a minor or major compaction on a column family that does not
 * exist fails with an {@link IOException} for both compaction types.
 *
 * @throws IOException on unexpected cluster I/O failure
 * @throws InterruptedException if interrupted while waiting on cluster operations
 */
@Test
public void testInvalidColumnFamily() throws IOException, InterruptedException {
  byte[] table = Bytes.toBytes("testInvalidColumnFamily");
  byte[] family = Bytes.toBytes("family");
  byte[] fakecf = Bytes.toBytes("fakecf");
  boolean caughtMinorCompact = false;
  boolean caughtMajorCompact = false;
  Table ht = null;
  HBaseAdmin admin = null;
  try {
    ht = TEST_UTIL.createTable(table, family);
    admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
    try {
      admin.compact(table, fakecf);
    } catch (IOException ioe) {
      // expected: the requested column family does not exist
      caughtMinorCompact = true;
    }
    try {
      admin.majorCompact(table, fakecf);
    } catch (IOException ioe) {
      // expected: the requested column family does not exist
      caughtMajorCompact = true;
    }
  } finally {
    // FIX: close the admin connection (previously leaked).
    if (admin != null) {
      admin.close();
    }
    if (ht != null) {
      TEST_UTIL.deleteTable(table);
    }
  }
  // FIX: assertions moved out of the finally block so a failed assertion cannot
  // mask an exception thrown by the test body itself.
  assertTrue(caughtMinorCompact);
  assertTrue(caughtMajorCompact);
}
/** * Tests overriding compaction handling via coprocessor hooks * * @throws Exception */ @Test public void testCompactionOverride() throws Exception { byte[] compactTable = Bytes.toBytes("TestCompactionOverride"); HBaseAdmin admin = util.getHBaseAdmin(); if (admin.tableExists(compactTable)) { admin.disableTable(compactTable); admin.deleteTable(compactTable); } HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable)); htd.addFamily(new HColumnDescriptor(A)); htd.addCoprocessor(EvenOnlyCompactor.class.getName()); admin.createTable(htd); HTable table = new HTable(util.getConfiguration(), compactTable); for (long i = 1; i <= 10; i++) { byte[] iBytes = Bytes.toBytes(i); Put put = new Put(iBytes); put.setDurability(Durability.SKIP_WAL); put.add(A, A, iBytes); table.put(put); } HRegion firstRegion = cluster.getRegions(compactTable).get(0); Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName()); assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp); EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp; // force a compaction long ts = System.currentTimeMillis(); admin.flush(compactTable); // wait for flush for (int i = 0; i < 10; i++) { if (compactor.lastFlush >= ts) { break; } Thread.sleep(1000); } assertTrue("Flush didn't complete", compactor.lastFlush >= ts); LOG.debug("Flush complete"); ts = compactor.lastFlush; admin.majorCompact(compactTable); // wait for compaction for (int i = 0; i < 30; i++) { if (compactor.lastCompaction >= ts) { break; } Thread.sleep(1000); } LOG.debug("Last compaction was at " + compactor.lastCompaction); assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts); // only even rows should remain ResultScanner scanner = table.getScanner(new Scan()); try { for (long i = 2; i <= 10; i += 2) { Result r = scanner.next(); assertNotNull(r); assertFalse(r.isEmpty()); byte[] iBytes = Bytes.toBytes(i); assertArrayEquals("Row should be " + i, r.getRow(), iBytes); 
assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes); } } finally { scanner.close(); } table.close(); }
/**
 * This test is to test the scenario happened in HBASE-6901. All files are bulk loaded and
 * excluded from minor compaction. Without the fix of HBASE-6901, an
 * ArrayIndexOutOfBoundsException will be thrown.
 */
@Ignore("Flakey: See HBASE-9051")
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  // Require at least two store files before minor compaction may select anything.
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files; mark every bulk-loaded file as excluded from
    // minor compaction (the HBASE-6901 scenario).
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals(
        "LoadIncrementalHFiles should put expected data in table",
        expectedRows,
        util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file: quickPoll is expected to
    // FAIL (throw AssertionError) because the two excluded files never merge into one.
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(
          new Callable<Boolean>() {
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);
      // Reaching here means the files WERE minor-compacted — fail the test loudly.
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though: it ignores the exclusion flag and
    // must collapse the store down to a single file within the poll window.
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(
        new Callable<Boolean>() {
          public Boolean call() throws Exception {
            return fs.listStatus(storePath).length == 1;
          }
        },
        5000);
  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
/** * Load data to a table, flush it to disk, trigger compaction, confirm the compaction state is * right and wait till it is done. * * @param tableName * @param flushes * @param expectedState * @param singleFamily otherwise, run compaction on all cfs * @throws IOException * @throws InterruptedException */ private void compaction( final String tableName, final int flushes, final CompactionState expectedState, boolean singleFamily) throws IOException, InterruptedException { // Create a table with regions TableName table = TableName.valueOf(tableName); byte[] family = Bytes.toBytes("family"); byte[][] families = { family, Bytes.add(family, Bytes.toBytes("2")), Bytes.add(family, Bytes.toBytes("3")) }; Table ht = null; try { ht = TEST_UTIL.createTable(table, families); loadData(ht, families, 3000, flushes); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); List<HRegion> regions = rs.getOnlineRegions(table); int countBefore = countStoreFilesInFamilies(regions, families); int countBeforeSingleFamily = countStoreFilesInFamily(regions, family); assertTrue(countBefore > 0); // there should be some data files HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); if (expectedState == CompactionState.MINOR) { if (singleFamily) { admin.compact(table.getName(), family); } else { admin.compact(table.getName()); } } else { if (singleFamily) { admin.majorCompact(table.getName(), family); } else { admin.majorCompact(table.getName()); } } long curt = System.currentTimeMillis(); long waitTime = 5000; long endt = curt + waitTime; CompactionState state = admin.getCompactionState(table.getName()); while (state == CompactionState.NONE && curt < endt) { Thread.sleep(10); state = admin.getCompactionState(table.getName()); curt = System.currentTimeMillis(); } // Now, should have the right compaction state, // otherwise, the compaction should have already been done if (expectedState != state) { for (HRegion region : regions) { state = 
region.getCompactionState(); assertEquals(CompactionState.NONE, state); } } else { // Wait until the compaction is done state = admin.getCompactionState(table.getName()); while (state != CompactionState.NONE && curt < endt) { Thread.sleep(10); state = admin.getCompactionState(table.getName()); } // Now, compaction should be done. assertEquals(CompactionState.NONE, state); } int countAfter = countStoreFilesInFamilies(regions, families); int countAfterSingleFamily = countStoreFilesInFamily(regions, family); assertTrue(countAfter < countBefore); if (!singleFamily) { if (expectedState == CompactionState.MAJOR) assertTrue(families.length == countAfter); else assertTrue(families.length < countAfter); } else { int singleFamDiff = countBeforeSingleFamily - countAfterSingleFamily; // assert only change was to single column family assertTrue(singleFamDiff == (countBefore - countAfter)); if (expectedState == CompactionState.MAJOR) { assertTrue(1 == countAfterSingleFamily); } else { assertTrue(1 < countAfterSingleFamily); } } } finally { if (ht != null) { TEST_UTIL.deleteTable(table); } } }
public void doTest(Class<?> regionClass, boolean distributedLogReplay) throws Exception { Configuration c = TEST_UTIL.getConfiguration(); c.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay); // Insert our custom region c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class); c.setBoolean("dfs.support.append", true); // Encourage plenty of flushes c.setLong("hbase.hregion.memstore.flush.size", 200000); c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); // Only run compaction when we tell it to c.setInt("hbase.hstore.compactionThreshold", 1000); c.setLong("hbase.hstore.blockingStoreFiles", 1000); // Compact quickly after we tell it to! c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000); LOG.info("Starting mini cluster"); TEST_UTIL.startMiniCluster(1); CompactionBlockerRegion compactingRegion = null; Admin admin = null; try { LOG.info("Creating admin"); admin = TEST_UTIL.getConnection().getAdmin(); LOG.info("Creating table"); TEST_UTIL.createTable(TABLE_NAME, FAMILY); Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME); LOG.info("Loading test table"); // Find the region List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME); assertEquals(1, testRegions.size()); compactingRegion = (CompactionBlockerRegion) testRegions.get(0); LOG.info("Blocking compactions"); compactingRegion.stopCompactions(); long lastFlushTime = compactingRegion.getLastFlushTime(); // Load some rows TEST_UTIL.loadNumericRows(table, FAMILY, 0, FIRST_BATCH_COUNT); // add a compaction from an older (non-existing) region to see whether we successfully skip // those entries HRegionInfo oldHri = new HRegionInfo(table.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor( oldHri, FAMILY, Lists.newArrayList(new Path("/a")), Lists.newArrayList(new Path("/b")), new 
Path("store_dir")); WALUtil.writeCompactionMarker( compactingRegion.getWAL(), table.getTableDescriptor(), oldHri, compactionDescriptor, new AtomicLong(Long.MAX_VALUE - 100)); // Wait till flush has happened, otherwise there won't be multiple store files long startWaitTime = System.currentTimeMillis(); while (compactingRegion.getLastFlushTime() <= lastFlushTime || compactingRegion.countStoreFiles() <= 1) { LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString()); Thread.sleep(1000); assertTrue( "Timed out waiting for the region to flush", System.currentTimeMillis() - startWaitTime < 30000); } assertTrue(compactingRegion.countStoreFiles() > 1); final byte REGION_NAME[] = compactingRegion.getRegionName(); LOG.info("Asking for compaction"); ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName()); LOG.info("Waiting for compaction to be about to start"); compactingRegion.waitForCompactionToBlock(); LOG.info("Starting a new server"); RegionServerThread newServerThread = TEST_UTIL.getMiniHBaseCluster().startRegionServer(); final HRegionServer newServer = newServerThread.getRegionServer(); LOG.info("Killing region server ZK lease"); TEST_UTIL.expireRegionServerSession(0); CompactionBlockerRegion newRegion = null; startWaitTime = System.currentTimeMillis(); LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME)); // wait for region to be assigned and to go out of log replay if applicable Waiter.waitFor( c, 60000, new Waiter.Predicate<Exception>() { @Override public boolean evaluate() throws Exception { HRegion newRegion = newServer.getOnlineRegion(REGION_NAME); return newRegion != null && !newRegion.isRecovering(); } }); newRegion = (CompactionBlockerRegion) newServer.getOnlineRegion(REGION_NAME); LOG.info("Allowing compaction to proceed"); compactingRegion.allowCompactions(); while (compactingRegion.compactCount == 0) { Thread.sleep(1000); } // The server we killed stays up until the compaction that was 
started before it was killed // completes. In logs // you should see the old regionserver now going down. LOG.info("Compaction finished"); // After compaction of old region finishes on the server that was going down, make sure that // all the files we expect are still working when region is up in new location. FileSystem fs = newRegion.getFilesystem(); for (String f : newRegion.getStoreFileList(new byte[][] {FAMILY})) { assertTrue("After compaction, does not exist: " + f, fs.exists(new Path(f))); } // If we survive the split keep going... // Now we make sure that the region isn't totally confused. Load up more rows. TEST_UTIL.loadNumericRows( table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT); ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName()); startWaitTime = System.currentTimeMillis(); while (newRegion.compactCount == 0) { Thread.sleep(1000); assertTrue( "New region never compacted", System.currentTimeMillis() - startWaitTime < 180000); } assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table)); } finally { if (compactingRegion != null) { compactingRegion.allowCompactions(); } admin.close(); TEST_UTIL.shutdownMiniCluster(); } }