@Test(timeout = 60000)
public void testDisableCompaction() throws Exception {
    // prepare data
    LedgerHandle[] lhs = prepareData(3, false);

    // disable compaction
    baseConf.setMinorCompactionThreshold(0.0f);
    baseConf.setMajorCompactionThreshold(0.0f);

    // restart bookies
    restartBookies(baseConf);

    // remove ledger2 and ledger3,
    // so entry logs 1 and 2 would have only ledger1 entries left
    bkc.deleteLedger(lhs[1].getId());
    bkc.deleteLedger(lhs[2].getId());
    LOG.info("Finished deleting the ledgers containing the most entries.");
    Thread.sleep(baseConf.getMajorCompactionInterval() * 1000
            + baseConf.getGcWaitTime());

    // entry logs ([0,1].log) should not be compacted
    for (File ledgerDirectory : tmpDirs) {
        assertTrue("Did not find entry log files ([0,1].log) that should not have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, false, 0, 1));
    }
}
@Test(timeout = 60000)
public void testMajorCompaction() throws Exception {
    // prepare data
    LedgerHandle[] lhs = prepareData(3, true);

    for (LedgerHandle lh : lhs) {
        lh.close();
    }

    // disable minor compaction
    baseConf.setMinorCompactionThreshold(0.0f);

    // restart bookies
    restartBookies(baseConf);

    // remove ledger1 and ledger3
    bkc.deleteLedger(lhs[0].getId());
    bkc.deleteLedger(lhs[2].getId());
    LOG.info("Finished deleting the ledgers containing the most entries.");
    Thread.sleep(baseConf.getMajorCompactionInterval() * 1000
            + baseConf.getGcWaitTime());

    // entry logs ([0,1,2].log) should be compacted
    for (File ledgerDirectory : tmpDirs) {
        assertFalse("Found entry log files ([0,1,2].log) that should have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0, 1, 2));
    }

    // even though the entry log files are removed, we can still access the entries
    // of ledger2, since they have been compacted into a new entry log
    verifyLedger(lhs[1].getId(), 0, lhs[1].getLastAddConfirmed());
}
@Test(timeout = 60000)
public void testCompactionOnDeletedLedgers() throws Exception {
    // prepare data
    final LedgerHandle[] lhs = prepareData(3, false);

    // disable compaction
    baseConf.setMinorCompactionThreshold(0.0f);
    baseConf.setMajorCompactionThreshold(0.0f);

    // restart bookies so the new thresholds take effect
    restartBookies(baseConf);

    // remove ledger2 and ledger3,
    // so entry logs 1 and 2 would have only ledger1 entries left
    bkc.deleteLedger(lhs[1].getId());
    bkc.deleteLedger(lhs[2].getId());
    LOG.info("Finished deleting the ledgers containing the most entries.");
    Thread.sleep(baseConf.getMajorCompactionInterval() * 1000
            + baseConf.getGcWaitTime());

    Bookie bookie = this.bs.get(0).getBookie();
    final GarbageCollectorThread gcThread =
            ((InterleavedLedgerStorage) bookie.ledgerStorage).gcThread;
    final CountDownLatch flushLatch = new CountDownLatch(1);
    final CountDownLatch flushNotifier = new CountDownLatch(1);
    Thread flushThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                flushNotifier.await();
            } catch (InterruptedException e) {
                // no-op
            }
            try {
                // delete the remaining ledger while compaction is in progress
                bkc.deleteLedger(lhs[0].getId());
            } catch (InterruptedException e) {
                LOG.warn("Interrupted on deleting ledger {} : ", lhs[0].getId(), e);
            } catch (BKException e) {
                LOG.error("Error on deleting ledger {} : ", lhs[0].getId(), e);
            }
            gcThread.doGcLedgers();
            flushLatch.countDown();
        }
    }, "flush-thread");
    flushThread.start();

    gcThread.doCompactEntryLogs(0.99, flushNotifier, flushLatch);

    // entry logs ([0,1,2].log) should be compacted
    for (File ledgerDirectory : tmpDirs) {
        assertFalse("Found entry log files ([0,1,2].log) that should have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0, 1, 2));
    }
}
@Test(timeout = 60000)
public void testCompactionSmallEntryLogs() throws Exception {
    // create a ledger to write a few entries
    LedgerHandle alh = bkc.createLedger(NUM_BOOKIES, NUM_BOOKIES, digestType, "".getBytes());
    for (int i = 0; i < 3; i++) {
        alh.addEntry(msg.getBytes());
    }
    alh.close();

    // restart bookies to roll entry log files
    restartBookies();

    // prepare data
    LedgerHandle[] lhs = prepareData(3, false);

    for (LedgerHandle lh : lhs) {
        lh.close();
    }

    // remove ledger2 and ledger3
    bkc.deleteLedger(lhs[1].getId());
    bkc.deleteLedger(lhs[2].getId());
    LOG.info("Finished deleting the ledgers containing the most entries.");

    // restart bookies again to roll entry log files
    restartBookies();
    Thread.sleep(baseConf.getMajorCompactionInterval() * 1000
            + baseConf.getGcWaitTime());

    // entry log 0 (0.log) should not be compacted,
    // entry logs ([1,2,3].log) should be compacted
    for (File ledgerDirectory : tmpDirs) {
        assertTrue("Did not find entry log file (0.log) that should not have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0));
        assertFalse("Found entry log files ([1,2,3].log) that should have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 1, 2, 3));
    }

    // even though the entry log files are removed, we can still access the entries
    // of ledger1, since they have been compacted into a new entry log
    verifyLedger(lhs[0].getId(), 0, lhs[0].getLastAddConfirmed());
}
@Test(timeout = 60000)
public void testMajorCompactionAboveThreshold() throws Exception {
    // prepare data
    LedgerHandle[] lhs = prepareData(3, false);

    for (LedgerHandle lh : lhs) {
        lh.close();
    }

    // remove ledger1 and ledger2
    bkc.deleteLedger(lhs[0].getId());
    bkc.deleteLedger(lhs[1].getId());
    LOG.info("Finished deleting the ledgers containing fewer entries.");
    Thread.sleep(baseConf.getMajorCompactionInterval() * 1000
            + baseConf.getGcWaitTime());

    // entry logs ([0,1,2].log) should not be compacted, since the
    // remaining usage is still above the major compaction threshold
    for (File ledgerDirectory : tmpDirs) {
        assertTrue("Did not find entry log files ([0,1,2].log) that should not have been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, false, 0, 1, 2));
    }
}