Example #1
  /**
   * Test that compaction executes silently when there are no entry logs to compact. {@see
   * https://issues.apache.org/jira/browse/BOOKKEEPER-700}
   */
  @Test(timeout = 60000)
  public void testWhenNoLogsToCompact() throws Exception {
    tearDown(); // I don't want the test infrastructure
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    File tmpDir = createTempDir("bkTest", ".dir");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    conf.setLedgerDirNames(new String[] {tmpDir.toString()});

    LedgerDirsManager dirs = new LedgerDirsManager(conf, conf.getLedgerDirs());
    final Set<Long> ledgers = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
    ActiveLedgerManager manager = getActiveLedgerManager(ledgers);
    CheckpointSource checkpointSource =
        new CheckpointSource() {
          @Override
          public Checkpoint newCheckpoint() {
            return null;
          }

          @Override
          public void checkpointComplete(Checkpoint checkpoint, boolean compact)
              throws IOException {}
        };
    InterleavedLedgerStorage storage =
        new InterleavedLedgerStorage(
            conf, manager, dirs, dirs, checkpointSource, NullStatsLogger.INSTANCE);

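    // Entry logs whose remaining usage falls below this fraction are compaction candidates;
    // with no entry logs on disk there is nothing to scan.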
    double threshold = 0.1;
    // shouldn't throw exception
    storage.gcThread.doCompactEntryLogs(threshold);
  }
Example #2
  /**
   * Test that compaction doesn't add to the index without having persisted the entry log first. This is
   * needed because compaction doesn't go through the journal. {@see
   * https://issues.apache.org/jira/browse/BOOKKEEPER-530} {@see
   * https://issues.apache.org/jira/browse/BOOKKEEPER-664}
   */
  @Test(timeout = 60000)
  public void testCompactionSafety() throws Exception {
    tearDown(); // I don't want the test infrastructure
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    final Set<Long> ledgers = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
    ActiveLedgerManager manager = getActiveLedgerManager(ledgers);

    File tmpDir = createTempDir("compaction", "compactionSafety");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    conf.setLedgerDirNames(new String[] {tmpDir.toString()});

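    // Keep entry logs tiny (three entries of ENTRY_SIZE plus their 4-byte length prefixes each)
    // and run minor compaction every second, so garbage collection kicks in almost immediately.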
    conf.setEntryLogSizeLimit(EntryLogger.LOGFILE_HEADER_LENGTH + 3 * (4 + ENTRY_SIZE));
    conf.setGcWaitTime(100);
    conf.setMinorCompactionThreshold(0.7f);
    conf.setMajorCompactionThreshold(0.0f);
    conf.setMinorCompactionInterval(1);
    conf.setMajorCompactionInterval(10);
    conf.setPageLimit(1);

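    // Checkpoint source whose checkpoints are ordered by a monotonically increasing id;
    // completing a checkpoint persists nothing.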
    CheckpointSource checkpointProgress =
        new CheckpointSource() {
          AtomicInteger idGen = new AtomicInteger(0);

          class MyCheckpoint implements Checkpoint {
            int id = idGen.incrementAndGet();

            @Override
            public int compareTo(Checkpoint o) {
              if (o == Checkpoint.MAX) {
                return -1;
              } else if (o == Checkpoint.MIN) {
                return 1;
              }
              return id - ((MyCheckpoint) o).id;
            }
          }

          @Override
          public Checkpoint newCheckpoint() {
            return new MyCheckpoint();
          }

          @Override
          public void checkpointComplete(Checkpoint checkpoint, boolean compact)
              throws IOException {}
        };

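    // Given the size limit above, the four entries written below span two entry logs (0.log and 1.log).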
    final byte[] KEY = "foobar".getBytes();
    File log0 = new File(curDir, "0.log");
    File log1 = new File(curDir, "1.log");
    LedgerDirsManager dirs = new LedgerDirsManager(conf, conf.getLedgerDirs());
    assertFalse("Log 0 shouldnt exist", log0.exists());
    assertFalse("Log 1 shouldnt exist", log1.exists());
    InterleavedLedgerStorage storage =
        new InterleavedLedgerStorage(
            conf, manager, dirs, dirs, checkpointProgress, NullStatsLogger.INSTANCE);
    ledgers.add(1L);
    ledgers.add(2L);
    ledgers.add(3L);
    storage.setMasterKey(1, KEY);
    storage.setMasterKey(2, KEY);
    storage.setMasterKey(3, KEY);
    LOG.info("Write Ledger 1");
    storage.addEntry(genEntry(1, 1, ENTRY_SIZE));
    LOG.info("Write Ledger 2");
    storage.addEntry(genEntry(2, 1, ENTRY_SIZE));
    storage.addEntry(genEntry(2, 2, ENTRY_SIZE));
    LOG.info("Write ledger 3");
    storage.addEntry(genEntry(3, 2, ENTRY_SIZE));
    storage.flush();
    storage.shutdown();

    assertTrue("Log 0 should exist", log0.exists());
    assertTrue("Log 1 should exist", log1.exists());
    ledgers.remove(2L);
    ledgers.remove(3L);

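    // Restart the ledger storage: with ledgers 2 and 3 no longer active, the GC thread
    // should compact both entry logs away.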
    storage =
        new InterleavedLedgerStorage(
            conf, manager, dirs, dirs, checkpointProgress, NullStatsLogger.INSTANCE);
    storage.start();
    for (int i = 0; i < 10; i++) {
      if (!log0.exists() && !log1.exists()) {
        break;
      }
      Thread.sleep(1000);
      storage.entryLogger.flush(); // simulate sync thread
    }
    assertFalse("Log shouldnt exist", log0.exists());
    assertFalse("Log shouldnt exist", log1.exists());

    LOG.info("Write ledger 4");
    ledgers.add(4L);
    storage.setMasterKey(4, KEY);
    storage.addEntry(genEntry(4, 1, ENTRY_SIZE)); // force ledger 1 page to flush

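    // Reopen once more and read entry (1, 1); it must still be retrievable, which only holds if
    // compaction persisted the new entry log before updating the index.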
    storage =
        new InterleavedLedgerStorage(
            conf, manager, dirs, dirs, checkpointProgress, NullStatsLogger.INSTANCE);
    storage.getEntry(1, 1); // entry should exist
  }
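
Both examples rely on a genEntry(ledgerId, entryId, size) helper that the snippet does not show. A minimal sketch of such a helper, assuming the ledger storage reads the ledger id and entry id from the first 16 bytes of the buffer, could look like this (using java.nio.ByteBuffer):

  // Hypothetical helper, not part of the snippet above: build an entry of exactly
  // `size` bytes whose first 16 bytes carry the ledger id and entry id.
  private ByteBuffer genEntry(long ledger, long entry, int size) {
    ByteBuffer bb = ByteBuffer.wrap(new byte[size]);
    bb.putLong(ledger); // ledger id
    bb.putLong(entry);  // entry id
    bb.rewind();        // hand the whole buffer, header included, to addEntry
    return bb;
  }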