public ColumnFamilyStore testSingleSSTableCompaction(String strategyClassName) throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();
    store.metadata.gcGraceSeconds(1);
    store.setCompactionStrategyClass(strategyClassName);

    // disable compaction while flushing
    store.disableAutoCompaction();

    long timestamp = populate(KEYSPACE1, CF_STANDARD1, 0, 9, 3); // ttl=3s

    store.forceBlockingFlush();
    assertEquals(1, store.getSSTables().size());
    long originalSize = store.getSSTables().iterator().next().uncompressedLength();

    // wait enough to force single compaction
    TimeUnit.SECONDS.sleep(5);

    // enable compaction, submit background and wait for it to complete
    store.enableAutoCompaction();
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    while (CompactionManager.instance.getPendingTasks() > 0
        || CompactionManager.instance.getActiveCompactions() > 0) TimeUnit.SECONDS.sleep(1);

    // and the sstable with expired ttl columns should have been compacted
    assertEquals(1, store.getSSTables().size());
    long size = store.getSSTables().iterator().next().uncompressedLength();
    assertTrue("should be less than " + originalSize + ", but was " + size, size < originalSize);

    // make sure max timestamp of compacted sstables is recorded properly after compaction.
    assertMaxTimestamp(store, timestamp);

    return store;
  }
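  // The populate(...) and assertMaxTimestamp(...) helpers referenced above are not part of this
  // listing. A minimal sketch of what they might look like, following the same RowMutation
  // pattern used elsewhere in this file; the exact signatures are assumptions, and
  // SSTableReader.getMaxTimestamp() is assumed to report the sstable's maximum cell timestamp.
  protected long populate(String ks, String cf, int startRowKey, int endRowKey, int ttl) {
    long timestamp = System.currentTimeMillis();
    for (int i = startRowKey; i <= endRowKey; i++) {
      DecoratedKey key = Util.dk(Integer.toString(i));
      RowMutation rm = new RowMutation(ks, key.key);
      for (int j = 0; j < 10; j++)
        rm.add(
            cf,
            ByteBufferUtil.bytes(Integer.toString(j)),
            ByteBufferUtil.EMPTY_BYTE_BUFFER,
            timestamp,
            j > 0 ? ttl : 0); // keep the first column alive so the flush still produces an sstable
      rm.apply();
    }
    return timestamp;
  }

  public static void assertMaxTimestamp(ColumnFamilyStore cfs, long maxTimestampExpected) {
    long maxTimestampObserved = Long.MIN_VALUE;
    for (SSTableReader sstable : cfs.getSSTables())
      maxTimestampObserved = Math.max(sstable.getMaxTimestamp(), maxTimestampObserved);
    assertEquals(maxTimestampExpected, maxTimestampObserved);
  }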
  @Test
  public void testEchoedRow() throws IOException, ExecutionException, InterruptedException {
    // This test checks that EchoedRow doesn't skip rows: see CASSANDRA-2653

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    // Insert 4 keys in two sstables. The sstables need at least 2 rows each
    // to trigger the issue behind CASSANDRA-2653
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      RowMutation rm = new RowMutation(KEYSPACE1, key.key);
      rm.add(
          "Standard2",
          ByteBufferUtil.bytes(String.valueOf(i)),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          i);
      rm.apply();

      if (i % 2 == 0) cfs.forceBlockingFlush();
    }
    Collection<SSTableReader> toCompact = cfs.getSSTables();
    assert toCompact.size() == 2;

    // Reinsert the same keys. We will compact only the previous sstables, but we need these new
    // ones to make sure EchoedRow is used; otherwise it won't be, because the rows could be purged.
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      RowMutation rm = new RowMutation(KEYSPACE1, key.key);
      rm.add(
          "Standard2",
          ByteBufferUtil.bytes(String.valueOf(i)),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          i);
      rm.apply();
    }
    cfs.forceBlockingFlush();
    SSTableReader tmpSSTable = null;
    for (SSTableReader sstable : cfs.getSSTables())
      if (!toCompact.contains(sstable)) tmpSSTable = sstable;
    assert tmpSSTable != null;

    // Force compaction on first sstables. Since each row is in only one sstable, we will be using
    // EchoedRow.
    Util.compact(cfs, toCompact);
    assertEquals(2, cfs.getSSTables().size());

    // Now, we remove the sstable that was just created to force the use of EchoedRow (so that it
    // doesn't hide the problem)
    cfs.markObsolete(Collections.singleton(tmpSSTable), OperationType.UNKNOWN);
    assertEquals(1, cfs.getSSTables().size());

    // Now assert we do have the 4 keys
    assertEquals(4, Util.getRangeSlice(cfs).size());
  }
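  // Util.compact(cfs, sstables) is a test helper that is not shown in this listing. A plausible
  // sketch, assuming the compaction strategy exposes getUserDefinedTask(...) for compacting
  // exactly the given sstables (the signature and the gcBefore handling are assumptions):
  public static void compact(ColumnFamilyStore cfs, Collection<SSTableReader> sstables) {
    int gcBefore = (int) (System.currentTimeMillis() / 1000); // treat tombstones as gc-able "now"
    AbstractCompactionTask task = cfs.getCompactionStrategy().getUserDefinedTask(sstables, gcBefore);
    assert task != null : "no compaction task for " + sstables;
    task.execute(null); // run synchronously, as testParallelLeveledCompaction below does
  }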
  private void testDontPurgeAccidentaly(String k, String cfname)
      throws IOException, ExecutionException, InterruptedException {
    // This test catches the regression of CASSANDRA-2786
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);

    // disable compaction while flushing
    cfs.clearUnsafe();
    cfs.disableAutoCompaction();

    // Add test row
    DecoratedKey key = Util.dk(k);
    RowMutation rm = new RowMutation(KEYSPACE1, key.key);
    rm.add(
        cfname,
        CompositeType.build(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        0);
    rm.apply();

    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesBefore = cfs.getSSTables();

    QueryFilter filter = QueryFilter.getIdentityFilter(key, cfname, System.currentTimeMillis());
    assert !(cfs.getColumnFamily(filter).getColumnCount() == 0);

    // Remove key
    rm = new RowMutation(KEYSPACE1, key.key);
    rm.delete(cfname, 2);
    rm.apply();

    ColumnFamily cf = cfs.getColumnFamily(filter);
    assert cf == null || cf.getColumnCount() == 0 : "should be empty: " + cf;

    // Sleep one second so that the removal is indeed purgeable even with gcgrace == 0
    Thread.sleep(1000);

    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesAfter = cfs.getSSTables();
    Collection<SSTableReader> toCompact = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstablesAfter)
      if (!sstablesBefore.contains(sstable)) toCompact.add(sstable);

    Util.compact(cfs, toCompact);

    cf = cfs.getColumnFamily(filter);
    assert cf == null || cf.getColumnCount() == 0 : "should be empty: " + cf;
  }
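  // testDontPurgeAccidentaly(...) is presumably driven by small @Test wrappers that supply a key
  // and a column family with a composite comparator; a hedged sketch with placeholder arguments:
  @Test
  public void testDontPurgeAccidentalySketch() throws Exception {
    testDontPurgeAccidentaly("test1", "Super1"); // "Super1" stands in for any CF with a composite comparator
  }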
  public ColumnFamilyStore testSingleSSTableCompaction(String strategyClassName) throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
    store.clearUnsafe();
    store.metadata.gcGraceSeconds(1);
    store.setCompactionStrategyClass(strategyClassName);

    // disable compaction while flushing
    store.disableAutoCompaction();

    long timestamp = System.currentTimeMillis();
    for (int i = 0; i < 10; i++) {
      DecoratedKey key = Util.dk(Integer.toString(i));
      RowMutation rm = new RowMutation(KEYSPACE1, key.key);
      for (int j = 0; j < 10; j++)
        rm.add(
            "Standard1",
            ByteBufferUtil.bytes(Integer.toString(j)),
            ByteBufferUtil.EMPTY_BYTE_BUFFER,
            timestamp,
            j > 0
                ? 3
                : 0); // let the first column never expire, since deleting all columns does not
                      // produce an sstable
      rm.apply();
    }
    store.forceBlockingFlush();
    assertEquals(1, store.getSSTables().size());
    long originalSize = store.getSSTables().iterator().next().uncompressedLength();

    // wait enough to force single compaction
    TimeUnit.SECONDS.sleep(5);

    // enable compaction, submit background and wait for it to complete
    store.enableAutoCompaction();
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    while (CompactionManager.instance.getPendingTasks() > 0
        || CompactionManager.instance.getActiveCompactions() > 0) TimeUnit.SECONDS.sleep(1);

    // and the sstable with expired ttl columns should have been compacted
    assertEquals(1, store.getSSTables().size());
    long size = store.getSSTables().iterator().next().uncompressedLength();
    assertTrue("should be less than " + originalSize + ", but was " + size, size < originalSize);

    // make sure max timestamp of compacted sstables is recorded properly after compaction.
    assertMaxTimestamp(store, timestamp);

    return store;
  }
  @Test
  public void testCompactionLog() throws Exception {
    SystemKeyspace.discardCompactionsInProgress();

    String cf = "Standard4";
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(cf);
    SchemaLoader.insertData(KEYSPACE1, cf, 0, 1);
    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstables = cfs.getSSTables();
    assertFalse(sstables.isEmpty());
    Set<Integer> generations =
        Sets.newHashSet(
            Iterables.transform(
                sstables,
                new Function<SSTableReader, Integer>() {
                  public Integer apply(SSTableReader sstable) {
                    return sstable.descriptor.generation;
                  }
                }));
    UUID taskId = SystemKeyspace.startCompaction(cfs, sstables);
    Map<Pair<String, String>, Map<Integer, UUID>> compactionLogs =
        SystemKeyspace.getUnfinishedCompactions();
    Set<Integer> unfinishedCompactions = compactionLogs.get(Pair.create(KEYSPACE1, cf)).keySet();
    assertTrue(unfinishedCompactions.containsAll(generations));

    SystemKeyspace.finishCompaction(taskId);
    compactionLogs = SystemKeyspace.getUnfinishedCompactions();
    assertFalse(compactionLogs.containsKey(Pair.create(KEYSPACE1, cf)));
  }
  @Test
  public void testCheckForExpiredSSTableBlockers() throws InterruptedException {
    String KEYSPACE1 = "Keyspace1";
    ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    cfs.metadata.gcGraceSeconds(0);

    RowMutation rm = new RowMutation(KEYSPACE1, Util.dk("test").key);
    rm.add(
        "Standard1",
        ByteBufferUtil.bytes("col1"),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        System.currentTimeMillis());
    rm.applyUnsafe();
    cfs.forceBlockingFlush();
    SSTableReader blockingSSTable = cfs.getSSTables().iterator().next();
    for (int i = 0; i < 10; i++) {
      rm = new RowMutation(KEYSPACE1, Util.dk("test").key);
      rm.delete("Standard1", System.currentTimeMillis());
      rm.applyUnsafe();
      cfs.forceBlockingFlush();
    }
    Multimap<SSTableReader, SSTableReader> blockers =
        SSTableExpiredBlockers.checkForExpiredSSTableBlockers(
            cfs.getSSTables(), (int) (System.currentTimeMillis() / 1000) + 100);
    assertEquals(1, blockers.keySet().size());
    assertTrue(blockers.keySet().contains(blockingSSTable));
    assertEquals(10, blockers.get(blockingSSTable).size());
  }
  @Test
  public void testSuperColumnTombstones()
      throws IOException, ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Super1");
    cfs.disableAutoCompaction();

    DecoratedKey key = Util.dk("tskey");
    ByteBuffer scName = ByteBufferUtil.bytes("TestSuperColumn");

    // a subcolumn
    RowMutation rm = new RowMutation(KEYSPACE1, key.key);
    rm.add(
        "Super1",
        CompositeType.build(scName, ByteBufferUtil.bytes(0)),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        FBUtilities.timestampMicros());
    rm.apply();
    cfs.forceBlockingFlush();

    // shadow the subcolumn with a supercolumn tombstone
    rm = new RowMutation(KEYSPACE1, key.key);
    rm.deleteRange(
        "Super1",
        SuperColumns.startOf(scName),
        SuperColumns.endOf(scName),
        FBUtilities.timestampMicros());
    rm.apply();
    cfs.forceBlockingFlush();

    CompactionManager.instance.performMaximal(cfs);
    assertEquals(1, cfs.getSSTables().size());

    // check that the shadowed column is gone
    SSTableReader sstable = cfs.getSSTables().iterator().next();
    Range keyRange =
        new Range<RowPosition>(key, sstable.partitioner.getMinimumToken().maxKeyBound());
    SSTableScanner scanner = sstable.getScanner(DataRange.forKeyRange(keyRange));
    OnDiskAtomIterator iter = scanner.next();
    assertEquals(key, iter.getKey());
    assert iter.next() instanceof RangeTombstone;
    assert !iter.hasNext();
  }
  /**
   * Writes out a bunch of mutations for a single column family.
   *
   * @param mutations A group of Mutations for the same keyspace and column family.
   * @return The ColumnFamilyStore that was used.
   */
  public static ColumnFamilyStore writeColumnFamily(List<Mutation> mutations) {
    IMutation first = mutations.get(0);
    String keyspaceName = first.getKeyspaceName();
    UUID cfid = first.getColumnFamilyIds().iterator().next();

    for (Mutation rm : mutations) rm.applyUnsafe();

    ColumnFamilyStore store = Keyspace.open(keyspaceName).getColumnFamilyStore(cfid);
    store.forceBlockingFlush();
    return store;
  }
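  // A hypothetical usage of writeColumnFamily(...): build a few mutations against one column
  // family and flush them in a single call (the keyspace and CF names are placeholders).
  @Test
  public void writeColumnFamilyUsageSketch() {
    List<Mutation> mutations = new ArrayList<Mutation>();
    for (int i = 0; i < 3; i++) {
      Mutation rm = new Mutation(KEYSPACE1, Util.dk("key" + i).getKey());
      rm.add(
          "Standard1",
          Util.cellname("col"),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          System.currentTimeMillis());
      mutations.add(rm);
    }
    ColumnFamilyStore store = writeColumnFamily(mutations);
    // the mutations were applied and flushed, so the store has at least one sstable now
    assertFalse(store.getSSTables().isEmpty());
  }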
  @Test
  public void testUserDefinedCompaction() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    final String cfname = "Standard3"; // use a clean CF (no sstables yet)
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    final int ROWS_PER_SSTABLE = 10;
    for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      Mutation rm = new Mutation(KEYSPACE1, key.getKey());
      rm.add(
          cfname,
          Util.cellname("col"),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          System.currentTimeMillis());
      rm.applyUnsafe();
    }
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstables = cfs.getSSTables();

    assertEquals(1, sstables.size());
    SSTableReader sstable = sstables.iterator().next();

    int prevGeneration = sstable.descriptor.generation;
    String file = new File(sstable.descriptor.filenameFor(Component.DATA)).getAbsolutePath();
    // submit user defined compaction on flushed sstable
    CompactionManager.instance.forceUserDefinedCompaction(file);
    // wait until user defined compaction finishes
    do {
      Thread.sleep(100);
    } while (CompactionManager.instance.getPendingTasks() > 0
        || CompactionManager.instance.getActiveCompactions() > 0);
    // CF should have only one sstable with generation number advanced
    sstables = cfs.getSSTables();
    assertEquals(1, sstables.size());
    assertEquals(prevGeneration + 1, sstables.iterator().next().descriptor.generation);
  }
  public List<Row> fetchPage(int pageSize)
      throws RequestValidationException, RequestExecutionException {
    assert command.filter.countCQL3Rows() || command.filter.columns.size() <= pageSize;

    if (isExhausted()) {
      return Collections.<Row>emptyList();
    }

    queried = true;

    return localQuery
        ? Collections.singletonList(command.getRow(Keyspace.open(command.ksName)))
        : StorageProxy.read(Collections.<ReadCommand>singletonList(command), consistencyLevel);
  }
  @Test
  public void testNoExpire() throws ExecutionException, InterruptedException {
    ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
    cfs.disableAutoCompaction();
    cfs.metadata.gcGraceSeconds(0);
    long timestamp = System.currentTimeMillis();
    RowMutation rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col7"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);

    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);
    rm.apply();
    cfs.forceBlockingFlush();
    rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col3"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);
    rm.apply();
    cfs.forceBlockingFlush();
    DecoratedKey noTTLKey = Util.dk("nottl");
    rm = new RowMutation("Keyspace1", noTTLKey.key);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col311"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp);
    rm.apply();
    cfs.forceBlockingFlush();
    Thread.sleep(2000); // wait for ttl to expire
    assertEquals(4, cfs.getSSTables().size());
    cfs.enableAutoCompaction(true);
    assertEquals(1, cfs.getSSTables().size());
    SSTableReader sstable = cfs.getSSTables().iterator().next();
    SSTableScanner scanner = sstable.getScanner(DataRange.allData(sstable.partitioner));
    assertTrue(scanner.hasNext());
    while (scanner.hasNext()) {
      OnDiskAtomIterator iter = scanner.next();
      assertEquals(noTTLKey, iter.getKey());
    }
  }
  @Test
  public void testAggressiveFullyExpired() {
    String KEYSPACE1 = "Keyspace1";
    ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
    cfs.disableAutoCompaction();
    cfs.metadata.gcGraceSeconds(0);

    DecoratedKey ttlKey = Util.dk("ttl");
    RowMutation rm = new RowMutation("Keyspace1", ttlKey.key);
    rm.add("Standard1", ByteBufferUtil.bytes("col1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 1, 1);
    rm.add("Standard1", ByteBufferUtil.bytes("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 3, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KEYSPACE1, ttlKey.key);
    rm.add("Standard1", ByteBufferUtil.bytes("col1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2, 1);
    rm.add("Standard1", ByteBufferUtil.bytes("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 5, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KEYSPACE1, ttlKey.key);
    rm.add("Standard1", ByteBufferUtil.bytes("col1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 4, 1);
    rm.add("Standard1", ByteBufferUtil.bytes("shadow"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 7, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    rm = new RowMutation(KEYSPACE1, ttlKey.key);
    rm.add("Standard1", ByteBufferUtil.bytes("shadow"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 6, 3);
    rm.add("Standard1", ByteBufferUtil.bytes("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 8, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    Set<SSTableReader> sstables = Sets.newHashSet(cfs.getSSTables());
    int now = (int) (System.currentTimeMillis() / 1000);
    int gcBefore = now + 2;
    Set<SSTableReader> expired =
        CompactionController.getFullyExpiredSSTables(
            cfs, sstables, Collections.EMPTY_SET, gcBefore);
    assertEquals(2, expired.size());

    cfs.clearUnsafe();
  }
  @Test
  public void testSimpleExpire() throws ExecutionException, InterruptedException {
    ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
    cfs.disableAutoCompaction();
    cfs.metadata.gcGraceSeconds(0);
    long timestamp = System.currentTimeMillis();
    RowMutation rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col7"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);

    rm.apply();
    cfs.forceBlockingFlush();

    rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);
    rm.apply();
    cfs.forceBlockingFlush();
    rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
    rm.add(
        "Standard1", ByteBufferUtil.bytes("col3"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1);
    rm.apply();
    cfs.forceBlockingFlush();
    rm = new RowMutation("Keyspace1", Util.dk("ttl").key);
    rm.add(
        "Standard1",
        ByteBufferUtil.bytes("col311"),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        timestamp,
        1);
    rm.apply();

    cfs.forceBlockingFlush();
    Thread.sleep(2000); // wait for ttl to expire
    assertEquals(4, cfs.getSSTables().size());
    cfs.enableAutoCompaction(true);
    assertEquals(0, cfs.getSSTables().size());
  }
 public static ColumnFamily getColumnFamily(Keyspace keyspace, DecoratedKey key, String cfName) {
   ColumnFamilyStore cfStore = keyspace.getColumnFamilyStore(cfName);
   assert cfStore != null : "Table " + cfName + " has not been defined";
   return cfStore.getColumnFamily(
       QueryFilter.getIdentityFilter(key, cfName, System.currentTimeMillis()));
 }
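 // A hypothetical usage of the getColumnFamily(...) helper above: read back a previously written
 // row and assert it is non-empty (the key and CF name are placeholders).
 public static void assertRowPresent(Keyspace keyspace, String k, String cfName) {
   ColumnFamily cf = getColumnFamily(keyspace, Util.dk(k), cfName);
   assert cf != null && cf.getColumnCount() > 0 : "expected a non-empty row for key " + k;
 }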
  @Test
  @Ignore("making ranges based on the keys, not on the tokens")
  public void testNeedsCleanup() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("CF_STANDARD1");
    store.clearUnsafe();

    // disable compaction while flushing
    store.disableAutoCompaction();

    // write three groups of 9 keys: 001, 002, ... 008, 009
    //                               101, 102, ... 108, 109
    //                               201, 202, ... 208, 209
    for (int i = 1; i < 10; i++) {
      insertRowWithKey(i);
      insertRowWithKey(i + 100);
      insertRowWithKey(i + 200);
    }
    store.forceBlockingFlush();

    assertEquals(1, store.getSSTables().size());
    SSTableReader sstable = store.getSSTables().iterator().next();

    // contiguous range spans all data
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 209)));
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 210)));

    // separate ranges span all data
    assertFalse(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                100, 109,
                200, 209)));
    assertFalse(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 109,
                200, 210)));
    assertFalse(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                100, 210)));

    // one range is missing completely
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                100, 109,
                200, 209)));
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                200, 209)));
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                100, 109)));

    // the beginning of one range is missing
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                1, 9,
                100, 109,
                200, 209)));
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                101, 109,
                200, 209)));
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                100, 109,
                201, 209)));

    // the end of one range is missing
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 8,
                100, 109,
                200, 209)));
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                100, 108,
                200, 209)));
    assertTrue(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 9,
                100, 109,
                200, 208)));

    // some ranges don't contain any data
    assertFalse(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 0,
                0, 9,
                50, 51,
                100, 109,
                150, 199,
                200, 209,
                300, 301)));
    // same case, but with a middle range not covering some of the existing data
    assertFalse(
        CompactionManager.needsCleanup(
            sstable,
            makeRanges(
                0, 0,
                0, 9,
                50, 51,
                100, 103,
                150, 199,
                200, 209,
                300, 301)));
  }
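  // The insertRowWithKey(...) and makeRanges(...) helpers used by testNeedsCleanup are not shown
  // in this listing. A hedged sketch of plausible implementations, assuming keys are written as
  // zero-padded three-digit strings (001..209, matching the comment above) and that ranges are
  // built directly from those key bytes via BytesToken (hence the @Ignore note about making
  // ranges from the keys rather than the tokens):
  private static void insertRowWithKey(int key) {
    DecoratedKey decoratedKey = Util.dk(String.format("%03d", key));
    RowMutation rm = new RowMutation(KEYSPACE1, decoratedKey.key);
    rm.add(
        CF_STANDARD1,
        ByteBufferUtil.bytes("col"),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        System.currentTimeMillis());
    rm.apply();
  }

  private static Collection<Range<Token>> makeRanges(int... keys) {
    Collection<Range<Token>> ranges = new ArrayList<Range<Token>>(keys.length / 2);
    for (int i = 0; i + 1 < keys.length; i += 2)
      ranges.add(
          new Range<Token>(
              new BytesToken(String.format("%03d", keys[i]).getBytes()),
              new BytesToken(String.format("%03d", keys[i + 1]).getBytes())));
    return ranges;
  }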
  public static void main(String args[]) throws IOException {
    Options options = Options.parseArgs(args);
    try {
      // load keyspace descriptions.
      DatabaseDescriptor.loadSchemas(false);

      if (Schema.instance.getCFMetaData(options.keyspace, options.cf) == null)
        throw new IllegalArgumentException(
            String.format("Unknown keyspace/columnFamily %s.%s", options.keyspace, options.cf));

      Keyspace keyspace = Keyspace.openWithoutSSTables(options.keyspace);
      ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(options.cf);

      OutputHandler handler = new OutputHandler.SystemOutput(false, options.debug);
      Directories.SSTableLister lister = cfs.directories.sstableLister();
      if (options.snapshot != null) lister.onlyBackups(true).snapshots(options.snapshot);
      else lister.includeBackups(false);

      Collection<SSTableReader> readers = new ArrayList<SSTableReader>();

      // Upgrade sstables
      for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
        Set<Component> components = entry.getValue();
        if (!components.contains(Component.DATA) || !components.contains(Component.PRIMARY_INDEX))
          continue;

        try {
          SSTableReader sstable =
              SSTableReader.openNoValidation(entry.getKey(), components, cfs.metadata);
          if (sstable.descriptor.version.equals(Descriptor.Version.CURRENT)) continue;
          readers.add(sstable);
        } catch (Exception e) {
          JVMStabilityInspector.inspectThrowable(e);
          System.err.println(String.format("Error Loading %s: %s", entry.getKey(), e.getMessage()));
          if (options.debug) e.printStackTrace(System.err);

          continue;
        }
      }

      int numSSTables = readers.size();
      handler.output("Found " + numSSTables + " sstables that need upgrading.");

      for (SSTableReader sstable : readers) {
        try {
          Upgrader upgrader = new Upgrader(cfs, sstable, handler);
          upgrader.upgrade();

          if (!options.keepSource) {
            // Remove the sstable (it's been copied by upgrade)
            System.out.format("Deleting table %s.%n", sstable.descriptor.baseFilename());
            sstable.markObsolete();
            sstable.selfRef().release();
          }
        } catch (Exception e) {
          System.err.println(String.format("Error upgrading %s: %s", sstable, e.getMessage()));
          if (options.debug) e.printStackTrace(System.err);
        }
      }
      CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
      SSTableDeletingTask.waitForDeletions();
      System.exit(0);
    } catch (Exception e) {
      System.err.println(e.getMessage());
      if (options.debug) e.printStackTrace(System.err);
      System.exit(1);
    }
  }
  @Test
  public void testRangeTombstones() throws IOException, ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");
    cfs.clearUnsafe();

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    final CFMetaData cfmeta = cfs.metadata;
    Directories dir = cfs.directories;

    ArrayList<DecoratedKey> keys = new ArrayList<DecoratedKey>();

    for (int i = 0; i < 4; i++) {
      keys.add(Util.dk("" + i));
    }

    ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfmeta);
    cf.addColumn(Util.column("01", "a", 1)); // this must not resurrect
    cf.addColumn(Util.column("a", "a", 3));
    cf.deletionInfo()
        .add(
            new RangeTombstone(
                Util.cellname("0"),
                Util.cellname("b"),
                2,
                (int) (System.currentTimeMillis() / 1000)),
            cfmeta.comparator);

    SSTableWriter writer =
        new SSTableWriter(
            cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables()),
            0,
            0,
            cfs.metadata,
            StorageService.getPartitioner(),
            new MetadataCollector(cfs.metadata.comparator));

    writer.append(Util.dk("0"), cf);
    writer.append(Util.dk("1"), cf);
    writer.append(Util.dk("3"), cf);

    cfs.addSSTable(writer.closeAndOpenReader());
    writer =
        new SSTableWriter(
            cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables()),
            0,
            0,
            cfs.metadata,
            StorageService.getPartitioner(),
            new MetadataCollector(cfs.metadata.comparator));

    writer.append(Util.dk("0"), cf);
    writer.append(Util.dk("1"), cf);
    writer.append(Util.dk("2"), cf);
    writer.append(Util.dk("3"), cf);
    cfs.addSSTable(writer.closeAndOpenReader());

    Collection<SSTableReader> toCompact = cfs.getSSTables();
    assert toCompact.size() == 2;

    // Force compaction on first sstables. Since each row is in only one sstable, we will be using
    // EchoedRow.
    Util.compact(cfs, toCompact);
    assertEquals(1, cfs.getSSTables().size());

    // Now assert we do have the 4 keys
    assertEquals(4, Util.getRangeSlice(cfs).size());

    ArrayList<DecoratedKey> k = new ArrayList<DecoratedKey>();
    for (Row r : Util.getRangeSlice(cfs)) {
      k.add(r.key);
      assertEquals(ByteBufferUtil.bytes("a"), r.cf.getColumn(Util.cellname("a")).value());
      assertNull(r.cf.getColumn(Util.cellname("01")));
      assertEquals(3, r.cf.getColumn(Util.cellname("a")).timestamp());
    }

    for (SSTableReader sstable : cfs.getSSTables()) {
      StatsMetadata stats = sstable.getSSTableMetadata();
      assertEquals(ByteBufferUtil.bytes("0"), stats.minColumnNames.get(0));
      assertEquals(ByteBufferUtil.bytes("b"), stats.maxColumnNames.get(0));
    }

    assertEquals(keys, k);
  }
  @Test
  public void testParallelLeveledCompaction() throws Exception {
    String ksname = "Keyspace1";
    String cfname = "StandardLeveled";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(cfname);
    store.disableAutoCompaction();

    LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) store.getCompactionStrategy();

    ByteBuffer value =
        ByteBuffer.wrap(new byte[100 * 1024]); // 100 KB value, make it easy to have multiple files

    // Enough data to have a level 1 and 2
    int rows = 128;
    int columns = 10;

    // Adds enough data to trigger multiple sstable per level
    for (int r = 0; r < rows; r++) {
      DecoratedKey key = Util.dk(String.valueOf(r));
      RowMutation rm = new RowMutation(ksname, key.key);
      for (int c = 0; c < columns; c++) {
        rm.add(cfname, ByteBufferUtil.bytes("column" + c), value, 0);
      }
      rm.apply();
      store.forceBlockingFlush();
    }

    // Execute LCS in parallel
    ExecutorService executor =
        new ThreadPoolExecutor(
            4, 4, Long.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>());
    List<Runnable> tasks = new ArrayList<Runnable>();
    while (true) {
      while (true) {
        final AbstractCompactionTask t = lcs.getMaximalTask(Integer.MIN_VALUE);
        if (t == null) break;
        tasks.add(
            new Runnable() {
              public void run() {
                t.execute(null);
              }
            });
      }
      if (tasks.isEmpty()) break;

      List<Future<?>> futures = new ArrayList<Future<?>>(tasks.size());
      for (Runnable r : tasks) futures.add(executor.submit(r));
      FBUtilities.waitOnFutures(futures);

      tasks.clear();
    }

    // Assert all SSTables are lined up correctly.
    LeveledManifest manifest = lcs.manifest;
    int levels = manifest.getLevelCount();
    for (int level = 0; level < levels; level++) {
      List<SSTableReader> sstables = manifest.getLevel(level);
      // score check
      assert (double) SSTable.getTotalBytes(sstables) / manifest.maxBytesForLevel(level) < 1.00;
      // overlap check for levels greater than 0
      if (level > 0) {
        for (SSTableReader sstable : sstables) {
          Set<SSTableReader> overlaps = LeveledManifest.overlapping(sstable, sstables);
          assert overlaps.size() == 1 && overlaps.contains(sstable);
        }
      }
    }
    for (SSTableReader sstable : store.getSSTables()) {
      // the reader's in-memory level should match the level recorded in the sstable metadata
      assert sstable.getSSTableLevel() == sstable.getSSTableMetadata().sstableLevel;
    }
  }
 public static ColumnFamilyStore newCFS() {
   return newCFS(ks.getName());
 }
public class MockSchema {
  static {
    Memory offsets = Memory.allocate(4);
    offsets.setInt(0, 0);
    indexSummary =
        new IndexSummary(Murmur3Partitioner.instance, offsets, 0, Memory.allocate(4), 0, 0, 0, 1);
  }

  private static final AtomicInteger id = new AtomicInteger();
  public static final Keyspace ks =
      Keyspace.mockKS(KeyspaceMetadata.create("mockks", KeyspaceParams.simpleTransient(1)));

  public static final IndexSummary indexSummary;
  private static final SegmentedFile segmentedFile =
      new BufferedSegmentedFile(
          new ChannelProxy(temp("mocksegmentedfile")), RandomAccessReader.DEFAULT_BUFFER_SIZE, 0);

  public static Memtable memtable(ColumnFamilyStore cfs) {
    return new Memtable(cfs.metadata);
  }

  public static SSTableReader sstable(int generation, ColumnFamilyStore cfs) {
    return sstable(generation, false, cfs);
  }

  public static SSTableReader sstable(int generation, boolean keepRef, ColumnFamilyStore cfs) {
    return sstable(generation, 0, keepRef, cfs);
  }

  public static SSTableReader sstable(int generation, int size, ColumnFamilyStore cfs) {
    return sstable(generation, size, false, cfs);
  }

  public static SSTableReader sstable(
      int generation, int size, boolean keepRef, ColumnFamilyStore cfs) {
    Descriptor descriptor =
        new Descriptor(
            cfs.getDirectories().getDirectoryForNewSSTables(),
            cfs.keyspace.getName(),
            cfs.getColumnFamilyName(),
            generation);
    Set<Component> components =
        ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    for (Component component : components) {
      File file = new File(descriptor.filenameFor(component));
      try {
        file.createNewFile();
      } catch (IOException e) {
        // ignored: the mock reader only needs the component file to exist; real contents are not required
      }
    }
    if (size > 0) {
      try {
        File file = new File(descriptor.filenameFor(Component.DATA));
        try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
          raf.setLength(size);
        }
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
    SerializationHeader header = SerializationHeader.make(cfs.metadata, Collections.emptyList());
    StatsMetadata metadata =
        (StatsMetadata)
            new MetadataCollector(cfs.metadata.comparator)
                .finalizeMetadata(
                    cfs.metadata.partitioner.getClass().getCanonicalName(), 0.01f, -1, header)
                .get(MetadataType.STATS);
    SSTableReader reader =
        SSTableReader.internalOpen(
            descriptor,
            components,
            cfs.metadata,
            segmentedFile.sharedCopy(),
            segmentedFile.sharedCopy(),
            indexSummary.sharedCopy(),
            new AlwaysPresentFilter(),
            1L,
            metadata,
            SSTableReader.OpenReason.NORMAL,
            header);
    reader.first = reader.last = readerBounds(generation);
    if (!keepRef) reader.selfRef().release();
    return reader;
  }

  public static ColumnFamilyStore newCFS() {
    return newCFS(ks.getName());
  }

  public static ColumnFamilyStore newCFS(String ksname) {
    String cfname = "mockcf" + (id.incrementAndGet());
    CFMetaData metadata = newCFMetaData(ksname, cfname);
    return new ColumnFamilyStore(ks, cfname, 0, metadata, new Directories(metadata), false, false);
  }

  private static CFMetaData newCFMetaData(String ksname, String cfname) {
    CFMetaData metadata =
        CFMetaData.Builder.create(ksname, cfname)
            .addPartitionKey("key", UTF8Type.instance)
            .addClusteringColumn("col", UTF8Type.instance)
            .addRegularColumn("value", UTF8Type.instance)
            .withPartitioner(Murmur3Partitioner.instance)
            .build();
    metadata.caching(CachingParams.CACHE_NOTHING);
    return metadata;
  }

  public static BufferDecoratedKey readerBounds(int generation) {
    return new BufferDecoratedKey(
        new Murmur3Partitioner.LongToken(generation), ByteBufferUtil.EMPTY_BYTE_BUFFER);
  }

  private static File temp(String id) {
    try {
      File file = File.createTempFile(id, "tmp");
      file.deleteOnExit();
      return file;
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  public static void cleanup() {
    // clean up the data directories, which are laid out as <data dir>/<keyspace>/<data files>
    for (String dirName : DatabaseDescriptor.getAllDataFileLocations()) {
      File dir = new File(dirName);
      if (!dir.exists()) continue;
      String[] children = dir.list();
      for (String child : children) FileUtils.deleteRecursive(new File(dir, child));
    }
  }
}
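// A hypothetical usage sketch for MockSchema: create a mock ColumnFamilyStore and a couple of
// synthetic sstable readers without going through the real write path (names are placeholders).
class MockSchemaUsageSketch {
  @Test
  public void createMockSSTables() {
    ColumnFamilyStore cfs = MockSchema.newCFS();
    SSTableReader empty = MockSchema.sstable(1, cfs); // generation 1, zero-length data file
    SSTableReader sized = MockSchema.sstable(2, 1024, true, cfs); // 1 KB data file, self-ref kept
    assertEquals(1, empty.descriptor.generation);
    assertEquals(2, sized.descriptor.generation);
    sized.selfRef().release(); // release the reference we asked sstable(...) to keep
    MockSchema.cleanup(); // delete the files created under the configured data directories
  }
}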
  @Test
  public void testUncheckedTombstoneSizeTieredCompaction() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();
    store.metadata.gcGraceSeconds(1);
    store.metadata.compactionStrategyOptions.put("tombstone_compaction_interval", "1");
    store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "false");
    store.reload();
    store.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getName());

    // disable compaction while flushing
    store.disableAutoCompaction();

    // Populate sstable1 with keys [0..9]
    populate(KEYSPACE1, CF_STANDARD1, 0, 9, 3); // ttl=3s
    store.forceBlockingFlush();

    // Populate sstable2 with keys [10..19] (keys do not overlap with sstable1)
    long timestamp2 = populate(KEYSPACE1, CF_STANDARD1, 10, 19, 3); // ttl=3s
    store.forceBlockingFlush();

    assertEquals(2, store.getSSTables().size());

    Iterator<SSTableReader> it = store.getSSTables().iterator();
    long originalSize1 = it.next().uncompressedLength();
    long originalSize2 = it.next().uncompressedLength();

    // wait enough to force single compaction
    TimeUnit.SECONDS.sleep(5);

    // enable compaction, submit background and wait for it to complete
    store.enableAutoCompaction();
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    while (CompactionManager.instance.getPendingTasks() > 0
        || CompactionManager.instance.getActiveCompactions() > 0) TimeUnit.SECONDS.sleep(1);

    // even though both sstables were candidates for tombstone compaction,
    // it was not executed because their token ranges overlap
    assertEquals(2, store.getSSTables().size());
    it = store.getSSTables().iterator();
    long newSize1 = it.next().uncompressedLength();
    long newSize2 = it.next().uncompressedLength();
    assertEquals(
        "candidate sstable should not be tombstone-compacted because its key range overlaps with the other sstable",
        originalSize1,
        newSize1);
    assertEquals(
        "candidate sstable should not be tombstone-compacted because its key range overlaps with the other sstable",
        originalSize2,
        newSize2);

    // now let's enable the magic property
    store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "true");
    store.reload();

    // submit background task again and wait for it to complete
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    while (CompactionManager.instance.getPendingTasks() > 0
        || CompactionManager.instance.getActiveCompactions() > 0) TimeUnit.SECONDS.sleep(1);

    // we still have 2 sstables, since they were not compacted against each other
    assertEquals(2, store.getSSTables().size());
    it = store.getSSTables().iterator();
    newSize1 = it.next().uncompressedLength();
    newSize2 = it.next().uncompressedLength();
    assertTrue(
        "should be less than " + originalSize1 + ", but was " + newSize1, newSize1 < originalSize1);
    assertTrue(
        "should be less than " + originalSize2 + ", but was " + newSize2, newSize2 < originalSize2);

    // make sure max timestamp of compacted sstables is recorded properly after compaction.
    assertMaxTimestamp(store, timestamp2);
  }