Example #1
 private static void insertRowWithKey(int key) {
   long timestamp = System.currentTimeMillis();
   DecoratedKey decoratedKey = Util.dk(String.format("%03d", key));
   Mutation rm = new Mutation(KEYSPACE1, decoratedKey.getKey());
   rm.add("CF_STANDARD1", Util.cellname("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, timestamp, 1000);
   rm.applyUnsafe();
 }
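
A minimal usage sketch (an assumption for illustration, not part of the original example): the helper above pairs naturally with a flush, so that all inserted rows land in a single sstable before any compaction is triggered. It assumes the same KEYSPACE1 / "CF_STANDARD1" fixtures used elsewhere in these examples and a column family that starts with no sstables.

  // Hypothetical test snippet: insert keys "000".."009" via insertRowWithKey and flush them.
  @Test
  public void insertAndFlushSketch() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("CF_STANDARD1");
    cfs.disableAutoCompaction(); // keep background compaction out of the way while flushing

    for (int key = 0; key < 10; key++) insertRowWithKey(key);

    cfs.forceBlockingFlush(); // all ten rows end up in one new sstable
    assertEquals(1, cfs.getSSTables().size());
  }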
Example #2
  @Test
  public void testEchoedRow() {
    // This test checks that EchoedRow doesn't skip rows: see CASSANDRA-2653

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    // Insert 4 keys in two sstables. We need the sstables to have at least 2 rows
    // to trigger what was causing CASSANDRA-2653
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      Mutation rm = new Mutation(KEYSPACE1, key.getKey());
      rm.add("Standard2", Util.cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
      rm.applyUnsafe();

      if (i % 2 == 0) cfs.forceBlockingFlush();
    }
    Collection<SSTableReader> toCompact = cfs.getSSTables();
    assertEquals(2, toCompact.size());

    // Reinsert the same keys. We will compact only the previous sstables, but we need the new
    // ones to make sure EchoedRow is used; otherwise it won't be, because a purge could be done
    // instead.
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      Mutation rm = new Mutation(KEYSPACE1, key.getKey());
      rm.add("Standard2", Util.cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
      rm.applyUnsafe();
    }
    cfs.forceBlockingFlush();
    SSTableReader tmpSSTable = null;
    for (SSTableReader sstable : cfs.getSSTables())
      if (!toCompact.contains(sstable)) tmpSSTable = sstable;
    assertNotNull(tmpSSTable);

    // Force compaction on the first sstables. Since each row is in only one sstable, we will be
    // using EchoedRow.
    Util.compact(cfs, toCompact);
    assertEquals(2, cfs.getSSTables().size());

    // Now, we remove the sstable that was just created (it was only there to force the use of
    // EchoedRow), so that it doesn't hide the problem
    cfs.markObsolete(Collections.singleton(tmpSSTable), OperationType.UNKNOWN);
    assertEquals(1, cfs.getSSTables().size());

    // Now assert we do have the 4 keys
    assertEquals(4, Util.getRangeSlice(cfs).size());
  }
Example #3
  private void testDontPurgeAccidentaly(String k, String cfname) throws InterruptedException {
    // This test catches the regression of CASSANDRA-2786
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);

    // disable compaction while flushing
    cfs.clearUnsafe();
    cfs.disableAutoCompaction();

    // Add test row
    DecoratedKey key = Util.dk(k);
    Mutation rm = new Mutation(KEYSPACE1, key.getKey());
    rm.add(
        cfname,
        Util.cellname(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        0);
    rm.applyUnsafe();

    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesBefore = cfs.getSSTables();

    QueryFilter filter = QueryFilter.getIdentityFilter(key, cfname, System.currentTimeMillis());
    assertTrue(cfs.getColumnFamily(filter).hasColumns());

    // Remove key
    rm = new Mutation(KEYSPACE1, key.getKey());
    rm.delete(cfname, 2);
    rm.applyUnsafe();

    ColumnFamily cf = cfs.getColumnFamily(filter);
    assertTrue("should be empty: " + cf, cf == null || !cf.hasColumns());

    // Sleep one second so that the removal is indeed purgeable even with gcgrace == 0
    Thread.sleep(1000);

    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesAfter = cfs.getSSTables();
    Collection<SSTableReader> toCompact = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstablesAfter)
      if (!sstablesBefore.contains(sstable)) toCompact.add(sstable);

    Util.compact(cfs, toCompact);

    cf = cfs.getColumnFamily(filter);
    assertTrue("should be empty: " + cf, cf == null || !cf.hasColumns());
  }
Example #4
 private long populate(String ks, String cf, int startRowKey, int endRowKey, int ttl) {
   long timestamp = System.currentTimeMillis();
   for (int i = startRowKey; i <= endRowKey; i++) {
     DecoratedKey key = Util.dk(Integer.toString(i));
     Mutation rm = new Mutation(ks, key.getKey());
     for (int j = 0; j < 10; j++)
       rm.add(
           cf,
           Util.cellname(Integer.toString(j)),
           ByteBufferUtil.EMPTY_BYTE_BUFFER,
           timestamp,
           j > 0
               ? ttl
                : 0); // let the first column never expire, since deleting all columns
                      // does not produce an sstable
     rm.applyUnsafe();
   }
   return timestamp;
 }
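
A hedged usage sketch for populate(...) (the keyspace/CF names and the short TTL are assumptions chosen for illustration): fill a key range, flush it, let the TTL pass, and run a maximal compaction so that only the never-expiring first column of each row survives.

  // Hypothetical snippet: columns 1..9 of each row carry a 1-second TTL, column 0 never expires.
  ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
  cfs.disableAutoCompaction();

  populate(KEYSPACE1, "Standard1", 0, 9, 1); // ttl = 1 second
  cfs.forceBlockingFlush();

  Thread.sleep(2000);                             // wait until the TTL'd cells have expired
  CompactionManager.instance.performMaximal(cfs); // compaction can now drop the expired cells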
Example #5
  @Test
  public void testSuperColumnTombstones() throws ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Super1");
    cfs.disableAutoCompaction();

    DecoratedKey key = Util.dk("tskey");
    ByteBuffer scName = ByteBufferUtil.bytes("TestSuperColumn");

    // a subcolumn
    Mutation rm = new Mutation(KEYSPACE1, key.getKey());
    rm.add(
        "Super1",
        Util.cellname(scName, ByteBufferUtil.bytes(0)),
        ByteBufferUtil.EMPTY_BYTE_BUFFER,
        FBUtilities.timestampMicros());
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    // shadow the subcolumn with a supercolumn tombstone
    rm = new Mutation(KEYSPACE1, key.getKey());
    rm.deleteRange(
        "Super1",
        SuperColumns.startOf(scName),
        SuperColumns.endOf(scName),
        FBUtilities.timestampMicros());
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    CompactionManager.instance.performMaximal(cfs);
    assertEquals(1, cfs.getSSTables().size());

    // check that the shadowed column is gone
    SSTableReader sstable = cfs.getSSTables().iterator().next();
    Range<RowPosition> keyRange =
        new Range<RowPosition>(key, sstable.partitioner.getMinimumToken().maxKeyBound());
    SSTableScanner scanner = sstable.getScanner(DataRange.forKeyRange(keyRange));
    OnDiskAtomIterator iter = scanner.next();
    assertEquals(key, iter.getKey());
    assertTrue(iter.next() instanceof RangeTombstone);
    assertFalse(iter.hasNext());
  }
Example #6
  @Test
  public void testUserDefinedCompaction() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    final String cfname = "Standard3"; // use a clean (no sstables) CF
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    final int ROWS_PER_SSTABLE = 10;
    for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      Mutation rm = new Mutation(KEYSPACE1, key.getKey());
      rm.add(
          cfname,
          Util.cellname("col"),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          System.currentTimeMillis());
      rm.applyUnsafe();
    }
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstables = cfs.getSSTables();

    assertEquals(1, sstables.size());
    SSTableReader sstable = sstables.iterator().next();

    int prevGeneration = sstable.descriptor.generation;
    String file = new File(sstable.descriptor.filenameFor(Component.DATA)).getAbsolutePath();
    // submit user defined compaction on flushed sstable
    CompactionManager.instance.forceUserDefinedCompaction(file);
    // wait until user defined compaction finishes
    do {
      Thread.sleep(100);
    } while (CompactionManager.instance.getPendingTasks() > 0
        || CompactionManager.instance.getActiveCompactions() > 0);
    // CF should have only one sstable with generation number advanced
    sstables = cfs.getSSTables();
    assertEquals(1, sstables.size());
    assertEquals(prevGeneration + 1, sstables.iterator().next().descriptor.generation);
  }
Example #7
    /**
     * Called (in order) for every row present in the CF. Hashes the row, and adds it to the tree
     * being built.
     *
     * <p>There are four possible cases: 1. Token is greater than range.right (we haven't generated
     * a range for it yet), 2. Token is less than/equal to range.left (the range was valid), 3.
     * Token is contained in the range (the range is in progress), 4. No more invalid ranges exist.
     * A short worked trace of these cases is sketched after this method.
     *
     * <p>TODO: Because we only validate completely empty trees at the moment, we do not bother
     * dealing with case 2, and case 4 should result in an error.
     *
     * <p>Additionally, there is a special case for the minimum token, because although it sorts
     * first, it is contained in the last possible range.
     *
     * @param row The row.
     */
    public void add(AbstractCompactedRow row) {
      assert request.range.contains(row.key.token)
          : row.key.token + " is not contained in " + request.range;
      assert lastKey == null || lastKey.compareTo(row.key) < 0
          : "row " + row.key + " received out of order wrt " + lastKey;
      lastKey = row.key;

      if (range == null) range = ranges.next();

      // generate new ranges as long as case 1 is true
      while (!range.contains(row.key.token)) {
        // add the empty hash, and move to the next range
        range.addHash(EMPTY_ROW);
        range = ranges.next();
      }

      // case 3 must be true: mix in the hashed row
      range.addHash(rowHash(row));
    }
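
A hedged worked trace of the cases handled by add() (the tree ranges and tokens below are invented for illustration and are not taken from the original code):

      // Hypothetical trace, assuming the validator's tree ranges are (0,10], (10,20], (20,30]:
      //
      //   add(row @ token 5)  -> range == null, so range = (0,10]
      //                          (0,10] contains 5    -> range.addHash(rowHash(row))           // case 3
      //   add(row @ token 25) -> (0,10] !contains 25  -> addHash(EMPTY_ROW), range = (10,20]   // case 1
      //                          (10,20] !contains 25 -> addHash(EMPTY_ROW), range = (20,30]   // case 1
      //                          (20,30] contains 25  -> range.addHash(rowHash(row))           // case 3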
Example #8
  /**
   * Retrieves a local subBlock.
   *
   * @param subBlockCFName name of the column family holding the sub-blocks
   * @param blockId row key
   * @param sblockId SubBlock column name
   * @param offset offset inside the sblock
   * @return a local sub-block, or null if none is found locally
   * @throws TException
   */
  private LocalBlock getLocalSubBlock(
      String subBlockCFName, ByteBuffer blockId, ByteBuffer sblockId, int offset)
      throws TException {
    DecoratedKey<Token<?>> decoratedKey =
        new DecoratedKey<Token<?>>(StorageService.getPartitioner().getToken(blockId), blockId);

    Table table = Table.open(cfsKeyspace);
    ColumnFamilyStore sblockStore = table.getColumnFamilyStore(subBlockCFName);

    Collection<SSTableReader> sstables = sblockStore.getSSTables();

    for (SSTableReader sstable : sstables) {

      long position = sstable.getPosition(decoratedKey, Operator.EQ);

      if (position == -1) continue;

      String filename = sstable.descriptor.filenameFor(Component.DATA);
      RandomAccessFile raf = null;
      int mappedLength = -1;
      MappedByteBuffer mappedData = null;
      MappedFileDataInput file = null;
      try {
        raf = new RandomAccessFile(filename, "r");
        assert position < raf.length();

        mappedLength =
            (raf.length() - position) < Integer.MAX_VALUE
                ? (int) (raf.length() - position)
                : Integer.MAX_VALUE;

        mappedData = raf.getChannel().map(FileChannel.MapMode.READ_ONLY, position, mappedLength);

        file = new MappedFileDataInput(mappedData, filename, 0);

        if (file == null) continue;

        // Verify key was found in data file
        DecoratedKey keyInDisk =
            SSTableReader.decodeKey(
                sstable.partitioner, sstable.descriptor, ByteBufferUtil.readWithShortLength(file));
        assert keyInDisk.equals(decoratedKey)
            : String.format("%s != %s in %s", keyInDisk, decoratedKey, file.getPath());

        long rowSize = SSTableReader.readRowSize(file, sstable.descriptor);

        assert rowSize > 0;
        assert rowSize < mappedLength;

        Filter bf = IndexHelper.defreezeBloomFilter(file, sstable.descriptor.usesOldBloomFilter);

        // verify this column is in this version of the row.
        if (!bf.isPresent(sblockId)) continue;

        List<IndexHelper.IndexInfo> indexList = IndexHelper.deserializeIndex(file);

        // we can stop early if bloom filter says none of the
        // columns actually exist -- but,
        // we can't stop before initializing the cf above, in
        // case there's a relevant tombstone
        ColumnFamilySerializer serializer = ColumnFamily.serializer();
        try {
          ColumnFamily cf =
              serializer.deserializeFromSSTableNoColumns(
                  ColumnFamily.create(sstable.metadata), file);

          if (cf.isMarkedForDelete()) continue;

        } catch (Exception e) {
          e.printStackTrace();

          throw new IOException(
              serializer
                  + " failed to deserialize "
                  + sstable.getColumnFamilyName()
                  + " with "
                  + sstable.metadata
                  + " from "
                  + file,
              e);
        }

        Integer sblockLength = null;

        if (indexList == null) sblockLength = seekToSubColumn(sstable.metadata, file, sblockId);
        else sblockLength = seekToSubColumn(sstable.metadata, file, sblockId, indexList);

        if (sblockLength == null || sblockLength < 0) continue;

        int bytesReadFromStart = mappedLength - (int) file.bytesRemaining();

        if (logger.isDebugEnabled())
          logger.debug("BlockLength = " + sblockLength + " Availible " + file.bytesRemaining());

        assert offset <= sblockLength : String.format("%d > %d", offset, sblockLength);

        long dataOffset = position + bytesReadFromStart;

        if (file.bytesRemaining() == 0 || sblockLength == 0) continue;

        return new LocalBlock(file.getPath(), dataOffset + offset, sblockLength - offset);

      } catch (IOException e) {
        throw new TException(e);
      } finally {
        FileUtils.closeQuietly(raf);
      }
    }

    return null;
  }
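
A hedged call-site sketch (the column family name and the block identifiers are made up; only getLocalSubBlock itself comes from the example above): try the local lookup first and fall back to a remote read when no local sstable holds the key.

    // Hypothetical caller: blockId is the row key, sblockId the SubBlock column name.
    ByteBuffer blockId = ByteBufferUtil.bytes("block-0001");
    ByteBuffer sblockId = ByteBufferUtil.bytes("sblock-0001");

    LocalBlock local = getLocalSubBlock("sblocks", blockId, sblockId, 0);
    if (local == null) {
      // the key is not present in any local sstable; a remote (Thrift) read would be needed here
    }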
Example #9
  public void collectReducedColumns(
      ColumnFamily container,
      Iterator<Cell> reducedColumns,
      DecoratedKey key,
      int gcBefore,
      long now) {
    columnCounter = columnCounter(container.getComparator(), now);
    DeletionInfo.InOrderTester tester = container.deletionInfo().inOrderTester(reversed);

    while (reducedColumns.hasNext()) {
      Cell cell = reducedColumns.next();

      if (logger.isTraceEnabled())
        logger.trace(
            "collecting {} of {}: {}",
            columnCounter.live(),
            count,
            cell.getString(container.getComparator()));

      // An expired tombstone will be immediately discarded in memory, and needn't be counted.
      // Nor should any cell shadowed by a range tombstone or a partition tombstone.
      if (cell.getLocalDeletionTime() < gcBefore || !columnCounter.count(cell, tester)) continue;

      if (columnCounter.live() > count) break;

      if (respectTombstoneThresholds()
          && columnCounter.tombstones() > DatabaseDescriptor.getTombstoneFailureThreshold()) {
        Tracing.trace(
            "Scanned over {} tombstones; query aborted (see tombstone_failure_threshold); slices={}",
            DatabaseDescriptor.getTombstoneFailureThreshold(),
            getSlicesInfo(container));

        throw new TombstoneOverwhelmingException(
            columnCounter.tombstones(),
            count,
            container.metadata().ksName,
            container.metadata().cfName,
            container.getComparator().getString(cell.name()),
            getSlicesInfo(container));
      }

      container.appendColumn(cell);
    }

    boolean warnTombstones =
        logger.isWarnEnabled()
            && respectTombstoneThresholds()
            && columnCounter.tombstones() > DatabaseDescriptor.getTombstoneWarnThreshold();
    if (warnTombstones) {
      String msg =
          String.format(
              "Read %d live and %d tombstone cells in %s.%s for key: %1.512s (see tombstone_warn_threshold). %d columns were requested, slices=%1.512s",
              columnCounter.live(),
              columnCounter.tombstones(),
              container.metadata().ksName,
              container.metadata().cfName,
              container.metadata().getKeyValidator().getString(key.getKey()),
              count,
              getSlicesInfo(container));
      ClientWarn.warn(msg);
      logger.warn(msg);
    }
    Tracing.trace(
        "Read {} live and {} tombstone cells{}",
        columnCounter.live(),
        columnCounter.tombstones(),
        warnTombstones ? " (see tombstone_warn_threshold)" : "");
  }
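
A hedged note on the two thresholds referenced above (the ordering below follows the code; the actual limits come from the node's configuration via DatabaseDescriptor):

    // Behaviour of a single read as the scanned-tombstone count grows:
    //   tombstones > tombstone_warn_threshold    -> read succeeds, ClientWarn.warn(...) and logger.warn(...) fire
    //   tombstones > tombstone_failure_threshold -> read is aborted with TombstoneOverwhelmingException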