Example #1
 public Collection<SSTableReader> flush() {
   long estimatedSize = estimatedSize();
   Directories.DataDirectory dataDirectory =
       cfs.getDirectories().getWriteableLocation(estimatedSize);
   if (dataDirectory == null)
     throw new RuntimeException("Insufficient disk space to write " + estimatedSize + " bytes");
   File sstableDirectory = cfs.getDirectories().getLocationForDisk(dataDirectory);
   assert sstableDirectory != null : "Flush task is not bound to any disk";
   return writeSortedContents(sstableDirectory);
 }
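A minimal usage sketch, assuming a Memtable reference named `memtable` is in scope (the variable name is hypothetical); `flush()` returns the readers for whatever SSTables the flush produced:

  // Hypothetical caller of the flush() shown above.
  Collection<SSTableReader> flushed = memtable.flush();
  System.out.println("Flush produced " + flushed.size() + " sstable(s)");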
Example #2
  /**
   * Writes out a bunch of mutations for a single column family.
   *
   * @param mutations A group of Mutations for the same keyspace and column family.
   * @return The ColumnFamilyStore that was used.
   */
  public static ColumnFamilyStore writeColumnFamily(List<Mutation> mutations) {
    IMutation first = mutations.get(0);
    String keyspaceName = first.getKeyspaceName();
    UUID cfid = first.getColumnFamilyIds().iterator().next();

    for (Mutation rm : mutations) rm.applyUnsafe();

    ColumnFamilyStore store = Keyspace.open(keyspaceName).getColumnFamilyStore(cfid);
    store.forceBlockingFlush();
    return store;
  }
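A hedged usage sketch, assuming a pre-built List<Mutation> named `mutations` targeting a single keyspace and table (how the mutations are constructed is omitted); after the call the data has been applied and flushed:

  // Hypothetical caller; getSSTables() is the same accessor used in Example #13.
  ColumnFamilyStore cfs = writeColumnFamily(mutations);
  assert !cfs.getSSTables().isEmpty() : "flush should have produced at least one sstable";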
Example #3
  /**
   * Writes out a bunch of mutations for a single column family.
   *
   * @param rms A group of RowMutations for the same table and column family.
   * @return The ColumnFamilyStore that was used.
   */
  public static ColumnFamilyStore writeColumnFamily(List<IMutation> rms)
      throws IOException, ExecutionException, InterruptedException {
    IMutation first = rms.get(0);
    String tablename = first.getTable();
    UUID cfid = first.getColumnFamilyIds().iterator().next();

    for (IMutation rm : rms) rm.apply();

    ColumnFamilyStore store = Table.open(tablename).getColumnFamilyStore(cfid);
    store.forceBlockingFlush();
    return store;
  }
Example #4
  @SuppressWarnings("resource") // log and writer closed by SSTableTxnWriter
  public SSTableTxnWriter createFlushWriter(
      String filename, PartitionColumns columns, EncodingStats stats) {
    // we operate "offline" here, as we expose the resulting reader consciously when done
    // (although we may want to modify this behaviour in future, to encapsulate full flush behaviour
    // in LifecycleTransaction)
    LifecycleTransaction txn = null;
    try {
      txn = LifecycleTransaction.offline(OperationType.FLUSH);
      MetadataCollector sstableMetadataCollector =
          new MetadataCollector(cfs.metadata.comparator)
              .commitLogIntervals(
                  new IntervalSet(commitLogLowerBound.get(), commitLogUpperBound.get()));

      return new SSTableTxnWriter(
          txn,
          cfs.createSSTableMultiWriter(
              Descriptor.fromFilename(filename),
              (long) partitions.size(),
              ActiveRepairService.UNREPAIRED_SSTABLE,
              sstableMetadataCollector,
              new SerializationHeader(true, cfs.metadata, columns, stats),
              txn));
    } catch (Throwable t) {
      if (txn != null) txn.close();
      throw t;
    }
  }
Example #5
  public MemtableUnfilteredPartitionIterator makePartitionIterator(
      final ColumnFilter columnFilter, final DataRange dataRange, final boolean isForThrift) {
    AbstractBounds<PartitionPosition> keyRange = dataRange.keyRange();

    boolean startIsMin = keyRange.left.isMinimum();
    boolean stopIsMin = keyRange.right.isMinimum();

    boolean isBound = keyRange instanceof Bounds;
    boolean includeStart = isBound || keyRange instanceof IncludingExcludingBounds;
    boolean includeStop = isBound || keyRange instanceof Range;
    Map<PartitionPosition, AtomicBTreePartition> subMap;
    if (startIsMin)
      subMap = stopIsMin ? partitions : partitions.headMap(keyRange.right, includeStop);
    else
      subMap =
          stopIsMin
              ? partitions.tailMap(keyRange.left, includeStart)
              : partitions.subMap(keyRange.left, includeStart, keyRange.right, includeStop);

    int minLocalDeletionTime = Integer.MAX_VALUE;

    // avoid iterating over the memtable if we purge all tombstones
    if (cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones())
      minLocalDeletionTime = findMinLocalDeletionTime(subMap.entrySet().iterator());

    final Iterator<Map.Entry<PartitionPosition, AtomicBTreePartition>> iter =
        subMap.entrySet().iterator();

    return new MemtableUnfilteredPartitionIterator(
        cfs, iter, isForThrift, minLocalDeletionTime, columnFilter, dataRange);
  }
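The head/tail/sub-map selection above follows the standard ConcurrentNavigableMap view methods; a self-contained sketch (requires java.util.concurrent.ConcurrentSkipListMap and ConcurrentNavigableMap), with integers standing in for partition positions:

  // Illustration only: the same headMap/tailMap/subMap selection as above.
  static void subMapViews() {
    ConcurrentSkipListMap<Integer, String> partitions = new ConcurrentSkipListMap<>();
    partitions.put(1, "a");
    partitions.put(2, "b");
    partitions.put(3, "c");

    // start bound is the minimum -> only the stop bound restricts the view
    ConcurrentNavigableMap<Integer, String> head = partitions.headMap(2, true);         // {1=a, 2=b}
    // stop bound is the minimum -> only the start bound restricts the view
    ConcurrentNavigableMap<Integer, String> tail = partitions.tailMap(2, true);         // {2=b, 3=c}
    // both bounds set -> two-sided view; inclusiveness follows the bound type
    ConcurrentNavigableMap<Integer, String> sub = partitions.subMap(1, true, 3, false); // {1=a, 2=b}
  }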
Example #6
  @Test
  public void testGetSliceWithCollision() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.clearUnsafe();

    insert("k1", "k2", "k3"); // token = 2
    insert("key1", "key2", "key3"); // token = 4
    insert("longKey1", "longKey2"); // token = 8

    List<Row> rows =
        cfs.getRangeSlice(
            new Bounds<RowPosition>(dk("k2"), dk("key2")), null, new IdentityQueryFilter(), 10000);
    assert rows.size() == 4 : "Expecting 4 keys, got " + rows.size();
    assert rows.get(0).key.key.equals(ByteBufferUtil.bytes("k2"));
    assert rows.get(1).key.key.equals(ByteBufferUtil.bytes("k3"));
    assert rows.get(2).key.key.equals(ByteBufferUtil.bytes("key1"));
    assert rows.get(3).key.key.equals(ByteBufferUtil.bytes("key2"));
  }
Example #7
 public static List<Row> getRangeSlice(ColumnFamilyStore cfs, ByteBuffer superColumn)
     throws IOException, ExecutionException, InterruptedException {
   Token min = StorageService.getPartitioner().getMinimumToken();
   return cfs.getRangeSlice(
       superColumn,
       new Bounds<Token>(min, min).toRowBounds(),
       10000,
       new IdentityQueryFilter(),
       null);
 }
Example #8
  public static List<Row> getRangeSlice(ColumnFamilyStore cfs, ByteBuffer superColumn) {
    IDiskAtomFilter filter =
        superColumn == null
            ? new IdentityQueryFilter()
            : new SliceQueryFilter(
                SuperColumns.startOf(superColumn),
                SuperColumns.endOf(superColumn),
                false,
                Integer.MAX_VALUE);

    Token min = StorageService.getPartitioner().getMinimumToken();
    return cfs.getRangeSlice(Bounds.makeRowBounds(min, min), null, filter, 10000);
  }
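A hedged usage sketch of the helper above, assuming a `cfs` reference is in scope; passing null scans with the identity filter, while a super column name (here the made-up "sc1") restricts the slice to that super column:

  List<Row> allRows = getRangeSlice(cfs, null);
  List<Row> superColumnRows = getRangeSlice(cfs, ByteBufferUtil.bytes("sc1"));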
Example #9
  private Collection<SSTableReader> writeSortedContents(File sstableDirectory) {
    boolean isBatchLogTable =
        cfs.name.equals(SystemKeyspace.BATCHES)
            && cfs.keyspace.getName().equals(SystemKeyspace.NAME);

    logger.debug("Writing {}", Memtable.this.toString());

    Collection<SSTableReader> ssTables;
    try (SSTableTxnWriter writer =
        createFlushWriter(
            cfs.getSSTablePath(sstableDirectory), columnsCollector.get(), statsCollector.get())) {
      boolean trackContention = logger.isTraceEnabled();
      int heavilyContendedRowCount = 0;
      // (we can't clear out the map as-we-go to free up memory,
      //  since the memtable is being used for queries in the "pending flush" category)
      for (AtomicBTreePartition partition : partitions.values()) {
        // Each batchlog partition is a separate entry in the log. And for an entry, we only do
        // two operations: 1) we insert the entry and 2) we delete it. Further, BL data is
        // strictly local, so we don't need to preserve tombstones for repair. So if both
        // operations are in this memtable (which will almost always be the case if there is no
        // ongoing failure), we can just skip the entry (CASSANDRA-4667).
        if (isBatchLogTable && !partition.partitionLevelDeletion().isLive() && partition.hasRows())
          continue;

        if (trackContention && partition.usePessimisticLocking()) heavilyContendedRowCount++;

        if (!partition.isEmpty()) {
          try (UnfilteredRowIterator iter = partition.unfilteredIterator()) {
            writer.append(iter);
          }
        }
      }

      if (writer.getFilePointer() > 0) {
        logger.debug(
            String.format(
                "Completed flushing %s (%s) for commitlog position %s",
                writer.getFilename(),
                FBUtilities.prettyPrintMemory(writer.getFilePointer()),
                commitLogUpperBound));

        // sstables should contain non-repaired data.
        ssTables = writer.finish(true);
      } else {
        logger.debug(
            "Completed flushing {}; nothing needed to be retained.  Commitlog position was {}",
            writer.getFilename(),
            commitLogUpperBound);
        writer.abort();
        ssTables = Collections.emptyList();
      }

      if (heavilyContendedRowCount > 0)
        logger.trace(
            String.format(
                "High update contention in %d/%d partitions of %s ",
                heavilyContendedRowCount, partitions.size(), Memtable.this.toString()));

      return ssTables;
    }
  }
Example #10
 public static NamesQueryFilter namesFilter(ColumnFamilyStore cfs, String... names) {
   SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
   for (String str : names) s.add(cellname(str));
   return new NamesQueryFilter(s);
 }
Example #11
 public static QueryFilter namesQueryFilter(
     ColumnFamilyStore cfs, DecoratedKey key, CellName... names) {
   SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
   for (CellName n : names) s.add(n);
   return QueryFilter.getNamesFilter(key, cfs.name, s, System.currentTimeMillis());
 }
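A hedged usage sketch, reusing the dk(...) and cellname(...) test helpers seen in the other examples (the key and column names are made up):

  QueryFilter filter = namesQueryFilter(cfs, dk("key1"), cellname("col1"), cellname("col2"));
  ColumnFamily cf = cfs.getColumnFamily(filter);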
Example #12
 public static void compact(ColumnFamilyStore cfs, Collection<SSTableReader> sstables) {
   int gcBefore = cfs.gcBefore(System.currentTimeMillis());
   AbstractCompactionTask task =
       cfs.getCompactionStrategy().getUserDefinedTask(sstables, gcBefore);
   task.execute(null);
 }
Example #13
 public static Future<?> compactAll(ColumnFamilyStore cfs, int gcBefore) {
   List<Descriptor> descriptors = new ArrayList<>();
   for (SSTableReader sstable : cfs.getSSTables()) descriptors.add(sstable.descriptor);
   return CompactionManager.instance.submitUserDefined(cfs, descriptors, gcBefore);
 }
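A hedged usage sketch combining this helper with the gcBefore(...) call from Example #12; get() blocks until the submitted user-defined compaction completes (the caller must handle the checked exceptions Future#get declares):

  int gcBefore = cfs.gcBefore(System.currentTimeMillis());
  compactAll(cfs, gcBefore).get();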
Example #14
 public static ColumnFamily cloneAndRemoveDeleted(ColumnFamily cf, int gcBefore) {
   return ColumnFamilyStore.removeDeleted(cf.cloneMe(), gcBefore);
 }
Example #15
 public static ColumnFamily getColumnFamily(Keyspace keyspace, DecoratedKey key, String cfName) {
   ColumnFamilyStore cfStore = keyspace.getColumnFamilyStore(cfName);
   assert cfStore != null : "Table " + cfName + " has not been defined";
   return cfStore.getColumnFamily(
       QueryFilter.getIdentityFilter(key, cfName, System.currentTimeMillis()));
 }
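A hedged usage sketch, reusing the KEYSPACE and CF constants and the dk(...) helper from Example #6 (the key value is made up):

  ColumnFamily cf = getColumnFamily(Keyspace.open(KEYSPACE), dk("key1"), CF);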
Example #16
 public static ColumnFamily getColumnFamily(Table table, DecoratedKey key, String cfName)
     throws IOException {
   ColumnFamilyStore cfStore = table.getColumnFamilyStore(cfName);
   assert cfStore != null : "Column family " + cfName + " has not been defined";
   return cfStore.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
 }