Example #1
  private Integer seekToSubColumn(
      CFMetaData metadata,
      FileDataInput file,
      ByteBuffer sblockId,
      List<IndexHelper.IndexInfo> indexList)
      throws IOException {
    file.readInt(); // column count

    /* get the various column ranges we have to read */
    AbstractType comparator = metadata.comparator;

    int index = IndexHelper.indexFor(sblockId, indexList, comparator, false);
    if (index == indexList.size()) return null;

    IndexHelper.IndexInfo indexInfo = indexList.get(index);
    if (comparator.compare(sblockId, indexInfo.firstName) < 0) return null;

    FileMark mark = file.mark();

    FileUtils.skipBytesFully(file, indexInfo.offset);

    while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width) {
      Integer dataLength = isSubBlockFound(metadata, file, sblockId);

      if (dataLength == null) return null;

      if (dataLength < 0) continue;

      return dataLength;
    }

    return null;
  }
  private int discardHead(
      ColumnFamily cf,
      int toDiscard,
      ColumnFamily copy,
      Iterator<Column> iter,
      DeletionInfo.InOrderTester tester) {
    ColumnCounter counter = columnCounter();

    List<Column> staticColumns = new ArrayList<>(cfm.staticColumns().size());

    // Discard the first 'toDiscard' live, non-static columns
    while (iter.hasNext()) {
      Column c = iter.next();

      // if it's a static column, don't count it and save it to add to the trimmed results
      ColumnDefinition columnDef = cfm.getColumnDefinitionFromColumnName(c.name());
      if (columnDef != null && columnDef.type == ColumnDefinition.Type.STATIC) {
        staticColumns.add(c);
        continue;
      }

      counter.count(c, tester);

      // once we've discarded the required amount, add the rest
      if (counter.live() > toDiscard) {
        for (Column staticColumn : staticColumns) copy.addColumn(staticColumn);

        copy.addColumn(c);
        while (iter.hasNext()) copy.addColumn(iter.next());
      }
    }
    return Math.min(counter.live(), toDiscard);
  }
  public List<Row> fetchPage(int pageSize)
      throws RequestValidationException, RequestExecutionException {
    if (isExhausted()) return Collections.emptyList();

    int currentPageSize = nextPageSize(pageSize);
    List<Row> rows = filterEmpty(queryNextPage(currentPageSize, consistencyLevel, localQuery));

    if (rows.isEmpty()) {
      logger.debug("Got empty set of rows, considering pager exhausted");
      exhausted = true;
      return Collections.emptyList();
    }

    int liveCount = getPageLiveCount(rows);
    logger.debug("Fetched {} live rows", liveCount);

    // Because SP.getRangeSlice doesn't trim the result (see SP.trim()), liveCount may be greater
    // than what was asked for (currentPageSize). This would throw off the paging logic, so we trim
    // the excess. It's not extremely efficient, but most of the time there should be nothing or
    // very little to trim.
    if (liveCount > currentPageSize) {
      rows = discardLast(rows, liveCount - currentPageSize);
      liveCount = currentPageSize;
    }

    remaining -= liveCount;

    // If we've got less than requested, there is no more query to do (but
    // we still need to return the current page)
    if (liveCount < currentPageSize) {
      logger.debug(
          "Got result ({}) smaller than page size ({}), considering pager exhausted",
          liveCount,
          currentPageSize);
      exhausted = true;
    }

    // If it's not the first query and the first column is the last one returned (likely
    // but not certain since paging can race with deletes/expiration), then remove the
    // first column.
    if (containsPreviousLast(rows.get(0))) {
      rows = discardFirst(rows);
      remaining++;
    }
    // Otherwise, if 'lastWasRecorded', we queried for one more than the page size,
    // so if the page is full, trim the last entry
    else if (lastWasRecorded && !exhausted) {
      // We've asked for one more than necessary
      rows = discardLast(rows);
      remaining++;
    }

    logger.debug("Remaining rows to page: {}", remaining);

    if (!isExhausted()) lastWasRecorded = recordLast(rows.get(rows.size() - 1));

    return rows;
  }
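A usage sketch (not part of the original source): the contract shown above, fetchPage(int) plus isExhausted(), can be driven by a simple accumulation loop. The QueryPager interface name is an assumption about the surrounding type.

  private static List<Row> fetchAll(QueryPager pager, int pageSize)
      throws RequestValidationException, RequestExecutionException {
    // Keep asking for pages until the pager reports exhaustion; each call returns at most
    // pageSize live rows, already trimmed and de-duplicated as described in fetchPage() above.
    List<Row> all = new ArrayList<>();
    while (!pager.isExhausted()) all.addAll(pager.fetchPage(pageSize));
    return all;
  }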
Example #4
  public KsDef toThrift() {
    List<CfDef> cfDefs = new ArrayList<CfDef>();
    for (CFMetaData cfm : cfMetaData().values()) cfDefs.add(cfm.toThrift());
    KsDef ksdef = new KsDef(name, strategyClass.getName(), cfDefs);
    ksdef.setStrategy_options(strategyOptions);
    ksdef.setDurable_writes(durableWrites);

    return ksdef;
  }
  @VisibleForTesting
  List<Row> discardFirst(List<Row> rows, int toDiscard) {
    if (toDiscard == 0 || rows.isEmpty()) return rows;

    int i = 0;
    DecoratedKey firstKey = null;
    ColumnFamily firstCf = null;
    while (toDiscard > 0 && i < rows.size()) {
      Row first = rows.get(i++);
      firstKey = first.key;
      firstCf = first.cf.cloneMeShallow(isReversed());
      toDiscard -=
          isReversed()
              ? discardLast(first.cf, toDiscard, firstCf)
              : discardFirst(first.cf, toDiscard, firstCf);
    }

    // If there is less live data than to discard, all is discarded
    if (toDiscard > 0) return Collections.<Row>emptyList();

    // i is the index of the first row that we are sure to keep. On top of that,
    // we also keep firstCf if it hasn't been fully emptied by the last iteration above.
    int count = firstCf.getColumnCount();
    int newSize = rows.size() - (count == 0 ? i : i - 1);
    List<Row> newRows = new ArrayList<Row>(newSize);
    if (count != 0) newRows.add(new Row(firstKey, firstCf));
    newRows.addAll(rows.subList(i, rows.size()));

    return newRows;
  }
  public boolean shouldInclude(SSTableReader sstable) {
    List<ByteBuffer> minColumnNames = sstable.getSSTableMetadata().minColumnNames;
    List<ByteBuffer> maxColumnNames = sstable.getSSTableMetadata().maxColumnNames;
    CellNameType comparator = sstable.metadata.comparator;

    if (minColumnNames.isEmpty() || maxColumnNames.isEmpty()) return true;

    for (ColumnSlice slice : slices)
      if (slice.intersects(minColumnNames, maxColumnNames, comparator, reversed)) return true;

    return false;
  }
Example #7
  private List<String> getKeyLocations(ByteBuffer key) {
    List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(cfsKeyspace, key);
    DatabaseDescriptor.getEndpointSnitch()
        .sortByProximity(FBUtilities.getLocalAddress(), endpoints);

    List<String> hosts = new ArrayList<String>(endpoints.size());

    for (InetAddress endpoint : endpoints) {
      hosts.add(endpoint.getHostName());
    }

    return hosts;
  }
  public ColumnFamily updateForKey(
      ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
      throws InvalidRequestException {
    CFDefinition cfDef = cfm.getCfDef();
    ColumnFamily cf = UnsortedColumns.factory.create(cfm);

    // Inserting the CQL row marker (see #4361)
    // We always need to insert a marker, because of the following situation:
    //   CREATE TABLE t ( k int PRIMARY KEY, c text );
    //   INSERT INTO t(k, c) VALUES (1, 'v');
    //   DELETE c FROM t WHERE k = 1;
    //   SELECT * FROM t;
    // The last query should return one row (but with c == null). Adding
    // the marker with the insert makes sure the semantics are correct (while making sure a
    // 'DELETE FROM t WHERE k = 1' does remove the row entirely).
    //
    // We never insert markers for Super CF as this would confuse the thrift side.
    if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper()) {
      ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
      cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
    }

    List<Operation> updates = getOperations();

    if (cfDef.isCompact) {
      if (builder.componentCount() == 0)
        throw new InvalidRequestException(
            String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));

      if (cfDef.value == null) {
        // compact + no compact value implies there is no column outside the PK, so no operation
        // could have passed through validation.
        assert updates.isEmpty();
        setToEmptyOperation.execute(key, cf, builder.copy(), params);
      } else {
        // compact means we don't have a row marker, so don't accept setting only the PK. See
        // CASSANDRA-5648.
        if (updates.isEmpty())
          throw new InvalidRequestException(
              String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));

        for (Operation update : updates) update.execute(key, cf, builder.copy(), params);
      }
    } else {
      for (Operation update : updates) update.execute(key, cf, builder.copy(), params);
    }

    return cf;
  }
  private List<Row> filterEmpty(List<Row> result) {
    for (Row row : result) {
      if (row.cf == null || row.cf.getColumnCount() == 0) {
        List<Row> newResult = new ArrayList<Row>(result.size() - 1);
        for (Row row2 : result) {
          if (row2.cf == null || row2.cf.getColumnCount() == 0) continue;

          newResult.add(row2);
        }
        return newResult;
      }
    }
    return result;
  }
Example #10
  private IColumn validateAndGetColumn(List<Row> rows, ByteBuffer columnName)
      throws NotFoundException {
    if (rows.isEmpty()) throw new NotFoundException();

    if (rows.size() > 1) throw new RuntimeException("Block id returned more than one row");

    Row row = rows.get(0);
    if (row.cf == null) throw new NotFoundException();

    IColumn col = row.cf.getColumn(columnName);

    if (col == null || !col.isLive()) throw new NotFoundException();

    return col;
  }
  private List<SuperColumn> thriftifySuperColumns(
      Collection<IColumn> columns, boolean reverseOrder) {
    if (columns == null || columns.isEmpty()) {
      return EMPTY_SUPERCOLUMNS;
    }

    ArrayList<SuperColumn> thriftSuperColumns = new ArrayList<SuperColumn>(columns.size());
    for (IColumn column : columns) {
      List<Column> subcolumns = thriftifyColumns(column.getSubColumns());
      if (subcolumns.isEmpty()) {
        continue;
      }
      thriftSuperColumns.add(new SuperColumn(column.name(), subcolumns));
    }

    if (reverseOrder) Collections.reverse(thriftSuperColumns);

    return thriftSuperColumns;
  }
Example #12
  public List<List<String>> describe_keys(String keyspace, List<ByteBuffer> keys)
      throws TException {
    List<List<String>> keyEndpoints = new ArrayList<List<String>>(keys.size());

    for (ByteBuffer key : keys) {
      keyEndpoints.add(getKeyLocations(key));
    }

    return keyEndpoints;
  }
  /**
   * Splits this filter into two SliceQueryFilters: one that slices only the static columns, and one
   * that slices the remainder of the normal data.
   *
   * <p>This should only be called when the filter is reversed and the filter is known to cover
   * static columns (through hasStaticSlice()).
   *
   * @return a pair of (static, normal) SliceQueryFilters
   */
  public Pair<SliceQueryFilter, SliceQueryFilter> splitOutStaticSlice(CFMetaData cfm) {
    assert reversed;

    Composite staticSliceEnd = cfm.comparator.staticPrefix().end();
    List<ColumnSlice> nonStaticSlices = new ArrayList<>(slices.length);
    for (ColumnSlice slice : slices) {
      if (sliceIncludesStatics(slice, cfm))
        nonStaticSlices.add(new ColumnSlice(slice.start, staticSliceEnd));
      else nonStaticSlices.add(slice);
    }

    return Pair.create(
        new SliceQueryFilter(staticSliceEnd, Composites.EMPTY, true, count, compositesToGroup),
        new SliceQueryFilter(
            nonStaticSlices.toArray(new ColumnSlice[nonStaticSlices.size()]),
            true,
            count,
            compositesToGroup));
  }
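A usage sketch (not part of the original source): the returned Pair carries the static-columns filter on the left and the normal-data filter on the right; the follow-up merge is left as a placeholder comment.

  private static void useSplitFilters(SliceQueryFilter filter, CFMetaData cfm) {
    // Only meaningful for reversed filters that cover the static columns (see the javadoc above).
    Pair<SliceQueryFilter, SliceQueryFilter> split = filter.splitOutStaticSlice(cfm);
    SliceQueryFilter staticFilter = split.left;  // slices only the static columns
    SliceQueryFilter normalFilter = split.right; // slices the remaining, non-static data
    // Execute the two filters separately and merge their results (not shown here).
  }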
Example #14
  /**
   * Writes out a bunch of mutations for a single column family.
   *
   * @param mutations A group of Mutations for the same keyspace and column family.
   * @return The ColumnFamilyStore that was used.
   */
  public static ColumnFamilyStore writeColumnFamily(List<Mutation> mutations) {
    IMutation first = mutations.get(0);
    String keyspaceName = first.getKeyspaceName();
    UUID cfid = first.getColumnFamilyIds().iterator().next();

    for (Mutation rm : mutations) rm.applyUnsafe();

    ColumnFamilyStore store = Keyspace.open(keyspaceName).getColumnFamilyStore(cfid);
    store.forceBlockingFlush();
    return store;
  }
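A usage sketch (not part of the original source): a caller builds a handful of Mutations against one table and hands them to the helper above. KEYSPACE1, the "Standard1" table, and the cellname(...) helper are hypothetical test fixtures.

  public static ColumnFamilyStore writeSampleRows() {
    // KEYSPACE1, "Standard1" and cellname(...) are assumed test fixtures, not defined here.
    List<Mutation> mutations = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      Mutation rm = new Mutation(KEYSPACE1, ByteBufferUtil.bytes("key" + i));
      rm.add("Standard1", cellname("col"), ByteBufferUtil.bytes("value"), FBUtilities.timestampMicros());
      mutations.add(rm);
    }
    // writeColumnFamily applies every mutation unsafely and then blocks on a flush of the table.
    return writeColumnFamily(mutations);
  }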
Example #15
 @Override
 public List<Row> search(
     List<IndexExpression> clause,
     AbstractBounds<RowPosition> range,
     int maxResults,
     IFilter dataFilter,
     boolean maxIsColumns) {
   assert clause != null && !clause.isEmpty();
   ExtendedFilter filter =
       ExtendedFilter.create(baseCfs, dataFilter, clause, maxResults, maxIsColumns, false);
   return baseCfs.filter(getIndexedIterator(range, filter), filter);
 }
Example #16
    protected ModificationStatement prepareInternal(
        CFDefinition cfDef, VariableSpecifications boundNames, Attributes attrs)
        throws InvalidRequestException {
      UpdateStatement stmt = new UpdateStatement(boundNames.size(), cfDef.cfm, attrs);

      // Created from an INSERT
      if (stmt.isCounter())
        throw new InvalidRequestException(
            "INSERT statement are not allowed on counter tables, use UPDATE instead");
      if (columnNames.size() != columnValues.size())
        throw new InvalidRequestException("Unmatched column names/values");
      if (columnNames.isEmpty()) throw new InvalidRequestException("No columns provided to INSERT");

      for (int i = 0; i < columnNames.size(); i++) {
        CFDefinition.Name name = cfDef.get(columnNames.get(i));
        if (name == null)
          throw new InvalidRequestException(
              String.format("Unknown identifier %s", columnNames.get(i)));

        for (int j = 0; j < i; j++)
          if (name.name.equals(columnNames.get(j)))
            throw new InvalidRequestException(
                String.format("Multiple definitions found for column %s", name));

        Term.Raw value = columnValues.get(i);

        switch (name.kind) {
          case KEY_ALIAS:
          case COLUMN_ALIAS:
            Term t = value.prepare(name);
            t.collectMarkerSpecification(boundNames);
            stmt.addKeyValue(name.name, t);
            break;
          case VALUE_ALIAS:
          case COLUMN_METADATA:
            Operation operation = new Operation.SetValue(value).prepare(name);
            operation.collectMarkerSpecification(boundNames);
            stmt.addOperation(operation);
            break;
        }
      }
      return stmt;
    }
  public SliceQueryFilter withUpdatedStart(Composite newStart, CFMetaData cfm) {
    Comparator<Composite> cmp = reversed ? cfm.comparator.reverseComparator() : cfm.comparator;

    // Check our slices to see if any fall before the new start (in which case they can be removed)
    // or if they contain the new start (in which case they should start from the page start).
    // However, if the slices would include static columns, we need to ensure they are also fetched,
    // and so a separate slice for the static columns may be required.
    // Note that if the query is reversed, we can't handle statics by simply adding a separate
    // slice here, so the reversed case is handled by SliceFromReadCommand instead. See
    // CASSANDRA-8502 for more details.
    List<ColumnSlice> newSlices = new ArrayList<>();
    boolean pastNewStart = false;
    for (ColumnSlice slice : slices) {
      if (pastNewStart) {
        newSlices.add(slice);
        continue;
      }

      if (slice.isBefore(cmp, newStart)) {
        if (!reversed && sliceIncludesStatics(slice, cfm))
          newSlices.add(new ColumnSlice(Composites.EMPTY, cfm.comparator.staticPrefix().end()));

        continue;
      } else if (slice.includes(cmp, newStart)) {
        if (!reversed && sliceIncludesStatics(slice, cfm) && !newStart.isEmpty())
          newSlices.add(new ColumnSlice(Composites.EMPTY, cfm.comparator.staticPrefix().end()));

        newSlices.add(new ColumnSlice(newStart, slice.finish));
      } else {
        newSlices.add(slice);
      }

      pastNewStart = true;
    }
    return withUpdatedSlices(newSlices.toArray(new ColumnSlice[newSlices.size()]));
  }
 public List<String> getActiveSegmentNames() {
   List<String> segmentNames = new ArrayList<>();
   for (CommitLogSegment segment : allocator.getActiveSegments())
     segmentNames.add(segment.getName());
   return segmentNames;
 }
Example #19
  /** Creates initial set of nodes and tokens. Nodes are added to StorageService as 'normal' */
  public static void createInitialRing(
      StorageService ss,
      IPartitioner partitioner,
      List<Token> endpointTokens,
      List<Token> keyTokens,
      List<InetAddress> hosts,
      List<UUID> hostIds,
      int howMany)
      throws UnknownHostException {
    // Expand pool of host IDs as necessary
    for (int i = hostIdPool.size(); i < howMany; i++) hostIdPool.add(UUID.randomUUID());

    for (int i = 0; i < howMany; i++) {
      endpointTokens.add(new BigIntegerToken(String.valueOf(10 * i)));
      keyTokens.add(new BigIntegerToken(String.valueOf(10 * i + 5)));
      hostIds.add(hostIdPool.get(i));
    }

    for (int i = 0; i < endpointTokens.size(); i++) {
      InetAddress ep = InetAddress.getByName("127.0.0." + String.valueOf(i + 1));
      Gossiper.instance.initializeNodeUnsafe(ep, hostIds.get(i), 1);
      Gossiper.instance.injectApplicationState(
          ep,
          ApplicationState.TOKENS,
          new VersionedValue.VersionedValueFactory(partitioner)
              .tokens(Collections.singleton(endpointTokens.get(i))));
      ss.onChange(
          ep,
          ApplicationState.STATUS,
          new VersionedValue.VersionedValueFactory(partitioner)
              .normal(Collections.singleton(endpointTokens.get(i))));
      hosts.add(ep);
    }

    // check that all nodes are in token metadata
    for (int i = 0; i < endpointTokens.size(); ++i)
      assertTrue(ss.getTokenMetadata().isMember(hosts.get(i)));
  }
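A usage sketch (not part of the original source): the helper is typically handed empty lists that it fills in; the three-node count and the StorageService.getPartitioner() call are assumptions.

  public static void setUpThreeNodeRing() throws UnknownHostException {
    StorageService ss = StorageService.instance;
    List<Token> endpointTokens = new ArrayList<>();
    List<Token> keyTokens = new ArrayList<>();
    List<InetAddress> hosts = new ArrayList<>();
    List<UUID> hostIds = new ArrayList<>();
    // Fills the lists with three evenly spaced tokens/hosts and registers each node as 'normal'.
    createInitialRing(ss, StorageService.getPartitioner(), endpointTokens, keyTokens, hosts, hostIds, 3);
  }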
Example #20
 public static Future<?> compactAll(ColumnFamilyStore cfs, int gcBefore) {
   List<Descriptor> descriptors = new ArrayList<>();
   for (SSTableReader sstable : cfs.getSSTables()) descriptors.add(sstable.descriptor);
   return CompactionManager.instance.submitUserDefined(cfs, descriptors, gcBefore);
 }