public boolean shouldInclude(SSTableReader sstable)
{
    List<ByteBuffer> minColumnNames = sstable.getSSTableMetadata().minColumnNames;
    List<ByteBuffer> maxColumnNames = sstable.getSSTableMetadata().maxColumnNames;
    CellNameType comparator = sstable.metadata.comparator;

    // Without min/max cell name metadata we cannot rule the sstable out,
    // so include it conservatively.
    if (minColumnNames.isEmpty() || maxColumnNames.isEmpty())
        return true;

    for (ColumnSlice slice : slices)
        if (slice.intersects(minColumnNames, maxColumnNames, comparator, reversed))
            return true;

    return false;
}
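The include/exclude decision above is, at heart, an interval-overlap test over the sstable's min/max cell names. A minimal, self-contained sketch of the same idea, using integer bounds instead of Cassandra's ByteBuffer cell names (the class and method names here are illustrative, not part of the Cassandra API):

import java.util.List;

public class BoundsCheckSketch
{
    // A query slice [start, finish] intersects an sstable whose cells span
    // [min, max] iff the two intervals overlap: start <= max && min <= finish.
    static boolean sliceIntersects(int start, int finish, int min, int max)
    {
        return start <= max && min <= finish;
    }

    // Mirrors shouldInclude(): with no min/max metadata we cannot prove the
    // sstable is irrelevant, so we conservatively include it.
    static boolean shouldInclude(List<int[]> slices, Integer min, Integer max)
    {
        if (min == null || max == null)
            return true;
        for (int[] slice : slices)
            if (sliceIntersects(slice[0], slice[1], min, max))
                return true;
        return false;
    }

    public static void main(String[] args)
    {
        List<int[]> slices = List.of(new int[]{ 5, 10 });
        System.out.println(shouldInclude(slices, 8, 20));      // true: [5,10] overlaps [8,20]
        System.out.println(shouldInclude(slices, 11, 20));     // false: no overlap
        System.out.println(shouldInclude(slices, null, null)); // true: no metadata
    }
}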
public ColumnFamily updateForKey(ByteBuffer key, ColumnNameBuilder builder, UpdateParameters params)
throws InvalidRequestException
{
    CFDefinition cfDef = cfm.getCfDef();
    ColumnFamily cf = UnsortedColumns.factory.create(cfm);

    // Inserting the CQL row marker (see #4361)
    // We always need to insert a marker, because of the following situation:
    //   CREATE TABLE t ( k int PRIMARY KEY, c text );
    //   INSERT INTO t (k, c) VALUES (1, 'v');
    //   DELETE c FROM t WHERE k = 1;
    //   SELECT * FROM t;
    // The last query should return one row (but with c == null). Adding
    // the marker with the insert makes sure the semantics are correct (while making sure a
    // 'DELETE FROM t WHERE k = 1' does remove the row entirely).
    //
    // We never insert markers for Super CF as this would confuse the thrift side.
    if (cfDef.isComposite && !cfDef.isCompact && !cfm.isSuper())
    {
        ByteBuffer name = builder.copy().add(ByteBufferUtil.EMPTY_BYTE_BUFFER).build();
        cf.addColumn(params.makeColumn(name, ByteBufferUtil.EMPTY_BYTE_BUFFER));
    }

    List<Operation> updates = getOperations();

    if (cfDef.isCompact)
    {
        if (builder.componentCount() == 0)
            throw new InvalidRequestException(String.format("Missing PRIMARY KEY part %s", cfDef.columns.values().iterator().next()));

        if (cfDef.value == null)
        {
            // Compact with no compact value implies there is no column outside the PK,
            // so no operation could have passed through validation.
            assert updates.isEmpty();
            setToEmptyOperation.execute(key, cf, builder.copy(), params);
        }
        else
        {
            // Compact means we don't have a row marker, so we don't accept setting only the PK.
            // See CASSANDRA-5648.
            if (updates.isEmpty())
                throw new InvalidRequestException(String.format("Column %s is mandatory for this COMPACT STORAGE table", cfDef.value));

            for (Operation update : updates)
                update.execute(key, cf, builder.copy(), params);
        }
    }
    else
    {
        for (Operation update : updates)
            update.execute(key, cf, builder.copy(), params);
    }

    return cf;
}
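The row-marker rationale in the comment can be illustrated with a toy model in which a row is visible iff it has at least one live cell; the empty-named marker cell is what keeps the row alive after its regular columns are deleted. Everything below is hypothetical scaffolding, not Cassandra code:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class RowMarkerSketch
{
    // key -> set of live cell names for that row
    static Map<Integer, Set<String>> table = new HashMap<>();

    static void insert(int k, String c)
    {
        Set<String> cells = table.computeIfAbsent(k, x -> new HashSet<>());
        cells.add("");  // the row marker: a cell with an empty name
        cells.add(c);   // the regular column
    }

    static void deleteColumn(int k, String c)
    {
        Set<String> cells = table.get(k);
        if (cells != null)
            cells.remove(c);
    }

    static boolean rowVisible(int k)
    {
        Set<String> cells = table.get(k);
        return cells != null && !cells.isEmpty();
    }

    public static void main(String[] args)
    {
        insert(1, "c");
        deleteColumn(1, "c");
        // The marker cell keeps the row visible, matching the comment's
        // expectation that SELECT * still returns the row with c == null.
        System.out.println(rowVisible(1)); // true
    }
}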
@VisibleForTesting
List<Row> discardLast(List<Row> rows, int toDiscard)
{
    if (toDiscard == 0 || rows.isEmpty())
        return rows;

    int i = rows.size() - 1;
    DecoratedKey lastKey = null;
    ColumnFamily lastCf = null;
    while (toDiscard > 0 && i >= 0)
    {
        Row last = rows.get(i--);
        lastKey = last.key;
        lastCf = last.cf.cloneMeShallow(isReversed());
        toDiscard -= isReversed()
                   ? discardFirst(last.cf, toDiscard, lastCf)
                   : discardLast(last.cf, toDiscard, lastCf);
    }

    // If there is less live data than there is to discard, everything is discarded.
    if (toDiscard > 0)
        return Collections.<Row>emptyList();

    // i is the index of the last row that we are sure to keep. On top of that,
    // we also keep lastCf if it hasn't been fully emptied by the last iteration above.
    int count = lastCf.getColumnCount();
    int newSize = count == 0 ? i + 1 : i + 2;
    List<Row> newRows = new ArrayList<Row>(newSize);
    newRows.addAll(rows.subList(0, i + 1));
    if (count != 0)
        newRows.add(new Row(lastKey, lastCf));

    return newRows;
}
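The accounting in discardLast is easier to follow with plain lists standing in for rows and cells. A simplified standalone version under that assumption, discarding the last toDiscard cells across rows and dropping rows that become empty (all names illustrative):

import java.util.ArrayList;
import java.util.List;

public class DiscardLastSketch
{
    static List<List<Integer>> discardLast(List<List<Integer>> rows, int toDiscard)
    {
        if (toDiscard == 0 || rows.isEmpty())
            return rows;

        int i = rows.size() - 1;
        List<Integer> lastRow = null;
        while (toDiscard > 0 && i >= 0)
        {
            List<Integer> row = rows.get(i--);
            int removed = Math.min(toDiscard, row.size());
            // keep everything but the trailing `removed` cells of this row
            lastRow = new ArrayList<>(row.subList(0, row.size() - removed));
            toDiscard -= removed;
        }

        if (toDiscard > 0) // less data than requested to discard: drop everything
            return new ArrayList<>();

        // keep rows[0..i] untouched, plus the partially trimmed row if non-empty
        List<List<Integer>> result = new ArrayList<>(rows.subList(0, i + 1));
        if (!lastRow.isEmpty())
            result.add(lastRow);
        return result;
    }

    public static void main(String[] args)
    {
        List<List<Integer>> rows = new ArrayList<>();
        rows.add(new ArrayList<>(List.of(1, 2, 3)));
        rows.add(new ArrayList<>(List.of(4, 5)));
        // Discard 3 cells from the end: all of [4, 5] and the 3 of [1, 2, 3].
        System.out.println(discardLast(rows, 3)); // [[1, 2]]
    }
}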
public List<Row> fetchPage(int pageSize) throws RequestValidationException, RequestExecutionException
{
    if (isExhausted())
        return Collections.emptyList();

    int currentPageSize = nextPageSize(pageSize);
    List<Row> rows = filterEmpty(queryNextPage(currentPageSize, consistencyLevel, localQuery));

    if (rows.isEmpty())
    {
        logger.debug("Got empty set of rows, considering pager exhausted");
        exhausted = true;
        return Collections.emptyList();
    }

    int liveCount = getPageLiveCount(rows);
    logger.debug("Fetched {} live rows", liveCount);

    // Because SP.getRangeSlice doesn't trim the result (see SP.trim()), liveCount may be greater
    // than what was asked for (currentPageSize). This would throw off the paging logic, so we
    // trim the excess. It's not extremely efficient, but most of the time there should be nothing
    // or very little to trim.
    if (liveCount > currentPageSize)
    {
        rows = discardLast(rows, liveCount - currentPageSize);
        liveCount = currentPageSize;
    }

    remaining -= liveCount;

    // If we've got less than requested, there is no more query to do (but
    // we still need to return the current page).
    if (liveCount < currentPageSize)
    {
        logger.debug("Got result ({}) smaller than page size ({}), considering pager exhausted", liveCount, currentPageSize);
        exhausted = true;
    }

    // If it's not the first query and the first column is the last one returned (likely
    // but not certain since paging can race with deletes/expiration), then remove the
    // first column.
    if (containsPreviousLast(rows.get(0)))
    {
        rows = discardFirst(rows);
        remaining++;
    }
    // Otherwise, if 'lastWasRecorded', we queried for one more than the page size,
    // so if the page is full, trim the last entry.
    else if (lastWasRecorded && !exhausted)
    {
        // We've asked for one more than necessary.
        rows = discardLast(rows);
        remaining++;
    }

    logger.debug("Remaining rows to page: {}", remaining);

    if (!isExhausted())
        lastWasRecorded = recordLast(rows.get(rows.size() - 1));

    return rows;
}
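The trimming and exhaustion logic boils down to three cases on liveCount versus currentPageSize. A toy illustration, with a page reduced to a count of live rows and everything else hypothetical:

public class PageTrimSketch
{
    static void page(int liveCount, int pageSize)
    {
        boolean exhausted = false;
        int returned = liveCount;
        if (liveCount > pageSize)       // replica returned extra: trim the excess
            returned = pageSize;
        else if (liveCount < pageSize)  // short page: no further query needed
            exhausted = true;
        System.out.println("returned=" + returned + " exhausted=" + exhausted);
    }

    public static void main(String[] args)
    {
        page(12, 10); // returned=10 exhausted=false (2 rows trimmed)
        page(10, 10); // returned=10 exhausted=false (full page, maybe more)
        page(7, 10);  // returned=7  exhausted=true
    }
}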
@Override
public List<Row> search(List<IndexExpression> clause, AbstractBounds<RowPosition> range, int maxResults, IFilter dataFilter, boolean maxIsColumns)
{
    assert clause != null && !clause.isEmpty();
    ExtendedFilter filter = ExtendedFilter.create(baseCfs, dataFilter, clause, maxResults, maxIsColumns, false);
    return baseCfs.filter(getIndexedIterator(range, filter), filter);
}
private IColumn validateAndGetColumn(List<Row> rows, ByteBuffer columnName) throws NotFoundException
{
    if (rows.isEmpty())
        throw new NotFoundException();
    if (rows.size() > 1)
        throw new RuntimeException("Block id returned more than one row");
    Row row = rows.get(0);
    if (row.cf == null)
        throw new NotFoundException();
    IColumn col = row.cf.getColumn(columnName);
    if (col == null || !col.isLive())
        throw new NotFoundException();
    return col;
}
protected ModificationStatement prepareInternal(CFDefinition cfDef, VariableSpecifications boundNames, Attributes attrs)
throws InvalidRequestException
{
    UpdateStatement stmt = new UpdateStatement(boundNames.size(), cfDef.cfm, attrs);

    // Created from an INSERT
    if (stmt.isCounter())
        throw new InvalidRequestException("INSERT statements are not allowed on counter tables, use UPDATE instead");
    if (columnNames.size() != columnValues.size())
        throw new InvalidRequestException("Unmatched column names/values");
    if (columnNames.isEmpty())
        throw new InvalidRequestException("No columns provided to INSERT");

    for (int i = 0; i < columnNames.size(); i++)
    {
        CFDefinition.Name name = cfDef.get(columnNames.get(i));
        if (name == null)
            throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));

        for (int j = 0; j < i; j++)
            if (name.name.equals(columnNames.get(j)))
                throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));

        Term.Raw value = columnValues.get(i);

        switch (name.kind)
        {
            case KEY_ALIAS:
            case COLUMN_ALIAS:
                Term t = value.prepare(name);
                t.collectMarkerSpecification(boundNames);
                stmt.addKeyValue(name.name, t);
                break;
            case VALUE_ALIAS:
            case COLUMN_METADATA:
                Operation operation = new Operation.SetValue(value).prepare(name);
                operation.collectMarkerSpecification(boundNames);
                stmt.addOperation(operation);
                break;
        }
    }
    return stmt;
}
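The duplicate check above is a quadratic scan over columnNames, which is fine for the handful of columns an INSERT names. For comparison, the same validation done with a HashSet (an alternative sketch, not what this code uses) would look like:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DuplicateColumnSketch
{
    static void validateNoDuplicates(List<String> columnNames)
    {
        Set<String> seen = new HashSet<>();
        for (String name : columnNames)
            if (!seen.add(name)) // add() returns false if the name was already present
                throw new IllegalArgumentException("Multiple definitions found for column " + name);
    }

    public static void main(String[] args)
    {
        validateNoDuplicates(List.of("k", "c"));      // ok
        validateNoDuplicates(List.of("k", "c", "k")); // throws
    }
}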
private List<SuperColumn> thriftifySuperColumns(Collection<IColumn> columns, boolean reverseOrder)
{
    if (columns == null || columns.isEmpty())
        return EMPTY_SUPERCOLUMNS;

    ArrayList<SuperColumn> thriftSuperColumns = new ArrayList<SuperColumn>(columns.size());
    for (IColumn column : columns)
    {
        List<Column> subcolumns = thriftifyColumns(column.getSubColumns());
        if (subcolumns.isEmpty())
            continue;
        thriftSuperColumns.add(new SuperColumn(column.name(), subcolumns));
    }

    if (reverseOrder)
        Collections.reverse(thriftSuperColumns);

    return thriftSuperColumns;
}