@VisibleForTesting
List<Row> discardFirst(List<Row> rows, int toDiscard)
{
    if (toDiscard == 0 || rows.isEmpty())
        return rows;

    int i = 0;
    DecoratedKey firstKey = null;
    ColumnFamily firstCf = null;
    while (toDiscard > 0 && i < rows.size())
    {
        Row first = rows.get(i++);
        firstKey = first.key;
        firstCf = first.cf.cloneMeShallow(isReversed());
        toDiscard -= isReversed()
                   ? discardLast(first.cf, toDiscard, firstCf)
                   : discardFirst(first.cf, toDiscard, firstCf);
    }

    // If there is less live data than to discard, all is discarded
    if (toDiscard > 0)
        return Collections.<Row>emptyList();

    // i is the index of the first row that we are sure to keep. On top of that,
    // we also keep firstCf if it hasn't been fully emptied by the last iteration above.
    int count = firstCf.getColumnCount();
    int newSize = rows.size() - (count == 0 ? i : i - 1);
    List<Row> newRows = new ArrayList<Row>(newSize);
    if (count != 0)
        newRows.add(new Row(firstKey, firstCf));
    newRows.addAll(rows.subList(i, rows.size()));

    return newRows;
}
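// A minimal, self-contained sketch of the same discard-across-rows idea, using
// List<Integer> buckets as hypothetical stand-ins for Row/ColumnFamily (none of
// these names are Cassandra's): drain whole buckets until the remaining discard
// count fits inside one, then keep the partially drained bucket only if anything
// survives, mirroring the firstCf/getColumnCount() check above. Assumes
// java.util imports.
static List<List<Integer>> discardFirstSketch(List<List<Integer>> buckets, int toDiscard)
{
    int i = 0;
    List<Integer> firstKept = null;
    while (toDiscard > 0 && i < buckets.size())
    {
        List<Integer> bucket = buckets.get(i++);
        int discarded = Math.min(toDiscard, bucket.size());
        firstKept = new ArrayList<Integer>(bucket.subList(discarded, bucket.size()));
        toDiscard -= discarded;
    }

    // Less live data than requested to discard: everything goes
    if (toDiscard > 0)
        return Collections.emptyList();

    List<List<Integer>> kept = new ArrayList<List<Integer>>();
    if (firstKept != null && !firstKept.isEmpty())
        kept.add(firstKept);
    kept.addAll(buckets.subList(i, buckets.size()));
    return kept;
}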
private Integer seekToSubColumn(CFMetaData metadata,
                                FileDataInput file,
                                ByteBuffer sblockId,
                                List<IndexHelper.IndexInfo> indexList) throws IOException
{
    file.readInt(); // column count

    /* get the various column ranges we have to read */
    AbstractType comparator = metadata.comparator;

    // Locate the index block that could contain the requested sub-block id
    int index = IndexHelper.indexFor(sblockId, indexList, comparator, false);
    if (index == indexList.size())
        return null;

    IndexHelper.IndexInfo indexInfo = indexList.get(index);
    if (comparator.compare(sblockId, indexInfo.firstName) < 0)
        return null;

    FileMark mark = file.mark();

    // Seek to the start of the candidate block, then scan its columns until
    // the block's width is exhausted
    FileUtils.skipBytesFully(file, indexInfo.offset);

    while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width)
    {
        Integer dataLength = isSubBlockFound(metadata, file, sblockId);

        if (dataLength == null)
            return null;

        if (dataLength < 0)
            continue;

        return dataLength;
    }

    return null;
}
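// A hedged sketch of the two-level lookup above, using plain int arrays in
// place of IndexHelper/FileDataInput (all names here are illustrative, not
// Cassandra's): binary-search the per-block index to the one block whose range
// may contain the key, then scan entries inside that block only. blockStarts[i]
// is the first key covered by block i; block i's entries span
// [blockOffsets[i], blockOffsets[i] + blockWidths[i]) within blockData.
// Assumes java.util imports.
static int seekSketch(int key, int[] blockStarts, int[] blockOffsets, int[] blockWidths, int[] blockData)
{
    int idx = Arrays.binarySearch(blockStarts, key);
    if (idx < 0)
        idx = -idx - 2; // insertion point - 1: the block that could hold the key
    if (idx < 0)
        return -1; // key precedes the first block: not present

    int end = blockOffsets[idx] + blockWidths[idx];
    for (int i = blockOffsets[idx]; i < end; i++)
        if (blockData[i] == key)
            return i; // found inside the candidate block

    return -1; // scanned the whole candidate block without a match
}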
public List<Row> fetchPage(int pageSize) throws RequestValidationException, RequestExecutionException
{
    if (isExhausted())
        return Collections.emptyList();

    int currentPageSize = nextPageSize(pageSize);
    List<Row> rows = filterEmpty(queryNextPage(currentPageSize, consistencyLevel, localQuery));

    if (rows.isEmpty())
    {
        logger.debug("Got empty set of rows, considering pager exhausted");
        exhausted = true;
        return Collections.emptyList();
    }

    int liveCount = getPageLiveCount(rows);
    logger.debug("Fetched {} live rows", liveCount);

    // Because SP.getRangeSlice doesn't trim the result (see SP.trim()), liveCount may be greater
    // than what was asked for (currentPageSize). This would throw off the paging logic, so we
    // trim the excess. It's not extremely efficient, but most of the time there should be
    // nothing or very little to trim.
    if (liveCount > currentPageSize)
    {
        rows = discardLast(rows, liveCount - currentPageSize);
        liveCount = currentPageSize;
    }

    remaining -= liveCount;

    // If we've got less than requested, there is no more query to do (but
    // we still need to return the current page)
    if (liveCount < currentPageSize)
    {
        logger.debug("Got result ({}) smaller than page size ({}), considering pager exhausted",
                     liveCount, currentPageSize);
        exhausted = true;
    }

    // If it's not the first query and the first column is the last one returned (likely
    // but not certain since paging can race with deletes/expiration), then remove the
    // first column.
    if (containsPreviousLast(rows.get(0)))
    {
        rows = discardFirst(rows);
        remaining++;
    }
    // Otherwise, if 'lastWasRecorded', we queried for one more than the page size,
    // so if the page is full, trim the last entry
    else if (lastWasRecorded && !exhausted)
    {
        // We've asked for one more than necessary
        rows = discardLast(rows);
        remaining++;
    }

    logger.debug("Remaining rows to page: {}", remaining);

    if (!isExhausted())
        lastWasRecorded = recordLast(rows.get(rows.size() - 1));

    return rows;
}
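// A hedged usage sketch of the paging contract above: keep calling fetchPage()
// until isExhausted() reports there is nothing left. The Pager interface below
// is a hypothetical stand-in naming just the two methods shown in this section
// (Row and the exception types are the ones used above); the page size of 100
// is arbitrary.
interface Pager
{
    boolean isExhausted();
    List<Row> fetchPage(int pageSize) throws RequestValidationException, RequestExecutionException;
}

static int countAllRows(Pager pager) throws RequestValidationException, RequestExecutionException
{
    int total = 0;
    while (!pager.isExhausted())
    {
        // A returned page may hold fewer rows than requested once the
        // pager nears the end of the data
        total += pager.fetchPage(100).size();
    }
    return total;
}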
protected ModificationStatement prepareInternal(CFDefinition cfDef,
                                                VariableSpecifications boundNames,
                                                Attributes attrs) throws InvalidRequestException
{
    UpdateStatement stmt = new UpdateStatement(boundNames.size(), cfDef.cfm, attrs);

    // Created from an INSERT
    if (stmt.isCounter())
        throw new InvalidRequestException("INSERT statements are not allowed on counter tables, use UPDATE instead");

    if (columnNames.size() != columnValues.size())
        throw new InvalidRequestException("Unmatched column names/values");
    if (columnNames.isEmpty())
        throw new InvalidRequestException("No columns provided to INSERT");

    for (int i = 0; i < columnNames.size(); i++)
    {
        CFDefinition.Name name = cfDef.get(columnNames.get(i));
        if (name == null)
            throw new InvalidRequestException(String.format("Unknown identifier %s", columnNames.get(i)));

        // Reject multiple definitions of the same column
        for (int j = 0; j < i; j++)
            if (name.name.equals(columnNames.get(j)))
                throw new InvalidRequestException(String.format("Multiple definitions found for column %s", name));

        Term.Raw value = columnValues.get(i);

        switch (name.kind)
        {
            case KEY_ALIAS:
            case COLUMN_ALIAS:
                Term t = value.prepare(name);
                t.collectMarkerSpecification(boundNames);
                stmt.addKeyValue(name.name, t);
                break;
            case VALUE_ALIAS:
            case COLUMN_METADATA:
                Operation operation = new Operation.SetValue(value).prepare(name);
                operation.collectMarkerSpecification(boundNames);
                stmt.addOperation(operation);
                break;
        }
    }
    return stmt;
}
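// The duplicate-definition check above is O(n^2) in the number of inserted
// columns, which is fine for typical statements. A hedged alternative sketch
// using a single pass over a HashSet; T stands in for the column identifier
// type (not shown in this section), and IllegalArgumentException replaces
// InvalidRequestException to keep the sketch self-contained. Assumes java.util
// imports.
static <T> void rejectDuplicateColumns(List<T> columnNames)
{
    Set<T> seen = new HashSet<T>();
    for (T name : columnNames)
        if (!seen.add(name))
            throw new IllegalArgumentException(String.format("Multiple definitions found for column %s", name));
}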
public List<List<String>> describe_keys(String keyspace, List<ByteBuffer> keys) throws TException
{
    List<List<String>> keyEndpoints = new ArrayList<List<String>>(keys.size());

    for (ByteBuffer key : keys)
    {
        keyEndpoints.add(getKeyLocations(key));
    }

    return keyEndpoints;
}
private List<String> getKeyLocations(ByteBuffer key)
{
    List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(cfsKeyspace, key);

    // Order the live replicas by proximity to this node before reporting them
    DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getLocalAddress(), endpoints);

    List<String> hosts = new ArrayList<String>(endpoints.size());

    for (InetAddress endpoint : endpoints)
    {
        hosts.add(endpoint.getHostName());
    }

    return hosts;
}
/** Creates initial set of nodes and tokens. Nodes are added to StorageService as 'normal' */
public static void createInitialRing(StorageService ss,
                                     IPartitioner partitioner,
                                     List<Token> endpointTokens,
                                     List<Token> keyTokens,
                                     List<InetAddress> hosts,
                                     List<UUID> hostIds,
                                     int howMany) throws UnknownHostException
{
    // Expand pool of host IDs as necessary
    for (int i = hostIdPool.size(); i < howMany; i++)
        hostIdPool.add(UUID.randomUUID());

    for (int i = 0; i < howMany; i++)
    {
        endpointTokens.add(new BigIntegerToken(String.valueOf(10 * i)));
        keyTokens.add(new BigIntegerToken(String.valueOf(10 * i + 5)));
        hostIds.add(hostIdPool.get(i));
    }

    for (int i = 0; i < endpointTokens.size(); i++)
    {
        InetAddress ep = InetAddress.getByName("127.0.0." + String.valueOf(i + 1));
        Gossiper.instance.initializeNodeUnsafe(ep, hostIds.get(i), 1);
        Gossiper.instance.injectApplicationState(ep,
                                                 ApplicationState.TOKENS,
                                                 new VersionedValue.VersionedValueFactory(partitioner).tokens(Collections.singleton(endpointTokens.get(i))));
        ss.onChange(ep,
                    ApplicationState.STATUS,
                    new VersionedValue.VersionedValueFactory(partitioner).normal(Collections.singleton(endpointTokens.get(i))));
        hosts.add(ep);
    }

    // check that all nodes are in token metadata
    for (int i = 0; i < endpointTokens.size(); ++i)
        assertTrue(ss.getTokenMetadata().isMember(hosts.get(i)));
}
private List<Row> filterEmpty(List<Row> result)
{
    for (Row row : result)
    {
        if (row.cf == null || row.cf.getColumnCount() == 0)
        {
            // Only allocate a filtered copy once we know at least one row is
            // empty; the common case returns the input list untouched.
            List<Row> newResult = new ArrayList<Row>(result.size() - 1);
            for (Row row2 : result)
            {
                if (row2.cf == null || row2.cf.getColumnCount() == 0)
                    continue;
                newResult.add(row2);
            }
            return newResult;
        }
    }
    return result;
}
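// The method above deliberately scans twice: the first pass only detects
// whether any empty row exists, so the common case returns the input list with
// no allocation at all. A hedged, self-contained sketch of the same
// copy-only-if-needed pattern over strings (blank entries standing in for
// empty rows); assumes java.util imports.
static List<String> dropBlanksIfAny(List<String> input)
{
    for (String s : input)
    {
        if (s.isEmpty())
        {
            List<String> filtered = new ArrayList<String>(input.size() - 1);
            for (String other : input)
                if (!other.isEmpty())
                    filtered.add(other);
            return filtered; // allocate only when something must be dropped
        }
    }
    return input; // common case: nothing blank, input returned as-is
}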
private IColumn validateAndGetColumn(List<Row> rows, ByteBuffer columnName) throws NotFoundException
{
    if (rows.isEmpty())
        throw new NotFoundException();
    if (rows.size() > 1)
        throw new RuntimeException("Block id returned more than one row");

    Row row = rows.get(0);
    if (row.cf == null)
        throw new NotFoundException();

    IColumn col = row.cf.getColumn(columnName);
    if (col == null || !col.isLive())
        throw new NotFoundException();

    return col;
}
/**
 * Splits this filter into two SliceQueryFilters: one that slices only the static columns, and
 * one that slices the remainder of the normal data.
 *
 * This should only be called when the filter is reversed and the filter is known to cover
 * static columns (through hasStaticSlice()).
 *
 * @return a pair of (static, normal) SliceQueryFilters
 */
public Pair<SliceQueryFilter, SliceQueryFilter> splitOutStaticSlice(CFMetaData cfm)
{
    assert reversed;

    Composite staticSliceEnd = cfm.comparator.staticPrefix().end();
    List<ColumnSlice> nonStaticSlices = new ArrayList<>(slices.length);

    for (ColumnSlice slice : slices)
    {
        if (sliceIncludesStatics(slice, cfm))
            nonStaticSlices.add(new ColumnSlice(slice.start, staticSliceEnd));
        else
            nonStaticSlices.add(slice);
    }

    return Pair.create(new SliceQueryFilter(staticSliceEnd, Composites.EMPTY, true, count, compositesToGroup),
                       new SliceQueryFilter(nonStaticSlices.toArray(new ColumnSlice[nonStaticSlices.size()]), true, count, compositesToGroup));
}
public SliceQueryFilter withUpdatedStart(Composite newStart, CFMetaData cfm)
{
    Comparator<Composite> cmp = reversed ? cfm.comparator.reverseComparator() : cfm.comparator;

    // Check our slices to see if any fall before the new start (in which case they can be
    // removed) or if they contain the new start (in which case they should start from the page
    // start). However, if the slices would include static columns, we need to ensure they are
    // also fetched, and so a separate slice for the static columns may be required.
    // Note that if the query is reversed, we can't handle statics by simply adding a separate
    // slice here, so the reversed case is handled by SliceFromReadCommand instead.
    // See CASSANDRA-8502 for more details.
    List<ColumnSlice> newSlices = new ArrayList<>();
    boolean pastNewStart = false;
    for (ColumnSlice slice : slices)
    {
        if (pastNewStart)
        {
            newSlices.add(slice);
            continue;
        }

        if (slice.isBefore(cmp, newStart))
        {
            if (!reversed && sliceIncludesStatics(slice, cfm))
                newSlices.add(new ColumnSlice(Composites.EMPTY, cfm.comparator.staticPrefix().end()));

            continue;
        }
        else if (slice.includes(cmp, newStart))
        {
            if (!reversed && sliceIncludesStatics(slice, cfm) && !newStart.isEmpty())
                newSlices.add(new ColumnSlice(Composites.EMPTY, cfm.comparator.staticPrefix().end()));

            newSlices.add(new ColumnSlice(newStart, slice.finish));
        }
        else
        {
            newSlices.add(slice);
        }

        pastNewStart = true;
    }

    return withUpdatedSlices(newSlices.toArray(new ColumnSlice[newSlices.size()]));
}
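// A hedged, self-contained sketch of the slice-adjustment idea above, using
// inclusive int ranges as hypothetical stand-ins for Composite slices: drop
// ranges that end before the new start, trim the one containing it so it
// begins at the page start, and keep everything after it untouched. The
// static-column special case is deliberately omitted. Assumes java.util
// imports.
static List<int[]> withUpdatedStartSketch(List<int[]> ranges, int newStart)
{
    List<int[]> updated = new ArrayList<int[]>();
    boolean pastNewStart = false;
    for (int[] range : ranges)
    {
        if (pastNewStart)
        {
            updated.add(range); // already past the page start: keep as-is
            continue;
        }

        if (range[1] < newStart)
            continue; // whole range precedes the new start: drop it

        if (range[0] <= newStart)
            updated.add(new int[]{ newStart, range[1] }); // trim to the page start
        else
            updated.add(range); // begins after the new start: keep whole

        pastNewStart = true;
    }
    return updated;
}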