/**
 * Iterator over the Tokens in the given ring, starting with the token for the node owning start
 * (which does not have to be a Token in the ring).
 *
 * @param includeMin True if the minimum token should be returned in the ring even if it has no
 *     owner.
 */
public static Iterator<Token> ringIterator(
    final ArrayList<Token> ring, Token start, boolean includeMin) {
  if (ring.isEmpty())
    return includeMin
        ? Iterators.singletonIterator(StorageService.getPartitioner().getMinimumToken())
        : Iterators.<Token>emptyIterator();

  final boolean insertMin = includeMin && !ring.get(0).isMinimum();
  final int startIndex = firstTokenIndex(ring, start, insertMin);
  return new AbstractIterator<Token>() {
    int j = startIndex;

    protected Token computeNext() {
      if (j < -1) return endOfData();
      try {
        // return minimum for index == -1
        if (j == -1) return StorageService.getPartitioner().getMinimumToken();
        // return ring token for other indexes
        return ring.get(j);
      } finally {
        j++;
        if (j == ring.size()) j = insertMin ? -1 : 0;
        if (j == startIndex) j = -2; // end iteration
      }
    }
  };
}
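/*
 * To make the wrap-around semantics above concrete, here is a minimal, self-contained sketch of
 * the same pattern over plain integer "tokens". RingIteratorDemo and its linear scan for the
 * start index are illustrative stand-ins (assuming only Guava's AbstractIterator on the
 * classpath), not Cassandra's Token/firstTokenIndex implementation.
 */
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import com.google.common.collect.AbstractIterator;

public class RingIteratorDemo {
  /** Iterates once around a sorted ring, starting at the first token >= start. */
  static Iterator<Integer> ringIterator(final List<Integer> ring, int start) {
    int i = 0;
    while (i < ring.size() && ring.get(i) < start) i++;
    final int startIndex = (i == ring.size()) ? 0 : i; // wrap if start is past the last token
    return new AbstractIterator<Integer>() {
      int j = startIndex;
      boolean done = false;

      @Override
      protected Integer computeNext() {
        if (done) return endOfData();
        Integer token = ring.get(j);
        j = (j + 1) % ring.size();
        if (j == startIndex) done = true; // completed a full circle
        return token;
      }
    };
  }

  public static void main(String[] args) {
    Iterator<Integer> it = ringIterator(Arrays.asList(10, 20, 30, 40), 25);
    while (it.hasNext()) System.out.print(it.next() + " "); // prints: 30 40 10 20
  }
}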
/**
 * @param path path to first element
 * @param pathTo path to second element
 * @return environment in which the generic parameters in the path to the first element are bound
 *     to those in the path to the second element, or null on error
 */
public static ResolvedName mapGenericParameters(
    final Deque<? extends INamedElement> path, final Deque<? extends INamedElement> pathTo) {
  // Construct an environment in which the current function's generic parameters
  // are bound to those of the eventual override.
  final Deque<List<? extends ResolvedName>> tableau =
      new LinkedList<>(ResolvedName.fromNamedElement(path.getLast()).tableau());
  final Iterator<? extends INamedElement> pathIt = path.descendingIterator();
  while (pathIt.hasNext()) {
    pathIt.next();
    tableau.removeLast();
  }
  CachedIterator<? extends INamedElement> itPathTo = null;
  CachedIterator<? extends IGenericParameter> itGPTo = null;
  boolean end = false;
  { // Initialise iterators into direct override's generic parameters.
    boolean foundValid = false;
    itPathTo = Iterators.cached(pathTo.iterator());
    while (!foundValid && itPathTo.hasItem()) {
      itGPTo = Iterators.cached(itPathTo.item().genericParameters().iterator());
      while (!foundValid && itGPTo.hasItem()) {
        foundValid = true;
        if (!foundValid) itGPTo.next();
      }
      if (!foundValid) itPathTo.next();
    }
    if (!foundValid) end = true;
  }
  for (final INamedElement elt : path) {
    final List<ResolvedName> row = new ArrayList<>();
    for (@SuppressWarnings("unused") final IGenericParameter genericParameter :
        elt.genericParameters()) {
      if (end) return null;
      row.add(ResolvedName.fromNamedElement(itGPTo.item()));
      { // Increment iterators into direct override's generic parameters.
        boolean init = true;
        boolean foundValid = false;
        if (!init) itPathTo = Iterators.cached(pathTo.iterator());
        while (!foundValid && itPathTo.hasItem()) {
          if (!init) itGPTo = Iterators.cached(itPathTo.item().genericParameters().iterator());
          while (!foundValid && itGPTo.hasItem()) {
            if (!init) foundValid = true;
            init = false;
            if (!foundValid) itGPTo.next();
          }
          if (!foundValid) itPathTo.next();
        }
        if (!foundValid) end = true;
      }
    }
    tableau.add(row);
  }
  if (!end) return null;
  return ResolvedName.newNameReference(path.getLast(), tableau);
}
/*
 * Get the exponentially-decayed approximate counts of values in multiple buckets. The elements in
 * the provided list denote the upper bound of each of the buckets and must be sorted in ascending
 * order.
 *
 * The approximate count in each bucket is guaranteed to be within 2 * totalCount * maxError of
 * the real count.
 */
public List<Bucket> getHistogram(List<Long> bucketUpperBounds) {
  checkArgument(
      Ordering.natural().isOrdered(bucketUpperBounds),
      "buckets must be sorted in increasing order");

  final ImmutableList.Builder<Bucket> builder = ImmutableList.builder();
  final PeekingIterator<Long> iterator = Iterators.peekingIterator(bucketUpperBounds.iterator());

  final AtomicDouble sum = new AtomicDouble();
  final AtomicDouble lastSum = new AtomicDouble();

  // for computing weighted average of values in bucket
  final AtomicDouble bucketWeightedSum = new AtomicDouble();

  final double normalizationFactor = weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read()));

  postOrderTraversal(
      root,
      new Callback() {
        @Override
        public boolean process(Node node) {
          while (iterator.hasNext() && iterator.peek() <= node.getUpperBound()) {
            double bucketCount = sum.get() - lastSum.get();

            Bucket bucket =
                new Bucket(
                    bucketCount / normalizationFactor, bucketWeightedSum.get() / bucketCount);

            builder.add(bucket);
            lastSum.set(sum.get());
            bucketWeightedSum.set(0);
            iterator.next();
          }

          bucketWeightedSum.addAndGet(node.getMiddle() * node.weightedCount);
          sum.addAndGet(node.weightedCount);
          return iterator.hasNext();
        }
      });

  while (iterator.hasNext()) {
    double bucketCount = sum.get() - lastSum.get();
    Bucket bucket =
        new Bucket(bucketCount / normalizationFactor, bucketWeightedSum.get() / bucketCount);

    builder.add(bucket);
    iterator.next();
  }

  return builder.build();
}
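/*
 * A hedged usage sketch for getHistogram, assuming airlift's io.airlift.stats.QuantileDigest API
 * (a maxError constructor, add(long), and a nested Bucket with getCount()/getMean()); accessor
 * names may differ across versions.
 */
import java.util.concurrent.ThreadLocalRandom;
import com.google.common.collect.ImmutableList;
import io.airlift.stats.QuantileDigest;

public class HistogramDemo {
  public static void main(String[] args) {
    QuantileDigest digest = new QuantileDigest(0.01); // maxError = 1%
    for (int i = 0; i < 10_000; i++) {
      digest.add(ThreadLocalRandom.current().nextLong(0, 1_000)); // simulated latencies
    }
    // Bucket upper bounds must be sorted in ascending order.
    for (QuantileDigest.Bucket bucket :
        digest.getHistogram(ImmutableList.of(250L, 500L, 750L, 1_000L))) {
      System.out.printf("count=%.1f mean=%.1f%n", bucket.getCount(), bucket.getMean());
    }
  }
}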
private Iterator4 checkDuplicates(CompositeIterator4 executeAllCandidates) {
  return Iterators.filter(
      executeAllCandidates,
      new Predicate4() {
        private TreeInt ids = new TreeInt(0);

        public boolean match(Object current) {
          int id = ((Integer) current).intValue();
          if (ids.find(id) != null) {
            return false;
          }
          ids = (TreeInt) ids.add(new TreeInt(id));
          return true;
        }
      });
}
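/*
 * The same stateful-predicate deduplication can be expressed with Guava's Iterators.filter and a
 * HashSet. This is an illustrative equivalent of the pattern, not db4o's Iterator4 API.
 */
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import com.google.common.collect.Iterators;

public class DedupDemo {
  /** Lazily filters out values that have already been seen, like checkDuplicates above. */
  static <T> Iterator<T> distinct(Iterator<T> source) {
    final Set<T> seen = new HashSet<>();
    // Guava's filter is lazy: 'seen' grows as the caller advances the iterator.
    return Iterators.filter(source, seen::add); // Set.add returns false for duplicates
  }

  public static void main(String[] args) {
    Iterator<Integer> it = distinct(Iterators.forArray(1, 2, 1, 3, 2));
    it.forEachRemaining(System.out::println); // prints 1, 2, 3
  }
}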
/**
 * Gets the values at the specified quantiles +/- maxError. The list of quantiles must be sorted
 * in increasing order, and each value must be in the range [0, 1].
 */
public List<Long> getQuantiles(List<Double> quantiles) {
  checkArgument(
      Ordering.natural().isOrdered(quantiles), "quantiles must be sorted in increasing order");
  for (double quantile : quantiles) {
    checkArgument(quantile >= 0 && quantile <= 1, "quantile must be in the range [0, 1]");
  }

  final ImmutableList.Builder<Long> builder = ImmutableList.builder();
  final PeekingIterator<Double> iterator = Iterators.peekingIterator(quantiles.iterator());

  postOrderTraversal(
      root,
      new Callback() {
        private double sum = 0;

        @Override
        public boolean process(Node node) {
          sum += node.weightedCount;

          while (iterator.hasNext() && sum > iterator.peek() * weightedCount) {
            iterator.next();

            // we know the max value ever seen, so cap the percentile to provide better error
            // bounds in this case
            long value = Math.min(node.getUpperBound(), max);
            builder.add(value);
          }

          return iterator.hasNext();
        }
      });

  // we finished the traversal without consuming all quantiles. This means the remaining quantiles
  // correspond to the max known value
  while (iterator.hasNext()) {
    builder.add(max);
    iterator.next();
  }

  return builder.build();
}
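/*
 * A matching sketch for quantile queries, under the same assumptions about the QuantileDigest
 * API as the histogram example above.
 */
import com.google.common.collect.ImmutableList;
import io.airlift.stats.QuantileDigest;

public class QuantilesDemo {
  public static void main(String[] args) {
    QuantileDigest digest = new QuantileDigest(0.01);
    for (long v = 1; v <= 1_000; v++) {
      digest.add(v);
    }
    // Quantiles must be sorted and within [0, 1]; results are accurate to +/- maxError.
    System.out.println(digest.getQuantiles(ImmutableList.of(0.5, 0.9, 0.99))); // ~[500, 900, 990]
  }
}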
/**
 * Group files with similar min timestamp into buckets. Files with recent min timestamps are
 * grouped together into buckets designated to short timespans while files with older timestamps
 * are grouped into buckets representing longer timespans.
 *
 * @param files pairs consisting of a file and its min timestamp
 * @param timeUnit size of the smallest time window, in the same units as the timestamps
 * @param base number of windows of one size that fit into a window of the next (larger) size
 * @param now the current time, in the same units as the timestamps
 * @return a list of buckets of files. The list is ordered such that the files with newest
 *     timestamps come first. Each bucket is also a list of files ordered from newest to oldest.
 */
@VisibleForTesting
static <T> List<List<T>> getBuckets(
    Collection<Pair<T, Long>> files, long timeUnit, int base, long now) {
  // Sort files by age. Newest first.
  final List<Pair<T, Long>> sortedFiles = Lists.newArrayList(files);
  Collections.sort(
      sortedFiles,
      Collections.reverseOrder(
          new Comparator<Pair<T, Long>>() {
            public int compare(Pair<T, Long> p1, Pair<T, Long> p2) {
              return p1.right.compareTo(p2.right);
            }
          }));

  List<List<T>> buckets = Lists.newArrayList();
  Target target = getInitialTarget(now, timeUnit);
  PeekingIterator<Pair<T, Long>> it = Iterators.peekingIterator(sortedFiles.iterator());

  outerLoop:
  while (it.hasNext()) {
    while (!target.onTarget(it.peek().right)) {
      // If the file is too new for the target, skip it.
      if (target.compareToTimestamp(it.peek().right) < 0) {
        it.next();
        if (!it.hasNext()) break outerLoop;
      } else // If the file is too old for the target, switch targets.
        target = target.nextTarget(base);
    }

    List<T> bucket = Lists.newArrayList();
    while (target.onTarget(it.peek().right)) {
      bucket.add(it.next().left);
      if (!it.hasNext()) break;
    }
    buckets.add(bucket);
  }

  return buckets;
}
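/*
 * A hedged usage sketch. It assumes the method above lives in Cassandra's
 * DateTieredCompactionStrategy (it is package-private, so this demo would need to share its
 * package) and uses org.apache.cassandra.utils.Pair. File names, window size, and the expected
 * grouping are illustrative.
 */
import java.util.Arrays;
import java.util.List;
import org.apache.cassandra.utils.Pair;

public class BucketsDemo {
  public static void main(String[] args) {
    long now = 1_000_000L;
    List<Pair<String, Long>> files =
        Arrays.asList(
            Pair.create("sstable-a", now - 10), // very recent
            Pair.create("sstable-b", now - 50),
            Pair.create("sstable-c", now - 5_000), // older
            Pair.create("sstable-d", now - 60_000)); // oldest
    // Base window of 100 time units; each coarser window is 4x larger.
    List<List<String>> buckets = DateTieredCompactionStrategy.getBuckets(files, 100, 4, now);
    System.out.println(buckets); // e.g. [[sstable-a, sstable-b], [sstable-c], [sstable-d]]
  }
}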
public MutationState createTable(CreateTableStatement statement, byte[][] splits)
    throws SQLException {
  PTableType tableType = statement.getTableType();
  boolean isView = tableType == PTableType.VIEW;
  if (isView && !statement.getProps().isEmpty()) {
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_TABLE_CONFIG)
        .build()
        .buildException();
  }
  connection.rollback();
  boolean wasAutoCommit = connection.getAutoCommit();
  try {
    connection.setAutoCommit(false);
    TableName tableNameNode = statement.getTableName();
    String schemaName = tableNameNode.getSchemaName();
    String tableName = tableNameNode.getTableName();

    PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
    String pkName = null;
    Set<String> pkColumns = Collections.<String>emptySet();
    Iterator<String> pkColumnsIterator = Iterators.emptyIterator();
    if (pkConstraint != null) {
      pkColumns = pkConstraint.getColumnNames();
      pkColumnsIterator = pkColumns.iterator();
      pkName = pkConstraint.getName();
    }

    List<ColumnDef> colDefs = statement.getColumnDefs();
    List<PColumn> columns = Lists.newArrayListWithExpectedSize(colDefs.size());
    PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
    int columnOrdinal = 0;
    Map<String, PName> familyNames = Maps.newLinkedHashMap();
    boolean isPK = false;
    for (ColumnDef colDef : colDefs) {
      if (colDef.isPK()) {
        if (isPK) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
              .setColumnName(colDef.getColumnDefName().getColumnName().getName())
              .build()
              .buildException();
        }
        isPK = true;
      }
      PColumn column = newColumn(columnOrdinal++, colDef, pkConstraint);
      if (SchemaUtil.isPKColumn(column)) {
        // TODO: remove this constraint?
        if (!pkColumns.isEmpty()
            && !column.getName().getString().equals(pkColumnsIterator.next())) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .setColumnName(column.getName().getString())
              .build()
              .buildException();
        }
      }
      columns.add(column);
      if (colDef.getDataType() == PDataType.BINARY && colDefs.size() > 1) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.BINARY_IN_ROW_KEY)
            .setSchemaName(schemaName)
            .setTableName(tableName)
            .setColumnName(column.getName().getString())
            .build()
            .buildException();
      }
      if (column.getFamilyName() != null) {
        familyNames.put(column.getFamilyName().getString(), column.getFamilyName());
      }
    }
    if (!isPK && pkColumns.isEmpty()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
          .setSchemaName(schemaName)
          .setTableName(tableName)
          .build()
          .buildException();
    }

    List<Pair<byte[], Map<String, Object>>> familyPropList =
        Lists.newArrayListWithExpectedSize(familyNames.size());
    Map<String, Object> commonFamilyProps = Collections.emptyMap();
    Map<String, Object> tableProps = Collections.emptyMap();
    if (!statement.getProps().isEmpty()) {
      if (statement.isView()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES)
            .build()
            .buildException();
      }
      for (String familyName : statement.getProps().keySet()) {
        if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
          if (familyNames.get(familyName) == null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
                .setFamilyName(familyName)
                .build()
                .buildException();
          }
        }
      }
      commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
      tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());

      Collection<Pair<String, Object>> props =
          statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
      // Somewhat hacky way of determining if property is for HColumnDescriptor or
      // HTableDescriptor
      HColumnDescriptor defaultDescriptor =
          new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
      for (Pair<String, Object> prop : props) {
        if (defaultDescriptor.getValue(prop.getFirst()) != null) {
          commonFamilyProps.put(prop.getFirst(), prop.getSecond());
        } else {
          tableProps.put(prop.getFirst(), prop.getSecond());
        }
      }
    }

    for (PName familyName : familyNames.values()) {
      Collection<Pair<String, Object>> props = statement.getProps().get(familyName.getString());
      if (props.isEmpty()) {
        familyPropList.add(
            new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
      } else {
        Map<String, Object> combinedFamilyProps =
            Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
        combinedFamilyProps.putAll(commonFamilyProps);
        for (Pair<String, Object> prop : props) {
          combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
        }
        familyPropList.add(
            new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
      }
    }

    // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
    if (tableType == PTableType.SYSTEM) {
      PTable table =
          new PTableImpl(
              new PNameImpl(tableName),
              tableType,
              MetaDataProtocol.MIN_TABLE_TIMESTAMP,
              0,
              QueryConstants.SYSTEM_TABLE_PK_NAME,
              null,
              columns);
      connection.addTable(schemaName, table);
    }

    for (PColumn column : columns) {
      addColumnMutation(schemaName, tableName, column, colUpsert);
    }

    Integer saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
    if (saltBucketNum != null
        && (saltBucketNum <= 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM)
          .build()
          .buildException();
    }

    PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
    tableUpsert.setString(1, schemaName);
    tableUpsert.setString(2, tableName);
    tableUpsert.setString(3, tableType.getSerializedValue());
    tableUpsert.setInt(4, 0);
    tableUpsert.setInt(5, columnOrdinal);
    if (saltBucketNum != null) {
      tableUpsert.setInt(6, saltBucketNum);
    } else {
      tableUpsert.setNull(6, Types.INTEGER);
    }
    tableUpsert.setString(7, pkName);
    tableUpsert.execute();

    final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
    connection.rollback();

    MetaDataMutationResult result =
        connection
            .getQueryServices()
            .createTable(tableMetaData, isView, tableProps, familyPropList, splits);
    MutationCode code = result.getMutationCode();
    switch (code) {
      case TABLE_ALREADY_EXISTS:
        connection.addTable(schemaName, result.getTable());
        if (!statement.ifNotExists()) {
          throw new TableAlreadyExistsException(schemaName, tableName);
        }
        break;
      case NEWER_TABLE_FOUND:
        // TODO: add table if in result?
        throw new NewerTableAlreadyExistsException(schemaName, tableName);
      case UNALLOWED_TABLE_MUTATION:
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
            .setSchemaName(schemaName)
            .setTableName(tableName)
            .build()
            .buildException();
      default:
        PTable table =
            new PTableImpl(
                new PNameImpl(tableName),
                tableType,
                result.getMutationTime(),
                0,
                pkName,
                saltBucketNum,
                columns);
        connection.addTable(schemaName, table);
        if (tableType == PTableType.USER) {
          connection.setAutoCommit(true);
          // Delete everything in the column. You'll still be able to do queries at earlier
          // timestamps
          Long scn = connection.getSCN();
          long ts = (scn == null ? result.getMutationTime() : scn);
          PSchema schema =
              new PSchemaImpl(
                  schemaName,
                  ImmutableMap.<String, PTable>of(table.getName().getString(), table));
          TableRef tableRef = new TableRef(null, table, schema, ts);
          byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
          MutationPlan plan = new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
          return connection.getQueryServices().updateData(plan);
        }
        break;
    }
    return new MutationState(0, connection);
  } finally {
    connection.setAutoCommit(wasAutoCommit);
  }
}
@Override
public Iterator<TitanElement> execute(final StandardElementQuery query) {
  Iterator<TitanElement> iter = null;
  if (!query.hasIndex()) {
    log.warn(
        "Query requires iterating over all vertices [{}]. For better performance, use indexes",
        query.getCondition());
    if (query.getType() == StandardElementQuery.Type.VERTEX) {
      iter = (Iterator) getVertices().iterator();
    } else if (query.getType() == StandardElementQuery.Type.EDGE) {
      iter = (Iterator) getEdges().iterator();
    } else throw new IllegalArgumentException("Unexpected type: " + query.getType());
    iter =
        Iterators.filter(
            iter,
            new Predicate<TitanElement>() {
              @Override
              public boolean apply(@Nullable TitanElement element) {
                return query.matches(element);
              }
            });
  } else {
    String index = query.getIndex();
    log.debug("Answering query [{}] with index {}", query, index);
    // Filter out everything not covered by the index
    KeyCondition<TitanKey> condition = query.getCondition();
    // ASSUMPTION: query is an AND of KeyAtom
    Preconditions.checkArgument(condition instanceof KeyAnd);
    Preconditions.checkArgument(condition.hasChildren());
    List<KeyCondition<TitanKey>> newConds = Lists.newArrayList();

    boolean needsFilter = false;
    for (KeyCondition<TitanKey> c : condition.getChildren()) {
      KeyAtom<TitanKey> atom = (KeyAtom<TitanKey>) c;
      if (getGraph()
              .getIndexInformation(index)
              .supports(atom.getKey().getDataType(), atom.getRelation())
          && atom.getKey().hasIndex(index, query.getType().getElementType())
          && atom.getCondition() != null) {
        newConds.add(atom);
      } else {
        log.debug(
            "Filtered out atom [{}] from query [{}] because it is not indexed or not covered by the index",
            atom,
            query);
        needsFilter = true;
      }
    }
    Preconditions.checkArgument(
        !newConds.isEmpty(), "Invalid index assignment [%s] to query [%s]", index, query);

    final StandardElementQuery indexQuery;
    if (needsFilter) {
      Preconditions.checkArgument(
          !newConds.isEmpty(),
          "Query has been assigned an index [%s] in error: %s",
          query.getIndex(),
          query);
      indexQuery =
          new StandardElementQuery(
              query.getType(),
              KeyAnd.of(newConds.toArray(new KeyAtom[newConds.size()])),
              query.getLimit(),
              index);
    } else {
      indexQuery = query;
    }
    try {
      iter =
          Iterators.transform(
              indexCache
                  .get(
                      indexQuery,
                      new Callable<List<Object>>() {
                        @Override
                        public List<Object> call() throws Exception {
                          return graph.elementQuery(indexQuery, txHandle);
                        }
                      })
                  .iterator(),
              new Function<Object, TitanElement>() {
                @Nullable
                @Override
                public TitanElement apply(@Nullable Object id) {
                  Preconditions.checkNotNull(id);
                  if (id instanceof Long) return (TitanVertex) getVertex((Long) id);
                  else if (id instanceof RelationIdentifier)
                    return (TitanElement) getEdge((RelationIdentifier) id);
                  else throw new IllegalArgumentException("Unexpected id type: " + id);
                }
              });
    } catch (Exception e) {
      throw new TitanException("Could not call index", e);
    }
    if (needsFilter) {
      iter =
          Iterators.filter(
              iter,
              new Predicate<TitanElement>() {
                @Override
                public boolean apply(@Nullable TitanElement element) {
                  return element != null
                      && !element.isRemoved()
                      && !isDeleted(query, element)
                      && query.matches(element);
                }
              });
    } else {
      iter =
          Iterators.filter(
              iter,
              new Predicate<TitanElement>() {
                @Override
                public boolean apply(@Nullable TitanElement element) {
                  return element != null && !element.isRemoved() && !isDeleted(query, element);
                }
              });
    }
  }
  return iter;
}
@Override
public Iterator<TitanElement> getNew(final StandardElementQuery query) {
  Preconditions.checkArgument(
      query.getType() == StandardElementQuery.Type.VERTEX
          || query.getType() == StandardElementQuery.Type.EDGE);
  if (query.getType() == StandardElementQuery.Type.VERTEX && hasModifications()) {
    // Collect all keys from the query - ASSUMPTION: query is an AND of KeyAtom
    final Set<TitanKey> keys = Sets.newHashSet();
    KeyAtom<TitanKey> standardIndexKey = null;
    for (KeyCondition<TitanKey> cond : query.getCondition().getChildren()) {
      KeyAtom<TitanKey> atom = (KeyAtom<TitanKey>) cond;
      if (atom.getRelation() == Cmp.EQUAL && isVertexIndexProperty(atom.getKey()))
        standardIndexKey = atom;
      keys.add(atom.getKey());
    }
    Iterator<TitanVertex> vertices;
    if (standardIndexKey == null) {
      Set<TitanVertex> vertexSet = Sets.newHashSet();
      for (TitanRelation r :
          addedRelations.getView(
              new Predicate<InternalRelation>() {
                @Override
                public boolean apply(@Nullable InternalRelation relation) {
                  return keys.contains(relation.getType());
                }
              })) {
        vertexSet.add(((TitanProperty) r).getVertex());
      }
      for (TitanRelation r : deletedRelations.values()) {
        if (keys.contains(r.getType())) {
          TitanVertex v = ((TitanProperty) r).getVertex();
          if (!v.isRemoved()) vertexSet.add(v);
        }
      }
      vertices = vertexSet.iterator();
    } else {
      vertices =
          Iterators.transform(
              newVertexIndexEntries
                  .get(standardIndexKey.getCondition(), standardIndexKey.getKey())
                  .iterator(),
              new Function<TitanProperty, TitanVertex>() {
                @Nullable
                @Override
                public TitanVertex apply(@Nullable TitanProperty o) {
                  return o.getVertex();
                }
              });
    }
    return (Iterator)
        Iterators.filter(
            vertices,
            new Predicate<TitanVertex>() {
              @Override
              public boolean apply(@Nullable TitanVertex vertex) {
                return query.matches(vertex);
              }
            });
  } else if (query.getType() == StandardElementQuery.Type.EDGE && !addedRelations.isEmpty()) {
    return (Iterator)
        addedRelations
            .getView(
                new Predicate<InternalRelation>() {
                  @Override
                  public boolean apply(@Nullable InternalRelation relation) {
                    return (relation instanceof TitanEdge)
                        && !relation.isHidden()
                        && query.matches(relation);
                  }
                })
            .iterator();
  } else throw new IllegalArgumentException("Unexpected type: " + query.getType());
}
@Override
public Iterator<TitanRelation> execute(final VertexCentricQuery query) {
  if (query.getVertex().isNew()) return Iterators.emptyIterator();

  final EdgeSerializer edgeSerializer = graph.getEdgeSerializer();
  FittedSliceQuery sq = edgeSerializer.getQuery(query);
  final boolean fittedQuery = sq.isFitted();
  final InternalVertex v = query.getVertex();
  final boolean needsFiltering = !sq.isFitted() || !deletedRelations.isEmpty();
  if (needsFiltering && sq.hasLimit())
    sq = new FittedSliceQuery(sq, QueryUtil.updateLimit(sq.getLimit(), 1.1));

  Iterable<TitanRelation> result = null;
  double limitMultiplier = 1.0;
  int previousDiskSize = 0;
  boolean finished;
  do {
    finished = true;
    Iterable<Entry> iter = null;
    if (v instanceof CacheVertex) {
      CacheVertex cv = (CacheVertex) v;
      iter =
          cv.loadRelations(
              sq,
              new Retriever<SliceQuery, List<Entry>>() {
                @Override
                public List<Entry> get(SliceQuery query) {
                  return graph.edgeQuery(v.getID(), query, txHandle);
                }
              });
    } else {
      iter = graph.edgeQuery(v.getID(), sq, txHandle);
    }

    result =
        Iterables.transform(
            iter,
            new Function<Entry, TitanRelation>() {
              @Nullable
              @Override
              public TitanRelation apply(@Nullable Entry entry) {
                return edgeSerializer.readRelation(v, entry);
              }
            });
    if (needsFiltering) {
      result =
          Iterables.filter(
              result,
              new Predicate<TitanRelation>() {
                @Override
                public boolean apply(@Nullable TitanRelation relation) {
                  // Filter out updated and deleted relations
                  return (relation == ((InternalRelation) relation).it()
                          && !deletedRelations.containsKey(Long.valueOf(relation.getID())))
                      && (fittedQuery || query.matches(relation));
                }
              });
    }

    // Determine termination
    if (needsFiltering && query.hasLimit()) {
      if (!IterablesUtil.sizeLargerOrEqualThan(result, query.getLimit())) {
        int currentDiskSize = IterablesUtil.size(iter);
        if (currentDiskSize > previousDiskSize) {
          finished = false;
          previousDiskSize = currentDiskSize;
          limitMultiplier *= 2;
          sq = new FittedSliceQuery(sq, QueryUtil.updateLimit(sq.getLimit(), limitMultiplier));
        }
      }
    }
  } while (!finished);
  return result.iterator();
}
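/*
 * The termination logic above retries the slice query with a doubled limit whenever filtering
 * leaves too few results but the backend still returned new rows. Below is a generic,
 * self-contained sketch of that retry pattern; queryWithFilter and its parameters are
 * hypothetical illustrations, not Titan API.
 */
import java.util.List;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class LimitRetryDemo {
  /**
   * Re-runs a limited query with an increasing limit until enough results survive filtering, or
   * until a larger limit stops yielding new raw rows.
   */
  static <T> List<T> queryWithFilter(IntFunction<List<T>> query, Predicate<T> filter, int wanted) {
    int limit = (int) Math.ceil(wanted * 1.1); // small head-room, as in the method above
    int previousRawSize = 0;
    while (true) {
      List<T> raw = query.apply(limit);
      List<T> filtered = raw.stream().filter(filter).collect(Collectors.toList());
      // Stop once we have enough, or when raising the limit no longer finds new raw rows.
      if (filtered.size() >= wanted || raw.size() <= previousRawSize) {
        return filtered;
      }
      previousRawSize = raw.size();
      limit *= 2;
    }
  }
}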
@Override
public UnmodifiableIterator<DocumentMapper> iterator() {
  return Iterators.unmodifiableIterator(mappers.values().iterator());
}