@Override public Query apply(Function function, Context context) throws IOException { Symbol left = function.arguments().get(0); Symbol collectionSymbol = function.arguments().get(1); Preconditions.checkArgument( DataTypes.isCollectionType(collectionSymbol.valueType()), "invalid argument for ANY expression"); if (left.symbolType().isValueSymbol()) { // 1 = any (array_col) - simple eq assert collectionSymbol.symbolType().isReference() : "no reference found in ANY expression"; return applyArrayReference((Reference) collectionSymbol, (Literal) left, context); } else if (collectionSymbol.symbolType().isValueSymbol()) { assert left.symbolType().isReference() : "no reference found in ANY expression"; return applyArrayLiteral((Reference) left, (Literal) collectionSymbol, context); } else { // should never get here - 2 literal arguments must have been normalized away yet return null; } }
/**
 * Plans a plain (non-grouping) select over a single table as a collect phase plus an
 * optional local merge phase, returned as a {@link QueryAndFetch}.
 *
 * <p>Three cases are planned:
 * <ul>
 *   <li>the table is not the root relation (used as a sub-query source, e.g. insert-from-query):
 *       collect only, no merge node;</li>
 *   <li>the query has a limit/offset or an ORDER BY: collect with a shard-side TopN projection,
 *       then a handler-side TopN merge (sorted merge when ORDER BY is present);</li>
 *   <li>otherwise: plain collect plus a plain local merge.</li>
 * </ul>
 *
 * @param table the queried table providing the {@code QuerySpec}
 * @param whereClause the (already analyzed) where clause for the collect node
 * @param tableRelation used to resolve output/order-by symbols against the table
 * @param context consumer context supplying the planner context, job id and root relation
 * @return the planned collect/merge pair (merge node is {@code null} for the sub-query case)
 */
private PlannedAnalyzedRelation normalSelect(
    QueriedTable table,
    WhereClause whereClause,
    TableRelation tableRelation,
    ConsumerContext context) {
  QuerySpec querySpec = table.querySpec();
  TableInfo tableInfo = tableRelation.tableInfo();

  // Resolve the output symbols; for non-system tables, convert references to doc-lookups
  // where possible via DocReferenceConverter.
  List<Symbol> outputSymbols;
  if (tableInfo.schemaInfo().systemSchema()) {
    outputSymbols = tableRelation.resolve(querySpec.outputs());
  } else {
    outputSymbols = new ArrayList<>(querySpec.outputs().size());
    for (Symbol symbol : querySpec.outputs()) {
      outputSymbols.add(
          DocReferenceConverter.convertIfPossible(tableRelation.resolve(symbol), tableInfo));
    }
  }

  CollectNode collectNode;
  MergeNode mergeNode = null; // stays null when collecting directly for a parent relation
  OrderBy orderBy = querySpec.orderBy();

  if (context.rootRelation() != table) {
    // insert directly from shards
    assert !querySpec.isLimited()
        : "insert from sub query with limit or order by is not supported. "
            + "Analyzer should have thrown an exception already.";
    ImmutableList<Projection> projections = ImmutableList.<Projection>of();
    collectNode =
        PlanNodeBuilder.collect(
            context.plannerContext().jobId(),
            tableInfo,
            context.plannerContext(),
            whereClause,
            outputSymbols,
            projections);
  } else if (querySpec.isLimited() || orderBy != null) {
    /*
     * select id, name, order by id, date
     *
     * toCollect: [id, name, date] // includes order by symbols, that aren't already selected
     * allOutputs: [in(0), in(1), in(2)] // for topN projection on shards/collectNode
     * orderByInputs: [in(0), in(2)] // for topN projection on shards/collectNode AND handler
     * finalOutputs: [in(0), in(1)] // for topN output on handler -> changes output to what
     * should be returned.
     */
    List<Symbol> toCollect;
    List<Symbol> orderByInputColumns = null; // only built when an ORDER BY is present
    if (orderBy != null) {
      List<Symbol> orderBySymbols = tableRelation.resolve(orderBy.orderBySymbols());
      toCollect = new ArrayList<>(outputSymbols.size() + orderBySymbols.size());
      toCollect.addAll(outputSymbols);
      // note: can only de-dup order by symbols due to non-deterministic functions like select
      // random(), random()
      for (Symbol orderBySymbol : orderBySymbols) {
        if (!toCollect.contains(orderBySymbol)) {
          toCollect.add(orderBySymbol);
        }
      }
      // Map each order-by symbol to its position within toCollect as an InputColumn.
      orderByInputColumns = new ArrayList<>();
      for (Symbol symbol : orderBySymbols) {
        orderByInputColumns.add(new InputColumn(toCollect.indexOf(symbol), symbol.valueType()));
      }
    } else {
      toCollect = new ArrayList<>(outputSymbols.size());
      toCollect.addAll(outputSymbols);
    }

    List<Symbol> allOutputs = new ArrayList<>(toCollect.size()); // outputs from collector
    for (int i = 0; i < toCollect.size(); i++) {
      allOutputs.add(new InputColumn(i, toCollect.get(i).valueType()));
    }
    List<Symbol> finalOutputs =
        new ArrayList<>(outputSymbols.size()); // final outputs on handler after sort
    for (int i = 0; i < outputSymbols.size(); i++) {
      finalOutputs.add(new InputColumn(i, outputSymbols.get(i).valueType()));
    }

    // if we have an offset we have to get as much docs from every node as we have offset+limit
    // otherwise results will be wrong
    TopNProjection tnp;
    int limit = firstNonNull(querySpec.limit(), Constants.DEFAULT_SELECT_LIMIT);
    if (orderBy == null) {
      tnp = new TopNProjection(querySpec.offset() + limit, 0);
    } else {
      tnp =
          new TopNProjection(
              querySpec.offset() + limit,
              0,
              orderByInputColumns,
              orderBy.reverseFlags(),
              orderBy.nullsFirst());
    }
    tnp.outputs(allOutputs);
    collectNode =
        PlanNodeBuilder.collect(
            context.plannerContext().jobId(),
            tableInfo,
            context.plannerContext(),
            whereClause,
            toCollect,
            ImmutableList.<Projection>of(tnp));

    // MERGE: handler-side TopN applies the real limit/offset and trims the
    // collected columns down to the final select-list outputs.
    tnp = new TopNProjection(limit, querySpec.offset());
    tnp.outputs(finalOutputs);
    if (orderBy == null) {
      // no sorting needed
      mergeNode =
          PlanNodeBuilder.localMerge(
              context.plannerContext().jobId(),
              ImmutableList.<Projection>of(tnp),
              collectNode,
              context.plannerContext());
    } else {
      // no order by needed in TopN as we already sorted on collector
      // and we merge sorted with SortedBucketMerger
      mergeNode =
          PlanNodeBuilder.sortedLocalMerge(
              context.plannerContext().jobId(),
              ImmutableList.<Projection>of(tnp),
              orderBy,
              allOutputs,
              orderByInputColumns,
              collectNode,
              context.plannerContext());
    }
  } else {
    // Neither limit nor order by: plain collect followed by a projection-less local merge.
    collectNode =
        PlanNodeBuilder.collect(
            context.plannerContext().jobId(),
            tableInfo,
            context.plannerContext(),
            whereClause,
            outputSymbols,
            ImmutableList.<Projection>of());
    mergeNode =
        PlanNodeBuilder.localMerge(
            context.plannerContext().jobId(),
            ImmutableList.<Projection>of(),
            collectNode,
            context.plannerContext());
  }
  return new QueryAndFetch(collectNode, mergeNode, context.plannerContext().jobId());
}