@Override
public PlannedAnalyzedRelation visitQueriedTable(QueriedTable table, ConsumerContext context) {
    TableRelation tableRelation = table.tableRelation();
    // filtering on _version is not supported for plain selects
    if (table.querySpec().where().hasVersions()) {
        context.validationException(new VersionInvalidException());
        return null;
    }
    TableInfo tableInfo = tableRelation.tableInfo();
    // system tables are not backed by Lucene, so reject predicates only Lucene could execute
    if (tableInfo.schemaInfo().systemSchema() && table.querySpec().where().hasQuery()) {
        ensureNoLuceneOnlyPredicates(table.querySpec().where().query());
    }
    if (table.querySpec().hasAggregates()) {
        return GlobalAggregateConsumer.globalAggregates(
                functions, table, tableRelation, table.querySpec().where(), context);
    } else {
        return normalSelect(table, table.querySpec().where(), tableRelation, context);
    }
}
@Test
public void testDeleteWhere() throws Exception {
    DeleteAnalyzedStatement statement = analyze("delete from users where name='Trillian'");
    TableRelation tableRelation = (TableRelation) statement.analyzedRelation;
    TableInfo tableInfo = tableRelation.tableInfo();
    // assertThat takes the actual value first, then the matcher
    assertThat(tableInfo.ident(), equalTo(TEST_DOC_TABLE_IDENT));
    assertThat(tableInfo.rowGranularity(), is(RowGranularity.DOC));

    Function whereClause = (Function) statement.whereClauses.get(0).query();
    assertEquals(EqOperator.NAME, whereClause.info().ident().name());
    assertFalse(whereClause.info().type() == FunctionInfo.Type.AGGREGATE);

    assertThat(tableRelation.resolve(whereClause.arguments().get(0)),
            IsInstanceOf.instanceOf(Reference.class));
    assertLiteralSymbol(whereClause.arguments().get(1), "Trillian");
}
private CollectPhase createCollectNode(Planner.Context plannerContext, boolean keepContextForFetcher) {
    TableInfo tableInfo = docSchemaInfo.getTableInfo("characters");
    ReferenceInfo docIdRefInfo = tableInfo.getReferenceInfo(new ColumnIdent("_docid"));
    Symbol docIdRef = new Reference(docIdRefInfo);
    List<Symbol> toCollect = ImmutableList.of(docIdRef);

    CollectPhase collectNode = new CollectPhase(
            UUID.randomUUID(),
            plannerContext.nextExecutionPhaseId(),
            "collect",
            tableInfo.getRouting(WhereClause.MATCH_ALL, null),
            toCollect,
            ImmutableList.<Projection>of());
    collectNode.maxRowGranularity(RowGranularity.DOC);
    collectNode.keepContextForFetcher(keepContextForFetcher);
    plannerContext.allocateJobSearchContextIds(collectNode.routing());
    return collectNode;
}
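// The collect phase above gathers only the internal _docid column and, via
// keepContextForFetcher, keeps the search context open so a later fetch phase can
// load the remaining columns by doc id. Below is a minimal, self-contained sketch
// of that query-then-fetch idea using plain Java collections instead of Crate's
// planner classes; every name in it (Row, store, the phases) is a hypothetical
// illustration, not Crate API.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Hypothetical sketch: rank cheaply by doc id + sort key first, fetch full rows last. */
public class QueryThenFetchSketch {

    record Row(int docId, String name, long timestamp) {}

    public static void main(String[] args) {
        Map<Integer, Row> store = new HashMap<>();
        store.put(0, new Row(0, "Arthur", 3L));
        store.put(1, new Row(1, "Trillian", 1L));
        store.put(2, new Row(2, "Ford", 2L));

        // phase 1 (collect): gather only the doc id plus the sort key, never the full row
        List<int[]> collected = new ArrayList<>();        // each entry: [docId, sortKey]
        for (Row r : store.values()) {
            collected.add(new int[]{r.docId(), (int) r.timestamp()});
        }

        // TopN on the cheap representation (limit 2, ordered by timestamp)
        collected.sort(Comparator.comparingInt(a -> a[1]));
        List<int[]> top = collected.subList(0, 2);

        // phase 2 (fetch): resolve only the surviving doc ids to full rows
        for (int[] hit : top) {
            System.out.println(store.get(hit[0]));
        }
    }
}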
private PlannedAnalyzedRelation normalSelect(QueriedTable table,
                                             WhereClause whereClause,
                                             TableRelation tableRelation,
                                             ConsumerContext context) {
    QuerySpec querySpec = table.querySpec();
    TableInfo tableInfo = tableRelation.tableInfo();

    List<Symbol> outputSymbols;
    if (tableInfo.schemaInfo().systemSchema()) {
        outputSymbols = tableRelation.resolve(querySpec.outputs());
    } else {
        outputSymbols = new ArrayList<>(querySpec.outputs().size());
        for (Symbol symbol : querySpec.outputs()) {
            outputSymbols.add(
                    DocReferenceConverter.convertIfPossible(tableRelation.resolve(symbol), tableInfo));
        }
    }
    CollectNode collectNode;
    MergeNode mergeNode = null;
    OrderBy orderBy = querySpec.orderBy();
    if (context.rootRelation() != table) {
        // insert directly from shards
        assert !querySpec.isLimited() : "insert from sub query with limit or order by is not supported. " +
                                        "Analyzer should have thrown an exception already.";

        ImmutableList<Projection> projections = ImmutableList.<Projection>of();
        collectNode = PlanNodeBuilder.collect(
                context.plannerContext().jobId(),
                tableInfo,
                context.plannerContext(),
                whereClause,
                outputSymbols,
                projections);
    } else if (querySpec.isLimited() || orderBy != null) {
        /*
         * select id, name ... order by id, date
         *
         * toCollect:      [id, name, date]       // includes order by symbols that aren't already selected
         * allOutputs:     [in(0), in(1), in(2)]  // for topN projection on shards/collectNode
         * orderByInputs:  [in(0), in(2)]         // for topN projection on shards/collectNode AND handler
         * finalOutputs:   [in(0), in(1)]         // for topN output on handler -> changes output to what
         *                                        //   should be returned.
         */
        List<Symbol> toCollect;
        List<Symbol> orderByInputColumns = null;
        if (orderBy != null) {
            List<Symbol> orderBySymbols = tableRelation.resolve(orderBy.orderBySymbols());
            toCollect = new ArrayList<>(outputSymbols.size() + orderBySymbols.size());
            toCollect.addAll(outputSymbols);
            // note: can only de-dup order by symbols, due to non-deterministic functions
            // like "select random(), random()"
            for (Symbol orderBySymbol : orderBySymbols) {
                if (!toCollect.contains(orderBySymbol)) {
                    toCollect.add(orderBySymbol);
                }
            }
            orderByInputColumns = new ArrayList<>();
            for (Symbol symbol : orderBySymbols) {
                orderByInputColumns.add(new InputColumn(toCollect.indexOf(symbol), symbol.valueType()));
            }
        } else {
            toCollect = new ArrayList<>(outputSymbols.size());
            toCollect.addAll(outputSymbols);
        }
        List<Symbol> allOutputs = new ArrayList<>(toCollect.size());       // outputs from collector
        for (int i = 0; i < toCollect.size(); i++) {
            allOutputs.add(new InputColumn(i, toCollect.get(i).valueType()));
        }
        List<Symbol> finalOutputs = new ArrayList<>(outputSymbols.size()); // final outputs on handler after sort
        for (int i = 0; i < outputSymbols.size(); i++) {
            finalOutputs.add(new InputColumn(i, outputSymbols.get(i).valueType()));
        }

        // if we have an offset, every node has to return offset+limit docs,
        // otherwise results will be wrong
        TopNProjection tnp;
        int limit = firstNonNull(querySpec.limit(), Constants.DEFAULT_SELECT_LIMIT);
        if (orderBy == null) {
            tnp = new TopNProjection(querySpec.offset() + limit, 0);
        } else {
            tnp = new TopNProjection(querySpec.offset() + limit, 0,
                    orderByInputColumns,
                    orderBy.reverseFlags(),
                    orderBy.nullsFirst());
        }
        tnp.outputs(allOutputs);
        collectNode = PlanNodeBuilder.collect(
                context.plannerContext().jobId(),
                tableInfo,
                context.plannerContext(),
                whereClause,
                toCollect,
                ImmutableList.<Projection>of(tnp));

        // MERGE
        tnp = new TopNProjection(limit, querySpec.offset());
        tnp.outputs(finalOutputs);
        if (orderBy == null) {
            // no sorting needed
            mergeNode = PlanNodeBuilder.localMerge(
                    context.plannerContext().jobId(),
                    ImmutableList.<Projection>of(tnp),
                    collectNode,
                    context.plannerContext());
        } else {
            // no order by needed in TopN as we already sorted on the collector
            // and we merge sorted with SortedBucketMerger
            mergeNode = PlanNodeBuilder.sortedLocalMerge(
                    context.plannerContext().jobId(),
                    ImmutableList.<Projection>of(tnp),
                    orderBy,
                    allOutputs,
                    orderByInputColumns,
                    collectNode,
                    context.plannerContext());
        }
    } else {
        collectNode = PlanNodeBuilder.collect(
                context.plannerContext().jobId(),
                tableInfo,
                context.plannerContext(),
                whereClause,
                outputSymbols,
                ImmutableList.<Projection>of());
        mergeNode = PlanNodeBuilder.localMerge(
                context.plannerContext().jobId(),
                ImmutableList.<Projection>of(),
                collectNode,
                context.plannerContext());
    }
    return new QueryAndFetch(collectNode, mergeNode, context.plannerContext().jobId());
}
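// The index bookkeeping in the limited/ordered branch of normalSelect is easy to
// get wrong, so here is a minimal, self-contained sketch of how toCollect,
// allOutputs, orderByInputColumns and finalOutputs relate for
// "select id, name ... order by id, date". Plain strings stand in for Symbols and
// the in(i) helper is hypothetical shorthand for an InputColumn at index i;
// nothing below is Crate API.

import java.util.ArrayList;
import java.util.List;

public class TopNIndexMappingSketch {

    /** Hypothetical stand-in for an InputColumn referring to the i-th collected value. */
    static String in(int i) { return "in(" + i + ")"; }

    public static void main(String[] args) {
        List<String> outputs = List.of("id", "name");
        List<String> orderBySymbols = List.of("id", "date");

        // toCollect = selected outputs plus any order-by symbol not already selected
        List<String> toCollect = new ArrayList<>(outputs);
        for (String s : orderBySymbols) {
            if (!toCollect.contains(s)) {
                toCollect.add(s);
            }
        }
        // -> [id, name, date]

        // allOutputs: one input column per collected value, fed to the shard-level TopN
        List<String> allOutputs = new ArrayList<>();
        for (int i = 0; i < toCollect.size(); i++) {
            allOutputs.add(in(i));
        }
        // -> [in(0), in(1), in(2)]

        // orderByInputColumns: positions of the order-by symbols within toCollect
        List<String> orderByInputColumns = new ArrayList<>();
        for (String s : orderBySymbols) {
            orderByInputColumns.add(in(toCollect.indexOf(s)));
        }
        // -> [in(0), in(2)]

        // finalOutputs: the handler-side TopN trims the result back to the selected columns
        List<String> finalOutputs = new ArrayList<>();
        for (int i = 0; i < outputs.size(); i++) {
            finalOutputs.add(in(i));
        }
        // -> [in(0), in(1)]

        System.out.println("toCollect:           " + toCollect);
        System.out.println("allOutputs:          " + allOutputs);
        System.out.println("orderByInputColumns: " + orderByInputColumns);
        System.out.println("finalOutputs:        " + finalOutputs);
    }
}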