Example #1
    private static Filter genericFunctionFilter(Function function, Context context) {
      if (function.valueType() != DataTypes.BOOLEAN) {
        raiseUnsupported(function);
      }
      // Avoid the field-cache:
      // reason 1: analyzed columns or columns with index off wouldn't work;
      //   e.g. substr(n, 1, 1) on an analyzed column n would throw an error because n would be an array
      // reason 2: every value would have to be loaded into the field cache
      function = (Function) DocReferenceConverter.convertIf(function);

      final CollectInputSymbolVisitor.Context ctx =
          context.inputSymbolVisitor.extractImplementations(function);
      assert ctx.topLevelInputs().size() == 1 : "expected exactly one top-level input for the filter condition";
      @SuppressWarnings("unchecked")
      final Input<Boolean> condition = (Input<Boolean>) ctx.topLevelInputs().get(0);
      @SuppressWarnings("unchecked")
      final List<LuceneCollectorExpression> expressions = ctx.docLevelExpressions();
      final CollectorContext collectorContext =
          new CollectorContext(
              context.mapperService,
              context.fieldDataService,
              new CollectorFieldsVisitor(expressions.size()));

      for (LuceneCollectorExpression expression : expressions) {
        expression.startCollect(collectorContext);
      }
      return new Filter() {
        @Override
        public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs)
            throws IOException {
          for (LuceneCollectorExpression expression : expressions) {
            expression.setNextReader(context.reader().getContext());
          }
          return BitsFilteredDocIdSet.wrap(
              new FunctionDocSet(
                  context.reader(),
                  collectorContext.visitor(),
                  condition,
                  expressions,
                  context.reader().maxDoc(),
                  acceptDocs),
              acceptDocs);
        }
      };
    }
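
The FunctionDocSet referenced above is not shown in this example. As a rough, hypothetical sketch of the underlying idea (not CrateDB's actual implementation): on Lucene 4.x one could keep only the documents for which a per-document boolean condition holds by extending FilteredDocIdSet. The DocCondition interface below is an invented stand-in for the combination of Input<Boolean> plus the per-document positioning done by the collector expressions.

    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.FilteredDocIdSet;

    // Hypothetical illustration: keep only docs whose condition evaluates to TRUE.
    class ConditionDocIdSet extends FilteredDocIdSet {

      interface DocCondition {
        Boolean matches(int doc); // may return null (SQL three-valued logic)
      }

      private final DocCondition condition;

      ConditionDocIdSet(DocIdSet innerSet, DocCondition condition) {
        super(innerSet);
        this.condition = condition;
      }

      @Override
      protected boolean match(int doc) {
        Boolean value = condition.matches(doc);
        return value != null && value; // NULL and FALSE both mean "filter out"
      }
    }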
Example #2
    private PlannedAnalyzedRelation normalSelect(
        QueriedTable table,
        WhereClause whereClause,
        TableRelation tableRelation,
        ConsumerContext context) {
      QuerySpec querySpec = table.querySpec();
      TableInfo tableInfo = tableRelation.tableInfo();

      List<Symbol> outputSymbols;
      if (tableInfo.schemaInfo().systemSchema()) {
        outputSymbols = tableRelation.resolve(querySpec.outputs());
      } else {
        outputSymbols = new ArrayList<>(querySpec.outputs().size());
        for (Symbol symbol : querySpec.outputs()) {
          outputSymbols.add(
              DocReferenceConverter.convertIfPossible(tableRelation.resolve(symbol), tableInfo));
        }
      }
      CollectNode collectNode;
      MergeNode mergeNode = null;
      OrderBy orderBy = querySpec.orderBy();
      if (context.rootRelation() != table) {
        // insert directly from shards
        assert !querySpec.isLimited()
            : "insert from sub query with limit or order by is not supported. "
                + "Analyzer should have thrown an exception already.";

        ImmutableList<Projection> projections = ImmutableList.<Projection>of();
        collectNode =
            PlanNodeBuilder.collect(
                context.plannerContext().jobId(),
                tableInfo,
                context.plannerContext(),
                whereClause,
                outputSymbols,
                projections);
      } else if (querySpec.isLimited() || orderBy != null) {
        /*
         * Example: select id, name ... order by id, date
         *
         * toCollect:       [id, name, date]      // includes order-by symbols that aren't already selected
         * allOutputs:      [in(0), in(1), in(2)] // for the topN projection on shards/collectNode
         * orderByInputs:   [in(0), in(2)]        // for the topN projection on shards/collectNode AND handler
         * finalOutputs:    [in(0), in(1)]        // for the topN output on the handler -> changes the output
         *                                        //   to what should be returned
         */
        List<Symbol> toCollect;
        List<Symbol> orderByInputColumns = null;
        if (orderBy != null) {
          List<Symbol> orderBySymbols = tableRelation.resolve(orderBy.orderBySymbols());
          toCollect = new ArrayList<>(outputSymbols.size() + orderBySymbols.size());
          toCollect.addAll(outputSymbols);
          // note: only the order-by symbols can be de-duplicated here; the outputs themselves can't,
          // because of non-deterministic functions, e.g. select random(), random()
          for (Symbol orderBySymbol : orderBySymbols) {
            if (!toCollect.contains(orderBySymbol)) {
              toCollect.add(orderBySymbol);
            }
          }
          orderByInputColumns = new ArrayList<>();
          for (Symbol symbol : orderBySymbols) {
            orderByInputColumns.add(new InputColumn(toCollect.indexOf(symbol), symbol.valueType()));
          }
        } else {
          toCollect = new ArrayList<>(outputSymbols.size());
          toCollect.addAll(outputSymbols);
        }

        List<Symbol> allOutputs = new ArrayList<>(toCollect.size()); // outputs from collector
        for (int i = 0; i < toCollect.size(); i++) {
          allOutputs.add(new InputColumn(i, toCollect.get(i).valueType()));
        }
        List<Symbol> finalOutputs =
            new ArrayList<>(outputSymbols.size()); // final outputs on handler after sort
        for (int i = 0; i < outputSymbols.size(); i++) {
          finalOutputs.add(new InputColumn(i, outputSymbols.get(i).valueType()));
        }

        // if we have an offset, every node has to return offset+limit docs,
        // otherwise the merged result would be wrong (see the sketch after this example)
        TopNProjection tnp;
        int limit = firstNonNull(querySpec.limit(), Constants.DEFAULT_SELECT_LIMIT);
        if (orderBy == null) {
          tnp = new TopNProjection(querySpec.offset() + limit, 0);
        } else {
          tnp =
              new TopNProjection(
                  querySpec.offset() + limit,
                  0,
                  orderByInputColumns,
                  orderBy.reverseFlags(),
                  orderBy.nullsFirst());
        }
        tnp.outputs(allOutputs);
        collectNode =
            PlanNodeBuilder.collect(
                context.plannerContext().jobId(),
                tableInfo,
                context.plannerContext(),
                whereClause,
                toCollect,
                ImmutableList.<Projection>of(tnp));

        // MERGE
        tnp = new TopNProjection(limit, querySpec.offset());
        tnp.outputs(finalOutputs);
        if (orderBy == null) {
          // no sorting needed
          mergeNode =
              PlanNodeBuilder.localMerge(
                  context.plannerContext().jobId(),
                  ImmutableList.<Projection>of(tnp),
                  collectNode,
                  context.plannerContext());
        } else {
          // no order by needed in TopN as we already sorted on collector
          // and we merge sorted with SortedBucketMerger
          mergeNode =
              PlanNodeBuilder.sortedLocalMerge(
                  context.plannerContext().jobId(),
                  ImmutableList.<Projection>of(tnp),
                  orderBy,
                  allOutputs,
                  orderByInputColumns,
                  collectNode,
                  context.plannerContext());
        }
      } else {
        collectNode =
            PlanNodeBuilder.collect(
                context.plannerContext().jobId(),
                tableInfo,
                context.plannerContext(),
                whereClause,
                outputSymbols,
                ImmutableList.<Projection>of());
        mergeNode =
            PlanNodeBuilder.localMerge(
                context.plannerContext().jobId(),
                ImmutableList.<Projection>of(),
                collectNode,
                context.plannerContext());
      }
      return new QueryAndFetch(collectNode, mergeNode, context.plannerContext().jobId());
    }
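
The offset handling in the limited branch is worth a standalone illustration. The comment above the first TopNProjection says every node has to return offset+limit rows; the following self-contained sketch (plain JDK only; the class name, numbers, and helper methods are made up for this illustration and are not CrateDB API) shows what goes wrong if each node only returns limit rows:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class TopNOffsetDemo {

      // one node/shard: sort its rows and keep the first `keep` of them
      static List<Integer> nodeTopN(List<Integer> rows, int keep) {
        List<Integer> sorted = new ArrayList<>(rows);
        Collections.sort(sorted);
        return sorted.subList(0, Math.min(keep, sorted.size()));
      }

      // handler-side merge: sort everything, then apply offset and limit
      static List<Integer> merge(List<Integer> a, List<Integer> b, int limit, int offset) {
        List<Integer> all = new ArrayList<>(a);
        all.addAll(b);
        Collections.sort(all);
        int from = Math.min(offset, all.size());
        int to = Math.min(offset + limit, all.size());
        return all.subList(from, to);
      }

      public static void main(String[] args) {
        List<Integer> node1 = List.of(1, 2, 3, 4);
        List<Integer> node2 = List.of(5, 6, 7, 8);
        int limit = 2;
        int offset = 2; // the global result should be [3, 4]

        // wrong: each node keeps only `limit` rows, so 3 and 4 are never collected -> [5, 6]
        System.out.println(merge(nodeTopN(node1, limit), nodeTopN(node2, limit), limit, offset));

        // right: each node keeps offset + limit rows, as the TopNProjection above does -> [3, 4]
        System.out.println(merge(nodeTopN(node1, offset + limit), nodeTopN(node2, offset + limit), limit, offset));
      }
    }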