    @Override
    public Scorer filteredScorer(LeafReaderContext context, Weight weight, DocIdSet docIdSet)
        throws IOException {
      // CHANGE: If threshold is 0, always pass down the accept docs and don't pay the price of
      // calling nextDoc when it isn't needed.
      final Bits filterAcceptDocs = docIdSet.bits();
      if (threshold == 0) {
        if (filterAcceptDocs != null) {
          return weight.scorer(context, filterAcceptDocs);
        } else {
          return FilteredQuery.LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(
              context, weight, docIdSet);
        }
      }

      // CHANGE: handle "default" value
      if (threshold == -1) {
        // default value: don't iterate; only apply the filter after the query if it's not a
        // "fast" docIdSet
        // TODO: is there a way we could avoid creating an iterator here?
        if (filterAcceptDocs != null && DocIdSets.isBroken(docIdSet.iterator())) {
          return FilteredQuery.QUERY_FIRST_FILTER_STRATEGY.filteredScorer(
              context, weight, docIdSet);
        }
      }

      return super.filteredScorer(context, weight, docIdSet);
    }
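For context, a FilterStrategy like this is what Lucene's FilteredQuery delegates to when building its scorer. Below is a minimal sketch of how such a strategy could be wired in, assuming a hypothetical CustomRandomAccessFilterStrategy class that holds the filteredScorer override above and takes the threshold as a constructor argument, plus an existing IndexSearcher named searcher (Lucene 4.x/5.x era, before FilteredQuery was removed):

// Sketch only: CustomRandomAccessFilterStrategy and its threshold constructor are assumptions,
// not part of the snippet above. FilteredQuery, Filter and QueryWrapperFilter are the
// org.apache.lucene.search classes of the Lucene 4.x/5.x era.
Query mainQuery = new TermQuery(new Term("body", "lucene"));
Filter typeFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "article")));
// The strategy decides how the filter is applied: random access via bits(), leap-frog,
// or query-first, matching the threshold checks above.
Query filtered = new FilteredQuery(mainQuery, typeFilter,
    new CustomRandomAccessFilterStrategy(/* threshold = */ 0));
TopDocs hits = searcher.search(filtered, 10);
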
Example #2
    @Override
    public Scorer scorer(
        AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs)
        throws IOException {
      DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs);
      if (DocIdSets.isEmpty(parentsSet) || remaining == 0) {
        return null;
      }

      BytesValues bytesValues = parentChildIndexFieldData.load(context).getBytesValues(parentType);
      if (bytesValues == null) {
        return null;
      }

      // We can't be sure that liveDocs have already been applied, so we apply them here. The
      // "remaining" count down (short circuit) logic will then work as expected.
      DocIdSetIterator parentsIterator =
          BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator();
      switch (scoreType) {
        case AVG:
          return new AvgParentScorer(
              this, bytesValues, parentIds, scores, occurrences, parentsIterator);
        default:
          return new ParentScorer(this, bytesValues, parentIds, scores, parentsIterator);
      }
    }
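The BitsFilteredDocIdSet.wrap call above is what keeps the "remaining" count-down safe against deleted documents. A minimal sketch of the same pattern, assuming hypothetical parentFilter, context and acceptDocs variables (Lucene 4.x-era Filter/AtomicReaderContext APIs):

// BitsFilteredDocIdSet.wrap(set, liveDocs) returns a DocIdSet whose iterator skips every
// document whose bit is unset in liveDocs, so deleted docs never reach the caller.
DocIdSet parents = parentFilter.getDocIdSet(context, acceptDocs);
if (parents != null) {
  Bits liveDocs = context.reader().getLiveDocs(); // null when the segment has no deletions
  DocIdSetIterator it = BitsFilteredDocIdSet.wrap(parents, liveDocs).iterator();
  if (it != null) {
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      // only live parent documents are seen here
    }
  }
}
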
 @Override
 public void setNextReader(AtomicReaderContext reader) {
   try {
     bits =
         DocIdSets.toSafeBits(
             reader.reader(), filter.getDocIdSet(reader, reader.reader().getLiveDocs()));
   } catch (IOException ioe) {
     throw new AggregationExecutionException(
         "Failed to aggregate filter aggregator [" + name + "]", ioe);
   }
 }
Example #4
 @Override
 public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub)
     throws IOException {
   // no need to provide deleted docs to the filter
   final Bits bits =
       DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx, null));
   return new LeafBucketCollectorBase(sub, null) {
     @Override
     public void collect(int doc, long bucket) throws IOException {
       if (bits.get(doc)) {
         collectBucket(sub, doc, bucket);
       }
     }
   };
 }
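A detail this collector relies on: the Bits returned by DocIdSets.asSequentialAccessBits is backed by the filter scorer's forward-only iterator, so it should only be probed with non-decreasing doc IDs, which is exactly how collect(doc, bucket) is invoked. A hedged sketch of the same check outside an aggregator, assuming hypothetical ctx (LeafReaderContext) and filterWeight (Weight) variables:

// Probe the bits in increasing doc order only; the underlying scorer iterator cannot rewind.
final Bits bits =
    DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filterWeight.scorer(ctx, null));
for (int doc = 0; doc < ctx.reader().maxDoc(); doc++) {
  if (bits.get(doc)) {
    // doc matches the filter in this segment
  }
}
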
    @Override
    public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
      if (remaining == 0) {
        return null;
      }

      if (shortCircuitFilter != null) {
        DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, acceptDocs);
        if (!DocIdSets.isEmpty(docIdSet)) {
          DocIdSetIterator iterator = docIdSet.iterator();
          if (iterator != null) {
            return ConstantScorer.create(iterator, this, queryWeight);
          }
        }
        return null;
      }

      DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, acceptDocs);
      if (!DocIdSets.isEmpty(parentDocIdSet)) {
        // We can't be sure that liveDocs have already been applied, so we apply them here. The
        // "remaining" count down (short circuit) logic will then work as expected.
        parentDocIdSet = BitsFilteredDocIdSet.wrap(parentDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = parentDocIdSet.iterator();
        if (innerIterator != null) {
          LongBitSet parentOrds = collector.parentOrds;
          SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
          if (globalValues != null) {
            DocIdSetIterator parentIdIterator =
                new ParentOrdIterator(innerIterator, parentOrds, globalValues, this);
            return ConstantScorer.create(parentIdIterator, this, queryWeight);
          }
        }
      }
      return null;
    }
Example #6
    @Override
    public Scorer scorer(
        AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs)
        throws IOException {
      DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, acceptDocs);
      if (DocIdSets.isEmpty(childrenDocSet)) {
        return null;
      }
      IdReaderTypeCache idTypeCache =
          searchContext.idCache().reader(context.reader()).type(parentType);
      if (idTypeCache == null) {
        return null;
      }

      return new ChildScorer(this, uidToScore, childrenDocSet.iterator(), idTypeCache);
    }
Example #7
    @Override
    public Scorer scorer(
        AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs)
        throws IOException {
      DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs);
      if (DocIdSets.isEmpty(parentsSet) || remaining == 0) {
        return null;
      }

      IdReaderTypeCache idTypeCache =
          searchContext.idCache().reader(context.reader()).type(parentType);
      // We can't be sure that liveDocs have already been applied, so we apply them here. The
      // "remaining" count down (short circuit) logic will then work as expected.
      DocIdSetIterator parentsIterator =
          BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator();
      switch (scoreType) {
        case AVG:
          return new AvgParentScorer(
              this, idTypeCache, uidToScore.v(), uidToCount.v(), parentsIterator);
        default:
          return new ParentScorer(this, idTypeCache, uidToScore.v(), parentsIterator);
      }
    }