private InternalSearchHit.InternalNestedIdentity getInternalNestedIdentity(
        SearchContext context,
        int nestedSubDocId,
        LeafReaderContext subReaderContext,
        DocumentMapper documentMapper,
        ObjectMapper nestedObjectMapper) throws IOException {
    int currentParent = nestedSubDocId;
    ObjectMapper nestedParentObjectMapper;
    ObjectMapper current = nestedObjectMapper;
    String originalName = nestedObjectMapper.name();
    InternalSearchHit.InternalNestedIdentity nestedIdentity = null;
    // Walk up the chain of object mappers, handling one nested level per iteration.
    do {
        Query parentFilter;
        nestedParentObjectMapper = documentMapper.findParentObjectMapper(current);
        if (nestedParentObjectMapper != null) {
            if (nestedParentObjectMapper.nested().isNested() == false) {
                // plain (non-nested) object: skip to its parent
                current = nestedParentObjectMapper;
                continue;
            }
            parentFilter = nestedParentObjectMapper.nestedTypeFilter();
        } else {
            parentFilter = Queries.newNonNestedFilter();
        }
        // Filter matching nested docs of the current nested object.
        Query childFilter = nestedObjectMapper.nestedTypeFilter();
        if (childFilter == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false);
        Scorer childScorer = childWeight.scorer(subReaderContext);
        if (childScorer == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        DocIdSetIterator childIter = childScorer.iterator();

        BitSet parentBits =
                context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext);

        // Count matching nested docs between the current doc and its parent doc
        // (parent docs follow their nested children in the block).
        int offset = 0;
        int nextParent = parentBits.nextSetBit(currentParent);
        for (int docId = childIter.advance(currentParent + 1);
                docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS;
                docId = childIter.nextDoc()) {
            offset++;
        }
        currentParent = nextParent;
        current = nestedObjectMapper = nestedParentObjectMapper;
        // Strip the parent prefix from the field name and prepend this level to the identity chain.
        int currentPrefix = current == null ? 0 : current.name().length() + 1;
        nestedIdentity = new InternalSearchHit.InternalNestedIdentity(
                originalName.substring(currentPrefix), offset, nestedIdentity);
        if (current != null) {
            originalName = current.name();
        }
    } while (current != null);
    return nestedIdentity;
}
public static Map<String, Integer> termFrequencies(
        IndexSearcher indexSearcher,
        Query documentFilterQuery,
        String fieldName,
        String propName,
        String altName) {
    try {
        String luceneField = ComplexFieldUtil.propertyField(fieldName, propName, altName);
        Weight weight = indexSearcher.createNormalizedWeight(documentFilterQuery, false);
        Map<String, Integer> freq = new HashMap<>();
        IndexReader indexReader = indexSearcher.getIndexReader();
        for (LeafReaderContext arc : indexReader.leaves()) {
            if (weight == null) throw new RuntimeException("weight == null");
            if (arc == null) throw new RuntimeException("arc == null");
            if (arc.reader() == null) throw new RuntimeException("arc.reader() == null");
            Scorer scorer = weight.scorer(arc, arc.reader().getLiveDocs());
            if (scorer != null) {
                while (scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                    getFrequenciesFromTermVector(
                            indexReader, scorer.docID() + arc.docBase, luceneField, freq);
                }
            }
        }
        return freq;
    } catch (IOException e) {
        throw ExUtil.wrapRuntimeException(e);
    }
}
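The method above delegates per-document counting to getFrequenciesFromTermVector, which is not shown. Below is a minimal sketch of what such a helper could look like, assuming the field is indexed with term vectors and written against the Lucene 5+ Terms API; it is an illustration, not the actual implementation.

// Hypothetical helper: reads one document's term vector and accumulates per-term
// frequencies into the supplied map. Assumes term vectors are stored for the field;
// documents without one are skipped.
static void getFrequenciesFromTermVector(
        IndexReader reader, int globalDocId, String luceneField, Map<String, Integer> freq)
        throws IOException {
    Terms termVector = reader.getTermVector(globalDocId, luceneField);
    if (termVector == null) {
        return; // no term vector stored for this document/field
    }
    TermsEnum termsEnum = termVector.iterator();
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
        // On a single-document term vector, totalTermFreq() is the term's frequency in that doc.
        freq.merge(term.utf8ToString(), (int) termsEnum.totalTermFreq(), Integer::sum);
    }
}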
private FiltersFunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
    Scorer subQueryScorer = subQueryWeight.scorer(context);
    if (subQueryScorer == null) {
        return null;
    }
    final LeafScoreFunction[] functions = new LeafScoreFunction[filterFunctions.length];
    final Bits[] docSets = new Bits[filterFunctions.length];
    for (int i = 0; i < filterFunctions.length; i++) {
        FilterFunction filterFunction = filterFunctions[i];
        functions[i] = filterFunction.function.getLeafScoreFunction(context);
        Scorer filterScorer = filterWeights[i].scorer(context);
        docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer);
    }
    return new FiltersFunctionFactorScorer(
            this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets,
            combineFunction, needsScores);
}
@Override
public Scorer filteredScorer(LeafReaderContext context, Weight weight, DocIdSet docIdSet)
        throws IOException {
    // CHANGE: if the threshold is 0, always pass down the accept docs; don't even pay the
    // price of calling nextDoc...
    final Bits filterAcceptDocs = docIdSet.bits();
    if (threshold == 0) {
        if (filterAcceptDocs != null) {
            return weight.scorer(context, filterAcceptDocs);
        } else {
            return FilteredQuery.LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(
                    context, weight, docIdSet);
        }
    }
    // CHANGE: handle the "default" value
    if (threshold == -1) {
        // Default value: don't iterate the filter up front; only apply it after the query
        // if it is not a "fast" (random-access) DocIdSet.
        // TODO: is there a way we could avoid creating an iterator here?
        if (filterAcceptDocs != null && DocIdSets.isBroken(docIdSet.iterator())) {
            return FilteredQuery.QUERY_FIRST_FILTER_STRATEGY.filteredScorer(
                    context, weight, docIdSet);
        }
    }
    return super.filteredScorer(context, weight, docIdSet);
}
// NOTE: acceptDocs applies (and is checked) only in the
// parent document space
@Override
public Scorer scorer(LeafReaderContext readerContext) throws IOException {
    final Scorer childScorer = childWeight.scorer(readerContext);
    if (childScorer == null) {
        // No matches
        return null;
    }
    final int firstChildDoc = childScorer.iterator().nextDoc();
    if (firstChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
        // No matches
        return null;
    }

    // NOTE: this does not take accept docs into account, the responsibility
    // to not match deleted docs is on the scorer
    final BitSet parents = parentsFilter.getBitSet(readerContext);
    if (parents == null) {
        // No matches
        return null;
    }

    return new BlockJoinScorer(this, childScorer, parents, firstChildDoc, scoreMode);
}
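A hedged usage sketch for context: scorers like the one above are produced by a block-join query built from a child query and a BitSetProducer that marks parent documents. The field names, the ScoreMode, and the method name below are illustrative assumptions, not taken from the snippet.

// Illustrative only: drive a parent/child block join end to end.
static TopDocs searchParentsByChildSkill(IndexSearcher searcher) throws IOException {
    // Parent documents are marked by a filter query; children are matched by the child query.
    BitSetProducer parentsFilter =
            new QueryBitSetProducer(new TermQuery(new Term("type", "resume")));
    Query childQuery = new TermQuery(new Term("skill", "java"));
    Query join = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
    return searcher.search(join, 10); // each hit is a parent doc scored from its children
}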
/** Replay the wrapped collector, but only on a selection of buckets. */
@Override
public void prepareSelectedBuckets(long... selectedBuckets) throws IOException {
    if (!finished) {
        throw new IllegalStateException(
                "Cannot replay yet, collection is not finished: postCollect() has not been called");
    }
    if (this.selectedBuckets != null) {
        throw new IllegalStateException("Already been replayed");
    }

    final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE);
    for (long bucket : selectedBuckets) {
        hash.add(bucket);
    }
    this.selectedBuckets = hash;

    collector.preCollection();
    boolean needsScores = collector.needsScores();
    Weight weight = null;
    if (needsScores) {
        weight = aggContext
                .searchContext()
                .searcher()
                .createNormalizedWeight(aggContext.searchContext().query(), true);
    }
    for (Entry entry : entries) {
        final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
        DocIdSetIterator docIt = null;
        if (needsScores && entry.docDeltas.size() > 0) {
            Scorer scorer = weight.scorer(entry.context);
            // We don't need to check whether the scorer is null, since we are sure
            // that there are documents to replay (entry.docDeltas is not empty).
            docIt = scorer.iterator();
            leafCollector.setScorer(scorer);
        }
        final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator();
        final PackedLongValues.Iterator buckets = entry.buckets.iterator();
        int doc = 0;
        for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) {
            doc += docDeltaIterator.next();
            final long bucket = buckets.next();
            final long rebasedBucket = hash.find(bucket);
            if (rebasedBucket != -1) {
                if (needsScores) {
                    if (docIt.docID() < doc) {
                        docIt.advance(doc);
                    }
                    // Aggregations should only be replayed on matching documents.
                    assert docIt.docID() == doc;
                }
                leafCollector.collect(doc, rebasedBucket);
            }
        }
    }
    collector.postCollection();
}
@Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
        throws IOException {
    Scorer subQueryScorer = qWeight.scorer(reader, true, false);
    if (subQueryScorer == null) {
        return null;
    }
    return new BoostedQuery.CustomScorer(
            getSimilarity(searcher), searcher, reader, this, subQueryScorer, boostVal);
}
@Override
public Scorer scorer(
        final AtomicReaderContext context,
        final boolean scoreDocsInOrder,
        final boolean topScorer,
        final Bits acceptDocs) throws IOException {
    final NodeScorer scorer =
            (NodeScorer) weight.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
    if (scorer == null) {
        return null;
    }
    return new AncestorFilterScorer(scorer, ancestorLevel);
}
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub)
        throws IOException {
    // no need to provide deleted docs to the filter
    final Bits bits =
            DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx, null));
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bits.get(doc)) {
                collectBucket(sub, doc, bucket);
            }
        }
    };
}
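Both this aggregator and the function-score snippet further above wrap a filter scorer as "sequential access" bits. A simplified sketch of that adaptation is shown below, assuming get(doc) is only ever called with non-decreasing doc ids (as a collector guarantees); it is a stand-in for, not the actual, Lucene/Elasticsearch utility, and takes a DocIdSetIterator so it fits both the old and new Scorer APIs.

// Simplified sketch: expose a forward-only DocIdSetIterator through the Bits interface.
static Bits asSequentialAccessBits(int maxDoc, DocIdSetIterator disi) {
    if (disi == null) {
        return new Bits.MatchNoBits(maxDoc); // no matches in this segment
    }
    return new Bits() {
        @Override
        public boolean get(int doc) {
            try {
                if (disi.docID() < doc) {
                    disi.advance(doc); // advance lazily; callers must not go backwards
                }
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
            return disi.docID() == doc;
        }

        @Override
        public int length() {
            return maxDoc;
        }
    };
}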
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    SortedDocValues values = DocValues.getSorted(context.reader(), joinField);
    if (values == null) {
        return null;
    }
    Scorer approximationScorer = approximationWeight.scorer(context);
    if (approximationScorer == null) {
        return null;
    } else if (globalOrds != null) {
        return new OrdinalMapScorer(
                this, collector, values, approximationScorer, globalOrds.getGlobalOrds(context.ord));
    } else {
        return new SegmentOrdinalScorer(this, collector, values, approximationScorer);
    }
}
@Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
        throws IOException {
    // Pass true for "scoreDocsInOrder", because we
    // require in-order scoring, even if the caller does not,
    // since we call advance on the valSrcScorers. Pass
    // false for "topScorer" because we will not invoke
    // score(Collector) on these scorers:
    Scorer subQueryScorer = subQueryWeight.scorer(reader, true, false);
    if (subQueryScorer == null) {
        return null;
    }
    Scorer[] valSrcScorers = new Scorer[valSrcWeights.length];
    for (int i = 0; i < valSrcScorers.length; i++) {
        valSrcScorers[i] = valSrcWeights[i].scorer(reader, true, topScorer);
    }
    return new CustomScorer(similarity, reader, this, subQueryScorer, valSrcScorers);
}
@Override
protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector)
        throws IOException {
    for (LeafReaderContext ctx : leaves) { // search each subreader
        // we force the use of Scorer (not BulkScorer) to make sure
        // that the scorer passed to LeafCollector.setScorer supports
        // Scorer.getChildren
        Scorer scorer = weight.scorer(ctx);
        if (scorer != null) {
            final LeafCollector leafCollector = collector.getLeafCollector(ctx);
            leafCollector.setScorer(scorer);
            final Bits liveDocs = ctx.reader().getLiveDocs();
            for (int doc = scorer.nextDoc();
                    doc != DocIdSetIterator.NO_MORE_DOCS;
                    doc = scorer.nextDoc()) {
                if (liveDocs == null || liveDocs.get(doc)) {
                    leafCollector.collect(doc);
                }
            }
        }
    }
}
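For illustration, a hypothetical collector that benefits from the forced Scorer above by walking the sub-scorer tree via Scorer.getChildren(); the class name and the counting logic are assumptions, written against the pre-8.0 ChildScorer API.

// Hypothetical collector: counts, per clause relationship, how often a sub-scorer
// matched a collected document. This only works when setScorer receives a real
// Scorer, which is what the search override above guarantees.
class SubScorerInspectingCollector extends SimpleCollector {
    final Map<String, Integer> hitsByClauseRelationship = new HashMap<>();
    private Scorer scorer;

    @Override
    public void setScorer(Scorer scorer) {
        this.scorer = scorer;
    }

    @Override
    public void collect(int doc) throws IOException {
        for (Scorer.ChildScorer child : scorer.getChildren()) {
            if (child.child.docID() == doc) { // this sub-scorer matched the current doc
                hitsByClauseRelationship.merge(child.relationship, 1, Integer::sum);
            }
        }
    }

    @Override
    public boolean needsScores() {
        return true; // we inspect sub-scorers, so ask for scoring scorers
    }
}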
@Override
public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
    SortedDocValues values = DocValues.getSorted(context.reader(), joinField);
    if (values == null) {
        return null;
    }
    Scorer approximationScorer = approximationWeight.scorer(context, acceptDocs);
    if (approximationScorer == null) {
        return null;
    }
    if (globalOrds != null) {
        return new OrdinalMapScorer(
                this, queryNorm, foundOrds, values, approximationScorer,
                globalOrds.getGlobalOrds(context.ord));
    } else {
        return new SegmentOrdinalScorer(this, queryNorm, foundOrds, values, approximationScorer);
    }
}
public Scorer scorer(AtomicReaderContext context, Bits bits) throws IOException {
    return mainWeight.scorer(context, bits);
}
/** not a direct test of NearSpans, but a demonstration of how/when this causes problems */
public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = q.weight(searcher);
    Scorer s = w.scorer(searcher.getIndexReader(), true, false);
    assertEquals(1, s.advance(1));
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    shardKeyMap.add(context.reader());
    return in.scorer(context);
}
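Taken together, these methods share one recurring pattern: create a Weight once per query, then ask it for a Scorer per leaf and iterate the matching documents. The sketch below shows that pattern end to end against the pre-7.0 createNormalizedWeight API used in several snippets above; the method name is illustrative.

// Minimal sketch of the per-leaf Weight/Scorer pattern the methods above share.
static int countMatches(IndexSearcher searcher, Query query) throws IOException {
    // needsScores = false: we only want matching doc ids
    Weight weight = searcher.createNormalizedWeight(query, false);
    int count = 0;
    for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) {
        Scorer scorer = weight.scorer(ctx);
        if (scorer == null) {
            continue; // no matches in this segment
        }
        DocIdSetIterator it = scorer.iterator();
        Bits liveDocs = ctx.reader().getLiveDocs();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                count++; // ctx.docBase + doc is the index-wide doc id
            }
        }
    }
    return count;
}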