private static void assertDocIdSetCacheable( IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException { assertTrue(reader.getContext() instanceof AtomicReaderContext); AtomicReaderContext context = (AtomicReaderContext) reader.getContext(); final CachingWrapperFilter cacher = new CachingWrapperFilter(filter); final DocIdSet originalSet = filter.getDocIdSet(context, context.reader().getLiveDocs()); final DocIdSet cachedSet = cacher.getDocIdSet(context, context.reader().getLiveDocs()); if (originalSet == null) { assertNull(cachedSet); } if (cachedSet == null) { assertTrue(originalSet == null || originalSet.iterator() == null); } else { assertTrue(cachedSet.isCacheable()); assertEquals(shouldCacheable, originalSet.isCacheable()); // System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: // "+cachedSet.getClass().getName()); if (originalSet.isCacheable()) { assertEquals( "Cached DocIdSet must be of same class like uncached, if cacheable", originalSet.getClass(), cachedSet.getClass()); } else { assertTrue( "Cached DocIdSet must be an FixedBitSet if the original one was not cacheable", cachedSet instanceof FixedBitSet || cachedSet == null); } } }
// inherit javadoc public void search(Weight weight, Filter filter, final HitCollector results) throws IOException { Scorer scorer = weight.scorer(reader); if (scorer == null) return; if (filter == null) { scorer.score(results); return; } DocIdSetIterator filterDocIdIterator = filter.getDocIdSet(reader).iterator(); // CHECKME: use ConjunctionScorer here? boolean more = filterDocIdIterator.next() && scorer.skipTo(filterDocIdIterator.doc()); while (more) { int filterDocId = filterDocIdIterator.doc(); if (filterDocId > scorer.doc() && !scorer.skipTo(filterDocId)) { more = false; } else { int scorerDocId = scorer.doc(); if (scorerDocId == filterDocId) { // permitted by filter results.collect(scorerDocId, scorer.score()); more = filterDocIdIterator.next(); } else { more = filterDocIdIterator.skipTo(scorerDocId); } } } }
@Override public Scorer scorer( AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException { DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs); if (DocIdSets.isEmpty(parentsSet) || remaining == 0) { return null; } BytesValues bytesValues = parentChildIndexFieldData.load(context).getBytesValues(parentType); if (bytesValues == null) { return null; } // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The // "remaining" // count down (short circuit) logic will then work as expected. DocIdSetIterator parentsIterator = BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator(); switch (scoreType) { case AVG: return new AvgParentScorer( this, bytesValues, parentIds, scores, occurrences, parentsIterator); default: return new ParentScorer(this, bytesValues, parentIds, scores, parentsIterator); } }
@Override public Scorer scorer( AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException { DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs); if (DocIdSets.isEmpty(parentsSet) || remaining == 0) { return null; } IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType); // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The // "remaining" // count down (short circuit) logic will then work as expected. DocIdSetIterator parentsIterator = BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator(); switch (scoreType) { case AVG: return new AvgParentScorer( this, idTypeCache, uidToScore.v(), uidToCount.v(), parentsIterator); default: return new ParentScorer(this, idTypeCache, uidToScore.v(), parentsIterator); } }
// Returns the set of child documents belonging to the parent doc identified by `docId`,
// or null when this segment is not the one holding the parent, or the parent has no
// children. Relies on the nested-doc layout: child docs are indexed immediately before
// their parent within the same segment.
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs)
    throws IOException {
  // Nested docs only reside in a single segment, so no need to evaluate all segments
  if (!context.reader().getCoreCacheKey().equals(this.atomicReader.getCoreCacheKey())) {
    return null;
  }
  // If docId == 0 the parent doc cannot have child docs: children are stored before
  // their parent, and there is no room before doc 0.
  if (docId == 0) {
    return null;
  }
  final FixedBitSet parents = parentFilter.getDocIdSet(context, null);
  // First doc after the previous parent is the first child of this parent.
  final int firstChildDocId = parents.prevSetBit(docId - 1) + 1;
  // A parent doc doesn't have child docs, so we can early exit here:
  if (firstChildDocId == docId) {
    return null;
  }
  final DocIdSet children = childFilter.getDocIdSet(context, acceptDocs);
  if (children == null) {
    return null;
  }
  final DocIdSetIterator childrenIterator = children.iterator();
  if (childrenIterator == null) {
    return null;
  }
  // Wrap the child iterator, clamping it to the window [firstChildDocId, docId).
  return new DocIdSet() {
    @Override
    public long ramBytesUsed() {
      return parents.ramBytesUsed() + children.ramBytesUsed();
    }

    @Override
    public DocIdSetIterator iterator() throws IOException {
      return new DocIdSetIterator() {
        int currentDocId = -1; // -1 = not yet positioned

        @Override
        public int docID() {
          return currentDocId;
        }

        @Override
        public int nextDoc() throws IOException {
          return advance(currentDocId + 1);
        }

        @Override
        public int advance(int target) throws IOException {
          // Never advance before the first child of this parent.
          target = Math.max(firstChildDocId, target);
          if (target >= docId) {
            // We're outside the child nested scope, so it is done
            return currentDocId = NO_MORE_DOCS;
          } else {
            int advanced = childrenIterator.advance(target);
            if (advanced >= docId) {
              // We're outside the child nested scope, so it is done
              return currentDocId = NO_MORE_DOCS;
            } else {
              return currentDocId = advanced;
            }
          }
        }

        @Override
        public long cost() {
          return childrenIterator.cost();
        }
      };
    }
  };
}