public void testNullDocIdSetIterator() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

  final Filter filter = new Filter() {
    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
      return new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          return null;
        }
      };
    }
  };
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // a null iterator means the set matches no documents, so the caching
  // filter should return null:
  assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));

  reader.close();
  dir.close();
}
private static void assertDocIdSetCacheable(
    IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
  assertTrue(reader.getContext() instanceof AtomicReaderContext);
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
  final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
  final DocIdSet originalSet = filter.getDocIdSet(context, context.reader().getLiveDocs());
  final DocIdSet cachedSet = cacher.getDocIdSet(context, context.reader().getLiveDocs());
  if (originalSet == null) {
    assertNull(cachedSet);
  }
  if (cachedSet == null) {
    assertTrue(originalSet == null || originalSet.iterator() == null);
  } else {
    assertTrue(cachedSet.isCacheable());
    assertEquals(shouldCacheable, originalSet.isCacheable());
    // System.out.println("Original: " + originalSet.getClass().getName()
    //     + " -- cached: " + cachedSet.getClass().getName());
    if (originalSet.isCacheable()) {
      assertEquals(
          "Cached DocIdSet must be of the same class as the uncached one, if cacheable",
          originalSet.getClass(),
          cachedSet.getClass());
    } else {
      assertTrue(
          "Cached DocIdSet must be a FixedBitSet if the original one was not cacheable",
          cachedSet instanceof FixedBitSet);
    }
  }
}
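// Not part of this excerpt: an illustrative, hedged sketch of how the helper
// above is typically driven. The filter choices are assumptions picked to
// exercise both branches (a scorer-backed DocIdSet is not cacheable, a
// FixedBitSet-backed one is); they are not verbatim from the original file.
public void testIsCacheable() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newStringField("id", "1", Field.Store.YES));
  writer.addDocument(doc);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));

  // a QueryWrapperFilter produces a scorer-backed DocIdSet, which is not cacheable:
  assertDocIdSetCacheable(
      reader, new QueryWrapperFilter(new TermQuery(new Term("id", "1"))), false);

  // a filter backed directly by a FixedBitSet is cacheable:
  assertDocIdSetCacheable(
      reader,
      new Filter() {
        @Override
        public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
          return new FixedBitSet(context.reader().maxDoc());
        }
      },
      true);

  reader.close();
  dir.close();
}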
public void testCachingWorks() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
  MockFilter filter = new MockFilter();
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // first time, the nested filter is called; hold a strong reference to the
  // returned set so GC cannot evict the cache entry mid-test:
  DocIdSet strongRef = cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertTrue("first time", filter.wasCalled());

  // make sure no exception if cache is holding the wrong docIdSet
  cacher.getDocIdSet(context, context.reader().getLiveDocs());

  // second time, the nested filter should not be called
  filter.clear();
  cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertFalse("second time", filter.wasCalled());

  reader.close();
  dir.close();
}
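// MockFilter is referenced above but not defined in this excerpt. A minimal
// sketch, assuming it simply records whether getDocIdSet was invoked so the
// test can tell a cache hit from a cache miss; the method names follow the
// calls made in testCachingWorks, and the returned set is an arbitrary
// cacheable choice.
static class MockFilter extends Filter {
  private boolean wasCalled;

  @Override
  public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
    wasCalled = true;
    // any cacheable set will do for this test:
    return new FixedBitSet(context.reader().maxDoc());
  }

  public void clear() {
    wasCalled = false;
  }

  public boolean wasCalled() {
    return wasCalled;
  }
}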
public void testEnforceDeletions() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(
      random(),
      dir,
      newIndexWriterConfig(new MockAnalyzer(random()))
          .setMergeScheduler(new SerialMergeScheduler())
          // asserts below require no unexpected merges:
          .setMergePolicy(newLogMergePolicy(10)));

  // NOTE: cannot use writer.getReader because RIW (on
  // flipping a coin) may give us a newly opened reader,
  // but we use .reopen on this reader below and expect to
  // (must) get an NRT reader:
  DirectoryReader reader = DirectoryReader.open(writer.w, true);
  // same reason we don't wrap?
  IndexSearcher searcher = newSearcher(reader, false);

  // add a doc, refresh the reader, and check that it's there
  Document doc = new Document();
  doc.add(newStringField("id", "1", Field.Store.YES));
  writer.addDocument(doc);

  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
  assertEquals("Should find a hit...", 1, docs.totalHits);

  final Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));

  CachingWrapperFilter filter = new CachingWrapperFilter(startFilter);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertTrue(filter.ramBytesUsed() > 0);
  assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);

  Query constantScore = new ConstantScoreQuery(filter);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

  // make sure we get a cache hit when we reopen a reader
  // that had no change to deletions

  // fake delete (deletes nothing):
  writer.deleteDocuments(new Term("foo", "bar"));

  IndexReader oldReader = reader;
  reader = refreshReader(reader);
  assertTrue(reader == oldReader);
  int missCount = filter.missCount;
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);

  // cache hit:
  assertEquals(missCount, filter.missCount);

  // now delete the doc, refresh the reader, and see that it's not there
  writer.deleteDocuments(new Term("id", "1"));

  // NOTE: important to hold ref here so GC doesn't clear
  // the cache entry!  Else the assert below may sometimes
  // fail:
  oldReader = reader;
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  missCount = filter.missCount;
  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);

  // cache hit
  assertEquals(missCount, filter.missCount);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);

  // apply deletes dynamically:
  filter = new CachingWrapperFilter(startFilter);
  writer.addDocument(doc);
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
  missCount = filter.missCount;
  assertTrue(missCount > 0);
  constantScore = new ConstantScoreQuery(filter);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
  assertEquals(missCount, filter.missCount);

  writer.addDocument(doc);

  // NOTE: important to hold ref here so GC doesn't clear
  // the cache entry!  Else the assert below may sometimes
  // fail:
  oldReader = reader;
  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should find 2 hits...", 2, docs.totalHits);
  assertTrue(filter.missCount > missCount);
  missCount = filter.missCount;

  constantScore = new ConstantScoreQuery(filter);
  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should find 2 hits...", 2, docs.totalHits);
  assertEquals(missCount, filter.missCount);

  // now delete the doc, refresh the reader, and see that it's not there
  writer.deleteDocuments(new Term("id", "1"));

  reader = refreshReader(reader);
  searcher = newSearcher(reader, false);

  docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
  assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
  // CWF reused the same entry (it dynamically applied the deletes):
  assertEquals(missCount, filter.missCount);

  docs = searcher.search(constantScore, 1);
  assertEquals("[just filter] Should *not* find a hit...", 0, docs.totalHits);
  // CWF reused the same entry (it dynamically applied the deletes):
  assertEquals(missCount, filter.missCount);

  // NOTE: silliness to make sure JRE does not eliminate
  // our holding onto oldReader to prevent
  // CachingWrapperFilter's WeakHashMap from dropping the
  // entry:
  assertTrue(oldReader != null);

  reader.close();
  writer.close();
  dir.close();
}
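// refreshReader is used throughout testEnforceDeletions but is not part of
// this excerpt. A minimal sketch, assuming it reopens the NRT reader via
// DirectoryReader.openIfChanged and closes the old reader only when something
// actually changed; that "return the same instance on no change" behavior is
// what lets the test assert reader == oldReader after the no-op delete.
private static DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
  DirectoryReader oldReader = reader;
  reader = DirectoryReader.openIfChanged(reader);
  if (reader != null) {
    // the index changed: release the stale reader and hand back the new one
    oldReader.close();
    return reader;
  } else {
    // nothing changed: openIfChanged returned null, keep the old reader
    return oldReader;
  }
}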