// case 3: tail segments, invariants hold, copy, invariants hold
public void testNoMergeAfterCopy() throws IOException {
  // main directory
  Directory dir = newDirectory();
  // auxiliary directory
  Directory aux = newDirectory();

  setUpDirs(dir, aux);

  IndexWriter writer =
      newWriter(
          dir,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
              .setOpenMode(OpenMode.APPEND)
              .setMaxBufferedDocs(10)
              .setMergePolicy(newLogMergePolicy(4)));

  writer.addIndexes(
      aux, new MockDirectoryWrapper(random(), new RAMDirectory(aux, newIOContext(random()))));
  assertEquals(1060, writer.maxDoc());
  assertEquals(1000, writer.getDocCount(0));
  writer.close();

  // make sure the index is correct
  verifyNumDocs(dir, 1060);
  dir.close();
  aux.close();
}
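// The helpers used above (setUpDirs, addDocs, verifyNumDocs) are not shown in this
// section. A minimal sketch of what addDocs and verifyNumDocs could look like follows;
// the "content" field name and the exact assertions are assumptions, not necessarily
// the helpers these tests were written against.
private void addDocs(IndexWriter writer, int numDocs) throws IOException {
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(newTextField("content", "aaa", Field.Store.NO));
    writer.addDocument(doc);
  }
}

private void verifyNumDocs(Directory dir, int numDocs) throws IOException {
  IndexReader reader = DirectoryReader.open(dir);
  // with no pending deletes, maxDoc and numDocs must both equal the expected count
  assertEquals(numDocs, reader.maxDoc());
  assertEquals(numDocs, reader.numDocs());
  reader.close();
}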
public void testTypeChangeViaAddIndexesIR2() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);
  Document doc = new Document();
  doc.add(new NumericDocValuesField("dv", 0L));
  writer.addDocument(doc);
  writer.close();

  Directory dir2 = newDirectory();
  writer = new IndexWriter(dir2, conf);
  IndexReader[] readers = new IndexReader[] {DirectoryReader.open(dir)};
  writer.addIndexes(readers);
  readers[0].close();
  doc = new Document();
  doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
  try {
    writer.addDocument(doc);
    fail("did not hit exception");
  } catch (IllegalArgumentException iae) {
    // expected: field "dv" already has NUMERIC doc values; it cannot change to SORTED
  }
  writer.close();
  dir2.close();
  dir.close();
}
public void testQueries() throws Exception {
  single = single(random());
  parallel = parallel(random());

  queryTest(new TermQuery(new Term("f1", "v1")));
  queryTest(new TermQuery(new Term("f1", "v2")));
  queryTest(new TermQuery(new Term("f2", "v1")));
  queryTest(new TermQuery(new Term("f2", "v2")));
  queryTest(new TermQuery(new Term("f3", "v1")));
  queryTest(new TermQuery(new Term("f3", "v2")));
  queryTest(new TermQuery(new Term("f4", "v1")));
  queryTest(new TermQuery(new Term("f4", "v2")));

  BooleanQuery.Builder bq1 = new BooleanQuery.Builder();
  bq1.add(new TermQuery(new Term("f1", "v1")), Occur.MUST);
  bq1.add(new TermQuery(new Term("f4", "v1")), Occur.MUST);
  queryTest(bq1.build());

  single.getIndexReader().close();
  single = null;
  parallel.getIndexReader().close();
  parallel = null;
  dir.close();
  dir = null;
  dir1.close();
  dir1 = null;
  dir2.close();
  dir2 = null;
}
/**
 * Tests that index merging (specifically addIndexes(Directory...)) doesn't change the index order
 * of documents.
 */
public void testLucene() throws IOException {
  int num = 100;

  Directory indexA = newDirectory();
  Directory indexB = newDirectory();

  fillIndex(random(), indexA, 0, num);
  boolean fail = verifyIndex(indexA, 0);
  if (fail) {
    fail("Index a is invalid");
  }

  fillIndex(random(), indexB, num, num);
  fail = verifyIndex(indexB, num);
  if (fail) {
    fail("Index b is invalid");
  }

  Directory merged = newDirectory();

  IndexWriter writer =
      new IndexWriter(
          merged,
          newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(2)));
  writer.addIndexes(indexA, indexB);
  writer.forceMerge(1);
  writer.close();

  fail = verifyIndex(merged, 0);
  assertFalse("The merged index is invalid", fail);
  indexA.close();
  indexB.close();
  merged.close();
}
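// fillIndex and verifyIndex are not shown here. A minimal sketch, assuming each document
// stores its sequence number in a "count" field (the field name and writer config are
// assumptions inferred from how the test uses these helpers):
private void fillIndex(Random random, Directory dir, int start, int numDocs) throws IOException {
  IndexWriter writer =
      new IndexWriter(
          dir,
          newIndexWriterConfig(new MockAnalyzer(random))
              .setOpenMode(OpenMode.CREATE)
              .setMaxBufferedDocs(2)
              .setMergePolicy(newLogMergePolicy(2)));
  for (int i = start; i < (start + numDocs); i++) {
    Document doc = new Document();
    doc.add(newStringField("count", "" + i, Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.close();
}

// returns true if any document is out of order relative to startAt
private boolean verifyIndex(Directory directory, int startAt) throws IOException {
  boolean fail = false;
  IndexReader reader = DirectoryReader.open(directory);
  for (int i = 0; i < reader.maxDoc(); i++) {
    String stored = reader.document(i).get("count");
    if (!stored.equals(String.valueOf(i + startAt))) {
      fail = true;
      System.out.println("Document " + (i + startAt) + " is returning document " + stored);
    }
  }
  reader.close();
  return fail;
}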
public void testIgnoreStoredFields() throws IOException {
  Directory dir1 = getDir1(random());
  Directory dir2 = getDir2(random());
  LeafReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1));
  LeafReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2));

  // with overlapping
  ParallelLeafReader pr =
      new ParallelLeafReader(false, new LeafReader[] {ir1, ir2}, new LeafReader[] {ir1});
  assertEquals("v1", pr.document(0).get("f1"));
  assertEquals("v1", pr.document(0).get("f2"));
  assertNull(pr.document(0).get("f3"));
  assertNull(pr.document(0).get("f4"));
  // check that fields are there
  assertNotNull(pr.terms("f1"));
  assertNotNull(pr.terms("f2"));
  assertNotNull(pr.terms("f3"));
  assertNotNull(pr.terms("f4"));
  pr.close();

  // no stored fields at all
  pr = new ParallelLeafReader(false, new LeafReader[] {ir2}, new LeafReader[0]);
  assertNull(pr.document(0).get("f1"));
  assertNull(pr.document(0).get("f2"));
  assertNull(pr.document(0).get("f3"));
  assertNull(pr.document(0).get("f4"));
  // check that fields are there
  assertNull(pr.terms("f1"));
  assertNull(pr.terms("f2"));
  assertNotNull(pr.terms("f3"));
  assertNotNull(pr.terms("f4"));
  pr.close();

  // without overlapping
  pr = new ParallelLeafReader(true, new LeafReader[] {ir2}, new LeafReader[] {ir1});
  assertEquals("v1", pr.document(0).get("f1"));
  assertEquals("v1", pr.document(0).get("f2"));
  assertNull(pr.document(0).get("f3"));
  assertNull(pr.document(0).get("f4"));
  // check that fields are there
  assertNull(pr.terms("f1"));
  assertNull(pr.terms("f2"));
  assertNotNull(pr.terms("f3"));
  assertNotNull(pr.terms("f4"));
  pr.close();

  // no main readers
  try {
    new ParallelLeafReader(true, new LeafReader[0], new LeafReader[] {ir1});
    fail("didn't get expected exception: need a non-empty main-reader array");
  } catch (IllegalArgumentException iae) {
    // pass
  }

  dir1.close();
  dir2.close();
}
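// getDir1/getDir2 are not shown. A plausible sketch of getDir1, assuming dir1 holds
// fields f1/f2 and dir2 analogously holds f3/f4, each with values "v1"/"v2" (the field
// names are inferred from the assertions above; the exact documents are assumptions):
private Directory getDir1(Random random) throws IOException {
  Directory dir1 = newDirectory();
  IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig(new MockAnalyzer(random)));
  Document d1 = new Document();
  d1.add(newTextField("f1", "v1", Field.Store.YES));
  d1.add(newTextField("f2", "v1", Field.Store.YES));
  w1.addDocument(d1);
  Document d2 = new Document();
  d2.add(newTextField("f1", "v2", Field.Store.YES));
  d2.add(newTextField("f2", "v2", Field.Store.YES));
  w1.addDocument(d2);
  // keep a single segment so the parallel readers line up document by document
  w1.forceMerge(1);
  w1.close();
  return dir1;
}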
@Override
public void tearDown() throws Exception {
  reader1.close();
  reader2.close();
  mergedDir.close();
  merge1Dir.close();
  merge2Dir.close();
  super.tearDown();
}
@Override
public void tearDown() throws Exception {
  readerA.close();
  readerAclone.close();
  readerB.close();
  readerX.close();
  dirA.close();
  dirB.close();
  super.tearDown();
}
public void test() throws Exception {
  final Directory d = newDirectory();
  MockAnalyzer analyzer = new MockAnalyzer(random());
  analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
  final MyIndexWriter w =
      new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));

  // Try to make an index that requires merging:
  w.getConfig().setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 11));
  final int numStartDocs = atLeast(20);
  final LineFileDocs docs = new LineFileDocs(random(), true);
  for (int docIDX = 0; docIDX < numStartDocs; docIDX++) {
    w.addDocument(docs.nextDoc());
  }
  MergePolicy mp = w.getConfig().getMergePolicy();
  final int mergeAtOnce = 1 + w.segmentInfos.size();
  if (mp instanceof TieredMergePolicy) {
    ((TieredMergePolicy) mp).setMaxMergeAtOnce(mergeAtOnce);
  } else if (mp instanceof LogMergePolicy) {
    ((LogMergePolicy) mp).setMergeFactor(mergeAtOnce);
  } else {
    // skip test
    w.close();
    d.close();
    return;
  }

  final AtomicBoolean doStop = new AtomicBoolean();
  w.getConfig().setMaxBufferedDocs(2);
  Thread t =
      new Thread() {
        @Override
        public void run() {
          try {
            while (!doStop.get()) {
              w.updateDocument(
                  new Term("docid", "" + random().nextInt(numStartDocs)), docs.nextDoc());
              // Force deletes to apply
              w.getReader().close();
            }
          } catch (Throwable t) {
            throw new RuntimeException(t);
          }
        }
      };
  t.start();
  w.forceMerge(1);
  doStop.set(true);
  t.join();
  assertTrue("merge count is " + w.mergeCount.get(), w.mergeCount.get() <= 1);
  w.close();
  d.close();
  docs.close();
}
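// MyIndexWriter is not shown. A minimal sketch of an IndexWriter subclass that counts
// forced merges, which is what the assertion on w.mergeCount checks; exactly which merges
// get counted (here: those with maxNumSegments set, i.e. forceMerge-driven) is an
// assumption about the original helper:
private static class MyIndexWriter extends IndexWriter {
  final AtomicInteger mergeCount = new AtomicInteger();

  public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  @Override
  public void merge(MergePolicy.OneMerge merge) throws IOException {
    // maxNumSegments != -1 marks a merge triggered by forceMerge
    if (merge.maxNumSegments != -1) {
      mergeCount.incrementAndGet();
    }
    super.merge(merge);
  }
}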
public void testMultiConfig() throws Throwable {
  // test lots of smaller different params together
  int num = atLeast(3);
  for (int i = 0; i < num; i++) { // increase iterations for better testing
    if (VERBOSE) {
      System.out.println("\n\nTEST: top iter=" + i);
    }
    sameFieldOrder = random().nextBoolean();
    mergeFactor = random().nextInt(3) + 2;
    maxBufferedDocs = random().nextInt(3) + 2;
    int maxThreadStates = 1 + random().nextInt(10);
    boolean doReaderPooling = random().nextBoolean();
    seed++;

    int nThreads = random().nextInt(5) + 1;
    int iter = random().nextInt(5) + 1;
    int range = random().nextInt(20) + 1;
    Directory dir1 = newDirectory();
    Directory dir2 = newDirectory();
    if (VERBOSE) {
      System.out.println(
          " nThreads=" + nThreads
              + " iter=" + iter
              + " range=" + range
              + " doPooling=" + doReaderPooling
              + " maxThreadStates=" + maxThreadStates
              + " sameFieldOrder=" + sameFieldOrder
              + " mergeFactor=" + mergeFactor
              + " maxBufferedDocs=" + maxBufferedDocs);
    }
    Map<String, Document> docs =
        indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
    if (VERBOSE) {
      System.out.println("TEST: index serial");
    }
    indexSerial(random(), docs, dir2);
    if (VERBOSE) {
      System.out.println("TEST: verify");
    }
    verifyEquals(dir1, dir2, "id");
    dir1.close();
    dir2.close();
  }
}
// case 0: add self or exceed maxMergeDocs, expect exception
public void testAddSelf() throws IOException {
  // main directory
  Directory dir = newDirectory();
  // auxiliary directory
  Directory aux = newDirectory();

  IndexWriter writer = null;

  writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  // add 100 documents
  addDocs(writer, 100);
  assertEquals(100, writer.maxDoc());
  writer.close();

  writer =
      newWriter(
          aux,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
              .setOpenMode(OpenMode.CREATE)
              .setMaxBufferedDocs(1000)
              .setMergePolicy(newLogMergePolicy(false)));
  // add 40 documents in separate files
  addDocs(writer, 40);
  writer.close();

  writer =
      newWriter(
          aux,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
              .setOpenMode(OpenMode.CREATE)
              .setMaxBufferedDocs(1000)
              .setMergePolicy(newLogMergePolicy(false)));
  // recreate aux with 100 documents
  addDocs(writer, 100);
  writer.close();

  writer =
      newWriter(
          dir,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
              .setOpenMode(OpenMode.APPEND));
  try {
    // cannot add self
    writer.addIndexes(aux, dir);
    fail("adding a writer's own directory should have thrown");
  } catch (IllegalArgumentException e) {
    // the writer must be unchanged after the failed addIndexes
    assertEquals(100, writer.maxDoc());
  }
  writer.close();

  // make sure the index is correct
  verifyNumDocs(dir, 100);
  dir.close();
  aux.close();
}
@Test
public void testOpenIfChangedReplaceTaxonomy() throws Exception {
  // test openIfChanged when replaceTaxonomy is called, which is equivalent to recreate;
  // verify it works in both NRT and non-NRT mode
  Directory src = newDirectory();
  DirectoryTaxonomyWriter w = new DirectoryTaxonomyWriter(src);
  FacetLabel cp_b = new FacetLabel("b");
  w.addCategory(cp_b);
  w.close();

  for (boolean nrt : new boolean[] {false, true}) {
    Directory dir = newDirectory();
    DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(dir);

    FacetLabel cp_a = new FacetLabel("a");
    writer.addCategory(cp_a);
    if (!nrt) writer.commit();

    DirectoryTaxonomyReader r1 =
        nrt ? new DirectoryTaxonomyReader(writer) : new DirectoryTaxonomyReader(dir);
    // fill r1's caches
    assertEquals(1, r1.getOrdinal(cp_a));
    assertEquals(cp_a, r1.getPath(1));

    // now replace taxonomy
    writer.replaceTaxonomy(src);
    if (!nrt) writer.commit();

    DirectoryTaxonomyReader r2 = TaxonomyReader.openIfChanged(r1);
    assertNotNull(r2);

    // fill r2's caches
    assertEquals(1, r2.getOrdinal(cp_b));
    assertEquals(cp_b, r2.getPath(1));

    // check that r1 doesn't see cp_b
    assertEquals(TaxonomyReader.INVALID_ORDINAL, r1.getOrdinal(cp_b));
    assertEquals(cp_a, r1.getPath(1));

    // check that r2 doesn't see cp_a
    assertEquals(TaxonomyReader.INVALID_ORDINAL, r2.getOrdinal(cp_a));
    assertEquals(cp_b, r2.getPath(1));

    r2.close();
    r1.close();
    writer.close();
    dir.close();
  }

  src.close();
}
@AfterClass
public static void afterClass() throws Exception {
  searcher.close();
  reader.close();
  littleReader.close();
  dir2.close();
  directory.close();
  bigSearcher.close();
  searcher = null;
  reader = null;
  littleReader = null;
  dir2 = null;
  directory = null;
  bigSearcher = null;
}
public void testEvilSearcherFactory() throws Exception {
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  w.commit();

  final IndexReader other = DirectoryReader.open(dir);

  final SearcherFactory theEvilOne =
      new SearcherFactory() {
        @Override
        public IndexSearcher newSearcher(IndexReader ignored) {
          return LuceneTestCase.newSearcher(other);
        }
      };

  try {
    new SearcherManager(w.w, false, theEvilOne);
    fail("didn't hit expected exception");
  } catch (IllegalStateException ise) {
    // expected
  }
  w.close();
  other.close();
  dir.close();
}
public void testPrefix() throws Exception {
  Directory dir = TestUtil.getBookIndexDirectory();
  IndexReader ireader = DirectoryReader.open(dir);
  IndexSearcher searcher = new IndexSearcher(ireader);

  Term term =
      new Term(
          "category", // #A
          "/technology/computers/programming"); // #A
  PrefixQuery query = new PrefixQuery(term); // #A
  TopDocs matches = searcher.search(query, 10); // #A
  int programmingAndBelow = matches.totalHits;

  System.out.println("Programming and below: ");
  for (int i = 0; i < matches.scoreDocs.length; i++) {
    Document doc = searcher.doc(matches.scoreDocs[i].doc);
    System.out.println("match " + i + ": " + doc.get("category") + " | " + doc.get("title"));
  }

  matches = searcher.search(new TermQuery(term), 10); // #B
  int justProgramming = matches.totalHits;

  System.out.println("Just /technology/computers/programming: ");
  for (int i = 0; i < matches.scoreDocs.length; i++) {
    Document doc = searcher.doc(matches.scoreDocs[i].doc);
    System.out.println("match " + i + ": " + doc.get("category") + " | " + doc.get("title"));
  }

  assertTrue(programmingAndBelow > justProgramming);
  // searcher.close();
  dir.close();
}
public void testDemo() throws IOException, ParseException {
  Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);

  // Store the index in memory:
  Directory directory = new RAMDirectory();
  // To store an index on disk, use this instead:
  // Directory directory = FSDirectory.open("/tmp/testindex");
  IndexWriter iwriter =
      new IndexWriter(directory, analyzer, true, new IndexWriter.MaxFieldLength(25000));
  Document doc = new Document();
  String text = "This is the text to be indexed.";
  doc.add(new Field("fieldname", text, Field.Store.YES, Field.Index.ANALYZED));
  iwriter.addDocument(doc);
  iwriter.close();

  // Now search the index:
  IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
  // Parse a simple query that searches for "text":
  QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
  Query query = parser.parse("text");
  ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
  assertEquals(1, hits.length);
  // Iterate through the results:
  for (int i = 0; i < hits.length; i++) {
    Document hitDoc = isearcher.doc(hits[i].doc);
    assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
  }
  isearcher.close();
  directory.close();
}
public void testNullDocIdSetIterator() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

  final Filter filter =
      new Filter() {
        @Override
        public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
          return new DocIdSet() {
            @Override
            public DocIdSetIterator iterator() {
              return null;
            }
          };
        }
      };
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // the caching filter should return the empty set constant
  assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));

  reader.close();
  dir.close();
}
public void testSpanNot() throws Exception {
  SpanQuery[] clauses = new SpanQuery[2];
  clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
  clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "three"));
  SpanQuery spq = new SpanNearQuery(clauses, 5, true);
  SpanNotQuery snq =
      new SpanNotQuery(spq, new SpanTermQuery(new Term(PayloadHelper.FIELD, "two")));

  Directory directory = newDirectory();
  RandomIndexWriter writer =
      new RandomIndexWriter(
          random(),
          directory,
          newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));

  Document doc = new Document();
  doc.add(newTextField(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  writer.close();

  checkSpans(MultiSpansWrapper.wrap(reader, snq, SpanWeight.Postings.PAYLOADS), 1, new int[] {2});

  reader.close();
  directory.close();
}
/** Test attributes map */
public void testAttributes() throws Exception {
  Directory dir = newDirectory();
  Codec codec = getCodec();
  byte[] id = StringHelper.randomId();
  Map<String, String> attributes = new HashMap<>();
  attributes.put("key1", "value1");
  attributes.put("key2", "value2");
  SegmentInfo info =
      new SegmentInfo(
          dir,
          getVersions()[0],
          "_123",
          1,
          false,
          codec,
          Collections.emptyMap(),
          id,
          attributes,
          null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
  assertEquals(attributes, info2.getAttributes());

  // attributes map should be immutable
  expectThrows(
      UnsupportedOperationException.class,
      () -> {
        info2.getAttributes().put("bogus", "bogus");
      });

  dir.close();
}
public void testMethod() throws Exception {
  Directory directory = newDirectory();
  String[] values = new String[] {"1", "2", "3", "4"};

  RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
  for (int i = 0; i < values.length; i++) {
    Document doc = new Document();
    doc.add(newStringField(FIELD, values[i], Field.Store.YES));
    writer.addDocument(doc);
  }
  IndexReader ir = writer.getReader();
  writer.close();

  BooleanQuery booleanQuery1 = new BooleanQuery();
  booleanQuery1.add(new TermQuery(new Term(FIELD, "1")), BooleanClause.Occur.SHOULD);
  booleanQuery1.add(new TermQuery(new Term(FIELD, "2")), BooleanClause.Occur.SHOULD);

  BooleanQuery query = new BooleanQuery();
  query.add(booleanQuery1, BooleanClause.Occur.MUST);
  query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);

  IndexSearcher indexSearcher = newSearcher(ir);
  ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs;
  assertEquals("Number of matched documents", 2, hits.length);
  ir.close();
  directory.close();
}
// Verifies no *.nrm exists when all fields omit norms:
public void testNoNrmFile() throws Throwable {
  Directory ram = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random());
  IndexWriter writer =
      new IndexWriter(
          ram,
          newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
              .setMaxBufferedDocs(3)
              .setMergePolicy(newLogMergePolicy()));
  LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
  lmp.setMergeFactor(2);
  lmp.setNoCFSRatio(0.0);
  Document d = new Document();

  FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
  customType.setOmitNorms(true);
  Field f1 = newField("f1", "This field has no norms", customType);
  d.add(f1);

  for (int i = 0; i < 30; i++) {
    writer.addDocument(d);
  }

  writer.commit();

  assertNoNrm(ram);

  // force merge
  writer.forceMerge(1);
  // flush
  writer.close();

  assertNoNrm(ram);
  ram.close();
}
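// assertNoNrm is not shown. A minimal sketch that scans the directory listing and fails
// on any norms file; relying on the ".nrm"/".len" file extensions is an assumption tied
// to this Lucene era's index file naming:
private void assertNoNrm(Directory dir) throws IOException {
  for (String file : dir.listAll()) {
    // fail if any segment wrote a norms file despite omitNorms on every field
    assertFalse("unexpected norms file: " + file, file.endsWith(".nrm") || file.endsWith(".len"));
  }
}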
public void testFirstClauseWithoutPayload() throws Exception {
  Spans spans;
  IndexSearcher searcher = getSearcher();

  SpanQuery[] clauses = new SpanQuery[3];
  clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "nopayload"));
  clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "qq"));
  clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "ss"));

  SpanNearQuery spanNearQuery = new SpanNearQuery(clauses, 6, true);

  SpanQuery[] clauses2 = new SpanQuery[2];
  clauses2[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "pp"));
  clauses2[1] = spanNearQuery;

  SpanNearQuery snq = new SpanNearQuery(clauses2, 6, false);

  SpanQuery[] clauses3 = new SpanQuery[2];
  clauses3[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "np"));
  clauses3[1] = snq;

  SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);

  spans =
      MultiSpansWrapper.wrap(
          searcher.getIndexReader(), nestedSpanNearQuery, SpanWeight.Postings.PAYLOADS);
  assertTrue("spans is null and it shouldn't be", spans != null);
  checkSpans(spans, 1, new int[] {3});
  closeIndexReader.close();
  directory.close();
}
public void testListenerCalled() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
  final AtomicBoolean afterRefreshCalled = new AtomicBoolean(false);
  SearcherManager sm = new SearcherManager(iw, true, new SearcherFactory());
  sm.addListener(
      new ReferenceManager.RefreshListener() {
        @Override
        public void beforeRefresh() {}

        @Override
        public void afterRefresh(boolean didRefresh) {
          if (didRefresh) {
            afterRefreshCalled.set(true);
          }
        }
      });
  iw.addDocument(new Document());
  iw.commit();
  assertFalse(afterRefreshCalled.get());
  sm.maybeRefreshBlocking();
  assertTrue(afterRefreshCalled.get());
  sm.close();
  iw.close();
  dir.close();
}
public void testFarsiRangeFilterCollating(
    Analyzer analyzer, String firstBeg, String firstEnd, String secondBeg, String secondEnd)
    throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
  Document doc = new Document();
  doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, Field.Index.ANALYZED));
  doc.add(new Field("body", "body", Field.Store.YES, Field.Index.NOT_ANALYZED));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir);
  IndexSearcher searcher = new IndexSearcher(reader);
  Query query = new TermQuery(new Term("body", "body"));

  // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
  // orders the U+0698 character before the U+0633 character, so the single
  // index Term below should NOT be returned by a TermRangeFilter with a Farsi
  // Collator (or an Arabic one, for the case when a Farsi collator is not
  // supported).
  ScoreDoc[] result =
      searcher.search(query, new TermRangeFilter("content", firstBeg, firstEnd, true, true), 1)
          .scoreDocs;
  assertEquals("The index Term should not be included.", 0, result.length);

  result =
      searcher.search(query, new TermRangeFilter("content", secondBeg, secondEnd, true, true), 1)
          .scoreDocs;
  assertEquals("The index Term should be included.", 1, result.length);

  searcher.close();
  reader.close();
  dir.close();
}
public void testUpdateSameDoc() throws Exception {
  final Directory dir = newDirectory();

  final LineFileDocs docs = new LineFileDocs(random());
  for (int r = 0; r < 3; r++) {
    final IndexWriter w =
        new IndexWriter(
            dir, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2));
    final int numUpdates = atLeast(20);
    int numThreads = TestUtil.nextInt(random(), 2, 6);
    IndexingThread[] threads = new IndexingThread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      threads[i] = new IndexingThread(docs, w, numUpdates);
      threads[i].start();
    }

    for (int i = 0; i < numThreads; i++) {
      threads[i].join();
    }

    w.close();
  }

  IndexReader open = DirectoryReader.open(dir);
  assertEquals(1, open.numDocs());
  open.close();
  docs.close();
  dir.close();
}
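// IndexingThread is not shown. The assertion that exactly one live document remains
// implies every thread updates the same delete term. A minimal sketch under that
// assumption; the "id" field name is invented for illustration, and the LineFileDocs
// handle is kept only to match the constructor call above:
private static class IndexingThread extends Thread {
  private final LineFileDocs docs; // unused in this sketch
  private final IndexWriter writer;
  private final int num;

  IndexingThread(LineFileDocs docs, IndexWriter writer, int num) {
    this.docs = docs;
    this.writer = writer;
    this.num = num;
  }

  @Override
  public void run() {
    try {
      for (int i = 0; i < num; i++) {
        Document doc = new Document();
        doc.add(new StringField("id", "same", Field.Store.NO));
        // all threads target the same term, so at most one document survives
        writer.updateDocument(new Term("id", "same"), doc);
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}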
public void testMoreThan32ProhibitedClauses() throws Exception {
  final Directory d = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  doc.add(
      new TextField(
          "field",
          "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33",
          Field.Store.NO));
  w.addDocument(doc);
  doc = new Document();
  doc.add(new TextField("field", "33", Field.Store.NO));
  w.addDocument(doc);
  final IndexReader r = w.getReader();
  w.close();
  final IndexSearcher s = newSearcher(r);

  final BooleanQuery q = new BooleanQuery();
  for (int term = 0; term < 33; term++) {
    q.add(
        new BooleanClause(
            new TermQuery(new Term("field", "" + term)), BooleanClause.Occur.MUST_NOT));
  }
  q.add(new BooleanClause(new TermQuery(new Term("field", "33")), BooleanClause.Occur.SHOULD));

  final int[] count = new int[1];
  s.search(
      q,
      new Collector() {
        private Scorer scorer;

        @Override
        public void setScorer(Scorer scorer) {
          // Make sure we got BooleanScorer:
          this.scorer = scorer;
          assertEquals(
              "Scorer is implemented by wrong class",
              BooleanScorer.class.getName() + "$BucketScorer",
              scorer.getClass().getName());
        }

        @Override
        public void collect(int doc) {
          count[0]++;
        }

        @Override
        public void setNextReader(AtomicReaderContext context) {}

        @Override
        public boolean acceptsDocsOutOfOrder() {
          return true;
        }
      });

  assertEquals(1, count[0]);

  r.close();
  d.close();
}
/**
 * testWriterTwice3 is yet another test which tests creating a taxonomy in two separate writing
 * sessions. This test used to fail because of a bug involving commit(), explained below, and now
 * should succeed.
 */
@Test
public void testWriterTwice3() throws Exception {
  Directory indexDir = newDirectory();

  // First, create and fill the taxonomy
  TaxonomyWriter tw = new DirectoryTaxonomyWriter(indexDir);
  fillTaxonomy(tw);
  tw.close();

  // Now, open the same taxonomy and add the same categories again.
  // After a few categories, the LuceneTaxonomyWriter implementation
  // will stop looking for each category on disk, and rather read them
  // all into memory and close its reader. The bug was that it closed
  // the reader, but forgot that it did (because it didn't set the reader
  // reference to null).
  tw = new DirectoryTaxonomyWriter(indexDir);
  fillTaxonomy(tw);

  // Add one new category, just to make commit() do something:
  tw.addCategory(new FacetLabel("hi"));

  // Do a commit(). Here was a bug: if tw had a reader open, it should
  // be reopened after the commit. However, in our case the reader should
  // not be open (as explained above), but because the reference was never
  // set to null, the writer tried to reopen it and got an
  // AlreadyClosedException.
  tw.commit();
  assertEquals(expectedCategories.length + 1, tw.getSize());
  tw.close();
  indexDir.close();
}
public void startSearch(String searchString) throws IOException {
  /*analyze(searchString);*/
  try {
    Directory directory = FSDirectory.open(new File(".//Index")); // where the index is located
    IndexSearcher is = new IndexSearcher(directory); // searcher object
    QueryParser parser =
        new QueryParser(
            Version.LUCENE_31,
            "name",
            new RussianAnalyzer(Version.LUCENE_31)); // search field + analyzer
    /*
    String str1 = "фотоаппарат";
    String str2 = "телевизор";
    String str3 = "SONY";
    String total = "(" + str1 + " OR " + str2 + ")" + " AND " + str3;
    System.out.println(total);
    */
    Query query = parser.parse(searchString); // what we are searching for
    // run the search, capped at 10 documents; results holds the hits
    TopDocs results = is.search(query, null, 10);
    // getMaxScore() is the score of the best hit, totalHits the number of documents found
    System.out.println(
        "getMaxScore()=" + results.getMaxScore() + " totalHits=" + results.totalHits);
    /*proposalController.getProposalList().clear();*/
    for (ScoreDoc hits : results.scoreDocs) { // walk the hits
      Document doc = is.doc(hits.doc); // fetch the document by its doc id
      for (Proposal proposal :
          proposalFacade.findPropolsalsByProduct(Long.valueOf(doc.get("recid")))) {
        proposalController.getProposalList().add(proposal);
        // logs: "Offer found: <id>, Product: <recid>, <name>"
        _log.info(
            "Предложение найдено:"
                + proposal.getRecid().toString()
                + ",Товар: "
                + doc.get("recid")
                + ", "
                + doc.get("name"));
      }
      /*
      System.out.println("doc=" + hits.doc + " score=" + hits.score); // print the doc id and score
      addMessage(doc.get("id") + " | " + doc.get("recid") + " | " + doc.get("name")); // print the fields of the found document
      */
    }
    directory.close();
  } catch (ParseException e) {
    e.printStackTrace();
  } catch (IOException e) {
    e.printStackTrace();
  }
  addMessage("Поиск выполнен"); // "Search completed"
}
@Test
public void testSeparateReaderAndWriter2() throws Exception {
  Directory indexDir = newDirectory();
  TaxonomyWriter tw = new DirectoryTaxonomyWriter(indexDir);
  tw.commit();
  TaxonomyReader tr = new DirectoryTaxonomyReader(indexDir);

  // Test getOrdinal():
  FacetLabel author = new FacetLabel("Author");

  assertEquals(1, tr.getSize()); // the empty taxonomy has size 1 (the root)
  assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
  tw.addCategory(author);
  // before commit and refresh, no change:
  assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
  assertEquals(1, tr.getSize()); // still root only...
  // this is not enough, because tw.commit() hasn't been done yet
  assertNull(TaxonomyReader.openIfChanged(tr));
  assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
  assertEquals(1, tr.getSize()); // still root only...
  tw.commit();
  // still not enough before refresh:
  assertEquals(TaxonomyReader.INVALID_ORDINAL, tr.getOrdinal(author));
  assertEquals(1, tr.getSize()); // still root only...
  TaxonomyReader newTaxoReader = TaxonomyReader.openIfChanged(tr);
  assertNotNull(newTaxoReader);
  tr.close();
  tr = newTaxoReader;
  assertEquals(1, tr.getOrdinal(author));
  assertEquals(2, tr.getSize());
  tw.close();
  tr.close();
  indexDir.close();
}
@Override
public void tearDown() throws Exception {
  reader.close();
  searcher.close();
  dir.close();
  super.tearDown();
}
public void testCachingWorks() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
  MockFilter filter = new MockFilter();
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // first time, nested filter is called
  DocIdSet strongRef = cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertTrue("first time", filter.wasCalled());

  // make sure no exception if cache is holding the wrong docIdSet
  cacher.getDocIdSet(context, context.reader().getLiveDocs());

  // second time, nested filter should not be called
  filter.clear();
  cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertFalse("second time", filter.wasCalled());

  reader.close();
  dir.close();
}
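// MockFilter is not shown in this section. A minimal sketch of a Filter that records
// whether getDocIdSet was invoked, which is all testCachingWorks needs; returning a
// FixedBitSet sized to maxDoc is an assumption about the returned set:
static class MockFilter extends Filter {
  private boolean wasCalled;

  @Override
  public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
    // record the call so the test can tell whether the cache delegated to us
    wasCalled = true;
    return new FixedBitSet(context.reader().maxDoc());
  }

  public void clear() {
    wasCalled = false;
  }

  public boolean wasCalled() {
    return wasCalled;
  }
}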