@Override
public void doWork() throws Throwable {
  IndexReader r1 = null, r2 = null;
  synchronized (lock) {
    try {
      r1 = DirectoryReader.open(dir1);
      r2 = DirectoryReader.open(dir2);
    } catch (IOException e) {
      if (!e.getMessage().contains("on purpose")) {
        throw e;
      }
      if (r1 != null) {
        r1.close();
      }
      if (r2 != null) {
        r2.close();
      }
      return;
    }
  }
  if (r1.numDocs() != r2.numDocs()) {
    throw new RuntimeException("doc counts differ: r1=" + r1.numDocs() + " r2=" + r2.numDocs());
  }
  r1.close();
  r2.close();
}
@Override
public void tearDown() throws Exception {
  readerA.close();
  readerB.close();
  readerX.close();
  super.tearDown();
}
// LUCENE-325: test forceMergeDeletes without waiting, when
// many adjacent merges are required
public void testForceMergeDeletes3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(
          dir,
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setMaxBufferedDocs(2)
              .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
              .setMergePolicy(newLogMergePolicy(50)));

  FieldType customType = new FieldType();
  customType.setStored(true);

  FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
  customType1.setTokenized(false);
  customType1.setStoreTermVectors(true);
  customType1.setStoreTermVectorPositions(true);
  customType1.setStoreTermVectorOffsets(true);

  Document document = new Document();
  Field storedField = newField("stored", "stored", customType);
  document.add(storedField);
  Field termVectorField = newField("termVector", "termVector", customType1);
  document.add(termVectorField);
  Field idField = newStringField("id", "", Field.Store.NO);
  document.add(idField);
  for (int i = 0; i < 98; i++) {
    idField.setStringValue("" + i);
    writer.addDocument(document);
  }
  writer.close();

  IndexReader ir = DirectoryReader.open(dir);
  assertEquals(98, ir.maxDoc());
  assertEquals(98, ir.numDocs());
  ir.close();

  IndexWriterConfig dontMergeConfig =
      new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE);
  writer = new IndexWriter(dir, dontMergeConfig);
  for (int i = 0; i < 98; i += 2) {
    writer.deleteDocuments(new Term("id", "" + i));
  }
  writer.close();

  ir = DirectoryReader.open(dir);
  assertEquals(49, ir.numDocs());
  ir.close();

  writer =
      new IndexWriter(
          dir,
          newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(3)));
  writer.forceMergeDeletes(false);
  writer.close();

  ir = DirectoryReader.open(dir);
  assertEquals(49, ir.maxDoc());
  assertEquals(49, ir.numDocs());
  ir.close();
  dir.close();
}
// test when delete terms only apply to disk segments
public void testNonRAMDelete() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter modifier =
      new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
  modifier.setMaxBufferedDocs(2);
  modifier.setMaxBufferedDeleteTerms(2);

  int id = 0;
  int value = 100;

  for (int i = 0; i < 7; i++) {
    addDoc(modifier, ++id, value);
  }
  modifier.commit();

  assertEquals(0, modifier.getNumBufferedDocuments());
  assertTrue(0 < modifier.getSegmentCount());

  modifier.commit();

  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(7, reader.numDocs());
  reader.close();

  modifier.deleteDocuments(new Term("value", String.valueOf(value)));
  modifier.commit();

  reader = IndexReader.open(dir, true);
  assertEquals(0, reader.numDocs());
  reader.close();
  modifier.close();
  dir.close();
}
// test rollback of deleteAll()
public void testDeleteAllRollback() throws IOException {
  Directory dir = new MockRAMDirectory();
  IndexWriter modifier =
      new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
  modifier.setMaxBufferedDocs(2);
  modifier.setMaxBufferedDeleteTerms(2);

  int id = 0;
  int value = 100;

  for (int i = 0; i < 7; i++) {
    addDoc(modifier, ++id, value);
  }
  modifier.commit();

  addDoc(modifier, ++id, value);

  IndexReader reader = IndexReader.open(dir, true);
  assertEquals(7, reader.numDocs());
  reader.close();

  // Delete all
  modifier.deleteAll();

  // Roll it back
  modifier.rollback();
  modifier.close();

  // Validate that the docs are still there
  reader = IndexReader.open(dir, true);
  assertEquals(7, reader.numDocs());
  reader.close();
  dir.close();
}
@AfterClass
public static void afterClassBaseTestRangeFilter() throws Exception {
  signedIndexReader.close();
  unsignedIndexReader.close();
  signedIndexDir.index.close();
  unsignedIndexDir.index.close();
  signedIndexReader = null;
  unsignedIndexReader = null;
  signedIndexDir = null;
  unsignedIndexDir = null;
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommit() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(
          dir,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
              .setMaxBufferedDocs(2)
              .setMergePolicy(newLogMergePolicy(5)));
  writer.commit();

  for (int i = 0; i < 23; i++) {
    TestIndexWriter.addDoc(writer);
  }

  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(0, reader.numDocs());

  writer.prepareCommit();

  IndexReader reader2 = DirectoryReader.open(dir);
  assertEquals(0, reader2.numDocs());

  writer.commit();

  IndexReader reader3 = DirectoryReader.openIfChanged(reader);
  assertNotNull(reader3);
  assertEquals(0, reader.numDocs());
  assertEquals(0, reader2.numDocs());
  assertEquals(23, reader3.numDocs());
  reader.close();
  reader2.close();

  for (int i = 0; i < 17; i++) {
    TestIndexWriter.addDoc(writer);
  }

  assertEquals(23, reader3.numDocs());
  reader3.close();
  reader = DirectoryReader.open(dir);
  assertEquals(23, reader.numDocs());
  reader.close();

  writer.prepareCommit();

  reader = DirectoryReader.open(dir);
  assertEquals(23, reader.numDocs());
  reader.close();

  writer.commit();
  reader = DirectoryReader.open(dir);
  assertEquals(40, reader.numDocs());
  reader.close();
  writer.close();
  dir.close();
}
@AfterClass
public static void afterClass() throws Exception {
  searcher.close();
  reader.close();
  littleReader.close();
  dir2.close();
  directory.close();
  bigSearcher.close();
  searcher = null;
  reader = null;
  littleReader = null;
  dir2 = null;
  directory = null;
  bigSearcher = null;
}
public void testUpdateSameDoc() throws Exception {
  final Directory dir = newDirectory();

  final LineFileDocs docs = new LineFileDocs(random());
  for (int r = 0; r < 3; r++) {
    final IndexWriter w =
        new IndexWriter(
            dir, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2));
    final int numUpdates = atLeast(20);
    int numThreads = TestUtil.nextInt(random(), 2, 6);
    IndexingThread[] threads = new IndexingThread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      threads[i] = new IndexingThread(docs, w, numUpdates);
      threads[i].start();
    }

    for (int i = 0; i < numThreads; i++) {
      threads[i].join();
    }

    w.close();
  }

  IndexReader open = DirectoryReader.open(dir);
  assertEquals(1, open.numDocs());
  open.close();
  docs.close();
  dir.close();
}
public void testEvilSearcherFactory() throws Exception {
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  w.commit();

  final IndexReader other = DirectoryReader.open(dir);

  final SearcherFactory theEvilOne =
      new SearcherFactory() {
        @Override
        public IndexSearcher newSearcher(IndexReader ignored) {
          return LuceneTestCase.newSearcher(other);
        }
      };

  try {
    new SearcherManager(w.w, false, theEvilOne);
    fail("didn't hit expected exception");
  } catch (IllegalStateException ise) {
    // expected
  }
  w.close();
  other.close();
  dir.close();
}
@Override
public void tearDown() throws Exception {
  reader.close();
  searcher.close();
  dir.close();
  super.tearDown();
}
public void testMoreThan32ProhibitedClauses() throws Exception {
  final Directory d = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  doc.add(
      new TextField(
          "field",
          "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33",
          Field.Store.NO));
  w.addDocument(doc);
  doc = new Document();
  doc.add(new TextField("field", "33", Field.Store.NO));
  w.addDocument(doc);
  final IndexReader r = w.getReader();
  w.close();
  final IndexSearcher s = newSearcher(r);

  final BooleanQuery q = new BooleanQuery();
  for (int term = 0; term < 33; term++) {
    q.add(
        new BooleanClause(
            new TermQuery(new Term("field", "" + term)), BooleanClause.Occur.MUST_NOT));
  }
  q.add(new BooleanClause(new TermQuery(new Term("field", "33")), BooleanClause.Occur.SHOULD));

  final int[] count = new int[1];
  s.search(
      q,
      new Collector() {
        private Scorer scorer;

        @Override
        public void setScorer(Scorer scorer) {
          // Make sure we got BooleanScorer:
          this.scorer = scorer;
          assertEquals(
              "Scorer is implemented by wrong class",
              BooleanScorer.class.getName() + "$BucketScorer",
              scorer.getClass().getName());
        }

        @Override
        public void collect(int doc) {
          count[0]++;
        }

        @Override
        public void setNextReader(AtomicReaderContext context) {}

        @Override
        public boolean acceptsDocsOutOfOrder() {
          return true;
        }
      });

  assertEquals(1, count[0]);
  r.close();
  d.close();
}
public void testFirstClauseWithoutPayload() throws Exception {
  Spans spans;
  IndexSearcher searcher = getSearcher();

  SpanQuery[] clauses = new SpanQuery[3];
  clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "nopayload"));
  clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "qq"));
  clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "ss"));

  SpanNearQuery spanNearQuery = new SpanNearQuery(clauses, 6, true);

  SpanQuery[] clauses2 = new SpanQuery[2];
  clauses2[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "pp"));
  clauses2[1] = spanNearQuery;

  SpanNearQuery snq = new SpanNearQuery(clauses2, 6, false);

  SpanQuery[] clauses3 = new SpanQuery[2];
  clauses3[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "np"));
  clauses3[1] = snq;

  SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);
  spans =
      MultiSpansWrapper.wrap(
          searcher.getIndexReader(), nestedSpanNearQuery, SpanWeight.Postings.PAYLOADS);

  assertTrue("spans is null and it shouldn't be", spans != null);
  checkSpans(spans, 1, new int[] {3});
  closeIndexReader.close();
  directory.close();
}
public String getSpecificFreqTermInIndex(
    int KIntopK,
    ArrayList<String> sentQueries,
    int specificFrec,
    boolean allranges,
    boolean versionOld) {
  IndexReader indexReader = null;
  try {
    indexReader = IndexReader.open(indexDirectory);
  } catch (CorruptIndexException e) {
    e.printStackTrace();
    return ""; // reader could not be opened; bail out instead of risking an NPE below
  } catch (IOException e) {
    e.printStackTrace();
    return "";
  }
  String mostFreqTerm = "";
  try {
    mostFreqTerm =
        freqTermsFinderInIndex.SpecificFreqTerms(
            indexDirectory,
            analyzer,
            indexReader,
            KIntopK,
            sentQueries,
            specificFrec,
            allranges,
            versionOld);
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    // close the reader even if SpecificFreqTerms throws
    try {
      indexReader.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
  return mostFreqTerm;
}
public void testCachingWorks() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
  MockFilter filter = new MockFilter();
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // first time, nested filter is called
  DocIdSet strongRef = cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertTrue("first time", filter.wasCalled());

  // make sure no exception if cache is holding the wrong docIdSet
  cacher.getDocIdSet(context, context.reader().getLiveDocs());

  // second time, nested filter should not be called
  filter.clear();
  cacher.getDocIdSet(context, context.reader().getLiveDocs());
  assertFalse("second time", filter.wasCalled());

  reader.close();
  dir.close();
}
/*
 * Index all child directories (only first-level directories) of the parent
 * directory; the indexed data is stored under a directory with the same name
 * as the source directory.
 */
private long indexDirectories(String parent, String[] dirs, String index, SetupParameters Pa)
    throws FileHandlerException, IOException {
  long sumDocs = 0;
  // index each directory in parent directory
  for (int i = 0; i < dirs.length; i++) {
    System.out.println("\t-----FOLDER----- :" + dirs[i].toUpperCase());
    String dir_index = index + "/" + dirs[i];
    if ((index.endsWith("\\")) || (index.endsWith("/"))) {
      dir_index = index + dirs[i];
    }
    Directory di = FSDirectory.getDirectory(new File(dir_index), true);
    Pa.setDir(di);
    Pa.setWriter(new IndexWriter(Pa.getDir(), Pa.getAnalyzer(), true));
    // use File(parent, child) instead of concatenating "\\" so the path is portable
    this.index(dirs[i].toLowerCase(), Pa.getWriter(), new File(parent, dirs[i]));
    Pa.getWriter().optimize();
    Pa.getWriter().close();
    // IndexReader.open is static; call it on the class, not through an instance
    IndexReader reader = IndexReader.open(Pa.getDir());
    sumDocs += reader.numDocs();
    reader.close();
  }
  return sumDocs;
}
public static void main(String[] args) throws IOException, ParseException {
  String indexDir = "C:/lucenedir";
  Directory directory = FSDirectory.open(Paths.get(indexDir));
  IndexReader reader = DirectoryReader.open(directory);
  IndexSearcher searcher = new IndexSearcher(reader);
  int day = (int) (new Date().getTime() / Constans.DAY_MILLIS);
  QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
  Query query = parser.parse("java in action");
  Query customScoreQuery =
      new RecencyBoostCustomScoreQuery(query, 2.0, day, 6 * 365, "pubmonthAsDay");
  Sort sort =
      new Sort(
          new SortField[] {
            SortField.FIELD_SCORE, new SortField("title2", SortField.Type.STRING)
          });
  TopDocs hits = searcher.search(customScoreQuery, null, Integer.MAX_VALUE, sort, true, false);

  for (int i = 0; i < hits.scoreDocs.length; i++) {
    // Either way works to fetch the Document: searcher.doc internally
    // delegates to reader.document
    // Document doc = reader.document(hits.scoreDocs[i].doc);
    Document doc = searcher.doc(hits.scoreDocs[i].doc);
    System.out.println(
        (1 + i)
            + ": "
            + doc.get("title")
            + ": pubmonth="
            + doc.get("pubmonth")
            + " score="
            + hits.scoreDocs[i].score);
  }

  reader.close();
  directory.close();
}
private float checkPhraseQuery(Document doc, PhraseQuery query, int slop, int expectedNumResults)
    throws Exception {
  query.setSlop(slop);

  Directory ramDir = newDirectory();
  RandomIndexWriter writer =
      new RandomIndexWriter(random, ramDir, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
  writer.addDocument(doc);

  IndexReader reader = writer.getReader();

  IndexSearcher searcher = newSearcher(reader);
  TopDocs td = searcher.search(query, null, 10);
  // System.out.println("slop: "+slop+" query: "+query+" doc: "+doc+" Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore());
  assertEquals(
      "slop: " + slop + " query: " + query + " doc: " + doc + " Wrong number of hits",
      expectedNumResults,
      td.totalHits);

  // QueryUtils.check(query,searcher);
  writer.close();
  searcher.close();
  reader.close();
  ramDir.close();

  return td.getMaxScore();
}
public void testNullDocIdSetIterator() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.close();

  IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
  AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

  final Filter filter =
      new Filter() {
        @Override
        public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
          return new DocIdSet() {
            @Override
            public DocIdSetIterator iterator() {
              return null;
            }
          };
        }
      };
  CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

  // the caching filter should return the empty set constant
  assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));

  reader.close();
  dir.close();
}
// LUCENE-1262
public void testExceptions() throws Throwable {
  Path indexDir = createTempDir("testfieldswriterexceptions");

  Directory fsDir = newFSDirectory(indexDir);
  FaultyFSDirectory dir = new FaultyFSDirectory(fsDir);
  IndexWriterConfig iwc =
      newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE);
  IndexWriter writer = new IndexWriter(dir, iwc);
  for (int i = 0; i < 2; i++) {
    writer.addDocument(testDoc);
  }
  writer.forceMerge(1);
  writer.close();

  IndexReader reader = DirectoryReader.open(dir);
  dir.startFailing();

  boolean exc = false;

  for (int i = 0; i < 2; i++) {
    try {
      reader.document(i);
    } catch (IOException ioe) {
      // expected
      exc = true;
    }
    try {
      reader.document(i);
    } catch (IOException ioe) {
      // expected
      exc = true;
    }
  }
  assertTrue(exc);
  reader.close();
  dir.close();
}
/**
 * Given an input word, return the id list of matching sentences from the Lucene index.
 *
 * @param input input word
 * @param catalogName catalog (domain) name which we'd like to search in
 * @param limit how many hits are needed (0 means all)
 */
public List<String> query(String input, String catalogName, int limit) {
  List<String> res = new ArrayList<String>();
  try {
    catalog c = catalogs.get(catalogName);
    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(c.indexPath)));
    IndexSearcher searcher = new IndexSearcher(reader);
    QueryParser parser = new QueryParser("contents", analyzer);
    Query query = parser.parse(QueryParser.escape(input));
    int n = limit > 0 ? limit : searcher.count(query);
    if (n == 0) n = 1; // searcher.search requires n >= 1, even when there are no hits
    TopDocs results = searcher.search(query, n);
    int endPos;
    if (limit != 0) endPos = Math.min(results.totalHits, limit); // 1st n hits
    else endPos = results.totalHits; // all hits
    for (int i = 0; i < endPos; i++) {
      int id = results.scoreDocs[i].doc;
      Document doc = searcher.doc(id);
      res.add(doc.get("filename"));
    }
    reader.close();
    return res;
  } catch (ParseException e) {
    log(e.getMessage());
  } catch (IOException e) {
    log(e.getMessage());
  }
  return res;
}
public void test() throws IOException {
  assertTrue(dir != null);
  assertTrue(fieldInfos != null);
  IndexReader reader = DirectoryReader.open(dir);
  Document doc = reader.document(0);
  assertTrue(doc != null);
  assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null);

  Field field = (Field) doc.getField(DocHelper.TEXT_FIELD_2_KEY);
  assertTrue(field != null);
  assertTrue(field.fieldType().storeTermVectors());

  assertFalse(field.fieldType().omitNorms());
  assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);

  field = (Field) doc.getField(DocHelper.TEXT_FIELD_3_KEY);
  assertTrue(field != null);
  assertFalse(field.fieldType().storeTermVectors());
  assertTrue(field.fieldType().omitNorms());
  assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);

  field = (Field) doc.getField(DocHelper.NO_TF_KEY);
  assertTrue(field != null);
  assertFalse(field.fieldType().storeTermVectors());
  assertFalse(field.fieldType().omitNorms());
  assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS);

  DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
  reader.document(0, visitor);
  final List<IndexableField> fields = visitor.getDocument().getFields();
  assertEquals(1, fields.size());
  assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name());
  reader.close();
}
public void testSpanNot() throws Exception {
  SpanQuery[] clauses = new SpanQuery[2];
  clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
  clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "three"));
  SpanQuery spq = new SpanNearQuery(clauses, 5, true);
  SpanNotQuery snq =
      new SpanNotQuery(spq, new SpanTermQuery(new Term(PayloadHelper.FIELD, "two")));

  Directory directory = newDirectory();
  RandomIndexWriter writer =
      new RandomIndexWriter(
          random(),
          directory,
          newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(similarity));

  Document doc = new Document();
  doc.add(newTextField(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  writer.close();

  checkSpans(MultiSpansWrapper.wrap(reader, snq, SpanWeight.Postings.PAYLOADS), 1, new int[] {2});
  reader.close();
  directory.close();
}
public void search01() {
  try {
    IndexReader reader = IndexReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(reader);
    TermQuery query = new TermQuery(new Term("email", "*****@*****.**"));
    TopDocs tds = searcher.search(query, 10);
    for (ScoreDoc sd : tds.scoreDocs) {
      Document doc = searcher.doc(sd.doc);
      System.out.println(
          "("
              + sd.doc
              + "-"
              + doc.getBoost()
              + "-"
              + sd.score
              + ")"
              + doc.get("name")
              + "["
              + doc.get("email")
              + "]-->"
              + doc.get("id")
              + ","
              + doc.get("attach")
              + ","
              + doc.get("date")
              + ","
              + doc.getValues("email")[1]);
    }
    reader.close();
  } catch (CorruptIndexException e) {
    e.printStackTrace();
  } catch (IOException e) {
    e.printStackTrace();
  }
}
public void testFarsiRangeFilterCollating(
    Analyzer analyzer, String firstBeg, String firstEnd, String secondBeg, String secondEnd)
    throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
  Document doc = new Document();
  doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, Field.Index.ANALYZED));
  doc.add(new Field("body", "body", Field.Store.YES, Field.Index.NOT_ANALYZED));
  writer.addDocument(doc);
  writer.close();
  IndexReader reader = IndexReader.open(dir);
  IndexSearcher searcher = new IndexSearcher(reader);
  Query query = new TermQuery(new Term("body", "body"));

  // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
  // orders the U+0698 character before the U+0633 character, so the single
  // index Term below should NOT be returned by a TermRangeFilter with a Farsi
  // Collator (or an Arabic one for the case when Farsi searcher not
  // supported).
  ScoreDoc[] result =
      searcher.search(query, new TermRangeFilter("content", firstBeg, firstEnd, true, true), 1)
          .scoreDocs;
  assertEquals("The index Term should not be included.", 0, result.length);

  result =
      searcher.search(query, new TermRangeFilter("content", secondBeg, secondEnd, true, true), 1)
          .scoreDocs;
  assertEquals("The index Term should be included.", 1, result.length);

  searcher.close();
  reader.close();
  dir.close();
}
@Override
protected void tearDown() throws Exception {
  s.close();
  r.close();
  index.close();
  super.tearDown();
}
public void testMethod() throws Exception {
  Directory directory = newDirectory();
  String[] values = new String[] {"1", "2", "3", "4"};

  RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
  for (int i = 0; i < values.length; i++) {
    Document doc = new Document();
    doc.add(newStringField(FIELD, values[i], Field.Store.YES));
    writer.addDocument(doc);
  }
  IndexReader ir = writer.getReader();
  writer.close();

  BooleanQuery booleanQuery1 = new BooleanQuery();
  booleanQuery1.add(new TermQuery(new Term(FIELD, "1")), BooleanClause.Occur.SHOULD);
  booleanQuery1.add(new TermQuery(new Term(FIELD, "2")), BooleanClause.Occur.SHOULD);

  BooleanQuery query = new BooleanQuery();
  query.add(booleanQuery1, BooleanClause.Occur.MUST);
  query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);

  IndexSearcher indexSearcher = newSearcher(ir);
  ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs;
  assertEquals("Number of matched documents", 2, hits.length);
  ir.close();
  directory.close();
}
public void doTest(int[] docs) throws Exception {
  Directory dir = makeIndex();
  IndexReader reader = IndexReader.open(dir, true);
  for (int i = 0; i < docs.length; i++) {
    Document d = reader.document(docs[i], SELECTOR);
    d.get(MAGIC_FIELD);

    List<Fieldable> fields = d.getFields();
    for (Iterator<Fieldable> fi = fields.iterator(); fi.hasNext(); ) {
      Fieldable f = null;
      try {
        f = fi.next();
        String fname = f.name();
        String fval = f.stringValue();
        assertNotNull(docs[i] + " FIELD: " + fname, fval);
        String[] vals = fval.split("#");
        if (!dataset.contains(vals[0]) || !dataset.contains(vals[1])) {
          fail("FIELD:" + fname + ",VAL:" + fval);
        }
      } catch (Exception e) {
        throw new Exception(docs[i] + " WTF: " + f.name(), e);
      }
    }
  }
  reader.close();
  dir.close();
}
public static BoboIndexReader getBoboIndexReader(Directory idxDir) throws BrowseException {
  try {
    if (!BoboIndexReader.indexExists(idxDir)) {
      throw new BrowseException("Index does not exist at: " + idxDir);
    }
  } catch (IOException ioe) {
    throw new BrowseException(ioe.getMessage(), ioe);
  }

  IndexReader reader = null;
  try {
    reader = IndexReader.open(idxDir, true);
  } catch (IOException ioe) {
    throw new BrowseException(ioe.getMessage(), ioe);
  }

  BoboIndexReader bReader = null;
  try {
    bReader = BoboIndexReader.getInstance(reader);
  } catch (IOException ioe) {
    if (reader != null) {
      try {
        reader.close();
      } catch (IOException e) {
        logger.error(e.getMessage(), e);
      }
    }
    throw new BrowseException(ioe.getMessage(), ioe);
  }
  return bReader;
}
public void test() throws Exception {
  BaseDirectoryWrapper d = newDirectory();
  d.setCheckIndexOnClose(false); // we nuke files, but verify the reader still works
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  int numDocs = atLeast(100);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(newField("foo", "bar", TextField.TYPE_NOT_STORED));
    w.addDocument(doc);
  }

  IndexReader r = w.getReader();
  w.commit();
  w.close();
  for (String fileName : d.listAll()) {
    try {
      d.deleteFile(fileName);
      // may succeed, e.g. if the file is completely read into RAM.
    } catch (IOException ioe) {
      // ignore: this means codec (correctly) is holding
      // the file open
    }
  }
  for (LeafReaderContext cxt : r.leaves()) {
    TestUtil.checkReader(cxt.reader());
  }

  r.close();
  d.close();
}