/** Closes both wrapped directories exactly once; subsequent calls are no-ops. */
public void close() throws IOException {
  if (!doClose) {
    return;
  }
  // Close the secondary first; the finally block guarantees the primary is
  // closed even if the secondary close fails.
  try {
    secondaryDir.close();
  } finally {
    primaryDir.close();
  }
  // Only mark closed after both closes succeeded, matching the original
  // retry-on-failure semantics.
  doClose = false;
}
// setup the index @Override public void setUp() throws Exception { super.setUp(); indexDir = _TestUtil.getTempDir("RAMDirIndex"); Directory dir = newFSDirectory(indexDir); IndexWriter writer = new IndexWriter( dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setOpenMode(OpenMode.CREATE)); // add some documents Document doc = null; for (int i = 0; i < docsToAdd; i++) { doc = new Document(); doc.add( newField( "content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED)); writer.addDocument(doc); } assertEquals(docsToAdd, writer.maxDoc()); writer.close(); dir.close(); }
/**
 * Runs three rounds of concurrent updates against the same document and
 * verifies that exactly one live document remains afterwards.
 */
public void testUpdateSameDoc() throws Exception {
  final Directory dir = newDirectory();
  final LineFileDocs docs = new LineFileDocs(random());

  for (int round = 0; round < 3; round++) {
    final IndexWriter writer =
        new IndexWriter(
            dir, newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2));
    final int updatesPerThread = atLeast(20);
    final int threadCount = TestUtil.nextInt(random(), 2, 6);

    IndexingThread[] workers = new IndexingThread[threadCount];
    for (int t = 0; t < threadCount; t++) {
      workers[t] = new IndexingThread(docs, writer, updatesPerThread);
      workers[t].start();
    }
    for (IndexingThread worker : workers) {
      worker.join();
    }
    writer.close();
  }

  // All threads updated the same doc, so only one may survive.
  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(1, reader.numDocs());
  reader.close();

  docs.close();
  dir.close();
}
public void testRAMDirectory() throws IOException { Directory dir = newFSDirectory(indexDir); MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random, new RAMDirectory(dir)); // close the underlaying directory dir.close(); // Check size assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes()); // open reader to test document count IndexReader reader = IndexReader.open(ramDir, true); assertEquals(docsToAdd, reader.numDocs()); // open search zo check if all doc's are there IndexSearcher searcher = newSearcher(reader); // search for all documents for (int i = 0; i < docsToAdd; i++) { Document doc = searcher.doc(i); assertTrue(doc.getField("content") != null); } // cleanup reader.close(); searcher.close(); }
// Verify: do stress test, by opening IndexReaders and // IndexWriters over & over in 2 threads and making sure // no unexpected exceptions are raised: public void testStressLocks() throws Exception { Path tempPath = createTempDir(); assumeFalse("cannot handle buggy Files.delete", TestUtil.hasWindowsFS(tempPath)); Directory dir = getDirectory(tempPath); // First create a 1 doc index: IndexWriter w = new IndexWriter( dir, new IndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)); addDoc(w); w.close(); WriterThread writer = new WriterThread(100, dir); SearcherThread searcher = new SearcherThread(100, dir); writer.start(); searcher.start(); while (writer.isAlive() || searcher.isAlive()) { Thread.sleep(1000); } assertTrue("IndexWriter hit unexpected exceptions", !writer.hitException); assertTrue("IndexSearcher hit unexpected exceptions", !searcher.hitException); dir.close(); }
/** Test ensureValid returns true after acquire */ public void testValidAfterAcquire() throws IOException { Path tempPath = createTempDir(); Directory dir = getDirectory(tempPath); Lock l = dir.obtainLock("commit"); l.ensureValid(); // no exception l.close(); dir.close(); }
/**
 * Closes every delegate directory and resets the in-memory file metadata.
 *
 * <p>Unlike the original, a failing delegate no longer prevents the remaining
 * delegates from being closed: the first {@link IOException} is rethrown and
 * any later failures are attached as suppressed exceptions.
 */
@Override
public void close() throws IOException {
  IOException failure = null;
  for (Directory delegate : distributor.all()) {
    try {
      delegate.close();
    } catch (IOException e) {
      if (failure == null) {
        failure = e;
      } else {
        failure.addSuppressed(e);
      }
    }
  }
  // Reset metadata even when a close failed, so the store never points at
  // directories that are (partially) closed.
  synchronized (mutex) {
    filesMetadata = ImmutableMap.of();
    files = Strings.EMPTY_ARRAY;
  }
  if (failure != null) {
    throw failure;
  }
}
/** Test closing locks twice */ public void testDoubleClose() throws IOException { Path tempPath = createTempDir(); Directory dir = getDirectory(tempPath); Lock l = dir.obtainLock("commit"); l.close(); l.close(); // close again, should be no exception dir.close(); }
private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS) throws Exception { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(random); IndexWriterConfig conf = newIndexWriterConfig(analyzer); final MergePolicy mp = conf.getMergePolicy(); mp.setNoCFSRatio(useCompoundFiles ? 1.0 : 0.0); IndexWriter writer = new IndexWriter(directory, conf); if (VERBOSE) { System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS); } for (int j = 0; j < MAX_DOCS; j++) { Document d = new Document(); d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES)); d.add(newTextField(ID_FIELD, Integer.toString(j), Field.Store.YES)); writer.addDocument(d); } writer.close(); // try a search without OR IndexReader reader = DirectoryReader.open(directory); IndexSearcher searcher = newSearcher(reader); Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY)); out.println("Query: " + query.toString(PRIORITY_FIELD)); if (VERBOSE) { System.out.println("TEST: search query=" + query); } final Sort sort = new Sort(SortField.FIELD_SCORE, new SortField(ID_FIELD, SortField.Type.INT)); ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs; printHits(out, hits, searcher); checkHits(hits, MAX_DOCS, searcher); // try a new search with OR searcher = newSearcher(reader); hits = null; BooleanQuery booleanQuery = new BooleanQuery(); booleanQuery.add( new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY)), BooleanClause.Occur.SHOULD); booleanQuery.add( new TermQuery(new Term(PRIORITY_FIELD, MED_PRIORITY)), BooleanClause.Occur.SHOULD); out.println("Query: " + booleanQuery.toString(PRIORITY_FIELD)); hits = searcher.search(booleanQuery, null, MAX_DOCS, sort).scoreDocs; printHits(out, hits, searcher); checkHits(hits, MAX_DOCS, searcher); reader.close(); directory.close(); }
/**
 * Idempotently closes the store: closes every delegate directory and clears
 * the cached file metadata.
 *
 * <p>Fixed so a failing delegate no longer leaks the remaining directories:
 * all delegates are closed, the first {@link IOException} is rethrown with
 * later failures attached as suppressed exceptions.
 */
synchronized void closeInternal() throws IOException {
  if (!isOpen) {
    return;
  }
  isOpen = false;
  IOException failure = null;
  for (Directory delegate : distributor.all()) {
    try {
      delegate.close();
    } catch (IOException e) {
      if (failure == null) {
        failure = e;
      } else {
        failure.addSuppressed(e);
      }
    }
  }
  // Clear metadata regardless of close failures; the store is closed either way.
  synchronized (mutex) {
    filesMetadata = ImmutableOpenMap.of();
    files = Strings.EMPTY_ARRAY;
  }
  if (failure != null) {
    throw failure;
  }
}
/**
 * Verifies that createTempOutput never hands back a name that already exists,
 * even when a file with the natural candidate name was created beforehand.
 */
public void testCreateTempOutputSameName() throws Exception {
  Directory fsDir = FSDirectory.open(createTempDir("verify"));
  NRTCachingDirectory nrtDir = new NRTCachingDirectory(fsDir, 2.0, 25.0);

  // Occupy the name a temp output for ("foo", "bar") would naturally pick.
  String reservedName = "foo_bar_0.tmp";
  nrtDir.createOutput(reservedName, IOContext.DEFAULT).close();

  IndexOutput tempOut = nrtDir.createTempOutput("foo", "bar", IOContext.DEFAULT);
  assertFalse(reservedName.equals(tempOut.getName()));
  tempOut.close();

  nrtDir.close();
  fsDir.close();
}
/** Test ensureValid throws exception after close */
public void testInvalidAfterClose() throws IOException {
  Path tempPath = createTempDir();
  Directory dir = getDirectory(tempPath);
  Lock lock = dir.obtainLock("commit");
  lock.close();
  // Once released, validity checks must fail fast.
  expectThrows(AlreadyClosedException.class, lock::ensureValid);
  dir.close();
}
/**
 * Shuts down all Tor subsystems in a fixed order: management, connections,
 * directory cache, configuration, and finally the logger (kept alive last so
 * the shutdown itself can still be logged).
 *
 * @param force set to true, if everything shall go fast. For graceful end, set to false
 */
public void close(boolean force) {
  Logger.logGeneral(Logger.INFO, "TorJava ist closing down");
  // shutdown mgmt
  mgmt.close();
  // shut down connections; `force` skips the graceful per-connection teardown
  fnh.close(force);
  // shutdown/save directory state to the on-disk cache file
  dir.close(TorConfig.getCacheFilename());
  // shutdown and persist config file
  config.close();
  // close hidden services
  // TODO close hidden services, once they are implemented and work
  // kill logger last — everything above may still emit log lines
  Logger.logGeneral(Logger.INFO, "Tor.close(): CLOSED");
  log.close();
}
/**
 * Verifies that RAMDirectory's incrementally tracked size stays consistent
 * with a full recomputation, both before and after numThreads threads add
 * documents concurrently.
 */
public void testRAMDirectorySize() throws IOException, InterruptedException {
  Directory dir = newFSDirectory(indexDir);
  final MockDirectoryWrapper ramDir = new MockDirectoryWrapper(random, new RAMDirectory(dir));
  dir.close();

  final IndexWriter writer =
      new IndexWriter(
          ramDir,
          new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
              .setOpenMode(OpenMode.APPEND));
  writer.optimize();

  // Tracked size must match a recomputation before the concurrent writes.
  assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());

  Thread[] threads = new Thread[numThreads];
  for (int i = 0; i < numThreads; i++) {
    final int num = i;
    threads[i] =
        new Thread() {
          @Override
          public void run() {
            for (int j = 1; j < docsPerThread; j++) {
              Document doc = new Document();
              doc.add(
                  newField(
                      "sizeContent",
                      English.intToEnglish(num * docsPerThread + j).trim(),
                      Field.Store.YES,
                      Field.Index.NOT_ANALYZED));
              try {
                writer.addDocument(doc);
              } catch (IOException e) {
                throw new RuntimeException(e);
              }
            }
          }
        };
  }
  for (int i = 0; i < numThreads; i++) threads[i].start();
  for (int i = 0; i < numThreads; i++) threads[i].join();

  writer.optimize();
  // ... and again after all threads finished.
  assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());

  writer.close();
  // Fix: the original leaked ramDir; MockDirectoryWrapper validates on close.
  ramDir.close();
}
public void testObtainConcurrently() throws InterruptedException, IOException { Path tempPath = createTempDir(); final Directory directory = getDirectory(tempPath); final AtomicBoolean running = new AtomicBoolean(true); final AtomicInteger atomicCounter = new AtomicInteger(0); final ReentrantLock assertingLock = new ReentrantLock(); int numThreads = 2 + random().nextInt(10); final int runs = atLeast(10000); CyclicBarrier barrier = new CyclicBarrier(numThreads); Thread[] threads = new Thread[numThreads]; for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { @Override public void run() { try { barrier.await(); } catch (Exception e) { throw new RuntimeException(e); } while (running.get()) { try (Lock lock = directory.obtainLock("foo.lock")) { assertFalse(assertingLock.isLocked()); if (assertingLock.tryLock()) { assertingLock.unlock(); } else { fail(); } assert lock != null; // stupid compiler } catch (IOException ex) { // } if (atomicCounter.incrementAndGet() > runs) { running.set(false); } } } }; threads[i].start(); } for (int i = 0; i < threads.length; i++) { threads[i].join(); } directory.close(); }
/**
 * Indexes the same single-term document into two segments and verifies a
 * multi-segment DocsEnum visits doc 0, doc 1, then exhausts.
 */
public void testTermDocsEnum() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(
          dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(newStringField("f", "j", Field.Store.NO));
  writer.addDocument(doc);
  // Commit between adds so the two docs land in separate segments.
  writer.commit();
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  writer.close();

  DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader, null, "f", new BytesRef("j"));
  assertEquals(0, docsEnum.nextDoc());
  assertEquals(1, docsEnum.nextDoc());
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());

  reader.close();
  dir.close();
}
/**
 * Verifies that two DocsEnum instances over the same term iterate
 * independently: advancing one must not affect the other.
 */
public void testSeparateEnums() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer =
      new IndexWriter(
          dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(newStringField("f", "j", Field.Store.NO));
  writer.addDocument(doc);
  writer.commit();
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  writer.close();

  DocsEnum first = _TestUtil.docs(random(), reader, "f", new BytesRef("j"), null, null, 0);
  DocsEnum second = _TestUtil.docs(random(), reader, "f", new BytesRef("j"), null, null, 0);
  // Both enums independently start at doc 0.
  assertEquals(0, first.nextDoc());
  assertEquals(0, second.nextDoc());

  reader.close();
  dir.close();
}
public static Map<String, String> readChecksums(File[] locations) throws IOException { Directory[] dirs = new Directory[locations.length]; try { for (int i = 0; i < locations.length; i++) { dirs[i] = new SimpleFSDirectory(locations[i]); } return readChecksums(dirs, null); } finally { for (Directory dir : dirs) { if (dir != null) { try { dir.close(); } catch (IOException e) { // ignore } } } } }
/** Test obtaining and releasing locks, checking validity */ public void testBasics() throws IOException { Path tempPath = createTempDir(); Directory dir = getDirectory(tempPath); Lock l = dir.obtainLock("commit"); // shouldn't be able to get the lock twice expectThrows( LockObtainFailedException.class, () -> { dir.obtainLock("commit"); }); l.close(); // Make sure we can obtain first one again: l = dir.obtainLock("commit"); l.close(); dir.close(); }
public void testRandom() throws Exception { int num = atLeast(2); for (int iter = 0; iter < num; iter++) { if (VERBOSE) { System.out.println("TEST: iter=" + iter); } Directory dir = newDirectory(); IndexWriter w = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMergePolicy(NoMergePolicy.COMPOUND_FILES)); _TestUtil.keepFullyDeletedSegments(w); Map<BytesRef, List<Integer>> docs = new HashMap<BytesRef, List<Integer>>(); Set<Integer> deleted = new HashSet<Integer>(); List<BytesRef> terms = new ArrayList<BytesRef>(); int numDocs = _TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER); Document doc = new Document(); Field f = newStringField("field", "", Field.Store.NO); doc.add(f); Field id = newStringField("id", "", Field.Store.NO); doc.add(id); boolean onlyUniqueTerms = random().nextBoolean(); if (VERBOSE) { System.out.println("TEST: onlyUniqueTerms=" + onlyUniqueTerms + " numDocs=" + numDocs); } Set<BytesRef> uniqueTerms = new HashSet<BytesRef>(); for (int i = 0; i < numDocs; i++) { if (!onlyUniqueTerms && random().nextBoolean() && terms.size() > 0) { // re-use existing term BytesRef term = terms.get(random().nextInt(terms.size())); docs.get(term).add(i); f.setStringValue(term.utf8ToString()); } else { String s = _TestUtil.randomUnicodeString(random(), 10); BytesRef term = new BytesRef(s); if (!docs.containsKey(term)) { docs.put(term, new ArrayList<Integer>()); } docs.get(term).add(i); terms.add(term); uniqueTerms.add(term); f.setStringValue(s); } id.setStringValue("" + i); w.addDocument(doc); if (random().nextInt(4) == 1) { w.commit(); } if (i > 0 && random().nextInt(20) == 1) { int delID = random().nextInt(i); deleted.add(delID); w.deleteDocuments(new Term("id", "" + delID)); if (VERBOSE) { System.out.println("TEST: delete " + delID); } } } if (VERBOSE) { List<BytesRef> termsList = new ArrayList<BytesRef>(uniqueTerms); Collections.sort(termsList, BytesRef.getUTF8SortedAsUTF16Comparator()); System.out.println("TEST: 
terms in UTF16 order:"); for (BytesRef b : termsList) { System.out.println(" " + UnicodeUtil.toHexString(b.utf8ToString()) + " " + b); for (int docID : docs.get(b)) { if (deleted.contains(docID)) { System.out.println(" " + docID + " (deleted)"); } else { System.out.println(" " + docID); } } } } IndexReader reader = w.getReader(); w.close(); if (VERBOSE) { System.out.println("TEST: reader=" + reader); } Bits liveDocs = MultiFields.getLiveDocs(reader); for (int delDoc : deleted) { assertFalse(liveDocs.get(delDoc)); } for (int i = 0; i < 100; i++) { BytesRef term = terms.get(random().nextInt(terms.size())); if (VERBOSE) { System.out.println( "TEST: seek term=" + UnicodeUtil.toHexString(term.utf8ToString()) + " " + term); } DocsEnum docsEnum = _TestUtil.docs(random(), reader, "field", term, liveDocs, null, 0); assertNotNull(docsEnum); for (int docID : docs.get(term)) { if (!deleted.contains(docID)) { assertEquals(docID, docsEnum.nextDoc()); } } assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc()); } reader.close(); dir.close(); } }
/**
 * Closes the wrapped delegate {@code in}; this filter adds no behavior of its own.
 *
 * @throws IOException if the delegate fails to close
 */
@Override
public void close() throws IOException {
  in.close();
}
/**
 * Writes one file terminated by a Lucene CodecUtil footer (embedded CRC) and
 * one "legacy" file whose Adler32 checksum is tracked externally, then checks
 * Store.checkIntegrityNoException against correct metadata and three
 * deliberately corrupted variants (wrong checksum, wrong length, wrong file).
 */
public void testCheckIntegrity() throws IOException {
  Directory dir = newDirectory();
  long luceneFileLength = 0;

  // File 1: random realistic-unicode chunks plus a CodecUtil footer.
  try (IndexOutput output = dir.createOutput("lucene_checksum.bin", IOContext.DEFAULT)) {
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      luceneFileLength += bytesRef.length;
    }
    CodecUtil.writeFooter(output);
    luceneFileLength += CodecUtil.footerLength();
  }

  // File 2: same idea, but the checksum is computed externally via Adler32
  // (the "legacy" checksum scheme) — no footer is written.
  final Adler32 adler32 = new Adler32();
  long legacyFileLength = 0;
  try (IndexOutput output = dir.createOutput("legacy.bin", IOContext.DEFAULT)) {
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      adler32.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      legacyFileLength += bytesRef.length;
    }
  }

  // Read back the Lucene file's checksum from its footer.
  final long luceneChecksum;
  final long adler32LegacyChecksum = adler32.getValue();
  try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) {
    assertEquals(luceneFileLength, indexInput.length());
    luceneChecksum = CodecUtil.retrieveChecksum(indexInput);
  }

  { // positive check
    StoreFileMetaData lucene =
        new StoreFileMetaData(
            "lucene_checksum.bin",
            luceneFileLength,
            Store.digestToString(luceneChecksum),
            Version.LUCENE_4_8_0);
    StoreFileMetaData legacy =
        new StoreFileMetaData(
            "legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
    assertTrue(legacy.hasLegacyChecksum());
    assertFalse(lucene.hasLegacyChecksum());
    assertTrue(Store.checkIntegrityNoException(lucene, dir));
    assertTrue(Store.checkIntegrityNoException(legacy, dir));
  }

  { // negative check - wrong checksum
    StoreFileMetaData lucene =
        new StoreFileMetaData(
            "lucene_checksum.bin",
            luceneFileLength,
            Store.digestToString(luceneChecksum + 1),
            Version.LUCENE_4_8_0);
    StoreFileMetaData legacy =
        new StoreFileMetaData(
            "legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum + 1));
    assertTrue(legacy.hasLegacyChecksum());
    assertFalse(lucene.hasLegacyChecksum());
    assertFalse(Store.checkIntegrityNoException(lucene, dir));
    assertFalse(Store.checkIntegrityNoException(legacy, dir));
  }

  { // negative check - wrong length
    StoreFileMetaData lucene =
        new StoreFileMetaData(
            "lucene_checksum.bin",
            luceneFileLength + 1,
            Store.digestToString(luceneChecksum),
            Version.LUCENE_4_8_0);
    StoreFileMetaData legacy =
        new StoreFileMetaData(
            "legacy.bin", legacyFileLength + 1, Store.digestToString(adler32LegacyChecksum));
    assertTrue(legacy.hasLegacyChecksum());
    assertFalse(lucene.hasLegacyChecksum());
    assertFalse(Store.checkIntegrityNoException(lucene, dir));
    assertFalse(Store.checkIntegrityNoException(legacy, dir));
  }

  { // negative check - wrong file (names swapped, so checksums cannot match)
    StoreFileMetaData lucene =
        new StoreFileMetaData(
            "legacy.bin",
            luceneFileLength,
            Store.digestToString(luceneChecksum),
            Version.LUCENE_4_8_0);
    StoreFileMetaData legacy =
        new StoreFileMetaData(
            "lucene_checksum.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
    assertTrue(legacy.hasLegacyChecksum());
    assertFalse(lucene.hasLegacyChecksum());
    assertFalse(Store.checkIntegrityNoException(lucene, dir));
    assertFalse(Store.checkIntegrityNoException(legacy, dir));
  }

  dir.close();
}