// LUCENE-1274: test writer.prepareCommit() public void testPrepareCommitRollback() throws IOException { MockDirectoryWrapper dir = newDirectory(); dir.setPreventDoubleWrite(false); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(2) .setMergePolicy(newLogMergePolicy(5))); writer.commit(); for (int i = 0; i < 23; i++) TestIndexWriter.addDoc(writer); DirectoryReader reader = DirectoryReader.open(dir); assertEquals(0, reader.numDocs()); writer.prepareCommit(); IndexReader reader2 = DirectoryReader.open(dir); assertEquals(0, reader2.numDocs()); writer.rollback(); IndexReader reader3 = DirectoryReader.openIfChanged(reader); assertNull(reader3); assertEquals(0, reader.numDocs()); assertEquals(0, reader2.numDocs()); reader.close(); reader2.close(); writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for (int i = 0; i < 17; i++) TestIndexWriter.addDoc(writer); reader = DirectoryReader.open(dir); assertEquals(0, reader.numDocs()); reader.close(); writer.prepareCommit(); reader = DirectoryReader.open(dir); assertEquals(0, reader.numDocs()); reader.close(); writer.commit(); reader = DirectoryReader.open(dir); assertEquals(17, reader.numDocs()); reader.close(); writer.close(); dir.close(); }
public void testTransactions() throws Throwable { // we cant use non-ramdir on windows, because this test needs to double-write. MockDirectoryWrapper dir1 = new MockDirectoryWrapper(random(), new RAMDirectory()); MockDirectoryWrapper dir2 = new MockDirectoryWrapper(random(), new RAMDirectory()); dir1.setPreventDoubleWrite(false); dir2.setPreventDoubleWrite(false); dir1.failOn(new RandomFailure()); dir2.failOn(new RandomFailure()); dir1.setFailOnOpenInput(false); dir2.setFailOnOpenInput(false); // We throw exceptions in deleteFile, which creates // leftover files: dir1.setAssertNoUnrefencedFilesOnClose(false); dir2.setAssertNoUnrefencedFilesOnClose(false); initIndex(dir1); initIndex(dir2); TimedThread[] threads = new TimedThread[3]; int numThread = 0; IndexerThread indexerThread = new IndexerThread(this, dir1, dir2, threads); threads[numThread++] = indexerThread; indexerThread.start(); SearcherThread searcherThread1 = new SearcherThread(this, dir1, dir2, threads); threads[numThread++] = searcherThread1; searcherThread1.start(); SearcherThread searcherThread2 = new SearcherThread(this, dir1, dir2, threads); threads[numThread++] = searcherThread2; searcherThread2.start(); for (int i = 0; i < numThread; i++) threads[i].join(); for (int i = 0; i < numThread; i++) assertTrue(!threads[i].failed); dir1.close(); dir2.close(); }
public void testWriterAfterCrash() throws IOException { // This test relies on being able to open a reader before any commit // happened, so we must create an initial commit just to allow that, but // before any documents were added. IndexWriter writer = initIndex(random, true); MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory(); dir.setPreventDoubleWrite(false); crash(writer); writer = initIndex(random, dir, false); writer.close(); IndexReader reader = IndexReader.open(dir, false); assertTrue(reader.numDocs() < 314); reader.close(); dir.close(); }
// Stresses closing a writer (commit-on-close disabled) while a background
// thread keeps adding documents: the adder thread is expected to stop via
// AlreadyClosedException (or NPE) once close() lands, any other Throwable is
// recorded and rethrown; afterwards the index must still be readable and
// reopenable in APPEND mode. The merge factor is forced to 2 mid-test to kick
// off many merge threads so close() has to abort them.
// NOTE(review): the middle of this method is corrupted/redacted — the text
// "TEST: pass="******"TEST: iter=" below replaces the original per-pass writer
// setup and the iter-loop header, so this line does not compile as-is and the
// brace balance across the gap is unknowable. Recover the original body from
// version control before relying on or editing this test.
@Slow public void testNoWaitClose() throws Throwable { Directory directory = newDirectory(); if (directory instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper) directory).setPreventDoubleWrite(false); } final Document doc = new Document(); FieldType customType = new FieldType(TextField.TYPE_NOT_STORED); customType.setTokenized(false); Field idField = newField("id", "", customType); doc.add(idField); for (int pass = 0; pass < 2; pass++) { if (VERBOSE) { System.out.println("TEST: pass="******"TEST: iter=" + iter); } for (int j = 0; j < 199; j++) { idField.setStringValue(Integer.toString(iter * 201 + j)); writer.addDocument(doc); } int delID = iter * 199; for (int j = 0; j < 20; j++) { writer.deleteDocuments(new Term("id", Integer.toString(delID))); delID += 5; } writer.commit(); // Force a bunch of merge threads to kick off so we // stress out aborting them on close: ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2); final IndexWriter finalWriter = writer; final AtomicReference<Throwable> failure = new AtomicReference<>(); Thread t1 = new Thread() { @Override public void run() { boolean done = false; while (!done) { for (int i = 0; i < 100; i++) { try { finalWriter.addDocument(doc); } catch (AlreadyClosedException e) { done = true; break; } catch (NullPointerException e) { done = true; break; } catch (Throwable e) { e.printStackTrace(System.out); failure.set(e); done = true; break; } } Thread.yield(); } } }; t1.start(); writer.close(); t1.join(); if (failure.get() != null) { throw failure.get(); } // Make sure reader can read IndexReader reader = DirectoryReader.open(directory); reader.close(); // Reopen writer = new IndexWriter( directory, newIndexWriterConfig(new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND) .setMergePolicy(newLogMergePolicy()) .setCommitOnClose(false)); } writer.close(); } directory.close(); }
/* * Simple test for "commit on close": open writer, then * add a bunch of docs, making sure reader does not see * them until writer has closed. Then instead of * closing the writer, call abort and verify reader sees * nothing was added. Then verify we can open the index * and add docs to it. */ public void testCommitOnCloseAbort() throws IOException { MockDirectoryWrapper dir = newDirectory(); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(10)); for (int i = 0; i < 14; i++) { TestIndexWriter.addDoc(writer); } writer.close(); Term searchTerm = new Term("content", "aaa"); IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = new IndexSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("first number of hits", 14, hits.length); reader.close(); writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND) .setMaxBufferedDocs(10)); for (int j = 0; j < 17; j++) { TestIndexWriter.addDoc(writer); } // Delete all docs: writer.deleteDocuments(searchTerm); reader = DirectoryReader.open(dir); searcher = new IndexSearcher(reader); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("reader incorrectly sees changes from writer", 14, hits.length); reader.close(); // Now, close the writer: writer.rollback(); TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()"); reader = DirectoryReader.open(dir); searcher = new IndexSearcher(reader); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("saw changes after writer.abort", 14, hits.length); reader.close(); // Now make sure we can re-open the index, add docs, // and all is good: writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) 
.setOpenMode(OpenMode.APPEND) .setMaxBufferedDocs(10)); // On abort, writer in fact may write to the same // segments_N file: dir.setPreventDoubleWrite(false); for (int i = 0; i < 12; i++) { for (int j = 0; j < 17; j++) { TestIndexWriter.addDoc(writer); } IndexReader r = DirectoryReader.open(dir); searcher = new IndexSearcher(r); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("reader incorrectly sees changes from writer", 14, hits.length); r.close(); } writer.close(); IndexReader r = DirectoryReader.open(dir); searcher = new IndexSearcher(r); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("didn't see changes after close", 218, hits.length); r.close(); dir.close(); }
/* * Verify that calling forceMerge when writer is open for * "commit on close" works correctly both for rollback() * and close(). */ public void testCommitOnCloseForceMerge() throws IOException { MockDirectoryWrapper dir = newDirectory(); // Must disable throwing exc on double-write: this // test uses IW.rollback which easily results in // writing to same file more than once dir.setPreventDoubleWrite(false); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(10) .setMergePolicy(newLogMergePolicy(10))); for (int j = 0; j < 17; j++) { TestIndexWriter.addDocWithIndex(writer, j); } writer.close(); writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); // Open a reader before closing (commiting) the writer: DirectoryReader reader = DirectoryReader.open(dir); // Reader should see index as multi-seg at this // point: assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1); reader.close(); // Abort the writer: writer.rollback(); TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = DirectoryReader.open(dir); // Reader should still see index as multi-segment assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1); reader.close(); if (VERBOSE) { System.out.println("TEST: do real full merge"); } writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); writer.close(); if (VERBOSE) { System.out.println("TEST: writer closed"); } TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = DirectoryReader.open(dir); // Reader should see index as one segment assertEquals( "Reader 
incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().size()); reader.close(); dir.close(); }