@Override public void doWork() throws Throwable { IndexWriter writer1 = new IndexWriter( dir1, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(3) .setMergeScheduler(new ConcurrentMergeScheduler()) .setMergePolicy(newLogMergePolicy(2))); ((ConcurrentMergeScheduler) writer1.getConfig().getMergeScheduler()).setSuppressExceptions(); // Intentionally use different params so flush/merge // happen @ different times IndexWriter writer2 = new IndexWriter( dir2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(2) .setMergeScheduler(new ConcurrentMergeScheduler()) .setMergePolicy(newLogMergePolicy(3))); ((ConcurrentMergeScheduler) writer2.getConfig().getMergeScheduler()).setSuppressExceptions(); update(writer1); update(writer2); TestTransactions.doFail = true; try { synchronized (lock) { try { writer1.prepareCommit(); } catch (Throwable t) { writer1.rollback(); writer2.rollback(); return; } try { writer2.prepareCommit(); } catch (Throwable t) { writer1.rollback(); writer2.rollback(); return; } writer1.commit(); writer2.commit(); } } finally { TestTransactions.doFail = false; } writer1.close(); writer2.close(); }
// test rollback of deleteAll() public void testDeleteAllRollback() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); int id = 0; int value = 100; for (int i = 0; i < 7; i++) { addDoc(modifier, ++id, value); } modifier.commit(); addDoc(modifier, ++id, value); IndexReader reader = IndexReader.open(dir, true); assertEquals(7, reader.numDocs()); reader.close(); // Delete all modifier.deleteAll(); // Roll it back modifier.rollback(); modifier.close(); // Validate that the docs are still there reader = IndexReader.open(dir, true); assertEquals(7, reader.numDocs()); reader.close(); dir.close(); }
// LUCENE-1274: test writer.prepareCommit() public void testPrepareCommitRollback() throws IOException { MockDirectoryWrapper dir = newDirectory(); dir.setPreventDoubleWrite(false); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(2) .setMergePolicy(newLogMergePolicy(5))); writer.commit(); for (int i = 0; i < 23; i++) TestIndexWriter.addDoc(writer); DirectoryReader reader = DirectoryReader.open(dir); assertEquals(0, reader.numDocs()); writer.prepareCommit(); IndexReader reader2 = DirectoryReader.open(dir); assertEquals(0, reader2.numDocs()); writer.rollback(); IndexReader reader3 = DirectoryReader.openIfChanged(reader); assertNull(reader3); assertEquals(0, reader.numDocs()); assertEquals(0, reader2.numDocs()); reader.close(); reader2.close(); writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); for (int i = 0; i < 17; i++) TestIndexWriter.addDoc(writer); reader = DirectoryReader.open(dir); assertEquals(0, reader.numDocs()); reader.close(); writer.prepareCommit(); reader = DirectoryReader.open(dir); assertEquals(0, reader.numDocs()); reader.close(); writer.commit(); reader = DirectoryReader.open(dir); assertEquals(17, reader.numDocs()); reader.close(); writer.close(); dir.close(); }
/* Uses KeepAllDeletionPolicy to keep all commits around,
 * then, opens a new IndexWriter on a previous commit
 * point. */
public void testOpenPriorSnapshot() throws IOException {
  Directory dir = newDirectory();

  // Writer whose deletion policy retains every commit point.
  IndexWriter writer =
      new IndexWriter(
          dir,
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setIndexDeletionPolicy(new KeepAllDeletionPolicy(dir))
              .setMaxBufferedDocs(2)
              .setMergePolicy(newLogMergePolicy(10)));
  KeepAllDeletionPolicy policy =
      (KeepAllDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();

  // Add 10 docs, committing after every 2nd add -> 5 commit points.
  for (int i = 0; i < 10; i++) {
    addDoc(writer);
    if ((1 + i) % 2 == 0) writer.commit();
  }
  writer.close();

  Collection<IndexCommit> commits = DirectoryReader.listCommits(dir);
  assertEquals(5, commits.size());

  // Pick the newest commit (highest generation).
  IndexCommit lastCommit = null;
  for (final IndexCommit commit : commits) {
    if (lastCommit == null || commit.getGeneration() > lastCommit.getGeneration())
      lastCommit = commit;
  }
  assertTrue(lastCommit != null);

  // Now add 1 doc and merge
  writer =
      new IndexWriter(
          dir, newIndexWriterConfig(new MockAnalyzer(random())).setIndexDeletionPolicy(policy));
  addDoc(writer);
  assertEquals(11, writer.numDocs());
  writer.forceMerge(1);
  writer.close();

  // The merge + close added one more commit: 6 total.
  assertEquals(6, DirectoryReader.listCommits(dir).size());

  // Now open writer on the commit just before merge:
  writer =
      new IndexWriter(
          dir,
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setIndexDeletionPolicy(policy)
              .setIndexCommit(lastCommit));
  assertEquals(10, writer.numDocs());

  // rollback() discards this writer's pending switch to the old commit,
  // so the index stays at the latest (merged, 11-doc) commit:
  writer.rollback();

  DirectoryReader r = DirectoryReader.open(dir);
  // Still merged, still 11 docs
  assertEquals(1, r.leaves().size());
  assertEquals(11, r.numDocs());
  r.close();

  // Re-open on the old commit, but this time commit the switch by closing:
  writer =
      new IndexWriter(
          dir,
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setIndexDeletionPolicy(policy)
              .setIndexCommit(lastCommit));
  assertEquals(10, writer.numDocs());

  // Commits the rollback:
  writer.close();

  // Closing that writer made one more commit: 7 total now.
  assertEquals(7, DirectoryReader.listCommits(dir).size());

  r = DirectoryReader.open(dir);
  // Not fully merged because we rolled it back, and now only
  // 10 docs
  assertTrue(r.leaves().size() > 1);
  assertEquals(10, r.numDocs());
  r.close();

  // Re-merge
  writer =
      new IndexWriter(
          dir, newIndexWriterConfig(new MockAnalyzer(random())).setIndexDeletionPolicy(policy));
  writer.forceMerge(1);
  writer.close();

  r = DirectoryReader.open(dir);
  assertEquals(1, r.leaves().size());
  assertEquals(10, r.numDocs());
  r.close();

  // Now open writer on the commit just before merging,
  // but this time keeping only the last commit:
  writer =
      new IndexWriter(
          dir, newIndexWriterConfig(new MockAnalyzer(random())).setIndexCommit(lastCommit));
  assertEquals(10, writer.numDocs());

  // Reader still sees fully merged index, because writer
  // opened on the prior commit has not yet committed:
  r = DirectoryReader.open(dir);
  assertEquals(1, r.leaves().size());
  assertEquals(10, r.numDocs());
  r.close();

  writer.close();

  // Now reader sees not-fully-merged index:
  r = DirectoryReader.open(dir);
  assertTrue(r.leaves().size() > 1);
  assertEquals(10, r.numDocs());
  r.close();
  dir.close();
}
/* * Make sure IndexWriter cleans up on hitting a disk * full exception in addDocument. * TODO: how to do this on windows with FSDirectory? */ public void testAddDocumentOnDiskFull() throws IOException { for (int pass = 0; pass < 2; pass++) { if (VERBOSE) { System.out.println("TEST: pass="******"TEST: cycle: diskFree=" + diskFree); } MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory()); dir.setMaxSizeInBytes(diskFree); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); writer.setInfoStream(VERBOSE ? System.out : null); MergeScheduler ms = writer.getConfig().getMergeScheduler(); if (ms instanceof ConcurrentMergeScheduler) { // This test intentionally produces exceptions // in the threads that CMS launches; we don't // want to pollute test output with these. ((ConcurrentMergeScheduler) ms).setSuppressExceptions(); } boolean hitError = false; try { for (int i = 0; i < 200; i++) { addDoc(writer); } if (VERBOSE) { System.out.println("TEST: done adding docs; now commit"); } writer.commit(); } catch (IOException e) { if (VERBOSE) { System.out.println("TEST: exception on addDoc"); e.printStackTrace(System.out); } hitError = true; } if (hitError) { if (doAbort) { if (VERBOSE) { System.out.println("TEST: now rollback"); } writer.rollback(); } else { try { if (VERBOSE) { System.out.println("TEST: now close"); } writer.close(); } catch (IOException e) { if (VERBOSE) { System.out.println("TEST: exception on close; retry w/ no disk space limit"); e.printStackTrace(System.out); } dir.setMaxSizeInBytes(0); writer.close(); } } // _TestUtil.syncConcurrentMerges(ms); if (_TestUtil.anyFilesExceptWriteLock(dir)) { assertNoUnreferencedFiles(dir, "after disk full during addDocument"); // Make sure reader can open the index: IndexReader.open(dir, true).close(); } dir.close(); // Now try again w/ more space: diskFree += TEST_NIGHTLY ? 
_TestUtil.nextInt(random, 400, 600) : _TestUtil.nextInt(random, 3000, 5000); } else { // _TestUtil.syncConcurrentMerges(writer); dir.setMaxSizeInBytes(0); writer.close(); dir.close(); break; } } } }
/* * Simple test for "commit on close": open writer, then * add a bunch of docs, making sure reader does not see * them until writer has closed. Then instead of * closing the writer, call abort and verify reader sees * nothing was added. Then verify we can open the index * and add docs to it. */ public void testCommitOnCloseAbort() throws IOException { MockDirectoryWrapper dir = newDirectory(); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(10)); for (int i = 0; i < 14; i++) { TestIndexWriter.addDoc(writer); } writer.close(); Term searchTerm = new Term("content", "aaa"); IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = new IndexSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("first number of hits", 14, hits.length); reader.close(); writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND) .setMaxBufferedDocs(10)); for (int j = 0; j < 17; j++) { TestIndexWriter.addDoc(writer); } // Delete all docs: writer.deleteDocuments(searchTerm); reader = DirectoryReader.open(dir); searcher = new IndexSearcher(reader); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("reader incorrectly sees changes from writer", 14, hits.length); reader.close(); // Now, close the writer: writer.rollback(); TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()"); reader = DirectoryReader.open(dir); searcher = new IndexSearcher(reader); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("saw changes after writer.abort", 14, hits.length); reader.close(); // Now make sure we can re-open the index, add docs, // and all is good: writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) 
.setOpenMode(OpenMode.APPEND) .setMaxBufferedDocs(10)); // On abort, writer in fact may write to the same // segments_N file: dir.setPreventDoubleWrite(false); for (int i = 0; i < 12; i++) { for (int j = 0; j < 17; j++) { TestIndexWriter.addDoc(writer); } IndexReader r = DirectoryReader.open(dir); searcher = new IndexSearcher(r); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("reader incorrectly sees changes from writer", 14, hits.length); r.close(); } writer.close(); IndexReader r = DirectoryReader.open(dir); searcher = new IndexSearcher(r); hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("didn't see changes after close", 218, hits.length); r.close(); dir.close(); }
/* * Verify that calling forceMerge when writer is open for * "commit on close" works correctly both for rollback() * and close(). */ public void testCommitOnCloseForceMerge() throws IOException { MockDirectoryWrapper dir = newDirectory(); // Must disable throwing exc on double-write: this // test uses IW.rollback which easily results in // writing to same file more than once dir.setPreventDoubleWrite(false); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setMaxBufferedDocs(10) .setMergePolicy(newLogMergePolicy(10))); for (int j = 0; j < 17; j++) { TestIndexWriter.addDocWithIndex(writer, j); } writer.close(); writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); // Open a reader before closing (commiting) the writer: DirectoryReader reader = DirectoryReader.open(dir); // Reader should see index as multi-seg at this // point: assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1); reader.close(); // Abort the writer: writer.rollback(); TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = DirectoryReader.open(dir); // Reader should still see index as multi-segment assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1); reader.close(); if (VERBOSE) { System.out.println("TEST: do real full merge"); } writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())) .setOpenMode(OpenMode.APPEND)); writer.forceMerge(1); writer.close(); if (VERBOSE) { System.out.println("TEST: writer closed"); } TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = DirectoryReader.open(dir); // Reader should see index as one segment assertEquals( "Reader 
incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().size()); reader.close(); dir.close(); }