// Used by near real-time search DirectoryReader( IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor, boolean applyAllDeletes) throws IOException { this.directory = writer.getDirectory(); this.readOnly = true; this.applyAllDeletes = applyAllDeletes; // saved for reopen this.termInfosIndexDivisor = termInfosIndexDivisor; readerFinishedListeners = writer.getReaderFinishedListeners(); // IndexWriter synchronizes externally before calling // us, which ensures infos will not change; so there's // no need to process segments in reverse order final int numSegments = infos.size(); List<SegmentReader> readers = new ArrayList<SegmentReader>(); final Directory dir = writer.getDirectory(); segmentInfos = (SegmentInfos) infos.clone(); int infosUpto = 0; for (int i = 0; i < numSegments; i++) { boolean success = false; try { final SegmentInfo info = infos.info(i); assert info.dir == dir; final SegmentReader reader = writer.readerPool.getReadOnlyClone(info, true, termInfosIndexDivisor); if (reader.numDocs() > 0 || writer.getKeepFullyDeletedSegments()) { reader.readerFinishedListeners = readerFinishedListeners; readers.add(reader); infosUpto++; } else { reader.close(); segmentInfos.remove(infosUpto); } success = true; } finally { if (!success) { // Close all readers we had opened: for (SegmentReader reader : readers) { try { reader.close(); } catch (Throwable ignore) { // keep going - we want to clean up as much as possible } } } } } this.writer = writer; initialize(readers.toArray(new SegmentReader[readers.size()])); }
/**
 * Returns true if this single info is already fully merged (has no pending deletes, is in the
 * same dir as the writer, and matches the current compound file setting
 */
protected final boolean isMerged(SegmentInfos infos, SegmentCommitInfo info) throws IOException {
  IndexWriter w = writer.get();
  assert w != null;
  // Guard clauses mirror the original conjunction, preserving evaluation order.
  if (w.numDeletedDocs(info) > 0) {
    // Pending deletes: a merge is still needed to apply them.
    return false;
  }
  if (info.info.hasSeparateNorms()) {
    return false;
  }
  if (info.info.dir != w.getDirectory()) {
    // Segment lives in a different directory than the writer.
    return false;
  }
  // Finally, the segment's compound-file state must match the current policy.
  return useCompoundFile(infos, info) == info.info.getUseCompoundFile();
}
/**
 * Simulates a machine crash for the given writer's (mock) directory: waits for running
 * merges, crashes the directory, waits again, then clears the crashed state.
 */
private void crash(final IndexWriter writer) throws IOException {
  final MockDirectoryWrapper mockDir = (MockDirectoryWrapper) writer.getDirectory();
  final ConcurrentMergeScheduler scheduler =
      (ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler();
  // Let any in-flight merges finish before pulling the plug.
  scheduler.sync();
  mockDir.crash();
  // Merges kicked off before the crash may still be draining; wait them out,
  // then re-enable the directory for subsequent use.
  scheduler.sync();
  mockDir.clearCrash();
}
public void testCrashWhileIndexing() throws IOException { // This test relies on being able to open a reader before any commit // happened, so we must create an initial commit just to allow that, but // before any documents were added. IndexWriter writer = initIndex(random, true); Directory dir = writer.getDirectory(); crash(writer); IndexReader reader = IndexReader.open(dir, false); assertTrue(reader.numDocs() < 157); reader.close(); dir.close(); }
/**
 * Lazily derives merge-thread defaults from the storage type (spinning disk vs. not)
 * the first time it runs, when the user left the counts on auto-detect.
 */
private synchronized void initDynamicDefaults(IndexWriter writer) throws IOException {
  // Nothing to do unless the user left the thread count on auto-detect.
  if (maxThreadCount != AUTO_DETECT_MERGES_AND_THREADS) {
    return;
  }
  final boolean spins = IOUtils.spins(writer.getDirectory());
  setDefaultMaxMergesAndThreads(spins);
  if (verbose()) {
    message(
        "initDynamicDefaults spins="
            + spins
            + " maxThreadCount="
            + maxThreadCount
            + " maxMergeCount="
            + maxMergeCount);
  }
}
public void testWriterAfterCrash() throws IOException { // This test relies on being able to open a reader before any commit // happened, so we must create an initial commit just to allow that, but // before any documents were added. IndexWriter writer = initIndex(random, true); MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory(); dir.setPreventDoubleWrite(false); crash(writer); writer = initIndex(random, dir, false); writer.close(); IndexReader reader = IndexReader.open(dir, false); assertTrue(reader.numDocs() < 314); reader.close(); dir.close(); }
// Entry point for a single background merge thread: runs one merge, then asks
// CMS whether follow-on merges are now eligible, and finally deregisters itself.
@Override
public void run() {
  try {
    if (verbose()) {
      message(" merge thread: start");
    }
    doMerge(writer, merge);
    if (verbose()) {
      message(" merge thread: done");
    }
    // Let CMS run new merges if necessary:
    try {
      merge(writer, MergeTrigger.MERGE_FINISHED, true);
    } catch (AlreadyClosedException ace) {
      // OK: the writer closed while we were merging; nothing more to schedule.
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  } catch (Throwable exc) {
    if (exc instanceof MergePolicy.MergeAbortedException) {
      // OK to ignore: the merge was cancelled (e.g. writer rollback/close).
    } else if (suppressExceptions == false) {
      // suppressExceptions is normally only set during
      // testing.
      handleMergeException(writer.getDirectory(), exc);
    }
  } finally {
    // Always deregister this thread and recompute thread priorities/limits,
    // whether the merge succeeded, aborted, or threw.
    synchronized (ConcurrentMergeScheduler.this) {
      removeMergeThread();
      updateMergeThreads();

      // In case we had stalled indexing, we can now wake up
      // and possibly unstall:
      ConcurrentMergeScheduler.this.notifyAll();
    }
  }
}
public void testCrashAfterCloseNoWait() throws IOException {
  final IndexWriter w = initIndex(random, false);
  final MockDirectoryWrapper mockDir = (MockDirectoryWrapper) w.getDirectory();
  // close(false) aborts running merges rather than waiting for them.
  w.close(false);
  mockDir.crash();
  // Even without waiting for merges, the close committed the index, so all
  // 157 docs must survive the crash.
  final IndexReader r = IndexReader.open(mockDir, false);
  assertEquals(157, r.numDocs());
  r.close();
  mockDir.close();
}
public void testCrashAfterReopen() throws IOException {
  IndexWriter w = initIndex(random, false);
  final MockDirectoryWrapper mockDir = (MockDirectoryWrapper) w.getDirectory();
  // First session closes cleanly, committing 157 docs.
  w.close();
  // Second session adds 157 more, then we crash before it can commit.
  w = initIndex(random, mockDir, false);
  assertEquals(314, w.maxDoc());
  crash(w);
  // The first, committed session must be intact; the second may be partly lost.
  final IndexReader r = IndexReader.open(mockDir, false);
  assertTrue(r.numDocs() >= 157);
  r.close();
  mockDir.close();
}