public void testSimpleSkip() throws IOException {
  Directory dir = new CountingRAMDirectory(new RAMDirectory());
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())
          .setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()))
          .setMergePolicy(newLogMergePolicy()));
  Term term = new Term("test", "a");
  for (int i = 0; i < 5000; i++) {
    Document d1 = new Document();
    d1.add(newTextField(term.field(), term.text(), Field.Store.NO));
    writer.addDocument(d1);
  }
  writer.commit();
  writer.forceMerge(1);
  writer.close();

  AtomicReader reader = getOnlySegmentReader(DirectoryReader.open(dir));

  for (int i = 0; i < 2; i++) {
    counter = 0;
    DocsAndPositionsEnum tp = reader.termPositionsEnum(term);
    checkSkipTo(tp, 14, 185);  // no skips
    checkSkipTo(tp, 17, 190);  // one skip on level 0
    checkSkipTo(tp, 287, 200); // one skip on level 1, two on level 0

    // this test would fail if we had only one skip level,
    // because then more bytes would be read from the freqStream
    checkSkipTo(tp, 4800, 250); // one skip on level 2
  }
}
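// A minimal sketch of the checkSkipTo helper the test above calls; the real
// implementation lives elsewhere in this class, so treat the details as an
// assumption about its contract: advance the enum to the target doc and verify
// that the counter of bytes read (maintained by CountingRAMDirectory) stayed
// within the expected bound for that skip pattern.
private void checkSkipTo(DocsAndPositionsEnum tp, int target, int maxCounter) throws IOException {
  tp.advance(target);
  if (maxCounter < counter) {
    fail("Too many bytes read: " + counter + " vs " + maxCounter);
  }
  assertEquals("Wrong document " + tp.docID() + " after skipTo target " + target,
      target, tp.docID());
}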
private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS) throws Exception {
  Directory directory = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random);
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  final MergePolicy mp = conf.getMergePolicy();
  mp.setNoCFSRatio(useCompoundFiles ? 1.0 : 0.0);
  IndexWriter writer = new IndexWriter(directory, conf);
  if (VERBOSE) {
    System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS);
  }

  for (int j = 0; j < MAX_DOCS; j++) {
    Document d = new Document();
    d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES));
    d.add(newTextField(ID_FIELD, Integer.toString(j), Field.Store.YES));
    writer.addDocument(d);
  }
  writer.close();

  // try a search without OR
  IndexReader reader = DirectoryReader.open(directory);
  IndexSearcher searcher = newSearcher(reader);

  Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY));
  out.println("Query: " + query.toString(PRIORITY_FIELD));
  if (VERBOSE) {
    System.out.println("TEST: search query=" + query);
  }

  final Sort sort = new Sort(SortField.FIELD_SCORE, new SortField(ID_FIELD, SortField.Type.INT));

  ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
  printHits(out, hits, searcher);
  checkHits(hits, MAX_DOCS, searcher);

  // try a new search with OR
  searcher = newSearcher(reader);
  hits = null;

  BooleanQuery booleanQuery = new BooleanQuery();
  booleanQuery.add(new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY)), BooleanClause.Occur.SHOULD);
  booleanQuery.add(new TermQuery(new Term(PRIORITY_FIELD, MED_PRIORITY)), BooleanClause.Occur.SHOULD);
  out.println("Query: " + booleanQuery.toString(PRIORITY_FIELD));

  hits = searcher.search(booleanQuery, null, MAX_DOCS, sort).scoreDocs;
  printHits(out, hits, searcher);
  checkHits(hits, MAX_DOCS, searcher);

  reader.close();
  directory.close();
}
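// A minimal sketch of the checkHits helper used above; the real helper is
// defined elsewhere in this class, so the body here is an assumption inferred
// from how it is called: verify the expected hit count, and that the hits come
// back ordered by the stored ID field (docs were indexed with sequential IDs).
private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher) throws IOException {
  assertEquals("total results", expectedCount, hits.length);
  for (int i = 0; i < hits.length; i++) {
    Document d = searcher.doc(hits[i].doc);
    assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
  }
}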
// LUCENE-1274
public void testPrepareCommitNoChanges() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  writer.prepareCommit();
  writer.commit();
  writer.close();

  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(0, reader.numDocs());
  reader.close();
  dir.close();
}
/*
 * Simple test for "commit on close": open writer then
 * add a bunch of docs, making sure reader does not see
 * these docs until writer is closed.
 */
public void testCommitOnClose() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  for (int i = 0; i < 14; i++) {
    TestIndexWriter.addDoc(writer);
  }
  writer.close();

  Term searchTerm = new Term("content", "aaa");
  DirectoryReader reader = DirectoryReader.open(dir);
  IndexSearcher searcher = new IndexSearcher(reader);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 14, hits.length);
  reader.close();

  reader = DirectoryReader.open(dir);

  writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 11; j++) {
      TestIndexWriter.addDoc(writer);
    }
    IndexReader r = DirectoryReader.open(dir);
    searcher = new IndexSearcher(r);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    r.close();
    assertTrue("reader should have still been current", reader.isCurrent());
  }

  // Now, close the writer:
  writer.close();
  assertFalse("reader should not be current now", reader.isCurrent());

  IndexReader r = DirectoryReader.open(dir);
  searcher = new IndexSearcher(r);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("reader did not see changes after writer was closed", 47, hits.length);
  r.close();
  reader.close();
  dir.close();
}
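// A minimal sketch of the TestIndexWriter.addDoc helper these commit tests lean
// on. The real helper is defined in TestIndexWriter, so this body is an
// assumption inferred from the searches for new Term("content", "aaa"): each
// call adds one document whose "content" field contains the token "aaa".
static void addDoc(IndexWriter writer) throws IOException {
  Document doc = new Document();
  doc.add(newTextField("content", "aaa", Field.Store.NO));
  writer.addDocument(doc);
}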
public void testZeroCommits() throws Exception {
  // Tests that if we don't call commit(), the directory has 0 commits. This has
  // changed since LUCENE-2386, where before IW would always commit on a fresh
  // new index.
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));

  try {
    DirectoryReader.listCommits(dir);
    fail("listCommits should have thrown an exception over empty index");
  } catch (IndexNotFoundException e) {
    // that's expected!
  }

  // Closing with no changes should still create a commit, because it's a new index.
  writer.close();
  assertEquals("expected 1 commit!", 1, DirectoryReader.listCommits(dir).size());

  dir.close();
}
// LUCENE-1274: test writer.prepareCommit()
public void testPrepareCommitRollback() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  dir.setPreventDoubleWrite(false);

  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setMaxBufferedDocs(2)
          .setMergePolicy(newLogMergePolicy(5)));
  writer.commit();

  for (int i = 0; i < 23; i++) {
    TestIndexWriter.addDoc(writer);
  }

  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(0, reader.numDocs());

  writer.prepareCommit();

  IndexReader reader2 = DirectoryReader.open(dir);
  assertEquals(0, reader2.numDocs());

  writer.rollback();

  IndexReader reader3 = DirectoryReader.openIfChanged(reader);
  assertNull(reader3);
  assertEquals(0, reader.numDocs());
  assertEquals(0, reader2.numDocs());
  reader.close();
  reader2.close();

  writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  for (int i = 0; i < 17; i++) {
    TestIndexWriter.addDoc(writer);
  }

  reader = DirectoryReader.open(dir);
  assertEquals(0, reader.numDocs());
  reader.close();

  writer.prepareCommit();

  reader = DirectoryReader.open(dir);
  assertEquals(0, reader.numDocs());
  reader.close();

  writer.commit();
  reader = DirectoryReader.open(dir);
  assertEquals(17, reader.numDocs());
  reader.close();
  writer.close();
  dir.close();
}
// LUCENE-1044: test writer.commit() when autoCommit=false
public void testForceCommit() throws IOException {
  Directory dir = newDirectory();

  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setMaxBufferedDocs(2)
          .setMergePolicy(newLogMergePolicy(5)));
  writer.commit();

  for (int i = 0; i < 23; i++) {
    TestIndexWriter.addDoc(writer);
  }

  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(0, reader.numDocs());
  writer.commit();
  DirectoryReader reader2 = DirectoryReader.openIfChanged(reader);
  assertNotNull(reader2);
  assertEquals(0, reader.numDocs());
  assertEquals(23, reader2.numDocs());
  reader.close();

  for (int i = 0; i < 17; i++) {
    TestIndexWriter.addDoc(writer);
  }
  assertEquals(23, reader2.numDocs());
  reader2.close();
  reader = DirectoryReader.open(dir);
  assertEquals(23, reader.numDocs());
  reader.close();
  writer.commit();

  reader = DirectoryReader.open(dir);
  assertEquals(40, reader.numDocs());
  reader.close();
  writer.close();
  dir.close();
}
public void testTermDocsEnum() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  Document d = new Document();
  d.add(newStringField("f", "j", Field.Store.NO));
  w.addDocument(d);
  w.commit();
  w.addDocument(d);
  IndexReader r = w.getReader();
  w.close();
  DocsEnum de = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j"));
  assertEquals(0, de.nextDoc());
  assertEquals(1, de.nextDoc());
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
  r.close();
  dir.close();
}
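// A hedged companion sketch, not part of the original suite (the method name is
// mine): the same two-doc index can be walked with advance() instead of
// nextDoc(). advance(target) positions the enum on the first doc >= target and
// the enum returns NO_MORE_DOCS past the end. Setup mirrors testTermDocsEnum.
public void testTermDocsEnumAdvance() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  Document d = new Document();
  d.add(newStringField("f", "j", Field.Store.NO));
  w.addDocument(d);
  w.commit();
  w.addDocument(d);
  IndexReader r = w.getReader();
  w.close();
  DocsEnum de = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j"));
  assertEquals(1, de.advance(1)); // skips doc 0, lands on doc 1
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
  r.close();
  dir.close();
}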
public void testSeparateEnums() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  Document d = new Document();
  d.add(newStringField("f", "j", Field.Store.NO));
  w.addDocument(d);
  w.commit();
  w.addDocument(d);
  IndexReader r = w.getReader();
  w.close();
  DocsEnum d1 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, 0);
  DocsEnum d2 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, 0);
  assertEquals(0, d1.nextDoc());
  assertEquals(0, d2.nextDoc());
  r.close();
  dir.close();
}
// LUCENE-1382
public void testCommitUserData() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setMaxBufferedDocs(2));
  for (int j = 0; j < 17; j++) {
    TestIndexWriter.addDoc(w);
  }
  w.close();

  DirectoryReader r = DirectoryReader.open(dir);
  // commit(Map) never called for this index
  assertEquals(0, r.getIndexCommit().getUserData().size());
  r.close();

  w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setMaxBufferedDocs(2));
  for (int j = 0; j < 17; j++) {
    TestIndexWriter.addDoc(w);
  }
  Map<String, String> data = new HashMap<String, String>();
  data.put("label", "test1");
  w.commit(data);
  w.close();

  r = DirectoryReader.open(dir);
  assertEquals("test1", r.getIndexCommit().getUserData().get("label"));
  r.close();

  w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  w.forceMerge(1);
  w.close();

  dir.close();
}
public void testRandom() throws Exception {
  int num = atLeast(2);
  for (int iter = 0; iter < num; iter++) {
    if (VERBOSE) {
      System.out.println("TEST: iter=" + iter);
    }

    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(NoMergePolicy.COMPOUND_FILES));
    _TestUtil.keepFullyDeletedSegments(w);

    Map<BytesRef, List<Integer>> docs = new HashMap<BytesRef, List<Integer>>();
    Set<Integer> deleted = new HashSet<Integer>();
    List<BytesRef> terms = new ArrayList<BytesRef>();

    int numDocs = _TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER);
    Document doc = new Document();
    Field f = newStringField("field", "", Field.Store.NO);
    doc.add(f);
    Field id = newStringField("id", "", Field.Store.NO);
    doc.add(id);

    boolean onlyUniqueTerms = random().nextBoolean();
    if (VERBOSE) {
      System.out.println("TEST: onlyUniqueTerms=" + onlyUniqueTerms + " numDocs=" + numDocs);
    }
    Set<BytesRef> uniqueTerms = new HashSet<BytesRef>();
    for (int i = 0; i < numDocs; i++) {
      if (!onlyUniqueTerms && random().nextBoolean() && terms.size() > 0) {
        // re-use existing term
        BytesRef term = terms.get(random().nextInt(terms.size()));
        docs.get(term).add(i);
        f.setStringValue(term.utf8ToString());
      } else {
        String s = _TestUtil.randomUnicodeString(random(), 10);
        BytesRef term = new BytesRef(s);
        if (!docs.containsKey(term)) {
          docs.put(term, new ArrayList<Integer>());
        }
        docs.get(term).add(i);
        terms.add(term);
        uniqueTerms.add(term);
        f.setStringValue(s);
      }
      id.setStringValue("" + i);
      w.addDocument(doc);
      if (random().nextInt(4) == 1) {
        w.commit();
      }
      if (i > 0 && random().nextInt(20) == 1) {
        int delID = random().nextInt(i);
        deleted.add(delID);
        w.deleteDocuments(new Term("id", "" + delID));
        if (VERBOSE) {
          System.out.println("TEST: delete " + delID);
        }
      }
    }

    if (VERBOSE) {
      List<BytesRef> termsList = new ArrayList<BytesRef>(uniqueTerms);
      Collections.sort(termsList, BytesRef.getUTF8SortedAsUTF16Comparator());
      System.out.println("TEST: terms in UTF16 order:");
      for (BytesRef b : termsList) {
        System.out.println("  " + UnicodeUtil.toHexString(b.utf8ToString()) + " " + b);
        for (int docID : docs.get(b)) {
          if (deleted.contains(docID)) {
            System.out.println("    " + docID + " (deleted)");
          } else {
            System.out.println("    " + docID);
          }
        }
      }
    }

    IndexReader reader = w.getReader();
    w.close();
    if (VERBOSE) {
      System.out.println("TEST: reader=" + reader);
    }

    Bits liveDocs = MultiFields.getLiveDocs(reader);
    for (int delDoc : deleted) {
      assertFalse(liveDocs.get(delDoc));
    }

    for (int i = 0; i < 100; i++) {
      BytesRef term = terms.get(random().nextInt(terms.size()));
      if (VERBOSE) {
        System.out.println("TEST: seek term=" + UnicodeUtil.toHexString(term.utf8ToString()) + " " + term);
      }

      DocsEnum docsEnum = _TestUtil.docs(random(), reader, "field", term, liveDocs, null, 0);
      assertNotNull(docsEnum);

      for (int docID : docs.get(term)) {
        if (!deleted.contains(docID)) {
          assertEquals(docID, docsEnum.nextDoc());
        }
      }
      assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
    }

    reader.close();
    dir.close();
  }
}
/*
 * Simple test for "commit on close": open writer, then
 * add a bunch of docs, making sure reader does not see
 * them until writer has closed. Then instead of
 * closing the writer, call abort and verify reader sees
 * nothing was added. Then verify we can open the index
 * and add docs to it.
 */
public void testCommitOnCloseAbort() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setMaxBufferedDocs(10));
  for (int i = 0; i < 14; i++) {
    TestIndexWriter.addDoc(writer);
  }
  writer.close();

  Term searchTerm = new Term("content", "aaa");
  IndexReader reader = DirectoryReader.open(dir);
  IndexSearcher searcher = new IndexSearcher(reader);
  ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("first number of hits", 14, hits.length);
  reader.close();

  writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setOpenMode(OpenMode.APPEND)
          .setMaxBufferedDocs(10));
  for (int j = 0; j < 17; j++) {
    TestIndexWriter.addDoc(writer);
  }
  // Delete all docs:
  writer.deleteDocuments(searchTerm);

  reader = DirectoryReader.open(dir);
  searcher = new IndexSearcher(reader);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
  reader.close();

  // Now, abort the writer via rollback():
  writer.rollback();

  TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");

  reader = DirectoryReader.open(dir);
  searcher = new IndexSearcher(reader);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("saw changes after writer.abort", 14, hits.length);
  reader.close();

  // Now make sure we can re-open the index, add docs,
  // and all is good:
  writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setOpenMode(OpenMode.APPEND)
          .setMaxBufferedDocs(10));

  // On abort, writer in fact may write to the same
  // segments_N file:
  dir.setPreventDoubleWrite(false);

  for (int i = 0; i < 12; i++) {
    for (int j = 0; j < 17; j++) {
      TestIndexWriter.addDoc(writer);
    }
    IndexReader r = DirectoryReader.open(dir);
    searcher = new IndexSearcher(r);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    r.close();
  }

  writer.close();
  IndexReader r = DirectoryReader.open(dir);
  searcher = new IndexSearcher(r);
  hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
  assertEquals("didn't see changes after close", 218, hits.length);
  r.close();

  dir.close();
}
public void testFutureCommit() throws Exception {
  Directory dir = newDirectory();

  IndexWriter w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
  Document doc = new Document();
  w.addDocument(doc);

  // commit to "first"
  Map<String, String> commitData = new HashMap<String, String>();
  commitData.put("tag", "first");
  w.commit(commitData);

  // commit to "second"
  w.addDocument(doc);
  commitData.put("tag", "second");
  w.commit(commitData);
  w.close();

  // open "first" with IndexWriter
  IndexCommit commit = null;
  for (IndexCommit c : DirectoryReader.listCommits(dir)) {
    if (c.getUserData().get("tag").equals("first")) {
      commit = c;
      break;
    }
  }

  assertNotNull(commit);

  w = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)
          .setIndexCommit(commit));

  assertEquals(1, w.numDocs());

  // commit IndexWriter to "third"
  w.addDocument(doc);
  commitData.put("tag", "third");
  w.commit(commitData);
  w.close();

  // make sure "second" commit is still there
  commit = null;
  for (IndexCommit c : DirectoryReader.listCommits(dir)) {
    if (c.getUserData().get("tag").equals("second")) {
      commit = c;
      break;
    }
  }

  assertNotNull(commit);

  dir.close();
}
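// A hedged side note, not part of the original test: a specific IndexCommit can
// also be opened directly as a point-in-time reader, without going through
// IndexWriterConfig.setIndexCommit. A minimal sketch (the helper name is mine),
// assuming a commit obtained from DirectoryReader.listCommits as above:
private int docCountAtCommit(IndexCommit commit) throws IOException {
  DirectoryReader pointInTime = DirectoryReader.open(commit);
  try {
    // The reader reflects only what was flushed as of this commit.
    return pointInTime.numDocs();
  } finally {
    pointInTime.close();
  }
}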
/*
 * Verify that calling forceMerge when writer is open for
 * "commit on close" works correctly both for rollback()
 * and close().
 */
public void testCommitOnCloseForceMerge() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  // Must disable throwing exc on double-write: this
  // test uses IW.rollback which easily results in
  // writing to same file more than once
  dir.setPreventDoubleWrite(false);
  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setMaxBufferedDocs(10)
          .setMergePolicy(newLogMergePolicy(10)));
  for (int j = 0; j < 17; j++) {
    TestIndexWriter.addDocWithIndex(writer, j);
  }
  writer.close();

  writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setOpenMode(OpenMode.APPEND));
  writer.forceMerge(1);

  // Open a reader before closing (committing) the writer:
  DirectoryReader reader = DirectoryReader.open(dir);

  // Reader should see index as multi-seg at this point:
  assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1);
  reader.close();

  // Abort the writer:
  writer.rollback();
  TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");

  // Open a reader after aborting writer:
  reader = DirectoryReader.open(dir);

  // Reader should still see index as multi-segment
  assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1);
  reader.close();

  if (VERBOSE) {
    System.out.println("TEST: do real full merge");
  }
  writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
          .setOpenMode(OpenMode.APPEND));
  writer.forceMerge(1);
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: writer closed");
  }
  TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");

  // Open a reader after closing writer:
  reader = DirectoryReader.open(dir);

  // Reader should see index as one segment
  assertEquals("Reader incorrectly sees more than one segment", 1,
      reader.getSequentialSubReaders().size());
  reader.close();
  dir.close();
}
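// A minimal sketch of TestIndexWriter.addDocWithIndex, the other helper used by
// these tests. The real helper is defined in TestIndexWriter, so this body is an
// assumption: each call adds one document carrying the token "aaa" plus its
// index, so doc counts and on-disk size grow predictably with the loop count.
static void addDocWithIndex(IndexWriter writer, int index) throws IOException {
  Document doc = new Document();
  doc.add(newField("content", "aaa " + index, TextField.TYPE_STORED));
  doc.add(newField("id", "" + index, TextField.TYPE_STORED));
  writer.addDocument(doc);
}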
/*
 * Verify that a writer with "commit on close" indeed
 * cleans up the temp segments created after opening
 * that are not referenced by the starting segments
 * file. We check this by using MockDirectoryWrapper to
 * measure max temp disk space used.
 */
public void testCommitOnCloseDiskUsage() throws IOException {
  // MemoryCodec, since it uses FST, is not necessarily
  // "additive", ie if you add up N small FSTs, then merge
  // them, the merged result can easily be larger than the
  // sum because the merged FST may use array encoding for
  // some arcs (which uses more space):
  final String idFormat = _TestUtil.getPostingsFormat("id");
  final String contentFormat = _TestUtil.getPostingsFormat("content");
  assumeFalse("This test cannot run with Memory codec",
      idFormat.equals("Memory") || contentFormat.equals("Memory"));

  MockDirectoryWrapper dir = newDirectory();
  Analyzer analyzer;
  if (random().nextBoolean()) {
    // no payloads
    analyzer = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String fieldName, Reader reader) {
        return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true));
      }
    };
  } else {
    // fixed length payloads
    final int length = random().nextInt(200);
    analyzer = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
        return new TokenStreamComponents(tokenizer,
            new MockFixedLengthPayloadFilter(random(), tokenizer, length));
      }
    };
  }

  IndexWriter writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
          .setMaxBufferedDocs(10)
          .setReaderPooling(false)
          .setMergePolicy(newLogMergePolicy(10)));
  for (int j = 0; j < 30; j++) {
    TestIndexWriter.addDocWithIndex(writer, j);
  }
  writer.close();
  dir.resetMaxUsedSizeInBytes();

  dir.setTrackDiskUsage(true);
  long startDiskUsage = dir.getMaxUsedSizeInBytes();
  writer = new IndexWriter(dir,
      newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
          .setOpenMode(OpenMode.APPEND)
          .setMaxBufferedDocs(10)
          .setMergeScheduler(new SerialMergeScheduler())
          .setReaderPooling(false)
          .setMergePolicy(newLogMergePolicy(10)));
  for (int j = 0; j < 1470; j++) {
    TestIndexWriter.addDocWithIndex(writer, j);
  }
  long midDiskUsage = dir.getMaxUsedSizeInBytes();
  dir.resetMaxUsedSizeInBytes();
  writer.forceMerge(1);
  writer.close();

  DirectoryReader.open(dir).close();

  long endDiskUsage = dir.getMaxUsedSizeInBytes();

  // Ending index is 50X as large as starting index; due
  // to 3X disk usage normally we allow 150X max
  // transient usage. If something is wrong w/ deleter
  // and it doesn't delete intermediate segments then it
  // will exceed this 150X:
  // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + "; end " + endDiskUsage);
  assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage
          + " start=" + startDiskUsage + " end=" + endDiskUsage
          + " max=" + (startDiskUsage * 150),
      midDiskUsage < 150 * startDiskUsage);
  assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage
          + " startDiskUsage=" + startDiskUsage
          + " max=" + (startDiskUsage * 150),
      endDiskUsage < 150 * startDiskUsage);
  dir.close();
}