// LUCENE-1274: test writer.prepareCommit()
  public void testPrepareCommitRollback() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
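    // rollback() below may rewrite the same segments_N file name it already
    // wrote; MockDirectoryWrapper would normally fail the test on such a
    // double write, so disable that check: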
    dir.setPreventDoubleWrite(false);

    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(2)
                .setMergePolicy(newLogMergePolicy(5)));
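    // Commit immediately so the directory has an initial (empty) commit point
    // that readers can open: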
    writer.commit();

    for (int i = 0; i < 23; i++) TestIndexWriter.addDoc(writer);

    DirectoryReader reader = DirectoryReader.open(dir);
    assertEquals(0, reader.numDocs());

    writer.prepareCommit();
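    // prepareCommit() is phase one of the two-phase commit: the pending
    // changes are flushed and fsynced but stay invisible until commit(), so a
    // freshly opened reader still sees an empty index: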

    IndexReader reader2 = DirectoryReader.open(dir);
    assertEquals(0, reader2.numDocs());

    writer.rollback();
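    // rollback() discards the prepared-but-uncommitted changes and closes the
    // writer; the last commit point is untouched, so openIfChanged() must
    // report no change: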

    IndexReader reader3 = DirectoryReader.openIfChanged(reader);
    assertNull(reader3);
    assertEquals(0, reader.numDocs());
    assertEquals(0, reader2.numDocs());
    reader.close();
    reader2.close();

    writer =
        new IndexWriter(
            dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    for (int i = 0; i < 17; i++) TestIndexWriter.addDoc(writer);

    reader = DirectoryReader.open(dir);
    assertEquals(0, reader.numDocs());
    reader.close();

    writer.prepareCommit();

    reader = DirectoryReader.open(dir);
    assertEquals(0, reader.numDocs());
    reader.close();

    writer.commit();
    reader = DirectoryReader.open(dir);
    assertEquals(17, reader.numDocs());
    reader.close();
    writer.close();
    dir.close();
  }

  public void testNoExtraFiles() throws IOException {
    Directory directory = newDirectory();
    IndexWriter writer =
        new IndexWriter(
            directory,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(2));

    for (int iter = 0; iter < 7; iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }

      for (int j = 0; j < 21; j++) {
        Document doc = new Document();
        doc.add(newTextField("content", "a b c", Field.Store.NO));
        writer.addDocument(doc);
      }

      writer.close();
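      // close() commits; after it returns, every file left in the directory
      // must be referenced by the commit (no leaked temp or merge files):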
      TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");

      // Reopen
      writer =
          new IndexWriter(
              directory,
              newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                  .setOpenMode(OpenMode.APPEND)
                  .setMaxBufferedDocs(2));
    }

    writer.close();

    directory.close();
  }

  /*
   * Simple test for "commit on close": open writer then
   * add a bunch of docs, making sure reader does not see
   * these docs until writer is closed.
   */
  public void testCommitOnClose() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer =
        new IndexWriter(
            dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    for (int i = 0; i < 14; i++) {
      TestIndexWriter.addDoc(writer);
    }
    writer.close();

    Term searchTerm = new Term("content", "aaa");
    DirectoryReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("first number of hits", 14, hits.length);
    reader.close();

    reader = DirectoryReader.open(dir);
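    // Keep this reader open across the uncommitted changes below; it must
    // remain current until the writer commits at close():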

    writer =
        new IndexWriter(
            dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 11; j++) {
        TestIndexWriter.addDoc(writer);
      }
      IndexReader r = DirectoryReader.open(dir);
      searcher = new IndexSearcher(r);
      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
      r.close();
      assertTrue("reader should have still been current", reader.isCurrent());
    }

    // Now, close the writer:
    writer.close();
    assertFalse("reader should not be current now", reader.isCurrent());

    IndexReader r = DirectoryReader.open(dir);
    searcher = new IndexSearcher(r);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader did not see changes after writer was closed", 47, hits.length);
    r.close();
    reader.close();
    dir.close();
  }

  // LUCENE-1382
  public void testCommitUserData() throws IOException {
    Directory dir = newDirectory();
    IndexWriter w =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(2));
    for (int j = 0; j < 17; j++) TestIndexWriter.addDoc(w);
    w.close();

    DirectoryReader r = DirectoryReader.open(dir);
    // commit(Map) never called for this index
    assertEquals(0, r.getIndexCommit().getUserData().size());
    r.close();

    w =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(2));
    for (int j = 0; j < 17; j++) TestIndexWriter.addDoc(w);
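    // commit(Map) stores opaque user data in the segments_N file; readers can
    // retrieve it later via IndexCommit.getUserData():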
    Map<String, String> data = new HashMap<String, String>();
    data.put("label", "test1");
    w.commit(data);
    w.close();

    r = DirectoryReader.open(dir);
    assertEquals("test1", r.getIndexCommit().getUserData().get("label"));
    r.close();

    w =
        new IndexWriter(
            dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    w.forceMerge(1);
    w.close();

    dir.close();
  }

  // LUCENE-1044: test writer.commit() when ac=false
  public void testForceCommit() throws IOException {
    Directory dir = newDirectory();

    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(2)
                .setMergePolicy(newLogMergePolicy(5)));
    writer.commit();

    for (int i = 0; i < 23; i++) TestIndexWriter.addDoc(writer);

    DirectoryReader reader = DirectoryReader.open(dir);
    assertEquals(0, reader.numDocs());
    writer.commit();
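    // The commit makes the 23 buffered docs visible to new readers, while the
    // already-open reader keeps its point-in-time view of the empty index: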
    DirectoryReader reader2 = DirectoryReader.openIfChanged(reader);
    assertNotNull(reader2);
    assertEquals(0, reader.numDocs());
    assertEquals(23, reader2.numDocs());
    reader.close();

    for (int i = 0; i < 17; i++) TestIndexWriter.addDoc(writer);
    assertEquals(23, reader2.numDocs());
    reader2.close();
    reader = DirectoryReader.open(dir);
    assertEquals(23, reader.numDocs());
    reader.close();
    writer.commit();

    reader = DirectoryReader.open(dir);
    assertEquals(40, reader.numDocs());
    reader.close();
    writer.close();
    dir.close();
  }

  @Test
  public void testRollingUpdates() throws Exception {
    Random random = new Random(random().nextLong());
    final BaseDirectoryWrapper dir = newDirectory();
    // The test checks for unreferenced files with the IW helper method, which
    // is not aware of files the virus-scanner emulation "tried to delete":
    if (dir instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
    }

    final LineFileDocs docs = new LineFileDocs(random, true);

    // provider.register(new MemoryCodec());
    if (random().nextBoolean()) {
      Codec.setDefault(
          TestUtil.alwaysPostingsFormat(
              new MemoryPostingsFormat(random().nextBoolean(), random.nextFloat())));
    }

    MockAnalyzer analyzer = new MockAnalyzer(random());
    analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));

    final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
    final int SIZE = atLeast(20);
    int id = 0;
    IndexReader r = null;
    IndexSearcher s = null;
    final int numUpdates =
        (int)
            (SIZE * (2 + (TEST_NIGHTLY ? 200 * random().nextDouble() : 5 * random().nextDouble())));
    if (VERBOSE) {
      System.out.println("TEST: numUpdates=" + numUpdates);
    }
    int updateCount = 0;
    // TODO: sometimes update ids not in order...
    for (int docIter = 0; docIter < numUpdates; docIter++) {
      final Document doc = docs.nextDoc();
      final String myID = Integer.toString(id);
      if (id == SIZE - 1) {
        id = 0;
      } else {
        id++;
      }
      if (VERBOSE) {
        System.out.println("  docIter=" + docIter + " id=" + id);
      }
      ((Field) doc.getField("docid")).setStringValue(myID);

      Term idTerm = new Term("docid", myID);

      final boolean doUpdate;
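      // tryDeleteDocument deletes by (reader, segment docID) and returns
      // false if that segment no longer exists, e.g. it was merged away since
      // the reader was opened; in that case fall back to a Term-based update: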
      if (s != null && updateCount < SIZE) {
        TopDocs hits = s.search(new TermQuery(idTerm), 1);
        assertEquals(1, hits.totalHits);
        doUpdate = !w.tryDeleteDocument(r, hits.scoreDocs[0].doc);
        if (VERBOSE) {
          if (doUpdate) {
            System.out.println("  tryDeleteDocument failed");
          } else {
            System.out.println("  tryDeleteDocument succeeded");
          }
        }
      } else {
        doUpdate = true;
        if (VERBOSE) {
          System.out.println("  no searcher: doUpdate=true");
        }
      }

      updateCount++;

      if (doUpdate) {
        if (random().nextBoolean()) {
          w.updateDocument(idTerm, doc);
        } else {
          // It's OK to not be atomic for this test (no separate thread reopening readers):
          w.deleteDocuments(new TermQuery(idTerm));
          w.addDocument(doc);
        }
      } else {
        w.addDocument(doc);
      }

      if (docIter >= SIZE && random().nextInt(50) == 17) {
        if (r != null) {
          r.close();
        }

        final boolean applyDeletions = random().nextBoolean();

        if (VERBOSE) {
          System.out.println("TEST: reopen applyDeletions=" + applyDeletions);
        }

        r = w.getReader(applyDeletions);
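        // Near-real-time reader: when deletes are not applied, searches could
        // still return deleted docs, so only keep a searcher in the
        // applyDeletions case: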
        if (applyDeletions) {
          s = newSearcher(r);
        } else {
          s = null;
        }
        assertTrue(
            "applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE,
            !applyDeletions || r.numDocs() == SIZE);
        updateCount = 0;
      }
    }

    if (r != null) {
      r.close();
    }

    w.commit();
    assertEquals(SIZE, w.numDocs());

    w.close();

    TestIndexWriter.assertNoUnreferencedFiles(dir, "leftover files after rolling updates");

    docs.close();

    // LUCENE-4455:
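    // Verify the latest commit accounts for every codec file on disk: the sum
    // of the per-segment sizes must equal the total length of all codec
    // files, i.e. no stale files were left behind.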
    SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
    long totalBytes = 0;
    for (SegmentCommitInfo sipc : infos) {
      totalBytes += sipc.sizeInBytes();
    }
    long totalBytes2 = 0;

    for (String fileName : dir.listAll()) {
      if (IndexFileNames.CODEC_FILE_PATTERN.matcher(fileName).matches()) {
        totalBytes2 += dir.fileLength(fileName);
      }
    }
    assertEquals(totalBytes2, totalBytes);
    dir.close();
  }

  /*
   * Simple test for "commit on close": open writer, then
   * add a bunch of docs, making sure reader does not see
   * them until writer has closed.  Then instead of
   * closing the writer, call abort and verify reader sees
   * nothing was added.  Then verify we can open the index
   * and add docs to it.
   */
  public void testCommitOnCloseAbort() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(10));
    for (int i = 0; i < 14; i++) {
      TestIndexWriter.addDoc(writer);
    }
    writer.close();

    Term searchTerm = new Term("content", "aaa");
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("first number of hits", 14, hits.length);
    reader.close();

    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(10));
    for (int j = 0; j < 17; j++) {
      TestIndexWriter.addDoc(writer);
    }
    // Delete all docs:
    writer.deleteDocuments(searchTerm);
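    // Neither the 17 added docs nor the delete-all has been committed, so a
    // newly opened reader must still see the original 14 docs: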

    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    reader.close();

    // Now, abort the writer via rollback():
    writer.rollback();

    TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");

    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("saw changes after writer.abort", 14, hits.length);
    reader.close();

    // Now make sure we can re-open the index, add docs,
    // and all is good:
    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(10));

    // On abort, writer in fact may write to the same
    // segments_N file:
    dir.setPreventDoubleWrite(false);

    for (int i = 0; i < 12; i++) {
      for (int j = 0; j < 17; j++) {
        TestIndexWriter.addDoc(writer);
      }
      IndexReader r = DirectoryReader.open(dir);
      searcher = new IndexSearcher(r);
      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
      r.close();
    }

    writer.close();
    IndexReader r = DirectoryReader.open(dir);
    searcher = new IndexSearcher(r);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("didn't see changes after close", 218, hits.length);
    r.close();

    dir.close();
  }

  /*
   * Verify that calling forceMerge when writer is open for
   * "commit on close" works correctly both for rollback()
   * and close().
   */
  public void testCommitOnCloseForceMerge() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    // Must disable throwing exc on double-write: this
    // test uses IW.rollback which easily results in
    // writing to same file more than once
    dir.setPreventDoubleWrite(false);
    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(10)
                .setMergePolicy(newLogMergePolicy(10)));
    for (int j = 0; j < 17; j++) {
      TestIndexWriter.addDocWithIndex(writer, j);
    }
    writer.close();

    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));
    writer.forceMerge(1);

    // Open a reader before closing (committing) the writer:
    DirectoryReader reader = DirectoryReader.open(dir);

    // Reader should see index as multi-seg at this
    // point:
    assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1);
    reader.close();

    // Abort the writer:
    writer.rollback();
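    // rollback() discards the uncommitted forceMerge, leaving the last
    // committed multi-segment index in place: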
    TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");

    // Open a reader after aborting writer:
    reader = DirectoryReader.open(dir);

    // Reader should still see index as multi-segment
    assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1);
    reader.close();

    if (VERBOSE) {
      System.out.println("TEST: do real full merge");
    }
    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));
    writer.forceMerge(1);
    writer.close();

    if (VERBOSE) {
      System.out.println("TEST: writer closed");
    }
    TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");

    // Open a reader after aborting writer:
    reader = DirectoryReader.open(dir);

    // Reader should see index as one segment
    assertEquals(
        "Reader incorrectly sees more than one segment",
        1,
        reader.getSequentialSubReaders().size());
    reader.close();
    dir.close();
  }

  /*
   * Verify that a writer with "commit on close" indeed
   * cleans up the temp segments created after opening
   * that are not referenced by the starting segments
   * file.  We check this by using MockDirectoryWrapper to
   * measure max temp disk space used.
   */
  public void testCommitOnCloseDiskUsage() throws IOException {
    // MemoryCodec, since it uses FST, is not necessarily
    // "additive", i.e. if you add up N small FSTs, then merge
    // them, the merged result can easily be larger than the
    // sum because the merged FST may use array encoding for
    // some arcs (which uses more space):

    final String idFormat = _TestUtil.getPostingsFormat("id");
    final String contentFormat = _TestUtil.getPostingsFormat("content");
    assumeFalse(
        "This test cannot run with Memory codec",
        idFormat.equals("Memory") || contentFormat.equals("Memory"));
    MockDirectoryWrapper dir = newDirectory();
    Analyzer analyzer;
    if (random().nextBoolean()) {
      // no payloads
      analyzer =
          new Analyzer() {
            @Override
            public TokenStreamComponents createComponents(String fieldName, Reader reader) {
              return new TokenStreamComponents(
                  new MockTokenizer(reader, MockTokenizer.WHITESPACE, true));
            }
          };
    } else {
      // fixed length payloads
      final int length = random().nextInt(200);
      analyzer =
          new Analyzer() {
            @Override
            public TokenStreamComponents createComponents(String fieldName, Reader reader) {
              Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
              return new TokenStreamComponents(
                  tokenizer, new MockFixedLengthPayloadFilter(random(), tokenizer, length));
            }
          };
    }

    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
                .setMaxBufferedDocs(10)
                .setReaderPooling(false)
                .setMergePolicy(newLogMergePolicy(10)));
    for (int j = 0; j < 30; j++) {
      TestIndexWriter.addDocWithIndex(writer, j);
    }
    writer.close();
    dir.resetMaxUsedSizeInBytes();

    dir.setTrackDiskUsage(true);
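    // Record the peak number of bytes in use so we can bound the transient
    // disk usage of the adds + forceMerge below: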
    long startDiskUsage = dir.getMaxUsedSizeInBytes();
    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(10)
                .setMergeScheduler(new SerialMergeScheduler())
                .setReaderPooling(false)
                .setMergePolicy(newLogMergePolicy(10)));
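    // SerialMergeScheduler runs merges synchronously in the indexing thread,
    // which makes the peak-disk-usage measurement deterministic.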

    for (int j = 0; j < 1470; j++) {
      TestIndexWriter.addDocWithIndex(writer, j);
    }
    long midDiskUsage = dir.getMaxUsedSizeInBytes();
    dir.resetMaxUsedSizeInBytes();
    writer.forceMerge(1);
    writer.close();

    DirectoryReader.open(dir).close();

    long endDiskUsage = dir.getMaxUsedSizeInBytes();

    // Ending index is 50X as large as starting index; due
    // to 3X disk usage normally we allow 150X max
    // transient usage.  If something is wrong w/ deleter
    // and it doesn't delete intermediate segments then it
    // will exceed this 150X:
    // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " +
    // endDiskUsage);
    assertTrue(
        "writer used too much space while adding documents: mid="
            + midDiskUsage
            + " start="
            + startDiskUsage
            + " end="
            + endDiskUsage
            + " max="
            + (startDiskUsage * 150),
        midDiskUsage < 150 * startDiskUsage);
    assertTrue(
        "writer used too much space after close: endDiskUsage="
            + endDiskUsage
            + " startDiskUsage="
            + startDiskUsage
            + " max="
            + (startDiskUsage * 150),
        endDiskUsage < 150 * startDiskUsage);
    dir.close();
  }

  /**
   * Make sure if modifier tries to commit but hits disk full that modifier remains consistent and
   * usable. Similar to TestIndexReader.testDiskFull().
   */
  private void testOperationsOnDiskFull(boolean updates) throws IOException {

    boolean debug = false;
    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157;
    int END_COUNT = 144;

    // First build up a starting index:
    MockDirectoryWrapper startDir = newMockDirectory();
    IndexWriter writer =
        new IndexWriter(
            startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    for (int i = 0; i < 157; i++) {
      Document d = new Document();
      d.add(newStringField("id", Integer.toString(i), Field.Store.YES));
      d.add(newTextField("content", "aaa " + i, Field.Store.NO));
      writer.addDocument(d);
    }
    writer.close();

    long diskUsage = startDir.sizeInBytes();
    long diskFree = diskUsage + 10;
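    // Start with barely more free space than the starting index consumed, so
    // early cycles are guaranteed to hit disk full; each pass of the loop
    // below grants 10 more bytes until the operations finally succeed.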

    IOException err = null;

    boolean done = false;

    // Iterate w/ ever increasing free disk space:
    while (!done) {
      MockDirectoryWrapper dir =
          new MockDirectoryWrapper(random(), new RAMDirectory(startDir, newIOContext(random())));
      dir.setPreventDoubleWrite(false);
      IndexWriter modifier =
          new IndexWriter(
              dir,
              newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                  .setMaxBufferedDocs(1000) // use flush or close
                  .setMaxBufferedDeleteTerms(1000)); // use flush or close

      // For each disk size, first try to commit against
      // dir that will hit random IOExceptions & disk
      // full; after, give it infinite disk space & turn
      // off random IOExceptions & retry w/ same reader:
      boolean success = false;

      for (int x = 0; x < 2; x++) {

        double rate = 0.1;
        double diskRatio = ((double) diskFree) / diskUsage;
        long thisDiskFree;
        String testName;

        if (0 == x) {
          thisDiskFree = diskFree;
          if (diskRatio >= 2.0) {
            rate /= 2;
          }
          if (diskRatio >= 4.0) {
            rate /= 2;
          }
          if (diskRatio >= 6.0) {
            rate = 0.0;
          }
          if (debug) {
            System.out.println("\ncycle: " + diskFree + " bytes");
          }
          testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
        } else {
          thisDiskFree = 0;
          rate = 0.0;
          if (debug) {
            System.out.println("\ncycle: same writer: unlimited disk space");
          }
          testName = "reader re-use after disk full";
        }

        // Apply this cycle's disk quota and fault-injection rate:
        dir.setMaxSizeInBytes(thisDiskFree);
        dir.setRandomIOExceptionRate(rate);

        try {
          if (0 == x) {
            int docId = 12;
            for (int i = 0; i < 13; i++) {
              if (updates) {
                Document d = new Document();
                d.add(newStringField("id", Integer.toString(i), Field.Store.YES));
                d.add(newTextField("content", "bbb " + i, Field.Store.NO));
                modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
              } else { // deletes
                modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
                // modifier.setNorm(docId, "contents", (float)2.0);
              }
              docId += 12;
            }
          }
          modifier.close();
          success = true;
          if (0 == x) {
            done = true;
          }
        } catch (IOException e) {
          if (debug) {
            System.out.println("  hit IOException: " + e);
            e.printStackTrace(System.out);
          }
          err = e;
          if (1 == x) {
            e.printStackTrace();
            fail(testName + " hit IOException after disk space was freed up");
          }
        }

        // If the close() succeeded, make sure there are
        // no unreferenced files.
        if (success) {
          TestUtil.checkIndex(dir);
          TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");
        }

        // Finally, verify index is not corrupt, and, if
        // we succeeded, we see all docs changed, and if
        // we failed, we see either all docs or no docs
        // changed (transactional semantics):
        IndexReader newReader = null;
        try {
          newReader = DirectoryReader.open(dir);
        } catch (IOException e) {
          e.printStackTrace();
          fail(
              testName + ": exception when creating IndexReader after disk full during close: " + e);
        }

        IndexSearcher searcher = new IndexSearcher(newReader);
        ScoreDoc[] hits = null;
        try {
          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
        } catch (IOException e) {
          e.printStackTrace();
          fail(testName + ": exception when searching: " + e);
        }
        int result2 = hits.length;
        if (success) {
          if (x == 0 && result2 != END_COUNT) {
            fail(
                testName
                    + ": method did not throw exception but hits.length for search on term 'aaa' is "
                    + result2
                    + " instead of expected "
                    + END_COUNT);
          } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
            // It's possible that the first exception was
            // "recoverable" wrt pending deletes, in which
            // case the pending deletes are retained and
            // then re-flushing (with plenty of disk
            // space) will succeed in flushing the
            // deletes:
            fail(
                testName
                    + ": method did not throw exception but hits.length for search on term 'aaa' is "
                    + result2
                    + " instead of expected "
                    + START_COUNT
                    + " or "
                    + END_COUNT);
          }
        } else {
          // On hitting exception we still may have added
          // all docs:
          if (result2 != START_COUNT && result2 != END_COUNT) {
            err.printStackTrace();
            fail(
                testName
                    + ": method did throw exception but hits.length for search on term 'aaa' is "
                    + result2
                    + " instead of expected "
                    + START_COUNT
                    + " or "
                    + END_COUNT);
          }
        }

        newReader.close();

        if (result2 == END_COUNT) {
          break;
        }
      }

      dir.close();

      // Try again with 10 more bytes of free space:
      diskFree += 10;
    }
  }