Example #1
  // case 0: add self, expect exception
  public void testAddSelf() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    IndexWriter writer = null;

    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    // add 100 documents
    addDocs(writer, 100);
    assertEquals(100, writer.maxDoc());
    writer.close();

    writer =
        newWriter(
            aux,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setMaxBufferedDocs(1000)
                .setMergePolicy(newLogMergePolicy(false)));
    // add 40 documents in separate files
    addDocs(writer, 40);
    writer.close();
    writer =
        newWriter(
            aux,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setMaxBufferedDocs(1000)
                .setMergePolicy(newLogMergePolicy(false)));
    // re-create the aux index (OpenMode.CREATE above discards the first 40 docs) with 100 documents
    addDocs(writer, 100);
    writer.close();

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));
    try {
      // cannot add self
      writer.addIndexes(aux, dir);
      fail("did not hit expected IllegalArgumentException");
    } catch (IllegalArgumentException e) {
      assertEquals(100, writer.maxDoc());
    }
    writer.close();

    // make sure the index is correct
    verifyNumDocs(dir, 100);
    dir.close();
    aux.close();
  }
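
  // The newWriter, addDocs, and verifyNumDocs helpers are not part of this
  // excerpt. Minimal sketches consistent with the assertions in these examples
  // follow; the single "content" field holding the term "aaa" is inferred from
  // the verifyTermDocs calls further below, so treat the exact document layout
  // as an assumption rather than the original code.
  private IndexWriter newWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    return new IndexWriter(dir, conf);
  }

  private void addDocs(IndexWriter writer, int numDocs) throws IOException {
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(newTextField("content", "aaa", Field.Store.NO));
      writer.addDocument(doc);
    }
  }

  private void verifyNumDocs(Directory dir, int numDocs) throws IOException {
    // both maxDoc and numDocs must match: no deleted docs may remain
    IndexReader reader = DirectoryReader.open(dir);
    assertEquals(numDocs, reader.maxDoc());
    assertEquals(numDocs, reader.numDocs());
    reader.close();
  }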
Example #2
  // case 3: tail segments, invariants hold, copy, invariants hold
  public void testNoMergeAfterCopy() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);

    IndexWriter writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(10)
                .setMergePolicy(newLogMergePolicy(4)));

    writer.addIndexes(
        aux, new MockDirectoryWrapper(random(), new RAMDirectory(aux, newIOContext(random()))));
    assertEquals(1060, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();

    // make sure the index is correct
    verifyNumDocs(dir, 1060);
    dir.close();
    aux.close();
  }
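
  // Overload used by testNoMergeAfterCopy above; presumably it simply delegates
  // to the three-argument setUpDirs defined in the next example with withID=false.
  private void setUpDirs(Directory dir, Directory aux) throws IOException {
    setUpDirs(dir, aux, false);
  }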
Example #3
  private void setUpDirs(Directory dir, Directory aux, boolean withID) throws IOException {
    IndexWriter writer = null;

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setMaxBufferedDocs(1000));
    // add 1000 documents in 1 segment
    if (withID) {
      addDocsWithID(writer, 1000, 0);
    } else {
      addDocs(writer, 1000);
    }
    assertEquals(1000, writer.maxDoc());
    assertEquals(1, writer.getSegmentCount());
    writer.close();

    writer =
        newWriter(
            aux,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setMaxBufferedDocs(1000)
                .setMergePolicy(newLogMergePolicy(false, 10)));
    // add 30 documents in 3 segments
    for (int i = 0; i < 3; i++) {
      if (withID) {
        addDocsWithID(writer, 10, 10 * i);
      } else {
        addDocs(writer, 10);
      }
      writer.close();
      writer =
          newWriter(
              aux,
              newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                  .setOpenMode(OpenMode.APPEND)
                  .setMaxBufferedDocs(1000)
                  .setMergePolicy(newLogMergePolicy(false, 10)));
    }
    assertEquals(30, writer.maxDoc());
    assertEquals(3, writer.getSegmentCount());
    writer.close();
  }
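
  // addDocsWithID is not shown in this excerpt. A plausible sketch, inferred
  // from the docStart offsets above and from the deleteDocuments(new Term("id",
  // ...)) calls in later examples; indexing the id as a StringField (a single
  // untokenized term) is an assumption, but the ids must be searchable as
  // exact terms for those deletes to work.
  private void addDocsWithID(IndexWriter writer, int numDocs, int docStart) throws IOException {
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(newTextField("content", "aaa", Field.Store.NO));
      doc.add(newStringField("id", "" + (docStart + i), Field.Store.YES));
      writer.addDocument(doc);
    }
  }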
  protected void runFlushByRam(int numThreads, double maxRamMB, boolean ensureNotStalled)
      throws IOException, InterruptedException {
    final int numDocumentsToIndex = 10 + atLeast(30);
    AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
    Directory dir = newDirectory();
    MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy();
    IndexWriterConfig iwc =
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setFlushPolicy(flushPolicy);
    final int numDWPT = 1 + atLeast(2);
    DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool(numDWPT);
    iwc.setIndexerThreadPool(threadPool);
    iwc.setRAMBufferSizeMB(maxRamMB);
    iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    iwc.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    IndexWriter writer = new IndexWriter(dir, iwc);
    flushPolicy = (MockDefaultFlushPolicy) writer.getConfig().getFlushPolicy();
    assertFalse(flushPolicy.flushOnDocCount());
    assertFalse(flushPolicy.flushOnDeleteTerms());
    assertTrue(flushPolicy.flushOnRAM());
    DocumentsWriter docsWriter = writer.getDocsWriter();
    assertNotNull(docsWriter);
    DocumentsWriterFlushControl flushControl = docsWriter.flushControl;
    assertEquals(" bytes must be 0 after init", 0, flushControl.flushBytes());

    IndexThread[] threads = new IndexThread[numThreads];
    for (int x = 0; x < threads.length; x++) {
      threads[x] = new IndexThread(numDocs, numThreads, writer, lineDocFile, false);
      threads[x].start();
    }

    for (int x = 0; x < threads.length; x++) {
      threads[x].join();
    }
    final long maxRAMBytes = (long) (iwc.getRAMBufferSizeMB() * 1024. * 1024.);
    assertEquals(" all flushes must be due numThreads=" + numThreads, 0, flushControl.flushBytes());
    assertEquals(numDocumentsToIndex, writer.numDocs());
    assertEquals(numDocumentsToIndex, writer.maxDoc());
    assertTrue(
        "peak bytes without flush exceeded watermark",
        flushPolicy.peakBytesWithoutFlush <= maxRAMBytes);
    assertActiveBytesAfter(flushControl);
    if (flushPolicy.hasMarkedPending) {
      assertTrue(maxRAMBytes < flushControl.peakActiveBytes);
    }
    if (ensureNotStalled) {
      assertFalse(docsWriter.flushControl.stallControl.wasStalled());
    }
    writer.close();
    assertEquals(0, flushControl.activeBytes());
    dir.close();
  }
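
  // lineDocFile and IndexThread are not shown in this excerpt. lineDocFile is
  // presumably a shared LineFileDocs instance, e.g. created in a @BeforeClass
  // method via "lineDocFile = new LineFileDocs(random());". IndexThread drains
  // the shared counter, adding one document per ticket; this is a minimal
  // sketch (the numThreads parameter is kept only to match the call sites, and
  // the random-commit frequency is a guess), assuming the test class's usual
  // imports.
  static class IndexThread extends Thread {
    final AtomicInteger pendingDocs;
    final IndexWriter writer;
    final LineFileDocs docs;
    final boolean doRandomCommit;
    final Random threadRandom = new Random(random().nextLong());

    IndexThread(AtomicInteger pendingDocs, int numThreads, IndexWriter writer,
        LineFileDocs docs, boolean doRandomCommit) {
      this.pendingDocs = pendingDocs;
      this.writer = writer;
      this.docs = docs;
      this.doRandomCommit = doRandomCommit;
    }

    @Override
    public void run() {
      try {
        while (pendingDocs.decrementAndGet() > -1) {
          writer.addDocument(docs.nextDoc());
          if (doRandomCommit && threadRandom.nextInt(20) == 0) {
            writer.commit();
          }
        }
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }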
  public void testFlushDocCount() throws IOException, InterruptedException {
    int[] numThreads = new int[] {2 + atLeast(1), 1};
    for (int i = 0; i < numThreads.length; i++) {

      final int numDocumentsToIndex = 50 + atLeast(30);
      AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
      Directory dir = newDirectory();
      MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy();
      IndexWriterConfig iwc =
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
              .setFlushPolicy(flushPolicy);

      final int numDWPT = 1 + atLeast(2);
      DocumentsWriterPerThreadPool threadPool =
          new ThreadAffinityDocumentsWriterThreadPool(numDWPT);
      iwc.setIndexerThreadPool(threadPool);
      iwc.setMaxBufferedDocs(2 + atLeast(10));
      iwc.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      iwc.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      IndexWriter writer = new IndexWriter(dir, iwc);
      flushPolicy = (MockDefaultFlushPolicy) writer.getConfig().getFlushPolicy();
      assertTrue(flushPolicy.flushOnDocCount());
      assertFalse(flushPolicy.flushOnDeleteTerms());
      assertFalse(flushPolicy.flushOnRAM());
      DocumentsWriter docsWriter = writer.getDocsWriter();
      assertNotNull(docsWriter);
      DocumentsWriterFlushControl flushControl = docsWriter.flushControl;
      assertEquals(" bytes must be 0 after init", 0, flushControl.flushBytes());

      IndexThread[] threads = new IndexThread[numThreads[i]];
      for (int x = 0; x < threads.length; x++) {
        threads[x] = new IndexThread(numDocs, numThreads[i], writer, lineDocFile, false);
        threads[x].start();
      }

      for (int x = 0; x < threads.length; x++) {
        threads[x].join();
      }

      assertEquals(
          " all flushes must be due numThreads=" + numThreads[i], 0, flushControl.flushBytes());
      assertEquals(numDocumentsToIndex, writer.numDocs());
      assertEquals(numDocumentsToIndex, writer.maxDoc());
      assertTrue(
          "peak bytes without flush exceeded watermark",
          flushPolicy.peakDocCountWithoutFlush <= iwc.getMaxBufferedDocs());
      assertActiveBytesAfter(flushControl);
      writer.close();
      assertEquals(0, flushControl.activeBytes());
      dir.close();
    }
  }
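
  // MockDefaultFlushPolicy is not shown in this excerpt. Judging from the
  // fields the assertions read (peakBytesWithoutFlush, peakDocCountWithoutFlush,
  // hasMarkedPending), it extends FlushByRamOrCountsPolicy and records
  // high-water marks seen while no flush was pending. This is a rough sketch
  // against the 4.x flush-policy internals; the ThreadState accessors used here
  // (isFlushPending, dwpt, bytesUsed, getNumDocsInRAM) are assumptions about
  // that API generation, not a copy of the original mock.
  static class MockDefaultFlushPolicy extends FlushByRamOrCountsPolicy {
    long peakBytesWithoutFlush = Integer.MIN_VALUE;
    long peakDocCountWithoutFlush = Integer.MIN_VALUE;
    boolean hasMarkedPending = false;

    @Override
    public void onInsert(DocumentsWriterFlushControl control,
        DocumentsWriterPerThreadPool.ThreadState state) {
      final boolean wasPending = state.isFlushPending();
      super.onInsert(control, state); // may mark this DWPT as flush-pending
      if (!wasPending && state.isFlushPending()) {
        hasMarkedPending = true;
      } else if (!state.isFlushPending() && state.dwpt != null) {
        peakBytesWithoutFlush = Math.max(peakBytesWithoutFlush, state.dwpt.bytesUsed());
        peakDocCountWithoutFlush =
            Math.max(peakDocCountWithoutFlush, state.dwpt.getNumDocsInRAM());
      }
    }
  }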
  public void testStallControl() throws InterruptedException, IOException {

    int[] numThreads = new int[] {4 + random().nextInt(8), 1};
    final int numDocumentsToIndex = 50 + random().nextInt(50);
    for (int i = 0; i < numThreads.length; i++) {
      AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
      MockDirectoryWrapper dir = newMockDirectory();
      // sometimes mock a very slow hard disk here so that flushing is very slow
      dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES);
      IndexWriterConfig iwc =
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
      iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      iwc.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      FlushPolicy flushPolicy = new FlushByRamOrCountsPolicy();
      iwc.setFlushPolicy(flushPolicy);

      DocumentsWriterPerThreadPool threadPool =
          new ThreadAffinityDocumentsWriterThreadPool(numThreads[i] == 1 ? 1 : 2);
      iwc.setIndexerThreadPool(threadPool);
      // with such a small RAM buffer we should be stalled quite quickly
      iwc.setRAMBufferSizeMB(0.25);
      IndexWriter writer = new IndexWriter(dir, iwc);
      IndexThread[] threads = new IndexThread[numThreads[i]];
      for (int x = 0; x < threads.length; x++) {
        threads[x] = new IndexThread(numDocs, numThreads[i], writer, lineDocFile, false);
        threads[x].start();
      }

      for (int x = 0; x < threads.length; x++) {
        threads[x].join();
      }
      DocumentsWriter docsWriter = writer.getDocsWriter();
      assertNotNull(docsWriter);
      DocumentsWriterFlushControl flushControl = docsWriter.flushControl;
      assertEquals(" all flushes must be due", 0, flushControl.flushBytes());
      assertEquals(numDocumentsToIndex, writer.numDocs());
      assertEquals(numDocumentsToIndex, writer.maxDoc());
      if (numThreads[i] == 1) {
        assertFalse(
            "single thread must not block numThreads: " + numThreads[i],
            docsWriter.flushControl.stallControl.hasBlocked());
      }
      if (docsWriter.flushControl.peakNetBytes
          > (2.d * iwc.getRAMBufferSizeMB() * 1024.d * 1024.d)) {
        assertTrue(docsWriter.flushControl.stallControl.wasStalled());
      }
      assertActiveBytesAfter(flushControl);
      writer.close(true);
      dir.close();
    }
  }
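
  // assertActiveBytesAfter is not shown in this excerpt. It evidently
  // cross-checks flushControl.activeBytes() against the bytes actually held by
  // the per-thread writers once all indexing threads have finished. A sketch
  // against the 4.x flush-control internals (allActiveThreadStates() and the
  // ThreadState.dwpt field are assumptions about that API generation):
  protected void assertActiveBytesAfter(DocumentsWriterFlushControl flushControl) {
    Iterator<DocumentsWriterPerThreadPool.ThreadState> allActiveThreads =
        flushControl.allActiveThreadStates();
    long bytesUsed = 0;
    while (allActiveThreads.hasNext()) {
      DocumentsWriterPerThreadPool.ThreadState threadState = allActiveThreads.next();
      if (threadState.dwpt != null) {
        bytesUsed += threadState.dwpt.bytesUsed();
      }
    }
    assertEquals(bytesUsed, flushControl.activeBytes());
  }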
Example #7
  // case 4: tail segments, invariants hold, copy, invariants do not hold
  public void testMergeAfterCopy() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux, true);

    IndexWriterConfig dontMergeConfig =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    @SuppressWarnings("resource")
    IndexWriter writer = new IndexWriter(aux, dontMergeConfig);
    for (int i = 0; i < 20; i++) {
      writer.deleteDocuments(new Term("id", "" + i));
    }
    writer.close();
    IndexReader reader = DirectoryReader.open(aux);
    assertEquals(10, reader.numDocs());
    reader.close();

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(4)
                .setMergePolicy(newLogMergePolicy(4)));

    if (VERBOSE) {
      System.out.println("\nTEST: now addIndexes");
    }
    writer.addIndexes(
        aux, new MockDirectoryWrapper(random(), new RAMDirectory(aux, newIOContext(random()))));
    assertEquals(1020, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
    dir.close();
    aux.close();
  }
Example #8
  public void testCrashAfterReopen() throws IOException {
    IndexWriter writer = initIndex(random(), false);
    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
    writer.close();
    writer = initIndex(random(), dir, false);
    assertEquals(314, writer.maxDoc());
    crash(writer);

    /*
    System.out.println("\n\nTEST: open reader");
    String[] l = dir.list();
    Arrays.sort(l);
    for(int i=0;i<l.length;i++)
      System.out.println("file " + i + " = " + l[i] + " " +
    dir.fileLength(l[i]) + " bytes");
    */

    IndexReader reader = DirectoryReader.open(dir);
    assertTrue(reader.numDocs() >= 157);
    reader.close();
    dir.close();
  }
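
  // initIndex and crash are not shown in this excerpt. The assertions above
  // (314 == 2 * 157 documents; at least 157 survive the crash) imply that each
  // initIndex call appends 157 documents and that crash simulates power loss
  // through MockDirectoryWrapper. A hedged sketch; the config details and the
  // document layout are assumptions:
  private IndexWriter initIndex(Random random, MockDirectoryWrapper dir, boolean initialCommit)
      throws IOException {
    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
                .setMaxBufferedDocs(10));
    if (initialCommit) {
      writer.commit();
    }
    Document doc = new Document();
    doc.add(newTextField("content", "aaa", Field.Store.NO));
    for (int i = 0; i < 157; i++) {
      writer.addDocument(doc);
    }
    return writer;
  }

  private IndexWriter initIndex(Random random, boolean initialCommit) throws IOException {
    return initIndex(random, newMockDirectory(), initialCommit);
  }

  private void crash(IndexWriter writer) throws IOException {
    MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
    dir.crash(); // drop all unsynced files, as if the machine lost power
  }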
Example #9
  public void testSimpleCase() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // two auxiliary directories
    Directory aux = newDirectory();
    Directory aux2 = newDirectory();

    IndexWriter writer = null;

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE));
    // add 100 documents
    addDocs(writer, 100);
    assertEquals(100, writer.maxDoc());
    writer.close();
    TestUtil.checkIndex(dir);

    writer =
        newWriter(
            aux,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setMergePolicy(newLogMergePolicy(false)));
    // add 40 documents in separate files
    addDocs(writer, 40);
    assertEquals(40, writer.maxDoc());
    writer.close();

    writer =
        newWriter(
            aux2,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE));
    // add 50 documents in compound files
    addDocs2(writer, 50);
    assertEquals(50, writer.maxDoc());
    writer.close();

    // test doc count before segments are merged
    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));
    assertEquals(100, writer.maxDoc());
    writer.addIndexes(aux, aux2);
    assertEquals(190, writer.maxDoc());
    writer.close();
    TestUtil.checkIndex(dir);

    // make sure the old index is correct
    verifyNumDocs(aux, 40);

    // make sure the new index is correct
    verifyNumDocs(dir, 190);

    // now add another set in.
    Directory aux3 = newDirectory();
    writer =
        newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    // add 40 documents
    addDocs(writer, 40);
    assertEquals(40, writer.maxDoc());
    writer.close();

    // test doc count before segments are merged
    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));
    assertEquals(190, writer.maxDoc());
    writer.addIndexes(aux3);
    assertEquals(230, writer.maxDoc());
    writer.close();

    // make sure the new index is correct
    verifyNumDocs(dir, 230);

    verifyTermDocs(dir, new Term("content", "aaa"), 180);

    verifyTermDocs(dir, new Term("content", "bbb"), 50);

    // now fully merge it.
    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));
    writer.forceMerge(1);
    writer.close();

    // make sure the new index is correct
    verifyNumDocs(dir, 230);

    verifyTermDocs(dir, new Term("content", "aaa"), 180);

    verifyTermDocs(dir, new Term("content", "bbb"), 50);

    // now add a single document
    Directory aux4 = newDirectory();
    writer =
        newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
    addDocs2(writer, 1);
    writer.close();

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));
    assertEquals(230, writer.maxDoc());
    writer.addIndexes(aux4);
    assertEquals(231, writer.maxDoc());
    writer.close();

    verifyNumDocs(dir, 231);

    verifyTermDocs(dir, new Term("content", "bbb"), 51);
    dir.close();
    aux.close();
    aux2.close();
    aux3.close();
    aux4.close();
  }
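
  // addDocs2 and verifyTermDocs are not shown in this excerpt. The term counts
  // asserted above (180 docs with "aaa", 50 with "bbb") imply that addDocs
  // writes "aaa" and addDocs2 writes "bbb" into the "content" field, and that
  // verifyTermDocs counts the postings for a single term. Sketches follow; the
  // MultiFields-based postings walk is an assumption in the spirit of the 4.x
  // API:
  private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(newTextField("content", "bbb", Field.Store.NO));
      writer.addDocument(doc);
    }
  }

  private void verifyTermDocs(Directory dir, Term term, int numDocs) throws IOException {
    IndexReader reader = DirectoryReader.open(dir);
    DocsEnum docsEnum =
        MultiFields.getTermDocsEnum(reader, MultiFields.getLiveDocs(reader),
            term.field(), term.bytes());
    assertNotNull(docsEnum);
    int count = 0;
    while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    assertEquals(numDocs, count);
    reader.close();
  }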
Example #10
  // case 5: tail segments, invariants do not hold
  public void testMoreMerges() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();
    Directory aux2 = newDirectory();

    setUpDirs(dir, aux, true);

    IndexWriter writer =
        newWriter(
            aux2,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setMaxBufferedDocs(100)
                .setMergePolicy(newLogMergePolicy(10)));
    writer.addIndexes(aux);
    assertEquals(30, writer.maxDoc());
    assertEquals(3, writer.getSegmentCount());
    writer.close();

    IndexWriterConfig dontMergeConfig =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    writer = new IndexWriter(aux, dontMergeConfig);
    for (int i = 0; i < 27; i++) {
      writer.deleteDocuments(new Term("id", "" + i));
    }
    writer.close();
    IndexReader reader = DirectoryReader.open(aux);
    assertEquals(3, reader.numDocs());
    reader.close();

    dontMergeConfig =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    writer = new IndexWriter(aux2, dontMergeConfig);
    for (int i = 0; i < 8; i++) {
      writer.deleteDocuments(new Term("id", "" + i));
    }
    writer.close();
    reader = DirectoryReader.open(aux2);
    assertEquals(22, reader.numDocs());
    reader.close();

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(6)
                .setMergePolicy(newLogMergePolicy(4)));

    writer.addIndexes(aux, aux2);
    assertEquals(1040, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
    dir.close();
    aux.close();
    aux2.close();
  }
Example #11
  public void testSimpleCaseCustomCodec() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // two auxiliary directories
    Directory aux = newDirectory();
    Directory aux2 = newDirectory();
    Codec codec = new CustomPerFieldCodec();
    IndexWriter writer = null;

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setCodec(codec));
    // add 100 documents
    addDocsWithID(writer, 100, 0);
    assertEquals(100, writer.maxDoc());
    writer.commit();
    writer.close();
    TestUtil.checkIndex(dir);

    writer =
        newWriter(
            aux,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setCodec(codec)
                .setMaxBufferedDocs(10)
                .setMergePolicy(newLogMergePolicy(false)));
    // add 40 documents in separate files
    addDocs(writer, 40);
    assertEquals(40, writer.maxDoc());
    writer.commit();
    writer.close();

    writer =
        newWriter(
            aux2,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setCodec(codec));
    // add 50 documents in compound files
    addDocs2(writer, 50);
    assertEquals(50, writer.maxDoc());
    writer.commit();
    writer.close();

    // test doc count before segments are merged
    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setCodec(codec));
    assertEquals(100, writer.maxDoc());
    writer.addIndexes(aux, aux2);
    assertEquals(190, writer.maxDoc());
    writer.close();

    dir.close();
    aux.close();
    aux2.close();
  }
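
  // CustomPerFieldCodec is not shown in this excerpt. It is presumably a
  // per-field codec that routes some fields to a non-default postings format.
  // A sketch along the lines of the 4.x per-field codec API; the Lucene46Codec
  // base class and the "SimpleText" format name are assumptions that depend on
  // the exact Lucene version:
  private static final class CustomPerFieldCodec extends Lucene46Codec {
    private final PostingsFormat simpleText = PostingsFormat.forName("SimpleText");

    @Override
    public PostingsFormat getPostingsFormatForField(String field) {
      // route the id field to SimpleText, everything else to the default format
      return "id".equals(field) ? simpleText : super.getPostingsFormatForField(field);
    }
  }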
  // LUCENE-325: test forceMergeDeletes when two singular merges are required
  public void testForceMergeDeletes() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setMaxBufferedDocs(2)
                .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
    Document document = new Document();

    FieldType customType = new FieldType();
    customType.setStored(true);

    FieldType customType1 = new FieldType(TextField.TYPE_STORED);
    customType1.setTokenized(false);
    customType1.setStoreTermVectors(true);
    customType1.setStoreTermVectorPositions(true);
    customType1.setStoreTermVectorOffsets(true);

    Field idField = newStringField("id", "", Field.Store.NO);
    document.add(idField);
    Field storedField = newField("stored", "stored", customType);
    document.add(storedField);
    Field termVectorField = newField("termVector", "termVector", customType1);
    document.add(termVectorField);
    for (int i = 0; i < 10; i++) {
      idField.setStringValue("" + i);
      writer.addDocument(document);
    }
    writer.close();

    IndexReader ir = DirectoryReader.open(dir);
    assertEquals(10, ir.maxDoc());
    assertEquals(10, ir.numDocs());
    ir.close();

    IndexWriterConfig dontMergeConfig =
        new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE);
    writer = new IndexWriter(dir, dontMergeConfig);
    writer.deleteDocuments(new Term("id", "0"));
    writer.deleteDocuments(new Term("id", "7"));
    writer.close();

    ir = DirectoryReader.open(dir);
    assertEquals(8, ir.numDocs());
    ir.close();

    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
    assertEquals(8, writer.numDocs());
    assertEquals(10, writer.maxDoc());
    writer.forceMergeDeletes();
    assertEquals(8, writer.numDocs());
    writer.close();
    ir = DirectoryReader.open(dir);
    assertEquals(8, ir.maxDoc());
    assertEquals(8, ir.numDocs());
    ir.close();
    dir.close();
  }

  public void testRandom() throws IOException, InterruptedException {
    final int numThreads = 1 + random().nextInt(8);
    final int numDocumentsToIndex = 50 + atLeast(70);
    AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy();
    iwc.setFlushPolicy(flushPolicy);

    final int numDWPT = 1 + random().nextInt(8);
    DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool(numDWPT);
    iwc.setIndexerThreadPool(threadPool);

    IndexWriter writer = new IndexWriter(dir, iwc);
    flushPolicy = (MockDefaultFlushPolicy) writer.getConfig().getFlushPolicy();
    DocumentsWriter docsWriter = writer.getDocsWriter();
    assertNotNull(docsWriter);
    DocumentsWriterFlushControl flushControl = docsWriter.flushControl;

    assertEquals(" bytes must be 0 after init", 0, flushControl.flushBytes());

    IndexThread[] threads = new IndexThread[numThreads];
    for (int x = 0; x < threads.length; x++) {
      threads[x] = new IndexThread(numDocs, numThreads, writer, lineDocFile, true);
      threads[x].start();
    }

    for (int x = 0; x < threads.length; x++) {
      threads[x].join();
    }
    assertEquals(" all flushes must be due", 0, flushControl.flushBytes());
    assertEquals(numDocumentsToIndex, writer.numDocs());
    assertEquals(numDocumentsToIndex, writer.maxDoc());
    if (flushPolicy.flushOnRAM()
        && !flushPolicy.flushOnDocCount()
        && !flushPolicy.flushOnDeleteTerms()) {
      final long maxRAMBytes = (long) (iwc.getRAMBufferSizeMB() * 1024. * 1024.);
      assertTrue(
          "peak bytes without flush exceeded watermark",
          flushPolicy.peakBytesWithoutFlush <= maxRAMBytes);
      if (flushPolicy.hasMarkedPending) {
        assertTrue(
            "max: " + maxRAMBytes + " " + flushControl.peakActiveBytes,
            maxRAMBytes <= flushControl.peakActiveBytes);
      }
    }
    assertActiveBytesAfter(flushControl);
    writer.commit();
    assertEquals(0, flushControl.activeBytes());
    IndexReader r = DirectoryReader.open(dir);
    assertEquals(numDocumentsToIndex, r.numDocs());
    assertEquals(numDocumentsToIndex, r.maxDoc());
    if (!flushPolicy.flushOnRAM()) {
      assertFalse(
          "never stall if we don't flush on RAM",
          docsWriter.flushControl.stallControl.wasStalled());
      assertFalse(
          "never block if we don't flush on RAM",
          docsWriter.flushControl.stallControl.hasBlocked());
    }
    r.close();
    writer.close();
    dir.close();
  }