private void crash(final IndexWriter writer) throws IOException {
   final MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
   ConcurrentMergeScheduler cms =
       (ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler();
   cms.sync();
   dir.crash();
   cms.sync();
   dir.clearCrash();
 }
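The helper above syncs the ConcurrentMergeScheduler before and after MockDirectoryWrapper.crash() so no merge thread is mid-write while unsynced files are dropped. Below is a minimal sketch of how such a helper might be driven; the test body is hypothetical, not part of the example above:

  // Hypothetical caller for the crash() helper (a sketch, not the original test):
  public void testIndexSurvivesCrash() throws IOException {
    MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter writer =
        new IndexWriter(
            dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    for (int i = 0; i < 10; i++) {
      writer.addDocument(new Document());
    }
    writer.commit();
    crash(writer); // drop unsynced files, as a power loss would
    // The last committed generation must still open cleanly:
    IndexReader.open(dir, true).close();
    dir.close();
  }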
Example #2
  @Override
  protected void tearDown() throws Exception {
    try {
      // this isn't as useful as calling directly from the scope where the
      // index readers are used, because they could be gc'ed just before
      // tearDown is called.
      // But it's better than nothing.
      assertSaneFieldCaches(getTestLabel());

      if (ConcurrentMergeScheduler.anyUnhandledExceptions()) {
        // Clear the failure so that we don't just keep
        // failing subsequent test cases
        ConcurrentMergeScheduler.clearUnhandledExceptions();
        fail("ConcurrentMergeScheduler hit unhandled exceptions");
      }
    } finally {
      purgeFieldCache(FieldCache.DEFAULT);
    }

    super.tearDown();
  }
Example #3
 @Override
 protected void setUp() throws Exception {
   super.setUp();
   ConcurrentMergeScheduler.setTestMode();
 }
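Example #3's setTestMode() is what arms the check in Example #2's tearDown(): in this 3.x-era test framework, anyUnhandledExceptions() only works once test mode has been enabled. A sketch of the pairing in one place (JUnit 3 style, matching the examples; assumes a LuceneTestCase subclass):

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    ConcurrentMergeScheduler.setTestMode(); // begin tracking merge-thread exceptions
  }

  @Override
  protected void tearDown() throws Exception {
    if (ConcurrentMergeScheduler.anyUnhandledExceptions()) {
      ConcurrentMergeScheduler.clearUnhandledExceptions(); // don't fail later tests too
      fail("ConcurrentMergeScheduler hit unhandled exceptions");
    }
    super.tearDown();
  }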
  /*
   * Make sure IndexWriter cleans up on hitting a disk
   * full exception in addDocument.
   * TODO: how to do this on windows with FSDirectory?
   */
  public void testAddDocumentOnDiskFull() throws IOException {

    for (int pass = 0; pass < 2; pass++) {
      if (VERBOSE) {
        System.out.println("TEST: pass=" + pass);
      }
      boolean doAbort = pass == 1;
      long diskFree = _TestUtil.nextInt(random, 100, 300);
      while (true) {
        if (VERBOSE) {
          System.out.println("TEST: cycle: diskFree=" + diskFree);
        }
        MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
        dir.setMaxSizeInBytes(diskFree);
        IndexWriter writer =
            new IndexWriter(
                dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
        writer.setInfoStream(VERBOSE ? System.out : null);
        MergeScheduler ms = writer.getConfig().getMergeScheduler();
        if (ms instanceof ConcurrentMergeScheduler) {
          // This test intentionally produces exceptions
          // in the threads that CMS launches; we don't
          // want to pollute test output with these.
          ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
        }

        boolean hitError = false;
        try {
          for (int i = 0; i < 200; i++) {
            addDoc(writer);
          }
          if (VERBOSE) {
            System.out.println("TEST: done adding docs; now commit");
          }
          writer.commit();
        } catch (IOException e) {
          if (VERBOSE) {
            System.out.println("TEST: exception on addDoc");
            e.printStackTrace(System.out);
          }
          hitError = true;
        }

        if (hitError) {
          if (doAbort) {
            if (VERBOSE) {
              System.out.println("TEST: now rollback");
            }
            writer.rollback();
          } else {
            try {
              if (VERBOSE) {
                System.out.println("TEST: now close");
              }
              writer.close();
            } catch (IOException e) {
              if (VERBOSE) {
                System.out.println("TEST: exception on close; retry w/ no disk space limit");
                e.printStackTrace(System.out);
              }
              dir.setMaxSizeInBytes(0);
              writer.close();
            }
          }

          // _TestUtil.syncConcurrentMerges(ms);

          if (_TestUtil.anyFilesExceptWriteLock(dir)) {
            assertNoUnreferencedFiles(dir, "after disk full during addDocument");

            // Make sure reader can open the index:
            IndexReader.open(dir, true).close();
          }

          dir.close();
          // Now try again w/ more space:

          diskFree +=
              TEST_NIGHTLY
                  ? _TestUtil.nextInt(random, 400, 600)
                  : _TestUtil.nextInt(random, 3000, 5000);
        } else {
          // _TestUtil.syncConcurrentMerges(writer);
          dir.setMaxSizeInBytes(0);
          writer.close();
          dir.close();
          break;
        }
      }
    }
  }
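Stripped of its retry loop and abort/close branches, the disk-full pattern in the test above reduces to a few moves: cap the MockDirectoryWrapper, suppress CMS merge-thread exceptions, expect an IOException, then lift the cap before closing. A condensed, hypothetical sketch:

  MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
  dir.setMaxSizeInBytes(200); // tiny budget so addDocument soon hits "disk full"
  IndexWriter writer =
      new IndexWriter(
          dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  MergeScheduler ms = writer.getConfig().getMergeScheduler();
  if (ms instanceof ConcurrentMergeScheduler) {
    ((ConcurrentMergeScheduler) ms).setSuppressExceptions(); // failures are expected
  }
  try {
    for (int i = 0; i < 100; i++) {
      writer.addDocument(new Document());
    }
  } catch (IOException e) {
    // expected once the simulated disk fills up
  }
  dir.setMaxSizeInBytes(0); // 0 = unlimited, so close() can flush safely
  writer.close();
  dir.close();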
  /*
  Test: make sure when we run out of disk space or hit
  random IOExceptions in any of the addIndexes(*) calls
  that 1) index is not corrupt (searcher can open/search
  it) and 2) transactional semantics are followed:
  either all or none of the incoming documents were in
  fact added.
   */
  public void testAddIndexOnDiskFull() throws IOException {
    int START_COUNT = 57;
    int NUM_DIR = 50;
    int END_COUNT = START_COUNT + NUM_DIR * 25;

    // Build up a bunch of dirs that have indexes which we
    // will then merge together by calling addIndexes(*):
    Directory[] dirs = new Directory[NUM_DIR];
    long inputDiskUsage = 0;
    for (int i = 0; i < NUM_DIR; i++) {
      dirs[i] = newDirectory();
      IndexWriter writer =
          new IndexWriter(
              dirs[i], newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      for (int j = 0; j < 25; j++) {
        addDocWithIndex(writer, 25 * i + j);
      }
      writer.close();
      String[] files = dirs[i].listAll();
      for (int j = 0; j < files.length; j++) {
        inputDiskUsage += dirs[i].fileLength(files[j]);
      }
    }

    // Now, build a starting index that has START_COUNT docs.  We
    // will then try to addIndexes into a copy of this:
    MockDirectoryWrapper startDir = newDirectory();
    IndexWriter writer =
        new IndexWriter(
            startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    for (int j = 0; j < START_COUNT; j++) {
      addDocWithIndex(writer, j);
    }
    writer.close();

    // Make sure starting index seems to be working properly:
    Term searchTerm = new Term("content", "aaa");
    IndexReader reader = IndexReader.open(startDir, true);
    assertEquals("first docFreq", 57, reader.docFreq(searchTerm));

    IndexSearcher searcher = newSearcher(reader);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("first number of hits", 57, hits.length);
    searcher.close();
    reader.close();

    // Iterate with larger and larger amounts of free
    // disk space.  With little free disk space,
    // addIndexes will certainly run out of space &
    // fail.  Verify that when this happens, index is
    // not corrupt and index in fact has added no
    // documents.  Then, we increase disk space each
    // iteration.  At some point there is
    // enough free disk space and addIndexes should
    // succeed and index should show all documents were
    // added.

    // String[] files = startDir.listAll();
    long diskUsage = startDir.sizeInBytes();

    long startDiskUsage = 0;
    String[] files = startDir.listAll();
    for (int i = 0; i < files.length; i++) {
      startDiskUsage += startDir.fileLength(files[i]);
    }

    for (int iter = 0; iter < 3; iter++) {

      if (VERBOSE) System.out.println("TEST: iter=" + iter);

      // Start with 50-200 bytes more than we are currently using:
      long diskFree = diskUsage + _TestUtil.nextInt(random, 50, 200);

      int method = iter;

      boolean success = false;
      boolean done = false;

      String methodName;
      if (0 == method) {
        methodName = "addIndexes(Directory[]) + optimize()";
      } else if (1 == method) {
        methodName = "addIndexes(IndexReader[])";
      } else {
        methodName = "addIndexes(Directory[])";
      }

      while (!done) {
        if (VERBOSE) {
          System.out.println("TEST: cycle...");
        }

        // Make a new dir that will enforce disk usage:
        MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
        writer =
            new IndexWriter(
                dir,
                newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
                    .setOpenMode(OpenMode.APPEND)
                    .setMergePolicy(newLogMergePolicy()));
        IOException err = null;
        writer.setInfoStream(VERBOSE ? System.out : null);

        MergeScheduler ms = writer.getConfig().getMergeScheduler();
        for (int x = 0; x < 2; x++) {
          if (ms instanceof ConcurrentMergeScheduler) {
            // This test intentionally produces exceptions
            // in the threads that CMS launches; we don't
            // want to pollute test output with these.
            if (0 == x) {
              ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
            } else {
              ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
            }
          }

          // Two loops: first time, limit disk space &
          // throw random IOExceptions; second time, no
          // disk space limit:

          double rate = 0.05;
          double diskRatio = ((double) diskFree) / diskUsage;
          long thisDiskFree;

          String testName = null;

          if (0 == x) {
            thisDiskFree = diskFree;
            if (diskRatio >= 2.0) {
              rate /= 2;
            }
            if (diskRatio >= 4.0) {
              rate /= 2;
            }
            if (diskRatio >= 6.0) {
              rate = 0.0;
            }
            if (VERBOSE)
              testName =
                  "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
          } else {
            thisDiskFree = 0;
            rate = 0.0;
            if (VERBOSE) testName = "disk full test " + methodName + " with unlimited disk space";
          }

          if (VERBOSE) System.out.println("\ncycle: " + testName);

          dir.setTrackDiskUsage(true);
          dir.setMaxSizeInBytes(thisDiskFree);
          dir.setRandomIOExceptionRate(rate);

          try {

            if (0 == method) {
              writer.addIndexes(dirs);
              writer.optimize();
            } else if (1 == method) {
              IndexReader readers[] = new IndexReader[dirs.length];
              for (int i = 0; i < dirs.length; i++) {
                readers[i] = IndexReader.open(dirs[i], true);
              }
              try {
                writer.addIndexes(readers);
              } finally {
                for (int i = 0; i < dirs.length; i++) {
                  readers[i].close();
                }
              }
            } else {
              writer.addIndexes(dirs);
            }

            success = true;
            if (VERBOSE) {
              System.out.println("  success!");
            }

            if (0 == x) {
              done = true;
            }

          } catch (IOException e) {
            success = false;
            err = e;
            if (VERBOSE) {
              System.out.println("  hit IOException: " + e);
              e.printStackTrace(System.out);
            }

            if (1 == x) {
              e.printStackTrace(System.out);
              fail(methodName + " hit IOException after disk space was freed up");
            }
          }

          // Make sure all threads from
          // ConcurrentMergeScheduler are done
          _TestUtil.syncConcurrentMerges(writer);

          if (VERBOSE) {
            System.out.println("  now test readers");
          }

          // Finally, verify index is not corrupt, and, if
          // we succeeded, we see all docs added, and if we
          // failed, we see either all docs or no docs added
          // (transactional semantics):
          try {
            reader = IndexReader.open(dir, true);
          } catch (IOException e) {
            e.printStackTrace(System.out);
            fail(testName + ": exception when creating IndexReader: " + e);
          }
          int result = reader.docFreq(searchTerm);
          if (success) {
            if (result != START_COUNT) {
              fail(
                  testName
                      + ": method did not throw exception but docFreq('aaa') is "
                      + result
                      + " instead of expected "
                      + START_COUNT);
            }
          } else {
            // On hitting exception we still may have added
            // all docs:
            if (result != START_COUNT && result != END_COUNT) {
              err.printStackTrace(System.out);
              fail(
                  testName
                      + ": method did throw exception but docFreq('aaa') is "
                      + result
                      + " instead of expected "
                      + START_COUNT
                      + " or "
                      + END_COUNT);
            }
          }

          searcher = newSearcher(reader);
          try {
            hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
          } catch (IOException e) {
            e.printStackTrace(System.out);
            fail(testName + ": exception when searching: " + e);
          }
          int result2 = hits.length;
          if (success) {
            if (result2 != result) {
              fail(
                  testName
                      + ": method did not throw exception but hits.length for search on term 'aaa' is "
                      + result2
                      + " instead of expected "
                      + result);
            }
          } else {
            // On hitting exception we still may have added
            // all docs:
            if (result2 != result) {
              err.printStackTrace(System.out);
              fail(
                  testName
                      + ": method did throw exception but hits.length for search on term 'aaa' is "
                      + result2
                      + " instead of expected "
                      + result);
            }
          }

          searcher.close();
          reader.close();
          if (VERBOSE) {
            System.out.println("  count is " + result);
          }

          if (done || result == END_COUNT) {
            break;
          }
        }

        if (VERBOSE) {
          System.out.println(
              "  start disk = "
                  + startDiskUsage
                  + "; input disk = "
                  + inputDiskUsage
                  + "; max used = "
                  + dir.getMaxUsedSizeInBytes());
        }

        if (done) {
          // Javadocs state that temp free Directory space
          // required is at most 2X total input size of
          // indices so let's make sure:
          assertTrue(
              "max free Directory space required exceeded 1X the total input index sizes during "
                  + methodName
                  + ": max temp usage = "
                  + (dir.getMaxUsedSizeInBytes() - startDiskUsage)
                  + " bytes vs limit="
                  + (2 * (startDiskUsage + inputDiskUsage))
                  + "; starting disk usage = "
                  + startDiskUsage
                  + " bytes; "
                  + "input index disk usage = "
                  + inputDiskUsage
                  + " bytes",
              (dir.getMaxUsedSizeInBytes() - startDiskUsage)
                  < 2 * (startDiskUsage + inputDiskUsage));
        }

        // Make sure we don't hit disk full during close below:
        dir.setMaxSizeInBytes(0);
        dir.setRandomIOExceptionRate(0.0);

        writer.close();

        // Wait for all BG threads to finish else
        // dir.close() will throw IOException because
        // there are still open files
        _TestUtil.syncConcurrentMerges(ms);

        dir.close();

        // Try again with more free space:
        diskFree +=
            TEST_NIGHTLY
                ? _TestUtil.nextInt(random, 4000, 8000)
                : _TestUtil.nextInt(random, 40000, 80000);
      }
    }

    startDir.close();
    for (Directory dir : dirs) dir.close();
  }
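The doc comment before testAddIndexOnDiskFull promises two invariants: the index always opens, and a failed addIndexes leaves either all or none of the incoming documents visible. A hypothetical distillation of that check, reusing START_COUNT and END_COUNT from the test:

  boolean success = false;
  try {
    writer.addIndexes(dirs);
    success = true;
  } catch (IOException e) {
    // disk-full / random IOException injected by MockDirectoryWrapper
  }
  IndexReader r = IndexReader.open(dir, true); // invariant 1: never corrupt
  int freq = r.docFreq(new Term("content", "aaa"));
  // Invariant 2: the test above accepts only the starting count (nothing
  // visible to a fresh reader) or the full count (every incoming doc added):
  assertTrue(freq == START_COUNT || freq == END_COUNT);
  r.close();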
  public void testLiveMaxMergeCount() throws Exception {
    Directory d = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.setSegmentsPerTier(1000);
    tmp.setMaxMergeAtOnce(1000);
    tmp.setMaxMergeAtOnceExplicit(10);
    iwc.setMergePolicy(tmp);
    iwc.setMaxBufferedDocs(2);
    iwc.setRAMBufferSizeMB(-1);

    final AtomicInteger maxRunningMergeCount = new AtomicInteger();

    ConcurrentMergeScheduler cms =
        new ConcurrentMergeScheduler() {

          final AtomicInteger runningMergeCount = new AtomicInteger();

          @Override
          public void doMerge(MergePolicy.OneMerge merge) throws IOException {
            int count = runningMergeCount.incrementAndGet();
            // evil?
            synchronized (this) {
              if (count > maxRunningMergeCount.get()) {
                maxRunningMergeCount.set(count);
              }
            }
            try {
              super.doMerge(merge);
            } finally {
              runningMergeCount.decrementAndGet();
            }
          }
        };

    cms.setMaxMergesAndThreads(5, 3);

    iwc.setMergeScheduler(cms);

    IndexWriter w = new IndexWriter(d, iwc);
    // Makes 100 segments
    for (int i = 0; i < 200; i++) {
      w.addDocument(new Document());
    }

    // No merges should have run so far, because TMP has high segmentsPerTier:
    assertEquals(0, maxRunningMergeCount.get());

    w.forceMerge(1);

    // At most 5 merge threads should have launched at once:
    assertTrue("maxRunningMergeCount=" + maxRunningMergeCount, maxRunningMergeCount.get() <= 5);
    maxRunningMergeCount.set(0);

    // Makes another 100 segments
    for (int i = 0; i < 200; i++) {
      w.addDocument(new Document());
    }

    ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).setMaxMergesAndThreads(1, 1);
    w.forceMerge(1);

    // With the limit lowered to 1, exactly one merge thread should have run at a time:
    assertEquals(1, maxRunningMergeCount.get());

    w.close();
    d.close();
  }
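The "live" part of the test above is its last stretch: setMaxMergesAndThreads takes effect on the scheduler of an already-open writer, so the second forceMerge runs under the tightened limits. In isolation (a sketch reusing the test's writer w):

  ConcurrentMergeScheduler cms =
      (ConcurrentMergeScheduler) w.getConfig().getMergeScheduler();
  cms.setMaxMergesAndThreads(1, 1); // later merges run strictly one at a time
  w.forceMerge(1);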
 // Excerpt from a ConcurrentMergeScheduler subclass; totMergedBytes is a
 // field of the enclosing class that tallies the bytes processed by merges:
 long totMergedBytes;

 @Override
 public void doMerge(MergePolicy.OneMerge merge) throws IOException {
   totMergedBytes += merge.totalBytesSize();
   super.doMerge(merge);
 }
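A subclass like the excerpt above only takes effect if it is installed before the writer is built. Hypothetical wiring, using a java.util.concurrent.atomic.AtomicLong (my adaptation) so the tally stays reachable from the test:

  final AtomicLong totMergedBytes = new AtomicLong();
  ConcurrentMergeScheduler cms =
      new ConcurrentMergeScheduler() {
        @Override
        public void doMerge(MergePolicy.OneMerge merge) throws IOException {
          totMergedBytes.addAndGet(merge.totalBytesSize());
          super.doMerge(merge);
        }
      };
  IndexWriterConfig iwc =
      new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  iwc.setMergeScheduler(cms); // must happen before new IndexWriter(dir, iwc)
  IndexWriter w = new IndexWriter(dir, iwc);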
  // LUCENE-4544
  public void testMaxMergeCount() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));

    final int maxMergeCount = TestUtil.nextInt(random(), 1, 5);
    final int maxMergeThreads = TestUtil.nextInt(random(), 1, maxMergeCount);
    final CountDownLatch enoughMergesWaiting = new CountDownLatch(maxMergeCount);
    final AtomicInteger runningMergeCount = new AtomicInteger(0);
    final AtomicBoolean failed = new AtomicBoolean();

    if (VERBOSE) {
      System.out.println(
          "TEST: maxMergeCount=" + maxMergeCount + " maxMergeThreads=" + maxMergeThreads);
    }

    ConcurrentMergeScheduler cms =
        new ConcurrentMergeScheduler() {

          @Override
          protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
            try {
              // Stall all incoming merges until we see
              // maxMergeCount:
              int count = runningMergeCount.incrementAndGet();
              try {
                assertTrue(
                    "count=" + count + " vs maxMergeCount=" + maxMergeCount,
                    count <= maxMergeCount);
                enoughMergesWaiting.countDown();

                // Stall this merge until we see exactly
                // maxMergeCount merges waiting
                while (true) {
                  if (enoughMergesWaiting.await(10, TimeUnit.MILLISECONDS) || failed.get()) {
                    break;
                  }
                }
                // Then sleep a bit to give a chance for the bug
                // (too many pending merges) to appear:
                Thread.sleep(20);
                super.doMerge(merge);
              } finally {
                runningMergeCount.decrementAndGet();
              }
            } catch (Throwable t) {
              failed.set(true);
              writer.mergeFinish(merge);
              throw new RuntimeException(t);
            }
          }
        };
    cms.setMaxMergesAndThreads(maxMergeCount, maxMergeThreads);
    iwc.setMergeScheduler(cms);
    iwc.setMaxBufferedDocs(2);

    TieredMergePolicy tmp = new TieredMergePolicy();
    iwc.setMergePolicy(tmp);
    tmp.setMaxMergeAtOnce(2);
    tmp.setSegmentsPerTier(2);

    IndexWriter w = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.add(newField("field", "field", TextField.TYPE_NOT_STORED));
    while (enoughMergesWaiting.getCount() != 0 && !failed.get()) {
      for (int i = 0; i < 10; i++) {
        w.addDocument(doc);
      }
    }
    w.close(false);
    dir.close();
  }
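The test above (tagged LUCENE-4544) stalls merges with a latch until maxMergeCount of them are in flight, asserting the in-flight count never exceeds the cap. A stripped-down, hypothetical probe of the same invariant:

  final int cap = 3;
  final AtomicInteger running = new AtomicInteger();
  ConcurrentMergeScheduler cms =
      new ConcurrentMergeScheduler() {
        @Override
        public void doMerge(MergePolicy.OneMerge merge) throws IOException {
          try {
            // Never more than `cap` merges inside doMerge at once:
            assertTrue(running.incrementAndGet() <= cap);
            super.doMerge(merge);
          } finally {
            running.decrementAndGet();
          }
        }
      };
  cms.setMaxMergesAndThreads(cap, cap);
  iwc.setMergeScheduler(cms); // then index enough docs to force a merge backlog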