public void testMaxBufferedDeletes() throws IOException {
   Directory dir = new MockRAMDirectory();
   IndexWriter writer =
       new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
   writer.setMaxBufferedDeleteTerms(1);
   writer.deleteDocuments(new Term("foobar", "1"));
   writer.deleteDocuments(new Term("foobar", "1"));
   writer.deleteDocuments(new Term("foobar", "1"));
   assertEquals(3, writer.getFlushDeletesCount());
   writer.close();
   dir.close();
 }
  /*
  static boolean hasPendingDeletes(SegmentInfos infos) {
    for (SegmentInfo info : infos) {
      if (info.deletes.any()) {
        return true;
      }
    }
    return false;
  }
  */
  void part2(IndexWriter writer, RangeMergePolicy fsmp) throws Exception {
    for (int x = 20; x < 25; x++) {
      writer.addDocument(DocHelper.createDocument(x, "5", 2));
      // System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
    }
    writer.flush(false, false);
    for (int x = 25; x < 30; x++) {
      writer.addDocument(DocHelper.createDocument(x, "5", 2));
      // System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
    }
    writer.flush(false, false);

    // System.out.println("infos3:"+writer.segmentInfos);

    Term delterm = new Term("id", "8");
    writer.deleteDocuments(delterm);
    // System.out.println("segdels3:" + writer.docWriter.deletesToString());

    fsmp.doMerge = true;
    fsmp.start = 1;
    fsmp.length = 2;
    writer.maybeMerge();

    // deletes for info1, the newly created segment from the
    // merge should have no deletes because they were applied in
    // the merge
    // SegmentInfo info1 = writer.segmentInfos.info(1);
    // assertFalse(exists(info1, writer.docWriter.segmentDeletes));

    // System.out.println("infos4:"+writer.segmentInfos);
    // System.out.println("segdels4:" + writer.docWriter.deletesToString());
  }
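  // The calls to DocHelper.createDocument(n, indexName, numFields) above come from the Lucene
  // test framework. Below is a minimal stand-in with the assumed shape (field names other than
  // "id" are illustrative): it indexes "id" as a keyword so that
  // writer.deleteDocuments(new Term("id", ...)) in part2()/testDeletes1() can match.
  static Document createDocument(int n, String indexName, int numFields) {
    Document doc = new Document();
    doc.add(new StringField("id", Integer.toString(n), Field.Store.YES));
    doc.add(new StringField("indexname", indexName, Field.Store.YES));
    for (int i = 0; i < numFields; i++) {
      doc.add(new TextField("field" + (i + 1), English.intToEnglish(n) + " text", Field.Store.NO));
    }
    return doc;
  }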
  public void testTotalBytesSize() throws Exception {
    Directory d = newDirectory();
    if (d instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) d).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
    }
    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    iwc.setMaxBufferedDocs(5);
    iwc.setMergeScheduler(new TrackingCMS());
    if (TestUtil.getPostingsFormat("id").equals("SimpleText")) {
      // SimpleText is far too slow for this many docs/deletes; force a real postings format
      iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
    }
    IndexWriter w = new IndexWriter(d, iwc);
    for (int i = 0; i < 1000; i++) {
      Document doc = new Document();
      doc.add(new StringField("id", "" + i, Field.Store.NO));
      w.addDocument(doc);

      if (random().nextBoolean()) {
        w.deleteDocuments(new Term("id", "" + random().nextInt(i + 1)));
      }
    }
    assertTrue(((TrackingCMS) w.getConfig().getMergeScheduler()).totMergedBytes != 0);
    w.close();
    d.close();
  }
  // test the simple case
  public void testSimpleCase() throws IOException {
    String[] keywords = {"1", "2"};
    String[] unindexed = {"Netherlands", "Italy"};
    String[] unstored = {"Amsterdam has lots of bridges", "Venice has lots of canals"};
    String[] text = {"Amsterdam", "Venice"};

    Directory dir = new MockRAMDirectory();
    IndexWriter modifier =
        new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    modifier.setUseCompoundFile(true);
    modifier.setMaxBufferedDeleteTerms(1);

    for (int i = 0; i < keywords.length; i++) {
      Document doc = new Document();
      doc.add(new Field("id", keywords[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
      doc.add(new Field("country", unindexed[i], Field.Store.YES, Field.Index.NO));
      doc.add(new Field("contents", unstored[i], Field.Store.NO, Field.Index.ANALYZED));
      doc.add(new Field("city", text[i], Field.Store.YES, Field.Index.ANALYZED));
      modifier.addDocument(doc);
    }
    modifier.optimize();
    modifier.commit();

    Term term = new Term("city", "Amsterdam");
    int hitCount = getHitCount(dir, term);
    assertEquals(1, hitCount);
    modifier.deleteDocuments(term);
    modifier.commit();
    hitCount = getHitCount(dir, term);
    assertEquals(0, hitCount);

    modifier.close();
    dir.close();
  }
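  // Sketch of the getHitCount(dir, term) helper assumed by testSimpleCase() and testRAMDeletes():
  // open a read-only reader, run a TermQuery and return the number of hits. The exact body in
  // the original test class may differ slightly.
  private int getHitCount(Directory dir, Term term) throws IOException {
    IndexReader reader = IndexReader.open(dir, true);
    IndexSearcher searcher = new IndexSearcher(reader);
    int hitCount = searcher.search(new TermQuery(term), 1000).totalHits;
    searcher.close();
    reader.close();
    return hitCount;
  }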
  // LUCENE-2996: tests that addIndexes(IndexReader) applies existing deletes correctly.
  public void testExistingDeletes() throws Exception {
    Directory[] dirs = new Directory[2];
    for (int i = 0; i < dirs.length; i++) {
      dirs[i] = newDirectory();
      IndexWriterConfig conf =
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
      IndexWriter writer = new IndexWriter(dirs[i], conf);
      Document doc = new Document();
      doc.add(new StringField("id", "myid", Field.Store.NO));
      writer.addDocument(doc);
      writer.close();
    }

    IndexWriterConfig conf =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    IndexWriter writer = new IndexWriter(dirs[0], conf);

    // Now delete the document
    writer.deleteDocuments(new Term("id", "myid"));
    IndexReader r = DirectoryReader.open(dirs[1]);
    try {
      writer.addIndexes(r);
    } finally {
      r.close();
    }
    writer.commit();
    assertEquals(
        "Documents from the incoming index should not have been deleted", 1, writer.numDocs());
    writer.close();

    for (Directory dir : dirs) {
      dir.close();
    }
  }
  public void testBackToTheFuture() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));

    Document doc = new Document();
    doc.add(newStringField("foo", "bar", Field.Store.NO));
    iw.addDocument(doc);

    doc = new Document();
    doc.add(newStringField("foo", "baz", Field.Store.NO));
    iw.addDocument(doc);

    DirectoryReader r1 = DirectoryReader.open(iw, true);

    iw.deleteDocuments(new Term("foo", "baz"));
    DirectoryReader r2 = DirectoryReader.open(iw, true);

    FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r2), "foo");

    SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r1), "foo");
    assertEquals(2, v.getValueCount());
    v.setDocument(1);
    assertEquals(1, v.nextOrd());

    iw.close();
    r1.close();
    r2.close();
    dir.close();
  }
  /**
   * Remove a stale file (uidIter.term().text()) from the index database (and the xref file)
   *
   * @throws java.io.IOException if an error occurs
   */
  private void removeFile() throws IOException {
    String path = Util.uid2url(uidIter.term().utf8ToString());

    for (IndexChangedListener listener : listeners) {
      listener.fileRemove(path);
    }
    writer.deleteDocuments(new Term(QueryBuilder.U, uidIter.term()));
    writer.prepareCommit();
    writer.commit();

    File xrefFile;
    if (RuntimeEnvironment.getInstance().isCompressXref()) {
      xrefFile = new File(xrefDir, path + ".gz");
    } else {
      xrefFile = new File(xrefDir, path);
    }
    File parent = xrefFile.getParentFile();

    if (!xrefFile.delete() && xrefFile.exists()) {
      log.log(Level.INFO, "Failed to remove obsolete xref-file: {0}", xrefFile.getAbsolutePath());
    }

    // Remove the parent directory if it's empty
    if (parent.delete()) {
      log.log(Level.FINE, "Removed empty xref dir:{0}", parent.getAbsolutePath());
    }
    setDirty();
    for (IndexChangedListener listener : listeners) {
      listener.fileRemoved(path);
    }
  }
  // test when delete terms only apply to disk segments
  public void testNonRAMDelete() throws IOException {

    Directory dir = new MockRAMDirectory();
    IndexWriter modifier =
        new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    modifier.setMaxBufferedDocs(2);
    modifier.setMaxBufferedDeleteTerms(2);

    int id = 0;
    int value = 100;

    for (int i = 0; i < 7; i++) {
      addDoc(modifier, ++id, value);
    }
    modifier.commit();

    assertEquals(0, modifier.getNumBufferedDocuments());
    assertTrue(0 < modifier.getSegmentCount());

    modifier.commit();

    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();

    modifier.deleteDocuments(new Term("value", String.valueOf(value)));

    modifier.commit();

    reader = IndexReader.open(dir, true);
    assertEquals(0, reader.numDocs());
    reader.close();
    modifier.close();
    dir.close();
  }
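  // Sketch of the addDoc(modifier, id, value) helper used by testNonRAMDelete(),
  // testBatchDeletes() and testRAMDeletes(). The "id" and "value" field names are taken from the
  // delete terms in those tests; the extra "content" field is an assumption.
  private void addDoc(IndexWriter modifier, int id, int value) throws IOException {
    Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
    doc.add(new Field("id", String.valueOf(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field("value", String.valueOf(value), Field.Store.NO, Field.Index.NOT_ANALYZED));
    modifier.addDocument(doc);
  }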
  // LUCENE-325: test forceMergeDeletes without waiting, when
  // many adjacent merges are required
  public void testForceMergeDeletes3() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setMaxBufferedDocs(2)
                .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                .setMergePolicy(newLogMergePolicy(50)));

    FieldType customType = new FieldType();
    customType.setStored(true);

    FieldType customType1 = new FieldType(TextField.TYPE_NOT_STORED);
    customType1.setTokenized(false);
    customType1.setStoreTermVectors(true);
    customType1.setStoreTermVectorPositions(true);
    customType1.setStoreTermVectorOffsets(true);

    Document document = new Document();
    Field storedField = newField("stored", "stored", customType);
    document.add(storedField);
    Field termVectorField = newField("termVector", "termVector", customType1);
    document.add(termVectorField);
    Field idField = newStringField("id", "", Field.Store.NO);
    document.add(idField);
    for (int i = 0; i < 98; i++) {
      idField.setStringValue("" + i);
      writer.addDocument(document);
    }
    writer.close();

    IndexReader ir = DirectoryReader.open(dir);
    assertEquals(98, ir.maxDoc());
    assertEquals(98, ir.numDocs());
    ir.close();

    IndexWriterConfig dontMergeConfig =
        new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE);
    writer = new IndexWriter(dir, dontMergeConfig);
    for (int i = 0; i < 98; i += 2) {
      writer.deleteDocuments(new Term("id", "" + i));
    }
    writer.close();
    ir = DirectoryReader.open(dir);
    assertEquals(49, ir.numDocs());
    ir.close();

    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(3)));
    writer.forceMergeDeletes(false);
    writer.close();
    ir = DirectoryReader.open(dir);
    assertEquals(49, ir.maxDoc());
    assertEquals(49, ir.numDocs());
    ir.close();
    dir.close();
  }
 public void deleteByQuery() throws IOException {
   String idString = getIdString();
   if (VERBOSE) {
     System.out.println(Thread.currentThread().getName() + ": del query id:" + idString);
   }
   w.deleteDocuments(new TermQuery(new Term("id", idString)));
   docs.remove(idString);
 }
  // test that batched delete terms are flushed together
  public void testBatchDeletes() throws IOException {
    Directory dir = new MockRAMDirectory();
    IndexWriter modifier =
        new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    modifier.setMaxBufferedDocs(2);
    modifier.setMaxBufferedDeleteTerms(2);

    int id = 0;
    int value = 100;

    for (int i = 0; i < 7; i++) {
      addDoc(modifier, ++id, value);
    }
    modifier.commit();

    IndexReader reader = IndexReader.open(dir, true);
    assertEquals(7, reader.numDocs());
    reader.close();

    id = 0;
    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
    modifier.deleteDocuments(new Term("id", String.valueOf(++id)));

    modifier.commit();

    reader = IndexReader.open(dir, true);
    assertEquals(5, reader.numDocs());
    reader.close();

    Term[] terms = new Term[3];
    for (int i = 0; i < terms.length; i++) {
      terms[i] = new Term("id", String.valueOf(++id));
    }
    modifier.deleteDocuments(terms);
    modifier.commit();
    reader = IndexReader.open(dir, true);
    assertEquals(2, reader.numDocs());
    reader.close();

    modifier.close();
    dir.close();
  }
 /** {@inheritDoc} */
 public void pageRemoved(WikiPage page) {
   IndexWriter writer = null;
   try {
     Directory luceneDir = new SimpleFSDirectory(new File(m_luceneDirectory), null);
     writer = getIndexWriter(luceneDir);
     Query query = new TermQuery(new Term(LUCENE_ID, page.getName()));
     writer.deleteDocuments(query);
   } catch (Exception e) {
     log.error("Unable to remove page '" + page.getName() + "' from Lucene index", e);
   } finally {
     close(writer);
   }
 }
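 // pageRemoved() relies on a close(writer) helper that is not shown; a minimal sketch, assuming
 // it only needs to tolerate a null writer and log failures, could look like this:
 private void close(IndexWriter writer) {
   try {
     if (writer != null) {
       writer.close();
     }
   } catch (IOException e) {
     log.error("Unable to close Lucene index writer", e);
   }
 }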
  // test when delete terms only apply to ram segments
  public void testRAMDeletes() throws IOException {
    for (int t = 0; t < 2; t++) {
      Directory dir = new MockRAMDirectory();
      IndexWriter modifier =
          new IndexWriter(
              dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
      modifier.setMaxBufferedDocs(4);
      modifier.setMaxBufferedDeleteTerms(4);

      int id = 0;
      int value = 100;

      addDoc(modifier, ++id, value);
      if (0 == t) modifier.deleteDocuments(new Term("value", String.valueOf(value)));
      else modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));
      addDoc(modifier, ++id, value);
      if (0 == t) {
        modifier.deleteDocuments(new Term("value", String.valueOf(value)));
        assertEquals(2, modifier.getNumBufferedDeleteTerms());
        assertEquals(1, modifier.getBufferedDeleteTermsSize());
      } else modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value))));

      addDoc(modifier, ++id, value);
      assertEquals(0, modifier.getSegmentCount());
      modifier.commit();

      modifier.commit();

      IndexReader reader = IndexReader.open(dir, true);
      assertEquals(1, reader.numDocs());

      int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
      assertEquals(1, hitCount);
      reader.close();
      modifier.close();
      dir.close();
    }
  }
  public void testNoWaitClose() throws IOException {
    Directory directory = newDirectory();
    Document doc = new Document();
    Field idField = newStringField("id", "", Field.Store.YES);
    doc.add(idField);

    IndexWriter writer =
        new IndexWriter(
            directory,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(2)
                .setMergePolicy(newLogMergePolicy(100)));

    for (int iter = 0; iter < 10; iter++) {

      for (int j = 0; j < 201; j++) {
        idField.setStringValue(Integer.toString(iter * 201 + j));
        writer.addDocument(doc);
      }

      int delID = iter * 201;
      for (int j = 0; j < 20; j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }

      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
      writer.addDocument(doc);
      writer.commit();

      writer.close(false);

      IndexReader reader = DirectoryReader.open(directory);
      assertEquals((1 + iter) * 182, reader.numDocs());
      reader.close();

      // Reopen
      writer =
          new IndexWriter(
              directory,
              newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                  .setOpenMode(OpenMode.APPEND)
                  .setMergePolicy(newLogMergePolicy(100)));
    }
    writer.close();

    directory.close();
  }
  // Test that deletes committed after a merge started and
  // before it finishes are correctly merged back:
  public void testDeleteMerging() throws IOException {
    Directory directory = newDirectory();

    LogDocMergePolicy mp = new LogDocMergePolicy();
    // Force degenerate merging so we can get a mix of
    // merging of segments with and without deletes at the
    // start:
    mp.setMinMergeDocs(1000);
    IndexWriter writer =
        new IndexWriter(
            directory,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMergePolicy(mp));

    Document doc = new Document();
    Field idField = newStringField("id", "", Field.Store.YES);
    doc.add(idField);
    for (int i = 0; i < 10; i++) {
      if (VERBOSE) {
        System.out.println("\nTEST: cycle");
      }
      for (int j = 0; j < 100; j++) {
        idField.setStringValue(Integer.toString(i * 100 + j));
        writer.addDocument(doc);
      }

      int delID = i;
      while (delID < 100 * (1 + i)) {
        if (VERBOSE) {
          System.out.println("TEST: del " + delID);
        }
        writer.deleteDocuments(new Term("id", "" + delID));
        delID += 10;
      }

      writer.commit();
    }

    writer.close();
    IndexReader reader = DirectoryReader.open(directory);
    // Verify that we did not lose any deletes...
    assertEquals(450, reader.numDocs());
    reader.close();
    directory.close();
  }
  public static void deleteDocuments(String searchEngineId, long companyId, Collection<String> uids)
      throws SearchException {

    if (isIndexReadOnly() || (uids == null) || uids.isEmpty()) {
      return;
    }

    SearchEngine searchEngine = getSearchEngine(searchEngineId);

    IndexWriter indexWriter = searchEngine.getIndexWriter();

    SearchContext searchContext = new SearchContext();

    searchContext.setCompanyId(companyId);
    searchContext.setSearchEngineId(searchEngineId);

    indexWriter.deleteDocuments(searchContext, uids);
  }
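  // Hypothetical caller of the wrapper above; the engine id, company id and uids are made-up
  // example values, not from the original source.
  public static void deleteExampleDocuments() throws SearchException {
    deleteDocuments("SYSTEM_ENGINE", 20101L, java.util.Arrays.asList("uid-1", "uid-2"));
  }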
    public void update(IndexWriter writer) throws IOException {
      // Add 10 docs:
      FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
      customType.setStoreTermVectors(true);
      for (int j = 0; j < 10; j++) {
        Document d = new Document();
        int n = random().nextInt();
        d.add(newField("id", Integer.toString(nextID++), customType));
        d.add(newTextField("contents", English.intToEnglish(n), Field.Store.NO));
        writer.addDocument(d);
      }

      // Delete 5 docs:
      int deleteID = nextID - 1;
      for (int j = 0; j < 5; j++) {
        writer.deleteDocuments(new Term("id", "" + deleteID));
        deleteID -= 2;
      }
    }
  private static void deleteFromFeature(
      UUID pictureId, Term term, String prefix, IndexWriterConfig conf) throws IOException {

    File file = getPath(prefix);

    // Create a Lucene IndexWriter
    log.debug("Lucene IndexWriterConfig supplied: " + (conf != null));
    if (conf == null) {
      conf =
          new IndexWriterConfig(
              LuceneUtils.LUCENE_VERSION, new WhitespaceAnalyzer(LuceneUtils.LUCENE_VERSION));
      conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    }
    IndexWriter iw = new IndexWriter(FSDirectory.open(file), conf);

    iw.deleteDocuments(term);

    iw.close();
  }
  // LUCENE-2593
  public void testCorruptionAfterDiskFullDuringMerge() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    // IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new
    // MockAnalyzer(random)).setReaderPooling(true));
    IndexWriter w =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
                .setMergeScheduler(new SerialMergeScheduler())
                .setReaderPooling(true)
                .setMergePolicy(newLogMergePolicy(2)));

    _TestUtil.keepFullyDeletedSegments(w);

    ((LogMergePolicy) w.getMergePolicy()).setMergeFactor(2);

    Document doc = new Document();
    doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
    w.addDocument(doc);
    w.commit();

    w.deleteDocuments(new Term("f", "who"));
    w.addDocument(doc);

    // disk fills up!
    FailTwiceDuringMerge ftdm = new FailTwiceDuringMerge();
    ftdm.setDoFail();
    dir.failOn(ftdm);

    try {
      w.commit();
      fail("fake disk full IOExceptions not hit");
    } catch (IOException ioe) {
      // expected
      assertTrue(ftdm.didFail1 || ftdm.didFail2);
    }
    _TestUtil.checkIndex(dir);
    ftdm.clearDoFail();
    w.addDocument(doc);
    w.close();

    dir.close();
  }
  public void testWithPendingDeletes3() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux);
    IndexWriter writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND));

    // Adds 10 docs, then replaces them with another 10
    // docs, so 10 pending deletes:
    for (int i = 0; i < 20; i++) {
      Document doc = new Document();
      doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
      doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
      writer.updateDocument(new Term("id", "" + (i % 10)), doc);
    }

    // Deletes one of the 10 added docs, leaving 9:
    PhraseQuery q = new PhraseQuery();
    q.add(new Term("content", "bbb"));
    q.add(new Term("content", "14"));
    writer.deleteDocuments(q);

    writer.addIndexes(aux);

    writer.forceMerge(1);
    writer.commit();

    verifyNumDocs(dir, 1039);
    verifyTermDocs(dir, new Term("content", "aaa"), 1030);
    verifyTermDocs(dir, new Term("content", "bbb"), 9);

    writer.close();
    dir.close();
    aux.close();
  }
  // case 4: tail segments, invariants hold, copy, invariants not hold
  public void testMergeAfterCopy() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();

    setUpDirs(dir, aux, true);

    IndexWriterConfig dontMergeConfig =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    @SuppressWarnings("resource")
    IndexWriter writer = new IndexWriter(aux, dontMergeConfig);
    for (int i = 0; i < 20; i++) {
      writer.deleteDocuments(new Term("id", "" + i));
    }
    writer.close();
    IndexReader reader = DirectoryReader.open(aux);
    assertEquals(10, reader.numDocs());
    reader.close();

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(4)
                .setMergePolicy(newLogMergePolicy(4)));

    if (VERBOSE) {
      System.out.println("\nTEST: now addIndexes");
    }
    writer.addIndexes(
        aux, new MockDirectoryWrapper(random(), new RAMDirectory(aux, newIOContext(random()))));
    assertEquals(1020, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
    dir.close();
    aux.close();
  }
  public void testDeletes1() throws Exception {
    // IndexWriter.debug2 = System.out;
    Directory dir = new MockDirectoryWrapper(new Random(random().nextLong()), new RAMDirectory());
    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
    iwc.setMergeScheduler(new SerialMergeScheduler());
    iwc.setMaxBufferedDocs(5000);
    iwc.setRAMBufferSizeMB(100);
    RangeMergePolicy fsmp = new RangeMergePolicy(false);
    iwc.setMergePolicy(fsmp);
    IndexWriter writer = new IndexWriter(dir, iwc);
    for (int x = 0; x < 5; x++) {
      writer.addDocument(DocHelper.createDocument(x, "1", 2));
      // System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
    }
    // System.out.println("commit1");
    writer.commit();
    assertEquals(1, writer.segmentInfos.size());
    for (int x = 5; x < 10; x++) {
      writer.addDocument(DocHelper.createDocument(x, "2", 2));
      // System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
    }
    // System.out.println("commit2");
    writer.commit();
    assertEquals(2, writer.segmentInfos.size());

    for (int x = 10; x < 15; x++) {
      writer.addDocument(DocHelper.createDocument(x, "3", 2));
      // System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
    }

    writer.deleteDocuments(new Term("id", "1"));

    writer.deleteDocuments(new Term("id", "11"));

    // flushing without applying deletes means
    // there will still be deletes in the segment infos
    writer.flush(false, false);
    assertTrue(writer.bufferedUpdatesStream.any());

    // get reader flushes pending deletes
    // so there should not be anymore
    IndexReader r1 = writer.getReader();
    assertFalse(writer.bufferedUpdatesStream.any());
    r1.close();

    // delete id:2 from the first segment
    // merge segments 0 and 1
    // which should apply the delete id:2
    writer.deleteDocuments(new Term("id", "2"));
    writer.flush(false, false);
    fsmp = (RangeMergePolicy) writer.getConfig().getMergePolicy();
    fsmp.doMerge = true;
    fsmp.start = 0;
    fsmp.length = 2;
    writer.maybeMerge();

    assertEquals(2, writer.segmentInfos.size());

    // id:2 shouldn't exist anymore because
    // it's been applied in the merge and now it's gone
    IndexReader r2 = writer.getReader();
    int[] id2docs = toDocsArray(new Term("id", "2"), null, r2);
    assertTrue(id2docs == null);
    r2.close();

    /*
    // added docs are in the ram buffer
    for (int x = 15; x < 20; x++) {
      writer.addDocument(TestIndexWriterReader.createDocument(x, "4", 2));
      System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
    }
    assertTrue(writer.numRamDocs() > 0);

    // delete from the ram buffer
    writer.deleteDocuments(new Term("id", Integer.toString(13)));

    Term id3 = new Term("id", Integer.toString(3));

    // delete from the 1st segment
    writer.deleteDocuments(id3);

    assertTrue(writer.numRamDocs() > 0);

    // System.out.println("segdels1:" + writer.docWriter.deletesToString());

    // assertTrue(writer.docWriter.segmentDeletes.size() > 0);

    // we cause a merge to happen
    fsmp.doMerge = true;
    fsmp.start = 0;
    fsmp.length = 2;
    System.out.println("maybeMerge " + writer.segmentInfos);

    SegmentInfo info0 = writer.segmentInfos.info(0);
    SegmentInfo info1 = writer.segmentInfos.info(1);

    writer.maybeMerge();
    System.out.println("maybeMerge after " + writer.segmentInfos);

    // there should be docs in RAM
    assertTrue(writer.numRamDocs() > 0);

    // assert we've merged the 1 and 2 segments
    // and still have a segment leftover == 2
    assertEquals(2, writer.segmentInfos.size());
    assertFalse(segThere(info0, writer.segmentInfos));
    assertFalse(segThere(info1, writer.segmentInfos));

    // System.out.println("segdels2:" + writer.docWriter.deletesToString());

    // assertTrue(writer.docWriter.segmentDeletes.size() > 0);

    IndexReader r = writer.getReader();
    IndexReader r1 = r.getSequentialSubReaders()[0];
    printDelDocs(r1.getLiveDocs());
    int[] docs = toDocsArray(id3, null, r);
    System.out.println("id3 docs:" + Arrays.toString(docs));
    // there shouldn't be any docs for id:3
    assertTrue(docs == null);
    r.close();

    part2(writer, fsmp);
    */
    // System.out.println("segdels2:"+writer.docWriter.segmentDeletes.toString());
    // System.out.println("close");
    writer.close();
    dir.close();
  }
  /*
   * Test a deletion policy that keeps last N commits
   * around, through creates.
   */
  public void testKeepLastNDeletionPolicyWithCreates() throws IOException {

    final int N = 10;

    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();
      if (dir instanceof MockDirectoryWrapper) {
        // test manually deletes files
        ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
      }
      IndexWriterConfig conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.CREATE)
              .setIndexDeletionPolicy(new KeepLastNDeletionPolicy(N))
              .setMaxBufferedDocs(10);
      MergePolicy mp = conf.getMergePolicy();
      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
      IndexWriter writer = new IndexWriter(dir, conf);
      KeepLastNDeletionPolicy policy =
          (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      writer.close();
      Term searchTerm = new Term("content", "aaa");
      Query query = new TermQuery(searchTerm);

      for (int i = 0; i < N + 1; i++) {

        conf =
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setIndexDeletionPolicy(policy)
                .setMaxBufferedDocs(10);
        mp = conf.getMergePolicy();
        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
        writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        for (int j = 0; j < 17; j++) {
          addDocWithID(writer, i * (N + 1) + j);
        }
        // this is a commit
        writer.close();
        conf =
            new IndexWriterConfig(new MockAnalyzer(random()))
                .setIndexDeletionPolicy(policy)
                .setMergePolicy(NoMergePolicy.INSTANCE);
        writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        writer.deleteDocuments(new Term("id", "" + (i * (N + 1) + 3)));
        // this is a commit
        writer.close();
        IndexReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = newSearcher(reader);
        ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
        assertEquals(16, hits.length);
        reader.close();

        writer =
            new IndexWriter(
                dir,
                newIndexWriterConfig(new MockAnalyzer(random()))
                    .setOpenMode(OpenMode.CREATE)
                    .setIndexDeletionPolicy(policy));
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        // This will not commit: there are no changes
        // pending because we opened for "create":
        writer.close();
      }

      assertEquals(3 * (N + 1) + 1, policy.numOnInit);
      assertEquals(3 * (N + 1) + 1, policy.numOnCommit);

      IndexReader rwReader = DirectoryReader.open(dir);
      IndexSearcher searcher = newSearcher(rwReader);
      ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
      assertEquals(0, hits.length);

      // Simplistic check: just verify that only the past N segments_N's still
      // exist, and that I can open a reader on each:
      long gen = SegmentInfos.getLastCommitGeneration(dir);

      int expectedCount = 0;

      rwReader.close();

      for (int i = 0; i < N + 1; i++) {
        try {
          IndexReader reader = DirectoryReader.open(dir);

          // Work backwards in commits on what the expected
          // count should be.
          searcher = newSearcher(reader);
          hits = searcher.search(query, 1000).scoreDocs;
          assertEquals(expectedCount, hits.length);
          if (expectedCount == 0) {
            expectedCount = 16;
          } else if (expectedCount == 16) {
            expectedCount = 17;
          } else if (expectedCount == 17) {
            expectedCount = 0;
          }
          reader.close();
          if (i == N) {
            fail("should have failed on commits before last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }

      dir.close();
    }
  }
  public void testRandom() throws Exception {

    int num = atLeast(2);
    for (int iter = 0; iter < num; iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }

      Directory dir = newDirectory();

      IndexWriter w =
          new IndexWriter(
              dir,
              newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                  .setMergePolicy(NoMergePolicy.COMPOUND_FILES));
      _TestUtil.keepFullyDeletedSegments(w);

      Map<BytesRef, List<Integer>> docs = new HashMap<BytesRef, List<Integer>>();
      Set<Integer> deleted = new HashSet<Integer>();
      List<BytesRef> terms = new ArrayList<BytesRef>();

      int numDocs = _TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER);
      Document doc = new Document();
      Field f = newStringField("field", "", Field.Store.NO);
      doc.add(f);
      Field id = newStringField("id", "", Field.Store.NO);
      doc.add(id);

      boolean onlyUniqueTerms = random().nextBoolean();
      if (VERBOSE) {
        System.out.println("TEST: onlyUniqueTerms=" + onlyUniqueTerms + " numDocs=" + numDocs);
      }
      Set<BytesRef> uniqueTerms = new HashSet<BytesRef>();
      for (int i = 0; i < numDocs; i++) {

        if (!onlyUniqueTerms && random().nextBoolean() && terms.size() > 0) {
          // re-use existing term
          BytesRef term = terms.get(random().nextInt(terms.size()));
          docs.get(term).add(i);
          f.setStringValue(term.utf8ToString());
        } else {
          String s = _TestUtil.randomUnicodeString(random(), 10);
          BytesRef term = new BytesRef(s);
          if (!docs.containsKey(term)) {
            docs.put(term, new ArrayList<Integer>());
          }
          docs.get(term).add(i);
          terms.add(term);
          uniqueTerms.add(term);
          f.setStringValue(s);
        }
        id.setStringValue("" + i);
        w.addDocument(doc);
        if (random().nextInt(4) == 1) {
          w.commit();
        }
        if (i > 0 && random().nextInt(20) == 1) {
          int delID = random().nextInt(i);
          deleted.add(delID);
          w.deleteDocuments(new Term("id", "" + delID));
          if (VERBOSE) {
            System.out.println("TEST: delete " + delID);
          }
        }
      }

      if (VERBOSE) {
        List<BytesRef> termsList = new ArrayList<BytesRef>(uniqueTerms);
        Collections.sort(termsList, BytesRef.getUTF8SortedAsUTF16Comparator());
        System.out.println("TEST: terms in UTF16 order:");
        for (BytesRef b : termsList) {
          System.out.println("  " + UnicodeUtil.toHexString(b.utf8ToString()) + " " + b);
          for (int docID : docs.get(b)) {
            if (deleted.contains(docID)) {
              System.out.println("    " + docID + " (deleted)");
            } else {
              System.out.println("    " + docID);
            }
          }
        }
      }

      IndexReader reader = w.getReader();
      w.close();
      if (VERBOSE) {
        System.out.println("TEST: reader=" + reader);
      }

      Bits liveDocs = MultiFields.getLiveDocs(reader);
      for (int delDoc : deleted) {
        assertFalse(liveDocs.get(delDoc));
      }

      for (int i = 0; i < 100; i++) {
        BytesRef term = terms.get(random().nextInt(terms.size()));
        if (VERBOSE) {
          System.out.println(
              "TEST: seek term=" + UnicodeUtil.toHexString(term.utf8ToString()) + " " + term);
        }

        DocsEnum docsEnum = _TestUtil.docs(random(), reader, "field", term, liveDocs, null, 0);
        assertNotNull(docsEnum);

        for (int docID : docs.get(term)) {
          if (!deleted.contains(docID)) {
            assertEquals(docID, docsEnum.nextDoc());
          }
        }
        assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
      }

      reader.close();
      dir.close();
    }
  }
  @Test
  public void testRollingUpdates() throws Exception {
    Random random = new Random(random().nextLong());
    final BaseDirectoryWrapper dir = newDirectory();
    // test checks for no unref'ed files with the IW helper method, which isn't aware of "tried to
    // delete files"
    if (dir instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
    }

    final LineFileDocs docs = new LineFileDocs(random, true);

    // provider.register(new MemoryCodec());
    if (random().nextBoolean()) {
      Codec.setDefault(
          TestUtil.alwaysPostingsFormat(
              new MemoryPostingsFormat(random().nextBoolean(), random.nextFloat())));
    }

    MockAnalyzer analyzer = new MockAnalyzer(random());
    analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));

    final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
    final int SIZE = atLeast(20);
    int id = 0;
    IndexReader r = null;
    IndexSearcher s = null;
    final int numUpdates =
        (int)
            (SIZE * (2 + (TEST_NIGHTLY ? 200 * random().nextDouble() : 5 * random().nextDouble())));
    if (VERBOSE) {
      System.out.println("TEST: numUpdates=" + numUpdates);
    }
    int updateCount = 0;
    // TODO: sometimes update ids not in order...
    for (int docIter = 0; docIter < numUpdates; docIter++) {
      final Document doc = docs.nextDoc();
      final String myID = Integer.toString(id);
      if (id == SIZE - 1) {
        id = 0;
      } else {
        id++;
      }
      if (VERBOSE) {
        System.out.println("  docIter=" + docIter + " id=" + id);
      }
      ((Field) doc.getField("docid")).setStringValue(myID);

      Term idTerm = new Term("docid", myID);

      final boolean doUpdate;
      if (s != null && updateCount < SIZE) {
        TopDocs hits = s.search(new TermQuery(idTerm), 1);
        assertEquals(1, hits.totalHits);
        doUpdate = !w.tryDeleteDocument(r, hits.scoreDocs[0].doc);
        if (VERBOSE) {
          if (doUpdate) {
            System.out.println("  tryDeleteDocument failed");
          } else {
            System.out.println("  tryDeleteDocument succeeded");
          }
        }
      } else {
        doUpdate = true;
        if (VERBOSE) {
          System.out.println("  no searcher: doUpdate=true");
        }
      }

      updateCount++;

      if (doUpdate) {
        if (random().nextBoolean()) {
          w.updateDocument(idTerm, doc);
        } else {
          // It's OK to not be atomic for this test (no separate thread reopening readers):
          w.deleteDocuments(new TermQuery(idTerm));
          w.addDocument(doc);
        }
      } else {
        w.addDocument(doc);
      }

      if (docIter >= SIZE && random().nextInt(50) == 17) {
        if (r != null) {
          r.close();
        }

        final boolean applyDeletions = random().nextBoolean();

        if (VERBOSE) {
          System.out.println("TEST: reopen applyDeletions=" + applyDeletions);
        }

        r = w.getReader(applyDeletions);
        if (applyDeletions) {
          s = newSearcher(r);
        } else {
          s = null;
        }
        assertTrue(
            "applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE,
            !applyDeletions || r.numDocs() == SIZE);
        updateCount = 0;
      }
    }

    if (r != null) {
      r.close();
    }

    w.commit();
    assertEquals(SIZE, w.numDocs());

    w.close();

    TestIndexWriter.assertNoUnreferencedFiles(dir, "leftover files after rolling updates");

    docs.close();

    // LUCENE-4455:
    SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
    long totalBytes = 0;
    for (SegmentCommitInfo sipc : infos) {
      totalBytes += sipc.sizeInBytes();
    }
    long totalBytes2 = 0;

    for (String fileName : dir.listAll()) {
      if (IndexFileNames.CODEC_FILE_PATTERN.matcher(fileName).matches()) {
        totalBytes2 += dir.fileLength(fileName);
      }
    }
    assertEquals(totalBytes2, totalBytes);
    dir.close();
  }
  /*
   * Simple test for "commit on close": open writer, then
   * add a bunch of docs, making sure reader does not see
   * them until writer has closed.  Then instead of
   * closing the writer, call abort and verify reader sees
   * nothing was added.  Then verify we can open the index
   * and add docs to it.
   */
  public void testCommitOnCloseAbort() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setMaxBufferedDocs(10));
    for (int i = 0; i < 14; i++) {
      TestIndexWriter.addDoc(writer);
    }
    writer.close();

    Term searchTerm = new Term("content", "aaa");
    IndexReader reader = DirectoryReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("first number of hits", 14, hits.length);
    reader.close();

    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(10));
    for (int j = 0; j < 17; j++) {
      TestIndexWriter.addDoc(writer);
    }
    // Delete all docs:
    writer.deleteDocuments(searchTerm);

    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
    reader.close();

    // Now, close the writer:
    writer.rollback();

    TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");

    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("saw changes after writer.abort", 14, hits.length);
    reader.close();

    // Now make sure we can re-open the index, add docs,
    // and all is good:
    writer =
        new IndexWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(10));

    // On abort, writer in fact may write to the same
    // segments_N file:
    dir.setPreventDoubleWrite(false);

    for (int i = 0; i < 12; i++) {
      for (int j = 0; j < 17; j++) {
        TestIndexWriter.addDoc(writer);
      }
      IndexReader r = DirectoryReader.open(dir);
      searcher = new IndexSearcher(r);
      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
      r.close();
    }

    writer.close();
    IndexReader r = DirectoryReader.open(dir);
    searcher = new IndexSearcher(r);
    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
    assertEquals("didn't see changes after close", 218, hits.length);
    r.close();

    dir.close();
  }
 protected void deleteDocuments(Term term) throws Exception {
   writer.deleteDocuments(term);
 }
  // case 5: tail segments, invariants not hold
  public void testMoreMerges() throws IOException {
    // main directory
    Directory dir = newDirectory();
    // auxiliary directory
    Directory aux = newDirectory();
    Directory aux2 = newDirectory();

    setUpDirs(dir, aux, true);

    IndexWriter writer =
        newWriter(
            aux2,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setMaxBufferedDocs(100)
                .setMergePolicy(newLogMergePolicy(10)));
    writer.addIndexes(aux);
    assertEquals(30, writer.maxDoc());
    assertEquals(3, writer.getSegmentCount());
    writer.close();

    IndexWriterConfig dontMergeConfig =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    writer = new IndexWriter(aux, dontMergeConfig);
    for (int i = 0; i < 27; i++) {
      writer.deleteDocuments(new Term("id", "" + i));
    }
    writer.close();
    IndexReader reader = DirectoryReader.open(aux);
    assertEquals(3, reader.numDocs());
    reader.close();

    dontMergeConfig =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    writer = new IndexWriter(aux2, dontMergeConfig);
    for (int i = 0; i < 8; i++) {
      writer.deleteDocuments(new Term("id", "" + i));
    }
    writer.close();
    reader = DirectoryReader.open(aux2);
    assertEquals(22, reader.numDocs());
    reader.close();

    writer =
        newWriter(
            dir,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setMaxBufferedDocs(6)
                .setMergePolicy(newLogMergePolicy(4)));

    writer.addIndexes(aux, aux2);
    assertEquals(1040, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
    dir.close();
    aux.close();
    aux2.close();
  }
  @Test
  public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(
          new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      Store store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      first = store.getMetadata();
      assertDeleteContent(store, directoryService);
      store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
      Thread.sleep(10); // bump the time
    }
    Store.MetadataSnapshot second;
    Store store;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      second = store.getMetadata();
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
      assertThat(second.get(md.name()), notNullValue());
      // si files are different - containing timestamps etc
      assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    assertThat(
        diff.identical.size(),
        equalTo(0)); // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.missing, empty());

    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // lets add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc =
        new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata();
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
      if (md.name().endsWith(".liv")) {
        delFile = md;
        break;
      }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
      assertThat(
          afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
      // an entire segment must be missing (single doc segment got dropped)
      assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
    }

    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(
        true); // force CFS - easier to test here since we know it will add 3 files
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();

    Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
      assertThat(
          newCommitDiff.identical.size(),
          equalTo(
              newCommitMetaData.size()
                  - 5)); // segments_N, del file, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
      assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
      assertThat(
          newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment
    } else {
      assertThat(
          newCommitDiff.identical.size(),
          equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.different.size(), equalTo(0));
      assertThat(
          newCommitDiff.missing.size(),
          equalTo(
              4)); // an entire segment must be missing (single doc segment got dropped)  plus the
                   // commit is different
    }

    store.deleteContent();
    IOUtils.close(store);
  }
  @Slow
  public void testNoWaitClose() throws Throwable {
    Directory directory = newDirectory();

    if (directory instanceof MockDirectoryWrapper) {
      ((MockDirectoryWrapper) directory).setPreventDoubleWrite(false);
    }

    final Document doc = new Document();
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setTokenized(false);

    Field idField = newField("id", "", customType);
    doc.add(idField);

    for (int pass = 0; pass < 2; pass++) {
      if (VERBOSE) {
        System.out.println("TEST: pass="******"TEST: iter=" + iter);
        }
        for (int j = 0; j < 199; j++) {
          idField.setStringValue(Integer.toString(iter * 201 + j));
          writer.addDocument(doc);
        }

        int delID = iter * 199;
        for (int j = 0; j < 20; j++) {
          writer.deleteDocuments(new Term("id", Integer.toString(delID)));
          delID += 5;
        }

        writer.commit();

        // Force a bunch of merge threads to kick off so we
        // stress out aborting them on close:
        ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);

        final IndexWriter finalWriter = writer;
        final AtomicReference<Throwable> failure = new AtomicReference<>();
        Thread t1 =
            new Thread() {
              @Override
              public void run() {
                boolean done = false;
                while (!done) {
                  for (int i = 0; i < 100; i++) {
                    try {
                      finalWriter.addDocument(doc);
                    } catch (AlreadyClosedException e) {
                      done = true;
                      break;
                    } catch (NullPointerException e) {
                      done = true;
                      break;
                    } catch (Throwable e) {
                      e.printStackTrace(System.out);
                      failure.set(e);
                      done = true;
                      break;
                    }
                  }
                  Thread.yield();
                }
              }
            };

        t1.start();

        writer.close();
        t1.join();

        if (failure.get() != null) {
          throw failure.get();
        }

        // Make sure reader can read
        IndexReader reader = DirectoryReader.open(directory);
        reader.close();

        // Reopen
        writer =
            new IndexWriter(
                directory,
                newIndexWriterConfig(new MockAnalyzer(random()))
                    .setOpenMode(OpenMode.APPEND)
                    .setMergePolicy(newLogMergePolicy())
                    .setCommitOnClose(false));
      }
      writer.close();
    }

    directory.close();
  }