Code example #1
  /*
   * Test a deletion policy that keeps last N commits.
   */
  public void testKeepLastNDeletionPolicy() throws IOException {
    final int N = 5;

    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();
      if (dir instanceof MockDirectoryWrapper) {
        // test manually deletes files
        ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
      }

      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
      for (int j = 0; j < N + 1; j++) {
        IndexWriterConfig conf =
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setIndexDeletionPolicy(policy)
                .setMaxBufferedDocs(10);
        MergePolicy mp = conf.getMergePolicy();
        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
        IndexWriter writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        for (int i = 0; i < 17; i++) {
          addDoc(writer);
        }
        writer.forceMerge(1);
        writer.close();
      }

      assertTrue(policy.numDelete > 0);
      assertEquals(N + 1, policy.numOnInit);
      assertEquals(N + 1, policy.numOnCommit);

      // Simplistic check: just verify that only the last N segments_N files
      // still exist, and that we can open a reader on each:
      long gen = SegmentInfos.getLastCommitGeneration(dir);
      for (int i = 0; i < N + 1; i++) {
        try {
          IndexReader reader = DirectoryReader.open(dir);
          reader.close();
          if (i == N) {
            fail("should have failed on commits prior to last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }

      dir.close();
    }
  }
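
The KeepLastNDeletionPolicy above is a test-local helper, not a shipped Lucene class. A minimal sketch of what it plausibly looks like, assuming the IndexDeletionPolicy API and the numOnInit / numOnCommit / numDelete counters the assertions rely on (the other Keep* policies referenced in this file follow the same pattern):

  class KeepLastNDeletionPolicy extends IndexDeletionPolicy {
    int numOnInit;
    int numOnCommit;
    int numDelete;
    private final int numToKeep;

    KeepLastNDeletionPolicy(int numToKeep) {
      this.numToKeep = numToKeep;
    }

    @Override
    public void onInit(List<? extends IndexCommit> commits) {
      numOnInit++;
      doDeletes(commits);
    }

    @Override
    public void onCommit(List<? extends IndexCommit> commits) {
      numOnCommit++;
      doDeletes(commits);
    }

    // commits arrive oldest-first; drop everything but the newest numToKeep
    private void doDeletes(List<? extends IndexCommit> commits) {
      int size = commits.size();
      for (int i = 0; i < size - numToKeep; i++) {
        commits.get(i).delete();
        numDelete++;
      }
    }
  }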
Code example #2
  public void testForceMergeNotNeeded() throws IOException {
    try (Directory dir = newDirectory()) {
      final AtomicBoolean mayMerge = new AtomicBoolean(true);
      final MergeScheduler mergeScheduler =
          new SerialMergeScheduler() {
            @Override
            public synchronized void merge(
                IndexWriter writer, MergeTrigger trigger, boolean newMergesFound)
                throws IOException {
              if (mayMerge.get() == false) {
                MergePolicy.OneMerge merge = writer.getNextMerge();
                if (merge != null) {
                  System.out.println(
                      "TEST: we should not need any merging, yet merge policy returned merge "
                          + merge);
                  throw new AssertionError();
                }
              }

              super.merge(writer, trigger, newMergesFound);
            }
          };

      MergePolicy mp = mergePolicy();
      assumeFalse(
          "this test cannot tolerate random forceMerges",
          mp.toString().contains("MockRandomMergePolicy"));
      mp.setNoCFSRatio(random().nextBoolean() ? 0 : 1);

      IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
      iwc.setMergeScheduler(mergeScheduler);
      iwc.setMergePolicy(mp);

      IndexWriter writer = new IndexWriter(dir, iwc);
      final int numSegments = TestUtil.nextInt(random(), 2, 20);
      for (int i = 0; i < numSegments; ++i) {
        final int numDocs = TestUtil.nextInt(random(), 1, 5);
        for (int j = 0; j < numDocs; ++j) {
          writer.addDocument(new Document());
        }
        writer.getReader().close();
      }
      for (int i = 5; i >= 0; --i) {
        final int segmentCount = writer.getSegmentCount();
        final int maxNumSegments = i == 0 ? 1 : TestUtil.nextInt(random(), 1, 10);
        mayMerge.set(segmentCount > maxNumSegments);
        if (VERBOSE) {
          System.out.println(
              "TEST: now forceMerge(maxNumSegments="
                  + maxNumSegments
                  + ") vs segmentCount="
                  + segmentCount);
        }
        writer.forceMerge(maxNumSegments);
      }
      writer.close();
    }
  }
Code example #3
 @Test
 public void testNoMergePolicy() throws Exception {
   MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES;
   assertNull(mp.findMerges(null));
   // LUCENE-3577
   // assertNull(mp.findMergesToExpungeDeletes(null));
   assertFalse(mp.useCompoundFile(null, null));
   mp.close();
 }
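
NoMergePolicy is useful beyond this unit test: installing it on an IndexWriterConfig disables merging entirely, which is how example #16 below pins segment counts while it deletes documents. A minimal usage sketch, assuming the newer NoMergePolicy.INSTANCE singleton and the LuceneTestCase helpers used throughout these examples:

  try (Directory dir = newDirectory();
      IndexWriter w =
          new IndexWriter(
              dir,
              newIndexWriterConfig(new MockAnalyzer(random()))
                  .setMergePolicy(NoMergePolicy.INSTANCE))) { // never merge
    w.addDocument(new Document());
    w.commit(); // each flushed segment is left untouched
  }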
Code example #4
  private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS)
      throws Exception {
    Directory directory = newDirectory();
    Analyzer analyzer = new MockAnalyzer(random);
    IndexWriterConfig conf = newIndexWriterConfig(analyzer);
    final MergePolicy mp = conf.getMergePolicy();
    mp.setNoCFSRatio(useCompoundFiles ? 1.0 : 0.0);
    IndexWriter writer = new IndexWriter(directory, conf);
    if (VERBOSE) {
      System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS);
    }

    for (int j = 0; j < MAX_DOCS; j++) {
      Document d = new Document();
      d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES));
      d.add(newTextField(ID_FIELD, Integer.toString(j), Field.Store.YES));
      writer.addDocument(d);
    }
    writer.close();

    // try a search without OR
    IndexReader reader = DirectoryReader.open(directory);
    IndexSearcher searcher = newSearcher(reader);

    Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY));
    out.println("Query: " + query.toString(PRIORITY_FIELD));
    if (VERBOSE) {
      System.out.println("TEST: search query=" + query);
    }

    final Sort sort = new Sort(SortField.FIELD_SCORE, new SortField(ID_FIELD, SortField.Type.INT));

    ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
    printHits(out, hits, searcher);
    checkHits(hits, MAX_DOCS, searcher);

    // try a new search with OR
    searcher = newSearcher(reader);
    hits = null;

    BooleanQuery booleanQuery = new BooleanQuery();
    booleanQuery.add(
        new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY)), BooleanClause.Occur.SHOULD);
    booleanQuery.add(
        new TermQuery(new Term(PRIORITY_FIELD, MED_PRIORITY)), BooleanClause.Occur.SHOULD);
    out.println("Query: " + booleanQuery.toString(PRIORITY_FIELD));

    hits = searcher.search(booleanQuery, null, MAX_DOCS, sort).scoreDocs;
    printHits(out, hits, searcher);
    checkHits(hits, MAX_DOCS, searcher);

    reader.close();
    directory.close();
  }
Code example #5
 @Override
 public MergePolicy clone() {
   MergePolicy clone;
   try {
     clone = (MergePolicy) super.clone();
   } catch (CloneNotSupportedException e) {
     // should not happen
     throw new RuntimeException(e);
   }
   clone.writer = new SetOnce<IndexWriter>();
   return clone;
 }
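
The clone gets a fresh SetOnce because org.apache.lucene.util.SetOnce permits exactly one set(); without the reset, the clone could never be attached to its own IndexWriter. A quick illustration of the semantics (writer1 / writer2 are hypothetical placeholders):

  SetOnce<IndexWriter> holder = new SetOnce<IndexWriter>();
  holder.set(writer1);  // OK: the first set wins
  assert holder.get() == writer1;
  holder.set(writer2);  // throws SetOnce.AlreadySetException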
Code example #6
  /* Test keeping NO commit points.  This is a viable and
   * useful case, e.g. when you want to build a big index
   * and you know there are no readers.
   */
  public void testKeepNoneOnInitDeletionPolicy() throws IOException {
    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();

      IndexWriterConfig conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.CREATE)
              .setIndexDeletionPolicy(new KeepNoneOnInitDeletionPolicy())
              .setMaxBufferedDocs(10);
      MergePolicy mp = conf.getMergePolicy();
      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
      IndexWriter writer = new IndexWriter(dir, conf);
      KeepNoneOnInitDeletionPolicy policy =
          (KeepNoneOnInitDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      for (int i = 0; i < 107; i++) {
        addDoc(writer);
      }
      writer.close();

      conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.APPEND)
              .setIndexDeletionPolicy(policy);
      mp = conf.getMergePolicy();
      mp.setNoCFSRatio(1.0);
      writer = new IndexWriter(dir, conf);
      policy = (KeepNoneOnInitDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      writer.forceMerge(1);
      writer.close();

      assertEquals(2, policy.numOnInit);
      // If we are not auto committing then there should
      // be exactly 2 commits (one per close above):
      assertEquals(2, policy.numOnCommit);

      // Simplistic check: just verify the index is in fact
      // readable:
      IndexReader reader = DirectoryReader.open(dir);
      reader.close();

      dir.close();
    }
  }
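
KeepNoneOnInitDeletionPolicy is likewise a test-local helper. A plausible sketch, assuming the counters asserted above: on init it deletes every existing commit point, and on commit it keeps only the newest one:

  class KeepNoneOnInitDeletionPolicy extends IndexDeletionPolicy {
    int numOnInit;
    int numOnCommit;

    @Override
    public void onInit(List<? extends IndexCommit> commits) {
      numOnInit++;
      // on init, remove all existing commit points
      for (IndexCommit commit : commits) {
        commit.delete();
      }
    }

    @Override
    public void onCommit(List<? extends IndexCommit> commits) {
      numOnCommit++;
      // keep only the most recent commit
      int size = commits.size();
      for (int i = 0; i < size - 1; i++) {
        commits.get(i).delete();
      }
    }
  }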
Code example #7
  public MergeSpecification findForcedMerges(
      SegmentInfos segmentInfos,
      int maxSegmentCount,
      Map<SegmentCommitInfo, Boolean> segmentsToMerge)
      throws IOException {

    return inner.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge);
  }
Code example #8
File: TestAddIndexes.java  Project: jbasttdi/lumongo
  // LUCENE-2790: tests that the non CFS files were deleted by addIndexes
  public void testNonCFSLeftovers() throws Exception {
    Directory[] dirs = new Directory[2];
    for (int i = 0; i < dirs.length; i++) {
      dirs[i] = new RAMDirectory();
      IndexWriter w =
          new IndexWriter(
              dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
      Document d = new Document();
      FieldType customType = new FieldType(TextField.TYPE_STORED);
      customType.setStoreTermVectors(true);
      d.add(new Field("c", "v", customType));
      w.addDocument(d);
      w.close();
    }

    IndexReader[] readers =
        new IndexReader[] {DirectoryReader.open(dirs[0]), DirectoryReader.open(dirs[1])};

    Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory());
    IndexWriterConfig conf =
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
            .setMergePolicy(newLogMergePolicy(true));
    MergePolicy lmp = conf.getMergePolicy();
    // Force creation of CFS:
    lmp.setNoCFSRatio(1.0);
    lmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
    IndexWriter w3 = new IndexWriter(dir, conf);
    w3.addIndexes(readers);
    w3.close();
    // we should now see segments_X,
    // segments.gen, _Y.cfs, _Y.cfe, _Z.si
    assertEquals(
        "Only one compound segment should exist, but got: " + Arrays.toString(dir.listAll()),
        5,
        dir.listAll().length);
    dir.close();
  }
Code example #9
  /*
   * Test a silly deletion policy that keeps all commits around.
   */
  public void testKeepAllDeletionPolicy() throws IOException {
    for (int pass = 0; pass < 2; pass++) {

      if (VERBOSE) {
        System.out.println("TEST: cycle pass=" + pass);
      }

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();

      IndexWriterConfig conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setIndexDeletionPolicy(new KeepAllDeletionPolicy(dir))
              .setMaxBufferedDocs(10);
      MergePolicy mp = conf.getMergePolicy();
      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
      IndexWriter writer = new IndexWriter(dir, conf);
      KeepAllDeletionPolicy policy =
          (KeepAllDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      for (int i = 0; i < 107; i++) {
        addDoc(writer);
      }
      writer.close();

      final boolean needsMerging;
      {
        DirectoryReader r = DirectoryReader.open(dir);
        needsMerging = r.leaves().size() != 1;
        r.close();
      }
      if (needsMerging) {
        conf =
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setIndexDeletionPolicy(policy);
        mp = conf.getMergePolicy();
        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
        if (VERBOSE) {
          System.out.println("TEST: open writer for forceMerge");
        }
        writer = new IndexWriter(dir, conf);
        policy = (KeepAllDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        writer.forceMerge(1);
        writer.close();
      }

      assertEquals(needsMerging ? 2 : 1, policy.numOnInit);

      // There should be one commit per writer close above
      // (two when the forceMerge pass also ran):
      assertEquals(1 + (needsMerging ? 1 : 0), policy.numOnCommit);

      // Test listCommits
      Collection<IndexCommit> commits = DirectoryReader.listCommits(dir);
      // one commit per writer close above
      assertEquals(1 + (needsMerging ? 1 : 0), commits.size());

      // Make sure we can open a reader on each commit:
      for (final IndexCommit commit : commits) {
        IndexReader r = DirectoryReader.open(commit);
        r.close();
      }

      // Simplistic check: just verify all segments_N files still
      // exist, and that we can open a reader on each:
      long gen = SegmentInfos.getLastCommitGeneration(dir);
      while (gen > 0) {
        IndexReader reader = DirectoryReader.open(dir);
        reader.close();
        dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        gen--;

        if (gen > 0) {
          // Removing a commit point should have orphaned at least
          // one index file.  Open & close a writer and assert that
          // it actually removed something:
          int preCount = dir.listAll().length;
          writer =
              new IndexWriter(
                  dir,
                  newIndexWriterConfig(new MockAnalyzer(random()))
                      .setOpenMode(OpenMode.APPEND)
                      .setIndexDeletionPolicy(policy));
          writer.close();
          int postCount = dir.listAll().length;
          assertTrue(postCount < preCount);
        }
      }

      dir.close();
    }
  }
Code example #10
  /*
   * Test "by time expiration" deletion policy:
   */
  public void testExpirationTimeDeletionPolicy() throws IOException, InterruptedException {

    final double SECONDS = 2.0;

    Directory dir = newDirectory();
    if (dir instanceof MockDirectoryWrapper) {
      // test manually deletes files
      ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
    }
    IndexWriterConfig conf =
        newIndexWriterConfig(new MockAnalyzer(random()))
            .setIndexDeletionPolicy(new ExpirationTimeDeletionPolicy(dir, SECONDS));
    MergePolicy mp = conf.getMergePolicy();
    mp.setNoCFSRatio(1.0);
    IndexWriter writer = new IndexWriter(dir, conf);
    ExpirationTimeDeletionPolicy policy =
        (ExpirationTimeDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
    Map<String, String> commitData = new HashMap<>();
    commitData.put("commitTime", String.valueOf(System.currentTimeMillis()));
    writer.setCommitData(commitData);
    writer.commit();
    writer.close();

    long lastDeleteTime = 0;
    final int targetNumDelete = TestUtil.nextInt(random(), 1, 5);
    while (policy.numDelete < targetNumDelete) {
      // Record last time when writer performed deletes of
      // past commits
      lastDeleteTime = System.currentTimeMillis();
      conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.APPEND)
              .setIndexDeletionPolicy(policy);
      mp = conf.getMergePolicy();
      mp.setNoCFSRatio(1.0);
      writer = new IndexWriter(dir, conf);
      policy = (ExpirationTimeDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      for (int j = 0; j < 17; j++) {
        addDoc(writer);
      }
      commitData = new HashMap<>();
      commitData.put("commitTime", String.valueOf(System.currentTimeMillis()));
      writer.setCommitData(commitData);
      writer.commit();
      writer.close();

      Thread.sleep((int) (1000.0 * (SECONDS / 5.0)));
    }

    // Simplistic check: just verify that the segments_N files that
    // still exist are in fact within SECONDS seconds of the last
    // one's mod time, and that we can open a reader on each:
    long gen = SegmentInfos.getLastCommitGeneration(dir);

    String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);
    boolean oneSecondResolution = true;

    while (gen > 0) {
      try {
        IndexReader reader = DirectoryReader.open(dir);
        reader.close();
        fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);

        // if we are on a filesystem that seems to have only
        // 1 second resolution, allow +1 second in commit
        // age tolerance:
        SegmentInfos sis = SegmentInfos.readCommit(dir, fileName);
        long modTime = Long.parseLong(sis.getUserData().get("commitTime"));
        oneSecondResolution &= (modTime % 1000) == 0;
        final long leeway = (long) ((SECONDS + (oneSecondResolution ? 1.0 : 0.0)) * 1000);

        assertTrue(
            "commit point was older than "
                + SECONDS
                + " seconds ("
                + (lastDeleteTime - modTime)
                + " msec) but did not get deleted ",
            lastDeleteTime - modTime <= leeway);
      } catch (IOException e) {
        // OK
        break;
      }

      dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
      gen--;
    }

    dir.close();
  }
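
ExpirationTimeDeletionPolicy is also defined by the test. One plausible implementation, assuming commits carry the "commitTime" user data written above and arrive oldest-first (the dir argument is accepted only to match the call site):

  class ExpirationTimeDeletionPolicy extends IndexDeletionPolicy {
    private final double expirationTimeSeconds;
    int numDelete;

    ExpirationTimeDeletionPolicy(Directory dir, double seconds) {
      this.expirationTimeSeconds = seconds;
    }

    @Override
    public void onInit(List<? extends IndexCommit> commits) throws IOException {
      if (!commits.isEmpty()) {
        onCommit(commits);
      }
    }

    @Override
    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
      // expire every commit (except the newest) whose commitTime is
      // more than expirationTimeSeconds older than the newest commit's
      IndexCommit last = commits.get(commits.size() - 1);
      long expireTime =
          Long.parseLong(last.getUserData().get("commitTime"))
              - (long) (1000.0 * expirationTimeSeconds);
      for (IndexCommit commit : commits) {
        if (commit != last
            && Long.parseLong(commit.getUserData().get("commitTime")) < expireTime) {
          commit.delete();
          numDelete++;
        }
      }
    }
  }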
Code example #11
  public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo)
      throws IOException {

    return inner.useCompoundFile(infos, mergedInfo);
  }
Code example #12
  public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
      throws IOException {

    return inner.findMerges(mergeTrigger, segmentInfos);
  }
Code example #13
  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) throws IOException {

    return inner.findForcedDeletesMerges(segmentInfos);
  }
Code example #14
 public void close() {
   inner.close();
 }
Code example #15
 private RandomMergePolicy(MergePolicy inner) {
   super(inner.getNoCFSRatio(), (long) (inner.getMaxCFSSegmentSizeMB() * 1024 * 1024));
   this.inner = inner;
   log.info("RandomMergePolicy wrapping {}: {}", inner.getClass(), inner);
 }
Code example #16
  /*
   * Test a deletion policy that keeps last N commits
   * around, through creates.
   */
  public void testKeepLastNDeletionPolicyWithCreates() throws IOException {

    final int N = 10;

    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();
      if (dir instanceof MockDirectoryWrapper) {
        // test manually deletes files
        ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
      }
      IndexWriterConfig conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.CREATE)
              .setIndexDeletionPolicy(new KeepLastNDeletionPolicy(N))
              .setMaxBufferedDocs(10);
      MergePolicy mp = conf.getMergePolicy();
      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
      IndexWriter writer = new IndexWriter(dir, conf);
      KeepLastNDeletionPolicy policy =
          (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      writer.close();
      Term searchTerm = new Term("content", "aaa");
      Query query = new TermQuery(searchTerm);

      for (int i = 0; i < N + 1; i++) {

        conf =
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setIndexDeletionPolicy(policy)
                .setMaxBufferedDocs(10);
        mp = conf.getMergePolicy();
        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
        writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        for (int j = 0; j < 17; j++) {
          addDocWithID(writer, i * (N + 1) + j);
        }
        // this is a commit
        writer.close();
        conf =
            new IndexWriterConfig(new MockAnalyzer(random()))
                .setIndexDeletionPolicy(policy)
                .setMergePolicy(NoMergePolicy.INSTANCE);
        writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        writer.deleteDocuments(new Term("id", "" + (i * (N + 1) + 3)));
        // this is a commit
        writer.close();
        IndexReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = newSearcher(reader);
        ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
        assertEquals(16, hits.length);
        reader.close();

        writer =
            new IndexWriter(
                dir,
                newIndexWriterConfig(new MockAnalyzer(random()))
                    .setOpenMode(OpenMode.CREATE)
                    .setIndexDeletionPolicy(policy));
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        // This will not commit: there are no changes
        // pending because we opened for "create":
        writer.close();
      }

      assertEquals(3 * (N + 1) + 1, policy.numOnInit);
      assertEquals(3 * (N + 1) + 1, policy.numOnCommit);

      IndexReader rwReader = DirectoryReader.open(dir);
      IndexSearcher searcher = newSearcher(rwReader);
      ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
      assertEquals(0, hits.length);

      // Simplistic check: just verify that only the last N segments_N files
      // still exist, and that we can open a reader on each:
      long gen = SegmentInfos.getLastCommitGeneration(dir);

      int expectedCount = 0;

      rwReader.close();

      for (int i = 0; i < N + 1; i++) {
        try {
          IndexReader reader = DirectoryReader.open(dir);

          // Work backwards in commits on what the expected
          // count should be.
          searcher = newSearcher(reader);
          hits = searcher.search(query, 1000).scoreDocs;
          assertEquals(expectedCount, hits.length);
          if (expectedCount == 0) {
            expectedCount = 16;
          } else if (expectedCount == 16) {
            expectedCount = 17;
          } else if (expectedCount == 17) {
            expectedCount = 0;
          }
          reader.close();
          if (i == N) {
            fail("should have failed on commits before last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }

      dir.close();
    }
  }
Code example #17
 public void setIndexWriter(IndexWriter writer) {
   inner.setIndexWriter(writer);
 }
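
Taken together, examples #7, #11-#15, and #17 are the pieces of a delegating wrapper (Solr's test-framework RandomMergePolicy, which forwards every decision to an inner MergePolicy; the real class also exposes a public no-arg constructor that picks that inner policy at random, elided here). Assembled into one class, assuming the Lucene 4.x MergePolicy API and slf4j logging, it looks roughly like:

  import java.io.IOException;
  import java.util.Map;
  import org.apache.lucene.index.*;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class RandomMergePolicy extends MergePolicy {
    private static final Logger log = LoggerFactory.getLogger(RandomMergePolicy.class);
    final MergePolicy inner;

    private RandomMergePolicy(MergePolicy inner) {
      super(inner.getNoCFSRatio(), (long) (inner.getMaxCFSSegmentSizeMB() * 1024 * 1024));
      this.inner = inner;
      log.info("RandomMergePolicy wrapping {}: {}", inner.getClass(), inner);
    }

    @Override
    public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
        throws IOException {
      return inner.findMerges(mergeTrigger, segmentInfos);
    }

    @Override
    public MergeSpecification findForcedMerges(
        SegmentInfos segmentInfos,
        int maxSegmentCount,
        Map<SegmentCommitInfo, Boolean> segmentsToMerge)
        throws IOException {
      return inner.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge);
    }

    @Override
    public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
        throws IOException {
      return inner.findForcedDeletesMerges(segmentInfos);
    }

    @Override
    public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo)
        throws IOException {
      return inner.useCompoundFile(infos, mergedInfo);
    }

    @Override
    public void setIndexWriter(IndexWriter writer) {
      inner.setIndexWriter(writer);
    }

    @Override
    public void close() {
      inner.close();
    }
  }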