/**
  * Optimizes the index by forcing a merge of all segments that have deleted documents. This
  * operation may block until all merging completes.
  *
  * @param doWait {@code true} if the call should block until the operation completes.
  * @throws IOException If Lucene throws IO errors.
  */
 @Override
 public void forceMergeDeletes(boolean doWait) throws IOException {
   Log.info("%s merging index segments with deletions", logName);
   indexWriter.forceMergeDeletes(doWait);
   // Commit so the merged (deletion-free) segments become the durable, visible state.
   indexWriter.commit();
   Log.info("%s merging index segments with deletions completed", logName);
 }
예제 #2
0
  /**
   * Opens an {@link IndexWriter} on {@code directory} and forces a merge of all segments that
   * contain deleted documents, physically expunging the deletions.
   *
   * <p>Errors are reported to stderr rather than propagated, matching the original best-effort
   * contract. The writer is always closed, even on failure.
   */
  public void forceDelete() {
    // try-with-resources replaces the manual null-check/close dance; IndexWriter is Closeable.
    // CorruptIndexException and LockObtainFailedException are IOException subclasses, so a
    // single catch covers every failure the original handled.
    try (IndexWriter writer =
        new IndexWriter(
            directory,
            new IndexWriterConfig(Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35)))) {
      writer.forceMergeDeletes();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
예제 #3
0
 /**
  * Forces a merge of all segments containing deleted documents, delegating to the wrapped
  * {@code IndexWriter}. Blocks until the merges complete.
  *
  * @throws IOException if the underlying writer fails.
  */
 public void forceMergeDeletes() throws IOException {
   w.forceMergeDeletes();
 }
예제 #4
0
 /**
  * Triggers a forced merge of the underlying index, optionally flushing afterwards and/or
  * running a segment upgrade.
  *
  * <p>Exactly one merge strategy is chosen: expunge-deletes when {@code onlyExpungeDeletes} is
  * set, a best-effort {@code maybeMerge} when {@code maxNumSegments <= 0}, otherwise a blocking
  * {@code forceMerge} down to {@code maxNumSegments} segments.
  *
  * @param flush whether to flush (or renew the sync commit) once merging finishes.
  * @param maxNumSegments target segment count; values {@code <= 0} mean "merge only if needed".
  * @param onlyExpungeDeletes if {@code true}, merge only segments with deletions (mutually
  *     exclusive with {@code upgrade} — enforced by assertion).
  * @param upgrade if {@code true}, flag the merge policy so the next forced merge upgrades
  *     segments to the current format.
  * @param upgradeOnlyAncientSegments restrict the upgrade to ancient-format segments only.
  * @throws EngineException if the engine fails during the merge.
  * @throws EngineClosedException if the engine has been closed.
  * @throws IOException on Lucene I/O errors.
  */
 @Override
 public void forceMerge(
     final boolean flush,
     int maxNumSegments,
     boolean onlyExpungeDeletes,
     final boolean upgrade,
     final boolean upgradeOnlyAncientSegments)
     throws EngineException, EngineClosedException, IOException {
   /*
    * We do NOT acquire the readlock here since we are waiting on the merges to finish
    * that's fine since the IW.rollback should stop all the threads and trigger an IOException
    * causing us to fail the forceMerge
    *
    * The way we implement upgrades is a bit hackish in the sense that we set an instance
    * variable and that this setting will thus apply to the next forced merge that will be run.
    * This is ok because (1) this is the only place we call forceMerge, (2) we have a single
    * thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler
    * syncs calls to findForcedMerges.
    */
   assert indexWriter.getConfig().getMergePolicy() instanceof ElasticsearchMergePolicy
       : "MergePolicy is " + indexWriter.getConfig().getMergePolicy().getClass().getName();
   ElasticsearchMergePolicy mp =
       (ElasticsearchMergePolicy) indexWriter.getConfig().getMergePolicy();
   // Single-threaded optimize: this lock serializes all forced merges (see comment above).
   optimizeLock.lock();
   try {
     ensureOpen();
     if (upgrade) {
       logger.info(
           "starting segment upgrade upgradeOnlyAncientSegments={}", upgradeOnlyAncientSegments);
       // Instance-level flag picked up by the next forced merge — safe only under optimizeLock.
       mp.setUpgradeInProgress(true, upgradeOnlyAncientSegments);
     }
     store.incRef(); // increment the ref just to ensure nobody closes the store while we optimize
     try {
       if (onlyExpungeDeletes) {
         assert upgrade == false;
         indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/);
       } else if (maxNumSegments <= 0) {
         assert upgrade == false;
         indexWriter.maybeMerge();
       } else {
         indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/);
       }
       if (flush) {
         // Prefer renewing the sync commit; fall back to a real flush if that is not possible.
         if (tryRenewSyncCommit() == false) {
           flush(false, true);
         }
       }
       if (upgrade) {
         logger.info("finished segment upgrade");
       }
     } finally {
       store.decRef();
     }
   } catch (Throwable t) {
     // Let the engine decide whether this failure is fatal, then rethrow to the caller.
     maybeFailEngine("force merge", t);
     throw t;
   } finally {
     try {
       mp.setUpgradeInProgress(
           false, false); // reset it just to make sure we reset it in a case of an error
     } finally {
       optimizeLock.unlock();
     }
   }
 }
예제 #5
0
 /**
  * Forces a merge of all segments containing deleted documents, delegating to the wrapped
  * {@code IndexWriter}.
  *
  * @param doWait {@code true} to block until the merges complete.
  * @throws IOException if the underlying writer fails.
  */
 public void forceMergeDeletes(boolean doWait) throws IOException {
   w.forceMergeDeletes(doWait);
 }