@Override
 public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId)
     throws EngineException {
   // best effort attempt before we acquire locks
   ensureOpen();
   if (indexWriter.hasUncommittedChanges()) {
     logger.trace("can't sync commit [{}]. have pending changes", syncId);
     return SyncedFlushResult.PENDING_OPERATIONS;
   }
   if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
     logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
     return SyncedFlushResult.COMMIT_MISMATCH;
   }
   try (ReleasableLock lock = writeLock.acquire()) {
     ensureOpen();
     ensureCanFlush();
     if (indexWriter.hasUncommittedChanges()) {
       logger.trace("can't sync commit [{}]. have pending changes", syncId);
       return SyncedFlushResult.PENDING_OPERATIONS;
     }
     if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
       logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
       return SyncedFlushResult.COMMIT_MISMATCH;
     }
     logger.trace("starting sync commit [{}]", syncId);
     commitIndexWriter(indexWriter, translog, syncId);
     logger.debug("successfully sync committed. sync id [{}].", syncId);
     lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
     return SyncedFlushResult.SUCCESS;
   } catch (IOException ex) {
     maybeFailEngine("sync commit", ex);
     throw new EngineException(shardId, "failed to sync commit", ex);
   }
 }
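  /*
   * Usage sketch (illustrative only; "engine" and the retry handling are hypothetical caller-side
   * names, not the actual synced-flush service): the handshake is flush first, capture the commit
   * id, then attempt the sync commit against it.
   *
   *   CommitId commitId = engine.flush(false, true);
   *   SyncedFlushResult result = engine.syncFlush(syncId, commitId);
   *   if (result == SyncedFlushResult.COMMIT_MISMATCH) {
   *     // a concurrent flush moved the commit point between the two calls
   *   } else if (result == SyncedFlushResult.PENDING_OPERATIONS) {
   *     // new operations arrived, so the shard is not idle and a sync commit is unsafe
   *   }
   */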
  final boolean tryRenewSyncCommit() {
    boolean renewed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
      ensureOpen();
      ensureCanFlush();
      String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
      // we can only carry the sync id over into a new commit if no operations arrived since the
      // last commit (the translog is empty) while the index writer still has uncommitted changes,
      // e.g. segments produced by a background merge
      if (syncId != null
          && translog.totalOperations() == 0
          && indexWriter.hasUncommittedChanges()) {
        logger.trace("start renewing sync commit [{}]", syncId);
        commitIndexWriter(indexWriter, translog, syncId);
        logger.debug("successfully sync committed. sync id [{}].", syncId);
        lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
        renewed = true;
      }
    } catch (IOException ex) {
      maybeFailEngine("renew sync commit", ex);
      throw new EngineException(shardId, "failed to renew sync commit", ex);
    }
    if (renewed) { // refresh outside of the write lock as this might cause concurrent flushes
      refresh("renew sync commit");
    }
    return renewed;
  }
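  /*
   * Invocation sketch (illustrative, not the actual call site): renewing is useful when a commit
   * is needed but no operations arrived, e.g. after a background merge produced new segments. A
   * hypothetical merge-completion hook could try to carry the sync id over and fall back to a
   * regular flush:
   *
   *   if (tryRenewSyncCommit() == false) {
   *     flush(false, true);
   *   }
   */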
 @Override
 public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
   ensureOpen();
   final byte[] newCommitId;
    /*
     * Unfortunately the lock order is important here. We have to acquire the read lock first,
     * otherwise, if we flush at the end of recovery while holding the write lock, we can deadlock:
     *  Thread 1: flushes via API and gets the flush lock but blocks on the read lock since Thread 2 has the write lock
     *  Thread 2: flushes at the end of recovery holding the write lock and blocks on the flush lock owned by Thread 1
     */
   try (ReleasableLock lock = readLock.acquire()) {
     ensureOpen();
     if (flushLock.tryLock() == false) {
        // if we can't get the flush lock right away, we block if the caller allows it, otherwise we bail out
       if (waitIfOngoing) {
         logger.trace("waiting for in-flight flush to finish");
         flushLock.lock();
         logger.trace("acquired flush lock after blocking");
       } else {
         throw new FlushNotAllowedEngineException(shardId, "already flushing...");
       }
     } else {
       logger.trace("acquired flush lock immediately");
     }
     try {
       if (indexWriter.hasUncommittedChanges() || force) {
         ensureCanFlush();
         try {
           translog.prepareCommit();
           logger.trace("starting commit for flush; commitTranslog=true");
           commitIndexWriter(indexWriter, translog, null);
           logger.trace("finished commit for flush");
            // we need to refresh in order to clear older version values
            refresh("version_table_flush");
            // after refresh, documents can be retrieved from the index so we can now commit the translog
            translog.commit();
         } catch (Throwable e) {
           throw new FlushFailedEngineException(shardId, e);
         }
       }
        /*
         * We have to inc-ref the store here: if the engine is closed by a tragic event, that close
         * does not acquire the write lock and wait for exclusive access, and it may dec the store
         * reference, which can effectively close the store. Unless we hold our own reference we
         * cannot safely use the store to reread the last committed segment infos below.
         */
       store.incRef();
       try {
         // reread the last committed segment infos
         lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
       } catch (Throwable e) {
         if (isClosed.get() == false) {
           logger.warn("failed to read latest segment infos on flush", e);
           if (Lucene.isCorruptionException(e)) {
             throw new FlushFailedEngineException(shardId, e);
           }
         }
       } finally {
         store.decRef();
       }
       newCommitId = lastCommittedSegmentInfos.getId();
     } catch (FlushFailedEngineException ex) {
       maybeFailEngine("flush", ex);
       throw ex;
     } finally {
       flushLock.unlock();
     }
   }
    // We don't have to do this here; we do it defensively to make sure that even if wall clock
    // time is misbehaving (e.g., moves backwards) we will at least still sometimes prune deleted
    // tombstones:
   if (engineConfig.isEnableGcDeletes()) {
     pruneDeletedTombstones();
   }
   return new CommitId(newCommitId);
 }
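  /*
   * Parameter sketch (illustrative caller-side code): force=true commits even if the index writer
   * reports no uncommitted changes; waitIfOngoing=true blocks on a concurrent flush instead of
   * throwing FlushNotAllowedEngineException.
   *
   *   CommitId id = engine.flush(false, true);     // commit only if dirty, wait for in-flight flush
   *   CommitId forced = engine.flush(true, false); // commit unconditionally, fail fast if one is running
   */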