@Override
    public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
        throws Exception {
      try (RecoveriesCollection.RecoveryRef recoveryRef =
          onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) {
        final RecoveryTarget status = recoveryRef.status();
        final RecoveryState.Index indexState = status.state().getIndex();
        if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
          indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
        }

        RateLimiter rateLimiter = recoverySettings.rateLimiter();
        if (rateLimiter != null) {
          long bytes = bytesSinceLastPause.addAndGet(request.content().length());
          if (bytes > rateLimiter.getMinPauseCheckBytes()) {
            // Time to pause
            bytesSinceLastPause.addAndGet(-bytes);
            long throttleTimeInNanos = rateLimiter.pause(bytes);
            indexState.addTargetThrottling(throttleTimeInNanos);
            status.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
          }
        }

        status.writeFileChunk(
            request.metadata(),
            request.position(),
            request.content(),
            request.lastChunk(),
            request.totalTranslogOps());
      }
      channel.sendResponse(TransportResponse.Empty.INSTANCE);
    }
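 // Resets the ongoing recovery so it can restart from scratch and schedules a new attempt on the
 // GENERIC thread pool after the given delay; if the reset itself fails, the recovery is failed
 // and removed before the retry is scheduled.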
 private void retryRecovery(
     final RecoveryTarget recoveryTarget,
     TimeValue retryAfter,
     final StartRecoveryRequest currentRequest) {
   try {
     onGoingRecoveries.resetRecovery(recoveryTarget.recoveryId(), recoveryTarget.shardId());
   } catch (Exception e) {
     onGoingRecoveries.failRecovery(
         recoveryTarget.recoveryId(), new RecoveryFailedException(currentRequest, e), true);
   }
   threadPool.schedule(
       retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(recoveryTarget.recoveryId()));
 }
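 // Same as the private variant above, but first logs the human-readable retry reason at trace
 // level.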
 protected void retryRecovery(
     final RecoveryTarget recoveryTarget,
     final String reason,
     TimeValue retryAfter,
     final StartRecoveryRequest currentRequest) {
   logger.trace(
       "will retry recovery with id [{}] in [{}] (reason [{}])",
       recoveryTarget.recoveryId(),
       retryAfter,
       reason);
   retryRecovery(recoveryTarget, retryAfter, currentRequest);
 }
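 // Test helper (likely part of an index-level replication test case): runs a full peer recovery
 // of the given replica from the in-process primary, marks the recovery as done, and moves the
 // replica's routing entry to started.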
 public void recoverReplica(
     IndexShard replica,
     BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
     boolean markAsRecovering)
     throws IOException {
   final DiscoveryNode pNode = getPrimaryNode();
   final DiscoveryNode rNode = getDiscoveryNode(replica.routingEntry().currentNodeId());
   if (markAsRecovering) {
     replica.markAsRecovering(
         "remote",
         new RecoveryState(replica.shardId(), false, RecoveryState.Type.REPLICA, pNode, rNode));
   } else {
      assertEquals(IndexShardState.RECOVERING, replica.state());
   }
   replica.prepareForIndexRecovery();
   RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
   StartRecoveryRequest request =
       new StartRecoveryRequest(
           replica.shardId(),
           pNode,
           rNode,
           getMetadataSnapshotOrEmpty(replica),
           RecoveryState.Type.REPLICA,
           0);
   RecoverySourceHandler recovery =
       new RecoverySourceHandler(
           primary,
           recoveryTarget,
           request,
           () -> 0L,
           e -> () -> {},
           (int) ByteSizeUnit.MB.toKB(1),
           logger);
   recovery.recoverToTarget();
   recoveryTarget.markAsDone();
   replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
 }
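 // Same as the String-reason variant above, but attaches the Throwable reason to the trace log
 // message.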
 protected void retryRecovery(
     final RecoveryTarget recoveryTarget,
     final Throwable reason,
     TimeValue retryAfter,
     final StartRecoveryRequest currentRequest) {
   logger.trace(
       (Supplier<?>)
           () ->
               new ParameterizedMessage(
                   "will retry recovery with id [{}] in [{}]",
                   recoveryTarget.recoveryId(),
                   retryAfter),
       reason);
   retryRecovery(recoveryTarget, retryAfter, currentRequest);
 }
    @Override
    public void messageReceived(
        final RecoveryTranslogOperationsRequest request, final TransportChannel channel)
        throws IOException {
      try (RecoveriesCollection.RecoveryRef recoveryRef =
          onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) {
        final ClusterStateObserver observer =
            new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
        final RecoveryTarget recoveryTarget = recoveryRef.status();
        try {
          recoveryTarget.indexTranslogOperations(request.operations(), request.totalTranslogOps());
          channel.sendResponse(TransportResponse.Empty.INSTANCE);
        } catch (TranslogRecoveryPerformer.BatchOperationException exception) {
          MapperException mapperException =
              (MapperException) ExceptionsHelper.unwrap(exception, MapperException.class);
          if (mapperException == null) {
            throw exception;
          }
          // in very rare cases a translog replay from primary is processed before a mapping update
          // on this node, which causes local mapping changes since the mapping (cluster state)
          // might not have arrived on this node. we want to wait until these mappings are
          // processed, but we also need to do some maintenance and roll back the number of
          // processed (completed) operations in this batch to ensure accounting is correct.
          logger.trace(
              (Supplier<?>)
                  () ->
                      new ParameterizedMessage(
                          "delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)",
                          exception.completedOperations()),
              exception);
          final RecoveryState.Translog translog = recoveryTarget.state().getTranslog();
          translog.decrementRecoveredOperations(
              exception.completedOperations()); // do the maintenance and roll back completed ops
          // we do not need to use a timeout here since the entire recovery mechanism has an
          // inactivity protection (it will be canceled)
          observer.waitForNextChange(
              new ClusterStateObserver.Listener() {
                @Override
                public void onNewClusterState(ClusterState state) {
                  try {
                    messageReceived(request, channel);
                  } catch (Exception e) {
                    onFailure(e);
                  }
                }

                protected void onFailure(Exception e) {
                  try {
                    channel.sendResponse(e);
                  } catch (IOException e1) {
                    logger.warn("failed to send error back to recovery source", e1);
                  }
                }

                @Override
                public void onClusterServiceClose() {
                  onFailure(
                      new ElasticsearchException(
                          "cluster service was closed while waiting for mapping updates"));
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                  // note that we do not use a timeout (see comment above)
                  onFailure(
                      new ElasticsearchTimeoutException(
                          "timed out waiting for mapping updates (timeout [" + timeout + "])"));
                }
              });
        }
      }
    }
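  // Drives a single peer recovery attempt on the target node: snapshots the local store metadata,
  // sends the start-recovery request to the source node, and marks the recovery as done on
  // success. Failures are either retried (shard not ready, delayed recovery, network errors) or
  // fail the recovery permanently.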
  private void doRecovery(final RecoveryTarget recoveryTarget) {
    assert recoveryTarget.sourceNode() != null : "can't do a recovery without a source node";

    logger.trace("collecting local files for {}", recoveryTarget);
    Store.MetadataSnapshot metadataSnapshot = null;
    try {
      if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) {
        // we are not going to copy any files, so don't bother listing files, potentially running
        // into concurrency issues with the primary changing files underneath us.
        metadataSnapshot = Store.MetadataSnapshot.EMPTY;
      } else {
        metadataSnapshot = recoveryTarget.indexShard().snapshotStoreMetadata();
      }
    } catch (org.apache.lucene.index.IndexNotFoundException e) {
      // happens on an empty folder. no need to log
      metadataSnapshot = Store.MetadataSnapshot.EMPTY;
    } catch (IOException e) {
      logger.warn("error while listing local files, recover as if there are none", e);
      metadataSnapshot = Store.MetadataSnapshot.EMPTY;
    } catch (Exception e) {
      // this will be logged as a warning later on...
      logger.trace("unexpected error while listing local files, failing recovery", e);
      onGoingRecoveries.failRecovery(
          recoveryTarget.recoveryId(),
          new RecoveryFailedException(recoveryTarget.state(), "failed to list local files", e),
          true);
      return;
    }
    logger.trace("{} local file count: [{}]", recoveryTarget, metadataSnapshot.size());
    final StartRecoveryRequest request =
        new StartRecoveryRequest(
            recoveryTarget.shardId(),
            recoveryTarget.sourceNode(),
            clusterService.localNode(),
            metadataSnapshot,
            recoveryTarget.state().getPrimary(),
            recoveryTarget.recoveryId());

    final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
    try {
      logger.trace(
          "[{}][{}] starting recovery from {}",
          request.shardId().getIndex().getName(),
          request.shardId().id(),
          request.sourceNode());
      recoveryTarget.indexShard().prepareForIndexRecovery();
      recoveryTarget
          .cancellableThreads()
          .execute(
              () ->
                  responseHolder.set(
                      transportService
                          .submitRequest(
                              request.sourceNode(),
                              PeerRecoverySourceService.Actions.START_RECOVERY,
                              request,
                              new FutureTransportResponseHandler<RecoveryResponse>() {
                                @Override
                                public RecoveryResponse newInstance() {
                                  return new RecoveryResponse();
                                }
                              })
                          .txGet()));
      final RecoveryResponse recoveryResponse = responseHolder.get();
      assert recoveryResponse != null;
      final TimeValue recoveryTime = new TimeValue(recoveryTarget.state().getTimer().time());
      // do this through ongoing recoveries to remove it from the collection
      onGoingRecoveries.markRecoveryAsDone(recoveryTarget.recoveryId());
      if (logger.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append('[')
            .append(request.shardId().getIndex().getName())
            .append(']')
            .append('[')
            .append(request.shardId().id())
            .append("] ");
        sb.append("recovery completed from ")
            .append(request.sourceNode())
            .append(", took[")
            .append(recoveryTime)
            .append("]\n");
        sb.append("   phase1: recovered_files [")
            .append(recoveryResponse.phase1FileNames.size())
            .append("]")
            .append(" with " + "total_size of [")
            .append(new ByteSizeValue(recoveryResponse.phase1TotalSize))
            .append("]")
            .append(", took [")
            .append(timeValueMillis(recoveryResponse.phase1Time))
            .append("], throttling_wait [")
            .append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime))
            .append(']')
            .append("\n");
        sb.append("         : reusing_files   [")
            .append(recoveryResponse.phase1ExistingFileNames.size())
            .append("] with " + "total_size of [")
            .append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize))
            .append("]\n");
        sb.append("   phase2: start took [")
            .append(timeValueMillis(recoveryResponse.startTime))
            .append("]\n");
        sb.append("         : recovered [")
            .append(recoveryResponse.phase2Operations)
            .append("]")
            .append(" transaction log " + "operations")
            .append(", took [")
            .append(timeValueMillis(recoveryResponse.phase2Time))
            .append("]")
            .append("\n");
        logger.trace("{}", sb);
      } else {
        logger.debug(
            "{} recovery done from [{}], took [{}]",
            request.shardId(),
            recoveryTarget.sourceNode(),
            recoveryTime);
      }
    } catch (CancellableThreads.ExecutionCancelledException e) {
      logger.trace("recovery cancelled", e);
    } catch (Exception e) {
      if (logger.isTraceEnabled()) {
        logger.trace(
            (Supplier<?>)
                () ->
                    new ParameterizedMessage(
                        "[{}][{}] Got exception on recovery",
                        request.shardId().getIndex().getName(),
                        request.shardId().id()),
            e);
      }
      Throwable cause = ExceptionsHelper.unwrapCause(e);
      if (cause instanceof CancellableThreads.ExecutionCancelledException) {
        // this can also come from the source wrapped in a RemoteTransportException
        onGoingRecoveries.failRecovery(
            recoveryTarget.recoveryId(),
            new RecoveryFailedException(request, "source has canceled the" + " recovery", cause),
            false);
        return;
      }
      if (cause instanceof RecoveryEngineException) {
        // unwrap an exception that was thrown as part of the recovery
        cause = cause.getCause();
      }
      // do it twice, in case we have a double transport exception
      cause = ExceptionsHelper.unwrapCause(cause);
      if (cause instanceof RecoveryEngineException) {
        // unwrap an exception that was thrown as part of the recovery
        cause = cause.getCause();
      }

      // here, we would add checks against exceptions that need to be retried
      // (and not removeAndClean in this case)

      if (cause instanceof IllegalIndexShardStateException
          || cause instanceof IndexNotFoundException
          || cause instanceof ShardNotFoundException) {
        // if the target is not ready yet, retry
        retryRecovery(
            recoveryTarget,
            "remote shard not ready",
            recoverySettings.retryDelayStateSync(),
            request);
        return;
      }

      if (cause instanceof DelayRecoveryException) {
        retryRecovery(recoveryTarget, cause, recoverySettings.retryDelayStateSync(), request);
        return;
      }

      if (cause instanceof ConnectTransportException) {
        logger.debug(
            "delaying recovery of {} for [{}] due to networking error [{}]",
            recoveryTarget.shardId(),
            recoverySettings.retryDelayNetwork(),
            cause.getMessage());
        retryRecovery(
            recoveryTarget, cause.getMessage(), recoverySettings.retryDelayNetwork(), request);
        return;
      }

      if (cause instanceof AlreadyClosedException) {
        onGoingRecoveries.failRecovery(
            recoveryTarget.recoveryId(),
            new RecoveryFailedException(request, "source shard is " + "closed", cause),
            false);
        return;
      }
      onGoingRecoveries.failRecovery(
          recoveryTarget.recoveryId(), new RecoveryFailedException(request, e), true);
    }
  }
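  // Applies an initializing shard routing from the cluster state: resends shard-started if the
  // shard is already started or in post-recovery locally, creates the shard if it does not exist
  // yet, and then kicks off either a peer recovery or a recovery from the local store.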
  private void applyInitializingShard(
      final ClusterState state,
      final IndexMetaData indexMetaData,
      final ShardRouting shardRouting) {
    final IndexService indexService = indicesService.indexService(shardRouting.index());
    if (indexService == null) {
      // got deleted on us, ignore
      return;
    }
    final RoutingTable routingTable = state.routingTable();
    final DiscoveryNodes nodes = state.getNodes();
    final int shardId = shardRouting.id();

    if (indexService.hasShard(shardId)) {
      IndexShard indexShard = indexService.shardSafe(shardId);
      if (indexShard.state() == IndexShardState.STARTED
          || indexShard.state() == IndexShardState.POST_RECOVERY) {
        // the master thinks we are initializing, but we are already started or on POST_RECOVERY
        // and waiting for master to confirm a shard started message (either master failover, or a
        // cluster event before we managed to tell the master we started); mark us as started
        if (logger.isTraceEnabled()) {
          logger.trace(
              "{} master marked shard as initializing, but shard has state [{}], resending shard started to {}",
              indexShard.shardId(),
              indexShard.state(),
              nodes.masterNode());
        }
        if (nodes.masterNode() != null) {
          shardStateAction.shardStarted(
              shardRouting,
              indexMetaData.getIndexUUID(),
              "master "
                  + nodes.masterNode()
                  + " marked shard as initializing, but shard state is ["
                  + indexShard.state()
                  + "], mark shard as started",
              nodes.masterNode());
        }
        return;
      } else {
        if (indexShard.ignoreRecoveryAttempt()) {
          logger.trace(
              "ignoring recovery instruction for an existing shard {} (shard state: [{}])",
              indexShard.shardId(),
              indexShard.state());
          return;
        }
      }
    }

    // if we're in peer recovery, try to find out the source node now so in case it fails, we will
    // not create the index shard
    DiscoveryNode sourceNode = null;
    if (isPeerRecovery(shardRouting)) {
      sourceNode = findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
      if (sourceNode == null) {
        logger.trace(
            "ignoring initializing shard {} - no source node can be found.",
            shardRouting.shardId());
        return;
      }
    }

    // if there is no shard, create it
    if (!indexService.hasShard(shardId)) {
      if (failedShards.containsKey(shardRouting.shardId())) {
        if (nodes.masterNode() != null) {
          shardStateAction.resendShardFailed(
              shardRouting,
              indexMetaData.getIndexUUID(),
              "master "
                  + nodes.masterNode()
                  + " marked shard as initializing, but shard is marked as failed, resend shard failure",
              nodes.masterNode());
        }
        return;
      }
      try {
        if (logger.isDebugEnabled()) {
          logger.debug("[{}][{}] creating shard", shardRouting.index(), shardId);
        }
        IndexShard indexShard = indexService.createShard(shardId, shardRouting.primary());
        indexShard.updateRoutingEntry(
            shardRouting, state.blocks().disableStatePersistence() == false);
        indexShard.addFailedEngineListener(failedEngineHandler);
      } catch (IndexShardAlreadyExistsException e) {
        // ignore this, the method call can happen several times
      } catch (Throwable e) {
        failAndRemoveShard(shardRouting, indexService, true, "failed to create shard", e);
        return;
      }
    }
    final IndexShard indexShard = indexService.shardSafe(shardId);

    if (indexShard.ignoreRecoveryAttempt()) {
      // we are already recovering (we can get to this state since the cluster event can happen
      // several times while we recover)
      logger.trace(
          "ignoring recovery instruction for shard {} (shard state: [{}])",
          indexShard.shardId(),
          indexShard.state());
      return;
    }

    if (isPeerRecovery(shardRouting)) {
      try {

        assert sourceNode != null : "peer recovery started but sourceNode is null";

        // we don't mark this one as relocated at the end.
        // For primaries: requests in any case are routed to both copies while it's relocating, and
        //    that way we handle the edge case where it's marked as relocated and we might need to
        //    roll it back...
        // For replicas: we are recovering a backup from a primary
        RecoveryState.Type type =
            shardRouting.primary() ? RecoveryState.Type.RELOCATION : RecoveryState.Type.REPLICA;
        recoveryTarget.startRecovery(
            indexShard,
            type,
            sourceNode,
            new PeerRecoveryListener(shardRouting, indexService, indexMetaData));
      } catch (Throwable e) {
        indexShard.failShard("corrupted preexisting index", e);
        handleRecoveryFailure(indexService, shardRouting, true, e);
      }
    } else {
      final IndexShardRoutingTable indexShardRouting =
          routingTable.index(shardRouting.index()).shard(shardRouting.id());
      indexService
          .shard(shardId)
          .recoverFromStore(
              indexShardRouting,
              new StoreRecoveryService.RecoveryListener() {
                @Override
                public void onRecoveryDone() {
                  shardStateAction.shardStarted(
                      shardRouting, indexMetaData.getIndexUUID(), "after recovery from store");
                }

                @Override
                public void onIgnoreRecovery(String reason) {}

                @Override
                public void onRecoveryFailed(IndexShardRecoveryException e) {
                  handleRecoveryFailure(indexService, shardRouting, true, e);
                }
              });
    }
  }
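  // Reconciles the local node's routing entries from the cluster state with the shards actually
  // present on this node: resends shard failures, removes shards whose allocation or recovery
  // source changed, updates routing entries, and applies initializing shards.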
  private void applyNewOrUpdatedShards(final ClusterChangedEvent event) {
    if (!indicesService.changesAllowed()) {
      return;
    }

    RoutingTable routingTable = event.state().routingTable();
    RoutingNodes.RoutingNodeIterator routingNode =
        event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());

    if (routingNode == null) {
      failedShards.clear();
      return;
    }
    DiscoveryNodes nodes = event.state().nodes();

    for (final ShardRouting shardRouting : routingNode) {
      final IndexService indexService = indicesService.indexService(shardRouting.index());
      if (indexService == null) {
        // got deleted on us, ignore
        continue;
      }
      final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index());
      if (indexMetaData == null) {
        // the index got deleted on the metadata, we will clean it later in the apply deleted method
        // call
        continue;
      }

      final int shardId = shardRouting.id();

      if (!indexService.hasShard(shardId) && shardRouting.started()) {
        if (failedShards.containsKey(shardRouting.shardId())) {
          if (nodes.masterNode() != null) {
            shardStateAction.resendShardFailed(
                shardRouting,
                indexMetaData.getIndexUUID(),
                "master "
                    + nodes.masterNode()
                    + " marked shard as started, but shard has previous failed. resending shard failure.",
                nodes.masterNode());
          }
        } else {
          // the master thinks we are started, but we don't have this shard at all, mark it as
          // failed
          sendFailShard(
              shardRouting,
              indexMetaData.getIndexUUID(),
              "master ["
                  + nodes.masterNode()
                  + "] marked shard as started, but shard has not been created, mark shard as failed",
              null);
        }
        continue;
      }

      IndexShard indexShard = indexService.shard(shardId);
      if (indexShard != null) {
        ShardRouting currentRoutingEntry = indexShard.routingEntry();
        // if the current and global routing are initializing, but are still not the same, it's a
        // different "shard" being allocated. for example: a shard that recovers from one node and
        // now needs to recover to another node, or a replica allocated and then allocating a
        // primary because the primary failed on another node
        boolean shardHasBeenRemoved = false;
        if (currentRoutingEntry.initializing()
            && shardRouting.initializing()
            && !currentRoutingEntry.equals(shardRouting)) {
          logger.debug(
              "[{}][{}] removing shard (different instance of it allocated on this node, current [{}], global [{}])",
              shardRouting.index(),
              shardRouting.id(),
              currentRoutingEntry,
              shardRouting);
          // closing the shard will also cancel any ongoing recovery.
          indexService.removeShard(
              shardRouting.id(),
              "removing shard (different instance of it allocated on this node)");
          shardHasBeenRemoved = true;
        } else if (isPeerRecovery(shardRouting)) {
          final DiscoveryNode sourceNode =
              findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
          // check if there is an existing recovery going on, and if so and the source node is not
          // the same, cancel the recovery to restart it
          final Predicate<RecoveryStatus> shouldCancel =
              new Predicate<RecoveryStatus>() {
                @Override
                public boolean apply(@Nullable RecoveryStatus status) {
                  return status.sourceNode().equals(sourceNode) == false;
                }
              };
          if (recoveryTarget.cancelRecoveriesForShard(
              indexShard.shardId(), "recovery source node changed", shouldCancel)) {
            logger.debug(
                "[{}][{}] removing shard (recovery source changed), current [{}], global [{}])",
                shardRouting.index(),
                shardRouting.id(),
                currentRoutingEntry,
                shardRouting);
            // closing the shard will also cancel any ongoing recovery.
            indexService.removeShard(
                shardRouting.id(), "removing shard (recovery source node changed)");
            shardHasBeenRemoved = true;
          }
        }
        if (shardHasBeenRemoved == false
            && (shardRouting.equals(indexShard.routingEntry()) == false
                || shardRouting.version() > indexShard.routingEntry().version())) {
          if (shardRouting.primary()
              && indexShard.routingEntry().primary() == false
              && shardRouting.initializing()
              && indexShard.allowsPrimaryPromotion() == false) {
            logger.debug("{} reinitialize shard on primary promotion", indexShard.shardId());
            indexService.removeShard(shardId, "promoted to primary");
          } else {
            // if we happen to remove the shardRouting by id above we don't need to jump in here!
            indexShard.updateRoutingEntry(
                shardRouting, event.state().blocks().disableStatePersistence() == false);
          }
        }
      }

      if (shardRouting.initializing()) {
        applyInitializingShard(event.state(), indexMetaData, shardRouting);
      }
    }
  }
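  // Builds the per-shard status for the indices status API, including optional peer/gateway
  // recovery progress and gateway snapshot progress.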
  @Override
  protected ShardStatus shardOperation(IndexShardStatusRequest request)
      throws ElasticSearchException {
    InternalIndexService indexService =
        (InternalIndexService) indicesService.indexServiceSafe(request.index());
    InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId());
    ShardStatus shardStatus = new ShardStatus(indexShard.routingEntry());
    shardStatus.state = indexShard.state();
    try {
      shardStatus.storeSize = indexShard.store().estimateSize();
    } catch (IOException e) {
      // failure to get the store size...
    }
    if (indexShard.state() == IndexShardState.STARTED) {
      // shardStatus.estimatedFlushableMemorySize = indexShard.estimateFlushableMemorySize();
      shardStatus.translogId = indexShard.translog().currentId();
      shardStatus.translogOperations = indexShard.translog().estimatedNumberOfOperations();
      Engine.Searcher searcher = indexShard.searcher();
      try {
        shardStatus.docs = new DocsStatus();
        shardStatus.docs.numDocs = searcher.reader().numDocs();
        shardStatus.docs.maxDoc = searcher.reader().maxDoc();
        shardStatus.docs.deletedDocs = searcher.reader().numDeletedDocs();
      } finally {
        searcher.release();
      }

      shardStatus.mergeStats = indexShard.mergeScheduler().stats();
      shardStatus.refreshStats = indexShard.refreshStats();
      shardStatus.flushStats = indexShard.flushStats();
    }

    if (request.recovery) {
      // check ongoing recovery (from peer or gateway)
      RecoveryStatus peerRecoveryStatus = indexShard.peerRecoveryStatus();
      if (peerRecoveryStatus == null) {
        peerRecoveryStatus = peerRecoveryTarget.peerRecoveryStatus(indexShard.shardId());
      }
      if (peerRecoveryStatus != null) {
        PeerRecoveryStatus.Stage stage;
        switch (peerRecoveryStatus.stage()) {
          case INIT:
            stage = PeerRecoveryStatus.Stage.INIT;
            break;
          case INDEX:
            stage = PeerRecoveryStatus.Stage.INDEX;
            break;
          case TRANSLOG:
            stage = PeerRecoveryStatus.Stage.TRANSLOG;
            break;
          case FINALIZE:
            stage = PeerRecoveryStatus.Stage.FINALIZE;
            break;
          case DONE:
            stage = PeerRecoveryStatus.Stage.DONE;
            break;
          default:
            stage = PeerRecoveryStatus.Stage.INIT;
        }
        shardStatus.peerRecoveryStatus =
            new PeerRecoveryStatus(
                stage,
                peerRecoveryStatus.startTime(),
                peerRecoveryStatus.time(),
                peerRecoveryStatus.phase1TotalSize(),
                peerRecoveryStatus.phase1ExistingTotalSize(),
                peerRecoveryStatus.currentFilesSize(),
                peerRecoveryStatus.currentTranslogOperations());
      }

      IndexShardGatewayService gatewayService =
          indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
      org.elasticsearch.index.gateway.RecoveryStatus gatewayRecoveryStatus =
          gatewayService.recoveryStatus();
      if (gatewayRecoveryStatus != null) {
        GatewayRecoveryStatus.Stage stage;
        switch (gatewayRecoveryStatus.stage()) {
          case INIT:
            stage = GatewayRecoveryStatus.Stage.INIT;
            break;
          case INDEX:
            stage = GatewayRecoveryStatus.Stage.INDEX;
            break;
          case TRANSLOG:
            stage = GatewayRecoveryStatus.Stage.TRANSLOG;
            break;
          case DONE:
            stage = GatewayRecoveryStatus.Stage.DONE;
            break;
          default:
            stage = GatewayRecoveryStatus.Stage.INIT;
        }
        shardStatus.gatewayRecoveryStatus =
            new GatewayRecoveryStatus(
                stage,
                gatewayRecoveryStatus.startTime(),
                gatewayRecoveryStatus.time(),
                gatewayRecoveryStatus.index().totalSize(),
                gatewayRecoveryStatus.index().reusedTotalSize(),
                gatewayRecoveryStatus.index().currentFilesSize(),
                gatewayRecoveryStatus.translog().currentTranslogOperations());
      }
    }

    if (request.snapshot) {
      IndexShardGatewayService gatewayService =
          indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
      SnapshotStatus snapshotStatus = gatewayService.snapshotStatus();
      if (snapshotStatus != null) {
        GatewaySnapshotStatus.Stage stage;
        switch (snapshotStatus.stage()) {
          case DONE:
            stage = GatewaySnapshotStatus.Stage.DONE;
            break;
          case FAILURE:
            stage = GatewaySnapshotStatus.Stage.FAILURE;
            break;
          case TRANSLOG:
            stage = GatewaySnapshotStatus.Stage.TRANSLOG;
            break;
          case FINALIZE:
            stage = GatewaySnapshotStatus.Stage.FINALIZE;
            break;
          case INDEX:
            stage = GatewaySnapshotStatus.Stage.INDEX;
            break;
          default:
            stage = GatewaySnapshotStatus.Stage.NONE;
            break;
        }
        shardStatus.gatewaySnapshotStatus =
            new GatewaySnapshotStatus(
                stage,
                snapshotStatus.startTime(),
                snapshotStatus.time(),
                snapshotStatus.index().totalSize(),
                snapshotStatus.translog().expectedNumberOfOperations());
      }
    }

    return shardStatus;
  }