/**
     * Checks for any cluster state blocks. Returns true if the operation is OK to proceed. If
     * false is returned, no further action is needed; the method takes care of any continuation,
     * either by responding to the listener or by scheduling a retry.
     */
    protected boolean checkBlocks() {
      ClusterBlockException blockException = checkGlobalBlock(observer.observedState());
      if (blockException != null) {
        if (blockException.retryable()) {
          logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
          retry(blockException);
        } else {
          finishAsFailed(blockException);
        }
        return false;
      }
      if (resolveIndex()) {
        internalRequest.concreteIndex(
            indexNameExpressionResolver.concreteSingleIndex(
                observer.observedState(), internalRequest.request()));
      } else {
        internalRequest.concreteIndex(internalRequest.request().index());
      }

      resolveRequest(observer.observedState(), internalRequest, listener);

      blockException = checkRequestBlock(observer.observedState(), internalRequest);
      if (blockException != null) {
        if (blockException.retryable()) {
          logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage());
          retry(blockException);
        } else {
          finishAsFailed(blockException);
        }
        return false;
      }
      return true;
    }
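 /**
  * Checks for index-level blocks (at the WRITE level) on the request's concrete index. Returns
  * null if the operation is not blocked.
  */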
 protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) {
   return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.concreteIndex());
 }
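    /**
     * Resolves the concrete index and the target shard from the observed cluster state, then
     * either executes the operation locally or forwards it to the node that holds the shard.
     * Returns false if a retry was scheduled on the next cluster state change; returns true if no
     * further action is needed by the caller (the operation was dispatched, skipped, or the
     * listener was already notified of a failure).
     */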
    protected boolean doStart() {
      nodes = observer.observedState().nodes();
      try {
        ClusterBlockException blockException = checkGlobalBlock(observer.observedState());
        if (blockException != null) {
          if (blockException.retryable()) {
            retry(blockException);
            return false;
          } else {
            throw blockException;
          }
        }
        internalRequest.concreteIndex(
            observer
                .observedState()
                .metaData()
                .concreteSingleIndex(
                    internalRequest.request().index(), internalRequest.request().indicesOptions()));
        // check if we need to execute, and if not, return
        if (!resolveRequest(observer.observedState(), internalRequest, listener)) {
          return true;
        }
        blockException = checkRequestBlock(observer.observedState(), internalRequest);
        if (blockException != null) {
          if (blockException.retryable()) {
            retry(blockException);
            return false;
          } else {
            throw blockException;
          }
        }
        shardIt = shards(observer.observedState(), internalRequest);
      } catch (Throwable e) {
        listener.onFailure(e);
        return true;
      }

      // no shardIt yet; this can happen between index gateway recovery and shardIt initialization
      if (shardIt.size() == 0) {
        retry(null);
        return false;
      }

      // this transport only makes sense with an iterator that returns a single shard routing
      // (like the primary)
      assert shardIt.size() == 1;

      ShardRouting shard = shardIt.nextOrNull();
      assert shard != null;

      if (!shard.active()) {
        retry(null);
        return false;
      }

      if (!operationStarted.compareAndSet(false, true)) {
        return true;
      }

      internalRequest.request().shardId = shardIt.shardId().id();
      if (shard.currentNodeId().equals(nodes.localNodeId())) {
        internalRequest.request().beforeLocalFork();
        try {
          threadPool
              .executor(executor)
              .execute(
                  new Runnable() {
                    @Override
                    public void run() {
                      try {
                        shardOperation(internalRequest, listener);
                      } catch (Throwable e) {
                        if (retryOnFailure(e)) {
                          operationStarted.set(false);
                          // we already marked the operation as started (and removed the cluster
                          // state listener), so reset the flag and retry, which re-adds the
                          // cluster state listener
                          retry(null);
                        } else {
                          listener.onFailure(e);
                        }
                      }
                    }
                  });
        } catch (Throwable e) {
          if (retryOnFailure(e)) {
            retry(null);
          } else {
            listener.onFailure(e);
          }
        }
      } else {
        DiscoveryNode node = nodes.get(shard.currentNodeId());
        transportService.sendRequest(
            node,
            actionName,
            internalRequest.request(),
            transportOptions(),
            new BaseTransportResponseHandler<Response>() {

              @Override
              public Response newInstance() {
                return newResponse();
              }

              @Override
              public String executor() {
                return ThreadPool.Names.SAME;
              }

              @Override
              public void handleResponse(Response response) {
                listener.onResponse(response);
              }

              @Override
              public void handleException(TransportException exp) {
                // if we got disconnected from the node, or the node / shard is not in the right
                // state (being closed)
                if (exp.unwrapCause() instanceof ConnectTransportException
                    || exp.unwrapCause() instanceof NodeClosedException
                    || retryOnFailure(exp)) {
                  operationStarted.set(false);
                  // we already marked the operation as started (and removed the cluster state
                  // listener), so reset the flag and retry, which re-adds the cluster state
                  // listener
                  retry(null);
                } else {
                  listener.onFailure(exp);
                }
              }
            });
      }
      return true;
    }
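    // A minimal sketch, not part of the original code above, of how doStart() is typically
    // re-driven when it returns false: retry(...) registers a cluster state listener and re-runs
    // doStart() on the next cluster state change. The exact method name, timeout handling and
    // exception choices here are assumptions for illustration only.
    void retrySketch(@Nullable final Throwable failure) {
      observer.waitForNextChange(
          new ClusterStateObserver.Listener() {
            @Override
            public void onNewClusterState(ClusterState state) {
              // a new cluster state arrived; give the operation another chance to start
              doStart();
            }

            @Override
            public void onClusterServiceClose() {
              listener.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
              // we ran out of time waiting for a usable cluster state; fail with the original
              // cause if we have one
              listener.onFailure(
                  failure != null
                      ? failure
                      : new ElasticsearchTimeoutException(
                          "timed out while retrying [" + actionName + "]"));
            }
          });
    }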
    /**
     * The constructor doesn't take any action; it only calculates state. Call {@link #run()} to
     * start replicating.
     */
    public ReplicationPhase(
        ShardIterator originalShardIt,
        ReplicaRequest replicaRequest,
        Response finalResponse,
        ClusterStateObserver observer,
        ShardRouting originalPrimaryShard,
        InternalRequest internalRequest,
        ActionListener<Response> listener,
        Releasable indexShardReference,
        TimeValue shardFailedTimeout) {
      this.replicaRequest = replicaRequest;
      this.listener = listener;
      this.finalResponse = finalResponse;
      this.originalPrimaryShard = originalPrimaryShard;
      this.observer = observer;
      indexMetaData = observer.observedState().metaData().index(internalRequest.concreteIndex());
      this.indexShardReference = indexShardReference;
      this.shardFailedTimeout = shardFailedTimeout;

      ShardRouting shard;
      // we double-check the cluster state: if it changed, we need to use the latest one, because
      // a replica shard may have started its recovery process and we need to replicate to it as
      // well.

      // we also need to check whether the new state has a new primary shard (different from the
      // one we indexed to before) that started and was assigned to another node while the
      // indexing happened. In that case, we want to replicate to the new primary shard as well.
      ClusterState newState = clusterService.state();

      int numberOfUnassignedOrIgnoredReplicas = 0;
      int numberOfPendingShardInstances = 0;
      if (observer.observedState() != newState) {
        observer.reset(newState);
        shardIt = shards(newState, internalRequest);
        while ((shard = shardIt.nextOrNull()) != null) {
          if (shard.primary()) {
            if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId()) == false) {
              // there is a new primary, we'll have to replicate to it.
              numberOfPendingShardInstances++;
            }
            if (shard.relocating()) {
              numberOfPendingShardInstances++;
            }
          } else if (shouldExecuteReplication(indexMetaData.getSettings()) == false) {
            // If the index uses shadow replicas, there is no reason to
            // perform the action on the replica, so skip it and
            // immediately return

            // this delays mapping updates on replicas because they have
            // to wait until they get the new mapping through the cluster
            // state, which is why we recommend pre-defined mappings for
            // indices using shadow replicas
            numberOfUnassignedOrIgnoredReplicas++;
          } else if (shard.unassigned()) {
            numberOfUnassignedOrIgnoredReplicas++;
          } else if (shard.relocating()) {
            // we need to send to two copies
            numberOfPendingShardInstances += 2;
          } else {
            numberOfPendingShardInstances++;
          }
        }
      } else {
        shardIt = originalShardIt;
        shardIt.reset();
        while ((shard = shardIt.nextOrNull()) != null) {
          if (shard.unassigned()) {
            numberOfUnassignedOrIgnoredReplicas++;
          } else if (shard.primary()) {
            if (shard.relocating()) {
              // we have to replicate to the other copy
              numberOfPendingShardInstances += 1;
            }
          } else if (shouldExecuteReplication(indexMetaData.getSettings()) == false) {
            // If the index uses shadow replicas, there is no reason to
            // perform the action on the replica, so skip it and
            // immediately return

            // this delays mapping updates on replicas because they have
            // to wait until they get the new mapping through the cluster
            // state, which is why we recommend pre-defined mappings for
            // indices using shadow replicas
            numberOfUnassignedOrIgnoredReplicas++;
          } else if (shard.relocating()) {
            // we need to send to two copies
            numberOfPendingShardInstances += 2;
          } else {
            numberOfPendingShardInstances++;
          }
        }
      }
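
      // Worked example for the branch above: if the unchanged shard iterator holds a relocating
      // primary and two assigned replicas, one of which is also relocating, then
      // numberOfPendingShardInstances = 1 (primary relocation target) + 2 (relocating replica:
      // source and target copies) + 1 (the remaining replica) = 4, and totalShards below is
      // 1 + 4 + 0 = 5.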

      // one for the primary already done
      this.totalShards = 1 + numberOfPendingShardInstances + numberOfUnassignedOrIgnoredReplicas;
      this.pending = new AtomicInteger(numberOfPendingShardInstances);
    }
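
    // Illustrative sketch only: how the pending counter computed above is typically consumed.
    // Each completed replica operation decrements it, and the final response is sent once it
    // reaches zero. The method name and the simplified finishing logic are assumptions; the real
    // flow also releases resources and reports failed shard copies.
    private void decPendingAndFinishIfNeeded() {
      if (pending.decrementAndGet() <= 0) {
        // all expected shard copies have responded or were skipped up front
        listener.onResponse(finalResponse);
      }
    }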
 /** Performs the operation on the node holding the primary shard. */
 void performOnPrimary(final ShardRouting primary, final ShardIterator shardsIt) {
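    // checkWriteConsistency verifies that enough shard copies are active for the configured write
    // consistency level (for example, QUORUM on 1 primary + 2 replicas requires 2 active copies);
    // a non-null return value means the operation cannot proceed yet and is retried below.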
   final String writeConsistencyFailure = checkWriteConsistency(primary);
   if (writeConsistencyFailure != null) {
     retryBecauseUnavailable(primary.shardId(), writeConsistencyFailure);
     return;
   }
   final ReplicationPhase replicationPhase;
   try {
     indexShardReference = getIndexShardOperationsCounter(primary.shardId());
     PrimaryOperationRequest por =
         new PrimaryOperationRequest(
             primary.id(), internalRequest.concreteIndex(), internalRequest.request());
     Tuple<Response, ReplicaRequest> primaryResponse =
         shardOperationOnPrimary(observer.observedState(), por);
     logger.trace("operation completed on primary [{}]", primary);
     replicationPhase =
         new ReplicationPhase(
             shardsIt,
             primaryResponse.v2(),
             primaryResponse.v1(),
             observer,
             primary,
             internalRequest,
             listener,
             indexShardReference,
             shardFailedTimeout);
   } catch (Throwable e) {
     // shard has not been allocated yet, retry it here
     if (retryPrimaryException(e)) {
       logger.trace(
           "had an error while performing operation on primary ({}), scheduling a retry.",
           e.getMessage());
        // We have to close here because when we retry we will get a new reference on the index
        // shard and increment its operation counter again, and we do not want to increment twice.
       Releasables.close(indexShardReference);
        // We have to reset to null here because when we retry we might never reach the point
        // where a new reference is assigned (for example, if the operation is rejected because
        // the queue is full); without the reset, one of the finish methods would release the
        // already-closed reference again.
       indexShardReference = null;
       retry(e);
       return;
     }
     if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
       if (logger.isTraceEnabled()) {
         logger.trace(
             primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]",
             e);
       }
     } else {
       if (logger.isDebugEnabled()) {
         logger.debug(
             primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]",
             e);
       }
     }
     finishAsFailed(e);
     return;
   }
   finishAndMoveToReplication(replicationPhase);
 }