private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData>
      buildShardStores(DiscoveryNodes nodes, MutableShardRouting shard) {
    Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores =
        cachedStores.get(shard.shardId());
    ObjectOpenHashSet<String> nodesIds;
    if (shardStores == null) {
      shardStores = Maps.newHashMap();
      cachedStores.put(shard.shardId(), shardStores);
      nodesIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
    } else {
      nodesIds = ObjectOpenHashSet.newInstance();
      // evict cached entries for nodes that have left the cluster
      for (Iterator<DiscoveryNode> it = shardStores.keySet().iterator(); it.hasNext(); ) {
        DiscoveryNode node = it.next();
        if (!nodes.nodeExists(node.id())) {
          it.remove();
        }
      }

      // only fetch from data nodes we have no cached entry for yet
      for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
        DiscoveryNode node = cursor.value;
        if (!shardStores.containsKey(node)) {
          nodesIds.add(node.id());
        }
      }
    }

    if (!nodesIds.isEmpty()) {
      String[] nodesIdsArray = nodesIds.toArray(String.class);
      TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData nodesStoreFilesMetaData =
          listShardStoreMetaData
              .list(shard.shardId(), false, nodesIdsArray, listTimeout)
              .actionGet();
      if (logger.isTraceEnabled()) {
        if (nodesStoreFilesMetaData.failures().length > 0) {
          StringBuilder sb =
              new StringBuilder(shard + ": failures when trying to list stores on nodes:");
          for (int i = 0; i < nodesStoreFilesMetaData.failures().length; i++) {
            Throwable cause = ExceptionsHelper.unwrapCause(nodesStoreFilesMetaData.failures()[i]);
            if (cause instanceof ConnectTransportException) {
              // node disconnects are expected while the cluster changes; skip logging them
              continue;
            }
            sb.append("\n    -> ")
                .append(nodesStoreFilesMetaData.failures()[i].getDetailedMessage());
          }
          logger.trace(sb.toString());
        }
      }

      for (TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData nodeStoreFilesMetaData :
          nodesStoreFilesMetaData) {
        if (nodeStoreFilesMetaData.storeFilesMetaData() != null) {
          shardStores.put(
              nodeStoreFilesMetaData.getNode(), nodeStoreFilesMetaData.storeFilesMetaData());
        }
      }
    }

    return shardStores;
  }
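
  // Hedged sketch (hypothetical helper, plain collections) of the incremental fetch
  // pattern above: evict cached results from nodes that left the cluster, then query
  // only the live nodes that have no cached entry yet.
  private static <T> java.util.Set<String> nodesToFetch(
      java.util.Map<String, T> cachedByNodeId, java.util.Set<String> liveNodeIds) {
    // drop results from nodes that are no longer part of the cluster
    cachedByNodeId.keySet().retainAll(liveNodeIds);
    // fetch only from live nodes that have no cached result
    java.util.Set<String> missing = new java.util.HashSet<String>(liveNodeIds);
    missing.removeAll(cachedByNodeId.keySet());
    return missing;
  }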
 private void handleTransportDisconnect(DiscoveryNode node) {
   // ignore stale disconnect events for nodes that already left the cluster
   if (!latestNodes.nodeExists(node.id())) {
     return;
   }
   NodeFD nodeFD = nodesFD.remove(node);
   if (nodeFD == null) {
     // we are not fault-detecting this node, nothing to do
     return;
   }
   if (!running) {
     // the fault detector itself has been stopped
     return;
   }
   nodeFD.running = false;
   if (connectOnNetworkDisconnect) {
     try {
       // try a single verified reconnect; if it succeeds, resume regular pinging
       transportService.connectToNode(node);
       nodesFD.put(node, new NodeFD());
       threadPool.schedule(pingInterval, ThreadPool.Names.SAME, new SendPingRequest(node));
     } catch (Exception e) {
       logger.trace("[node  ] [{}] transport disconnected (with verified connect)", node);
       notifyNodeFailure(node, "transport disconnected (with verified connect)");
     }
   } else {
     logger.trace("[node  ] [{}] transport disconnected", node);
     notifyNodeFailure(node, "transport disconnected");
   }
 }
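
 // Hedged sketch of the flow above as pseudocode (hypothetical names). The three early
 // returns make the handler idempotent against stale disconnect events:
 //
 //   onDisconnect(node):
 //     node already left, not tracked, or detector stopped -> ignore
 //     connectOnNetworkDisconnect                          -> reconnect, re-register
 //                                                            NodeFD, schedule next ping
 //     reconnect failed or feature disabled                -> notifyNodeFailure(node)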
 /** Returns the delta between this set of nodes and the provided (previous) one. */
 public Delta delta(DiscoveryNodes other) {
   List<DiscoveryNode> removed = newArrayList();
   List<DiscoveryNode> added = newArrayList();
   for (DiscoveryNode node : other) {
     if (!this.nodeExists(node.id())) {
       removed.add(node);
     }
   }
   for (DiscoveryNode node : this) {
     if (!other.nodeExists(node.id())) {
       added.add(node);
     }
   }
   DiscoveryNode previousMasterNode = null;
   DiscoveryNode newMasterNode = null;
   if (masterNodeId != null) {
     if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
       previousMasterNode = other.masterNode();
       newMasterNode = masterNode();
     }
   }
   return new Delta(
       previousMasterNode,
       newMasterNode,
       localNodeId,
       ImmutableList.copyOf(removed),
       ImmutableList.copyOf(added));
 }
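
 // Hedged worked example of the convention above: `this` is the current node set and
 // `other` the previous one. With previous = {A, B} and current = {B, C}, removed = {A}
 // (in `other` but not in `this`) and added = {C} (in `this` but not in `other`). The
 // same two-pass diff with plain collections (hypothetical helper, not this class):
 private static <T> List<T> inFirstOnly(
     java.util.Collection<T> first, java.util.Collection<T> second) {
   List<T> result = newArrayList();
   for (T item : first) {
     if (!second.contains(item)) {
       result.add(item);
     }
   }
   return result;
 }
 // Usage: added = inFirstOnly(current, previous); removed = inFirstOnly(previous, current).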
 @Override
 public void messageReceived(MasterPingRequest request, TransportChannel channel)
     throws Exception {
   DiscoveryNodes nodes = nodesProvider.nodes();
   // check that we are really the master the pinging node thinks we are; a mismatch can
   // happen if the old master was killed with "kill -9" and another node reused its port
   if (!request.masterNodeId.equals(nodes.localNodeId())) {
     throw new NotMasterException();
   }
   // if we are no longer master, fail...
   if (!nodes.localNodeMaster()) {
     throw new NoLongerMasterException();
   }
   if (!nodes.nodeExists(request.nodeId)) {
     throw new NodeDoesNotExistOnMasterException();
   }
   // acknowledge the ping; the nodeExists check above guarantees the pinging node is known
   channel.sendResponse(new MasterPingResponseResponse(nodes.nodeExists(request.nodeId)));
 }
 @Override
 public void messageReceived(MasterPingRequest request, TransportChannel channel)
     throws Exception {
   DiscoveryNodes nodes = nodesProvider.nodes();
   // check that we are really the master the pinging node thinks we are; a mismatch can
   // happen if the old master was killed with "kill -9" and another node reused its port
   if (!request.masterNodeId.equals(nodes.localNodeId())) {
     throw new ElasticSearchIllegalStateException(
         "Got ping as master with id [" + request.masterNodeId + "], but local node id is ["
             + nodes.localNodeId() + "]");
   }
   // respond, reporting whether the pinging node is known to this master
   channel.sendResponse(new MasterPingResponseResponse(nodes.nodeExists(request.nodeId)));
 }
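
 // Hedged sketch (hypothetical helper, plain types) of the check order the handlers
 // above share: first confirm the ping really targets this node's id, then that this
 // node is still master, then that the pinging node is known to the cluster state.
 private boolean isValidMasterPing(
     String pingedMasterNodeId, String localNodeId, boolean localNodeMaster, boolean pingerKnown) {
   if (!pingedMasterNodeId.equals(localNodeId)) {
     return false; // wrong master, e.g. a new node reused the old master's port
   }
   if (!localNodeMaster) {
     return false; // this node stepped down in the meantime
   }
   return pingerKnown; // reject pings from nodes this master does not know about
 }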
    void performOnReplica(
        final PrimaryResponse<Response, ReplicaRequest> response,
        final AtomicInteger counter,
        final ShardRouting shard,
        String nodeId) {
      // if the node is gone, it may have failed and the replica will be allocated
      // again; in that case we skip the operation and just let it fail over
      if (!nodes.nodeExists(nodeId)) {
        if (counter.decrementAndGet() == 0) {
          listener.onResponse(response.response());
        }
        return;
      }

      final ReplicaOperationRequest shardRequest =
          new ReplicaOperationRequest(shardIt.shardId().id(), response.replicaRequest());
      if (!nodeId.equals(nodes.localNodeId())) {
        DiscoveryNode node = nodes.get(nodeId);
        transportService.sendRequest(
            node,
            transportReplicaAction,
            shardRequest,
            transportOptions,
            new VoidTransportResponseHandler(ThreadPool.Names.SAME) {
              @Override
              public void handleResponse(VoidStreamable vResponse) {
                finishIfPossible();
              }

              @Override
              public void handleException(TransportException exp) {
                if (!ignoreReplicaException(exp.unwrapCause())) {
                  logger.warn(
                      "Failed to perform " + transportAction + " on replica " + shardIt.shardId(),
                      exp);
                  shardStateAction.shardFailed(
                      shard,
                      "Failed to perform ["
                          + transportAction
                          + "] on replica, message ["
                          + detailedMessage(exp)
                          + "]");
                }
                finishIfPossible();
              }

              private void finishIfPossible() {
                if (counter.decrementAndGet() == 0) {
                  listener.onResponse(response.response());
                }
              }
            });
      } else {
        if (request.operationThreaded()) {
          request.beforeLocalFork();
          threadPool
              .executor(executor)
              .execute(
                  new Runnable() {
                    @Override
                    public void run() {
                      try {
                        shardOperationOnReplica(shardRequest);
                      } catch (Exception e) {
                        if (!ignoreReplicaException(e)) {
                          logger.warn(
                              "Failed to perform "
                                  + transportAction
                                  + " on replica "
                                  + shardIt.shardId(),
                              e);
                          shardStateAction.shardFailed(
                              shard,
                              "Failed to perform ["
                                  + transportAction
                                  + "] on replica, message ["
                                  + detailedMessage(e)
                                  + "]");
                        }
                      }
                      if (counter.decrementAndGet() == 0) {
                        listener.onResponse(response.response());
                      }
                    }
                  });
        } else {
          try {
            shardOperationOnReplica(shardRequest);
          } catch (Exception e) {
            if (!ignoreReplicaException(e)) {
              logger.warn(
                  "Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
              shardStateAction.shardFailed(
                  shard,
                  "Failed to perform ["
                      + transportAction
                      + "] on replica, message ["
                      + detailedMessage(e)
                      + "]");
            }
          }
          if (counter.decrementAndGet() == 0) {
            listener.onResponse(response.response());
          }
        }
      }
    }
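
    // Hedged sketch of the completion pattern used throughout performOnReplica above:
    // the counter starts at the number of outstanding replica operations and the
    // listener fires exactly once, when the last one (success or failure) brings it to
    // zero. Hypothetical helper, plain Java:
    private void onReplicaDone(AtomicInteger pending, Runnable respond) {
      // decrementAndGet is atomic, so exactly one caller observes zero even when
      // replicas complete concurrently on transport and executor threads
      if (pending.decrementAndGet() == 0) {
        respond.run();
      }
    }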
    /** Returns <tt>true</tt> if the action started executing on the primary (or is done). */
    public boolean start(final boolean fromClusterEvent) throws ElasticSearchException {
      final ClusterState clusterState = clusterService.state();
      nodes = clusterState.nodes();
      try {
        ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
        if (blockException != null) {
          if (blockException.retryable()) {
            retry(fromClusterEvent, blockException);
            return false;
          } else {
            throw blockException;
          }
        }
        // check if we need to execute, and if not, return
        if (!resolveRequest(clusterState, request, listener)) {
          return true;
        }
        blockException = checkRequestBlock(clusterState, request);
        if (blockException != null) {
          if (blockException.retryable()) {
            retry(fromClusterEvent, blockException);
            return false;
          } else {
            throw blockException;
          }
        }
        shardIt = shards(clusterState, request);
      } catch (Exception e) {
        listener.onFailure(e);
        return true;
      }

      // no shards yet; we might be in the window between index gateway recovery and
      // shard routing initialization
      if (shardIt.size() == 0) {
        retry(fromClusterEvent, null);
        return false;
      }

      boolean foundPrimary = false;
      ShardRouting shardX;
      while ((shardX = shardIt.nextOrNull()) != null) {
        final ShardRouting shard = shardX;
        // we only deal with the primary shard here...
        if (!shard.primary()) {
          continue;
        }
        if (!shard.active() || !nodes.nodeExists(shard.currentNodeId())) {
          retry(fromClusterEvent, null);
          return false;
        }

        // check write consistency before operating on the primary
        if (checkWriteConsistency) {
          WriteConsistencyLevel consistencyLevel = defaultWriteConsistencyLevel;
          if (request.consistencyLevel() != WriteConsistencyLevel.DEFAULT) {
            consistencyLevel = request.consistencyLevel();
          }
          int requiredNumber = 1;
          if (consistencyLevel == WriteConsistencyLevel.QUORUM && shardIt.size() > 2) {
            // quorum only makes sense with more than 2 copies; with 1 shard and 1
            // replica the quorum is 1, which is what requiredNumber is initialized to
            requiredNumber = (shardIt.size() / 2) + 1;
          } else if (consistencyLevel == WriteConsistencyLevel.ALL) {
            requiredNumber = shardIt.size();
          }

          if (shardIt.sizeActive() < requiredNumber) {
            retry(fromClusterEvent, null);
            return false;
          }
        }

        // a concurrent cluster event may already have started the primary operation
        if (!primaryOperationStarted.compareAndSet(false, true)) {
          return true;
        }

        foundPrimary = true;
        if (shard.currentNodeId().equals(nodes.localNodeId())) {
          if (request.operationThreaded()) {
            request.beforeLocalFork();
            threadPool
                .executor(executor)
                .execute(
                    new Runnable() {
                      @Override
                      public void run() {
                        performOnPrimary(shard.id(), fromClusterEvent, shard, clusterState);
                      }
                    });
          } else {
            performOnPrimary(shard.id(), fromClusterEvent, shard, clusterState);
          }
        } else {
          DiscoveryNode node = nodes.get(shard.currentNodeId());
          transportService.sendRequest(
              node,
              transportAction,
              request,
              transportOptions,
              new BaseTransportResponseHandler<Response>() {

                @Override
                public Response newInstance() {
                  return newResponseInstance();
                }

                @Override
                public String executor() {
                  return ThreadPool.Names.SAME;
                }

                @Override
                public void handleResponse(Response response) {
                  listener.onResponse(response);
                }

                @Override
                public void handleException(TransportException exp) {
                  // if we got disconnected from the node, or the node / shard is not in the right
                  // state (being closed)
                  if (exp.unwrapCause() instanceof ConnectTransportException
                      || exp.unwrapCause() instanceof NodeClosedException
                      || retryPrimaryException(exp)) {
                    primaryOperationStarted.set(false);
                    // we already marked the operation as started when we executed it
                    // (and removed the cluster state listener), so pass false here to
                    // re-register the listener for the retry
                    retry(false, null);
                  } else {
                    listener.onFailure(exp);
                  }
                }
              });
        }
        break;
      }
      // we should never get here, but here we go
      if (!foundPrimary) {
        retry(fromClusterEvent, null);
        return false;
      }
      return true;
    }
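
    // Hedged sketch of the write-consistency arithmetic in start() above (hypothetical
    // helper): QUORUM only kicks in for more than two copies, since with 1 shard and
    // 1 replica (2 copies) a quorum of 2/2 + 1 = 2 would be the same as ALL.
    private int requiredActiveCopies(WriteConsistencyLevel level, int totalCopies) {
      if (level == WriteConsistencyLevel.QUORUM && totalCopies > 2) {
        return (totalCopies / 2) + 1; // e.g. 3 copies -> 2, 5 copies -> 3
      }
      if (level == WriteConsistencyLevel.ALL) {
        return totalCopies;
      }
      return 1; // ONE / DEFAULT, and QUORUM with two or fewer copies
    }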