@Override
 public boolean canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
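   // a value of -1 means there is no limit on concurrent rebalance operations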
   if (clusterConcurrentRebalance == -1) {
     return true;
   }
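   // count how many shards are currently relocating anywhere in the cluster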
   int rebalance = 0;
   for (RoutingNode node : allocation.routingNodes()) {
     for (MutableShardRouting shard : node) {
       if (shard.state() == ShardRoutingState.RELOCATING) {
         rebalance++;
       }
     }
   }
   return rebalance < clusterConcurrentRebalance;
 }
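  /**
   * Cancels the allocation of the shard {@code shardId} on the node {@code node}. Primary shards
   * can only be cancelled when {@code allowPrimary} is set; an exception is also thrown when the
   * shard cannot be found on the node.
   */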
  @Override
  public void execute(RoutingAllocation allocation) throws ElasticsearchException {
    DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
    boolean found = false;
    for (RoutingNodes.RoutingNodeIterator it =
            allocation.routingNodes().routingNodeIter(discoNode.id());
        it.hasNext(); ) {
      MutableShardRouting shardRouting = it.next();
      if (!shardRouting.shardId().equals(shardId)) {
        continue;
      }
      found = true;
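      // a non-null relocatingNodeId means the shard takes part in a relocation, either as the
      // initializing target copy or as the relocating source copy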
      if (shardRouting.relocatingNodeId() != null) {
        if (shardRouting.initializing()) {
          // the shard is initializing and recovering from another node, simply cancel the recovery
          it.remove();
          // and cancel the relocating state on the source shard it is being relocated from
          RoutingNode relocatingFromNode =
              allocation.routingNodes().node(shardRouting.relocatingNodeId());
          if (relocatingFromNode != null) {
            for (MutableShardRouting fromShardRouting : relocatingFromNode) {
              if (fromShardRouting.shardId().equals(shardRouting.shardId())
                  && fromShardRouting.state() == RELOCATING) {
                allocation.routingNodes().cancelRelocation(fromShardRouting);
                break;
              }
            }
          }
        } else if (shardRouting.relocating()) {
          // the shard is relocating to another node: cancel the recovery on the target node
          // and deallocate this copy
          if (!allowPrimary && shardRouting.primary()) {
            // a relocating primary can only be cancelled when allowPrimary is set
            throw new ElasticsearchIllegalArgumentException(
                "[cancel_allocation] can't cancel "
                    + shardId
                    + " on node "
                    + discoNode
                    + ", shard is primary and relocating to another node");
          }
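          // move the relocating source copy back to the unassigned list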
          it.moveToUnassigned();
          // now, go and find the shard that is initializing on the target node, and cancel it as
          // well...
          RoutingNodes.RoutingNodeIterator initializingNode =
              allocation.routingNodes().routingNodeIter(shardRouting.relocatingNodeId());
          if (initializingNode != null) {
            while (initializingNode.hasNext()) {
              MutableShardRouting initializingShardRouting = initializingNode.next();
              if (initializingShardRouting.shardId().equals(shardRouting.shardId())
                  && initializingShardRouting.state() == INITIALIZING) {
                initializingNode.remove();
              }
            }
          }
        }
      } else {
        // the shard is not relocating; it is either started or initializing, so just cancel it
        // and move on...
        if (!allowPrimary && shardRouting.primary()) {
          // a non-relocating primary can only be cancelled when allowPrimary is set
          throw new ElasticsearchIllegalArgumentException(
              "[cancel_allocation] can't cancel "
                  + shardId
                  + " on node "
                  + discoNode
                  + ", shard is primary and started or initializing");
        }
        it.remove();
        allocation
            .routingNodes()
            .unassigned()
            .add(
                new MutableShardRouting(
                    shardRouting.index(),
                    shardRouting.id(),
                    null,
                    shardRouting.primary(),
                    ShardRoutingState.UNASSIGNED,
                    shardRouting.version() + 1));
      }
    }
    if (!found) {
      throw new ElasticsearchIllegalArgumentException(
          "[cancel_allocation] can't cancel "
              + shardId
              + ", failed to find it on node "
              + discoNode);
    }
  }
  /**
   * Applies the relevant logic to handle a failed shard. Returns <tt>true</tt> if changes happened
   * that require relocation.
   */
  private boolean applyFailedShard(
      RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList) {
    // create a copy of the failed shard, since we assume we can change possible references to it
    // without changing the state of the failed shard
    failedShard = new ImmutableShardRouting(failedShard);

    IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index());
    if (indexRoutingTable == null) {
      return false;
    }

    RoutingNodes routingNodes = allocation.routingNodes();
    boolean dirty = false;
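    // dirty is set whenever the routing nodes are actually mutated by this method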
    if (failedShard.relocatingNodeId() != null) {
      // the shard has relocatingNodeId set: it is either initializing (recovering from another
      // node) or relocating (moving to another node)
      if (failedShard.state() == INITIALIZING) {
        // the shard is initializing and recovering from another node
        // first, we need to cancel the current node that is being initialized
        RoutingNodes.RoutingNodeIterator initializingNode =
            routingNodes.routingNodeIter(failedShard.currentNodeId());
        if (initializingNode != null) {
          while (initializingNode.hasNext()) {
            MutableShardRouting shardRouting = initializingNode.next();
            if (shardRouting.equals(failedShard)) {
              dirty = true;
              initializingNode.remove();
              if (addToIgnoreList) {
                // make sure we ignore this shard on the relevant node
                allocation.addIgnoreShardForNode(
                    failedShard.shardId(), failedShard.currentNodeId());
              }

              break;
            }
          }
        }
        if (dirty) {
          // now, find the node that we are relocating *from*, and cancel its relocation
          RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId());
          if (relocatingFromNode != null) {
            for (MutableShardRouting shardRouting : relocatingFromNode) {
              if (shardRouting.shardId().equals(failedShard.shardId())
                  && shardRouting.relocating()) {
                dirty = true;
                routingNodes.cancelRelocation(shardRouting);
                break;
              }
            }
          }
        } else {
          logger.debug("failed shard {} not found in routingNodes, ignoring it", failedShard);
        }
        return dirty;
      } else if (failedShard.state() == RELOCATING) {
        // the shard is in the RELOCATING state, meaning this is the source copy it is relocating
        // from: first, remove it from its current node and add it back to the unassigned shards
        // list, then cancel the initializing copy on the target node...
        RoutingNodes.RoutingNodeIterator relocatingFromNode =
            routingNodes.routingNodeIter(failedShard.currentNodeId());
        if (relocatingFromNode != null) {
          while (relocatingFromNode.hasNext()) {
            MutableShardRouting shardRouting = relocatingFromNode.next();
            if (shardRouting.equals(failedShard)) {
              dirty = true;
              relocatingFromNode.remove();
              if (addToIgnoreList) {
                // make sure we ignore this shard on the relevant node
                allocation.addIgnoreShardForNode(
                    failedShard.shardId(), failedShard.currentNodeId());
              }

              routingNodes
                  .unassigned()
                  .add(
                      new MutableShardRouting(
                          failedShard.index(),
                          failedShard.id(),
                          null,
                          failedShard.primary(),
                          ShardRoutingState.UNASSIGNED,
                          failedShard.version() + 1));
              break;
            }
          }
        }
        if (dirty) {
          // next, find the initializing copy on the target node it was recovering to, and
          // remove it...
          RoutingNodes.RoutingNodeIterator initializingNode =
              routingNodes.routingNodeIter(failedShard.relocatingNodeId());
          if (initializingNode != null) {
            while (initializingNode.hasNext()) {
              MutableShardRouting shardRouting = initializingNode.next();
              if (shardRouting.shardId().equals(failedShard.shardId())
                  && shardRouting.state() == INITIALIZING) {
                dirty = true;
                initializingNode.remove();
              }
            }
          }
        } else {
          logger.debug("failed shard {} not found in routingNodes, ignoring it", failedShard);
        }
      } else {
        throw new ElasticsearchIllegalStateException(
            "illegal state for a failed shard, relocating node id is set, but state does not match: "
                + failedShard);
      }
    } else {
      // the shard is not relocating; it is either started or initializing, so just cancel it
      // and move on...
      RoutingNodes.RoutingNodeIterator node =
          routingNodes.routingNodeIter(failedShard.currentNodeId());
      if (node != null) {
        while (node.hasNext()) {
          MutableShardRouting shardRouting = node.next();
          if (shardRouting.equals(failedShard)) {
            dirty = true;
            if (addToIgnoreList) {
              // make sure we ignore this shard on the relevant node
              allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
            }
            node.remove();
            // move all the shards matching the failed shard to the end of the unassigned list,
            // so we give other allocations a chance and don't create poison failed allocations
            // that can keep other shards from being allocated (because of limits applied on how
            // many shards we can start per node)
            List<MutableShardRouting> shardsToMove = Lists.newArrayList();
            for (Iterator<MutableShardRouting> unassignedIt = routingNodes.unassigned().iterator();
                unassignedIt.hasNext(); ) {
              MutableShardRouting unassignedShardRouting = unassignedIt.next();
              if (unassignedShardRouting.shardId().equals(failedShard.shardId())) {
                unassignedIt.remove();
                shardsToMove.add(unassignedShardRouting);
              }
            }
            if (!shardsToMove.isEmpty()) {
              routingNodes.unassigned().addAll(shardsToMove);
            }

            routingNodes
                .unassigned()
                .add(
                    new MutableShardRouting(
                        failedShard.index(),
                        failedShard.id(),
                        null,
                        null,
                        failedShard.restoreSource(),
                        failedShard.primary(),
                        ShardRoutingState.UNASSIGNED,
                        failedShard.version() + 1));

            break;
          }
        }
      }
      if (!dirty) {
        logger.debug("failed shard {} not found in routingNodes, ignoring it", failedShard);
      }
    }
    return dirty;
  }