private void applyCleanedIndices(final ClusterChangedEvent event) {
   // handle closed indices, since they are not allocated on a node once they are closed,
   // so applyDeletedIndices might not take them into account
   for (IndexService indexService : indicesService) {
     String index = indexService.index().getName();
     IndexMetaData indexMetaData = event.state().metaData().index(index);
     if (indexMetaData != null && indexMetaData.state() == IndexMetaData.State.CLOSE) {
       for (Integer shardId : indexService.shardIds()) {
         logger.debug("[{}][{}] removing shard (index is closed)", index, shardId);
         try {
           indexService.removeShard(shardId, "removing shard (index is closed)");
         } catch (Throwable e) {
           logger.warn("[{}] failed to remove shard (index is closed)", e, index);
         }
       }
     }
   }
   for (IndexService indexService : indicesService) {
     String index = indexService.index().getName();
     if (indexService.shardIds().isEmpty()) {
       if (logger.isDebugEnabled()) {
         logger.debug("[{}] cleaning index (no shards allocated)", index);
       }
       // clean the index
       removeIndex(index, "removing index (no shards allocated)");
     }
   }
 }
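  /**
   * Removes shards that exist locally but are no longer assigned to this node in the new routing
   * table. Shards of closed indices are removed with a dedicated reason; other shards are simply
   * removed here, since their on-disk data is cleaned up by the indices store once all shards are
   * allocated elsewhere.
   */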
 private void applyDeletedShards(final ClusterChangedEvent event) {
   RoutingNodes.RoutingNodeIterator routingNode =
       event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
   if (routingNode == null) {
     return;
   }
   IntHashSet newShardIds = new IntHashSet();
   for (IndexService indexService : indicesService) {
     String index = indexService.index().name();
     IndexMetaData indexMetaData = event.state().metaData().index(index);
     if (indexMetaData == null) {
       continue;
     }
      // now, go over and delete shards that need to be deleted
     newShardIds.clear();
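      // collect the ids of shards of this index that are assigned to this node in the new routing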
     for (ShardRouting shard : routingNode) {
       if (shard.index().equals(index)) {
         newShardIds.add(shard.id());
       }
     }
     for (Integer existingShardId : indexService.shardIds()) {
       if (!newShardIds.contains(existingShardId)) {
         if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
           if (logger.isDebugEnabled()) {
             logger.debug("[{}][{}] removing shard (index is closed)", index, existingShardId);
           }
           indexService.removeShard(existingShardId, "removing shard (index is closed)");
         } else {
           // we can just remove the shard, without cleaning it locally, since we will clean it
           // when all shards are allocated in the IndicesStore
           if (logger.isDebugEnabled()) {
             logger.debug("[{}][{}] removing shard (not allocated)", index, existingShardId);
           }
           indexService.removeShard(existingShardId, "removing shard (not allocated)");
         }
       }
     }
   }
 }
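  /**
   * Closes this index service: removes all shards, logging (but not propagating) per-shard
   * failures, and then releases the remaining index-level resources. The {@code delete} flag
   * additionally marks this index service as deleted.
   */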
 public synchronized void close(final String reason, boolean delete) throws IOException {
   if (closed.compareAndSet(false, true)) {
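      // record whether this close is part of an index deletion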
     deleted.compareAndSet(false, delete);
     try {
       final Set<Integer> shardIds = shardIds();
       for (final int shardId : shardIds) {
         try {
           removeShard(shardId, reason);
         } catch (Exception e) {
           logger.warn("failed to close shard", e);
         }
       }
     } finally {
       IOUtils.close(
           bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask);
     }
   }
 }
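  /**
   * Applies a cluster state change to this node. Under the mutex, it either tears down all local
   * shards and indices when state persistence is disabled, or applies the new state step by step:
   * failed-shard cleanup, deleted/new indices, mappings, aliases, new or updated shards, deleted
   * shards, cleaned indices, and settings.
   */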
  @Override
  public void clusterChanged(final ClusterChangedEvent event) {
    if (!indicesService.changesAllowed()) {
      return;
    }

    if (!lifecycle.started()) {
      return;
    }

    synchronized (mutex) {
       // we need to clean the shards and indices we have on this node, since we are going to
       // recover them again once state persistence is disabled (no master / not recovered)
       // TODO: this feels a bit hacky here, a block disables state persistence, and then we
       // clean the allocated shards, maybe another flag in blocks?
      if (event.state().blocks().disableStatePersistence()) {
        for (IndexService indexService : indicesService) {
          String index = indexService.index().getName();
          for (Integer shardId : indexService.shardIds()) {
            logger.debug("[{}][{}] removing shard (disabled block persistence)", index, shardId);
            try {
              indexService.removeShard(shardId, "removing shard (disabled block persistence)");
            } catch (Throwable e) {
              logger.warn("[{}] failed to remove shard (disabled block persistence)", e, index);
            }
          }
          removeIndex(index, "cleaning index (disabled block persistence)");
        }
        return;
      }

      cleanFailedShards(event);

      applyDeletedIndices(event);
      applyNewIndices(event);
      applyMappings(event);
      applyAliases(event);
      applyNewOrUpdatedShards(event);
      applyDeletedShards(event);
      applyCleanedIndices(event);
      applySettings(event);
    }
  }
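  /**
   * Removes the given shard from its index service after a failure and, if
   * {@code sendShardFailure} is set, notifies the master that the shard has failed. Removal
   * errors are logged but not rethrown.
   */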
 private void failAndRemoveShard(
     ShardRouting shardRouting,
     IndexService indexService,
     boolean sendShardFailure,
     String message,
     @Nullable Throwable failure) {
   if (indexService.hasShard(shardRouting.getId())) {
     try {
       indexService.removeShard(shardRouting.getId(), message);
     } catch (ShardNotFoundException e) {
        // the shard is already gone (e.g. the index service got closed on us), ignore it
     } catch (Throwable e1) {
       logger.warn(
           "[{}][{}] failed to remove shard after failure ([{}])",
           e1,
           shardRouting.getIndex(),
           shardRouting.getId(),
           message);
     }
   }
   if (sendShardFailure) {
     sendFailShard(shardRouting, indexService.indexUUID(), message, failure);
   }
 }
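   /**
    * Reconciles the shards on this node with the routing entries assigned to it in the new
    * cluster state: reports shards the master wrongly considers started, removes and re-creates
    * shards whose allocation or recovery source changed, updates routing entries on existing
    * shards, and triggers creation/recovery of initializing shards.
    */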
  private void applyNewOrUpdatedShards(final ClusterChangedEvent event) {
    if (!indicesService.changesAllowed()) {
      return;
    }

    RoutingTable routingTable = event.state().routingTable();
    RoutingNodes.RoutingNodeIterator routingNode =
        event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());

    if (routingNode == null) {
      failedShards.clear();
      return;
    }
    DiscoveryNodes nodes = event.state().nodes();

    for (final ShardRouting shardRouting : routingNode) {
      final IndexService indexService = indicesService.indexService(shardRouting.index());
      if (indexService == null) {
        // got deleted on us, ignore
        continue;
      }
      final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index());
      if (indexMetaData == null) {
        // the index was deleted from the metadata; it will be cleaned up later by the
        // apply-deleted handling
        continue;
      }

      final int shardId = shardRouting.id();

      if (!indexService.hasShard(shardId) && shardRouting.started()) {
        if (failedShards.containsKey(shardRouting.shardId())) {
          if (nodes.masterNode() != null) {
            shardStateAction.resendShardFailed(
                shardRouting,
                indexMetaData.getIndexUUID(),
                "master "
                    + nodes.masterNode()
                    + " marked shard as started, but shard has previous failed. resending shard failure.",
                nodes.masterNode());
          }
        } else {
          // the master thinks we are started, but we don't have this shard at all, mark it as
          // failed
          sendFailShard(
              shardRouting,
              indexMetaData.getIndexUUID(),
              "master ["
                  + nodes.masterNode()
                  + "] marked shard as started, but shard has not been created, mark shard as failed",
              null);
        }
        continue;
      }

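       // if the shard already exists locally, decide whether the new routing entry can be
       // applied in place or whether the shard has to be removed and re-created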
      IndexShard indexShard = indexService.shard(shardId);
      if (indexShard != null) {
        ShardRouting currentRoutingEntry = indexShard.routingEntry();
         // if the current and global routing are initializing, but are still not the same, it's a
         // different "shard" being allocated, for example: a shard that recovers from one node and
         // now needs to recover to another node, or a replica allocated and then allocating a
         // primary because the primary failed on another node
        boolean shardHasBeenRemoved = false;
        if (currentRoutingEntry.initializing()
            && shardRouting.initializing()
            && !currentRoutingEntry.equals(shardRouting)) {
          logger.debug(
              "[{}][{}] removing shard (different instance of it allocated on this node, current [{}], global [{}])",
              shardRouting.index(),
              shardRouting.id(),
              currentRoutingEntry,
              shardRouting);
          // closing the shard will also cancel any ongoing recovery.
          indexService.removeShard(
              shardRouting.id(),
              "removing shard (different instance of it allocated on this node)");
          shardHasBeenRemoved = true;
        } else if (isPeerRecovery(shardRouting)) {
          final DiscoveryNode sourceNode =
              findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
           // check if there is an existing recovery going on, and if so, and the source node is
           // not the same, cancel the recovery to restart it
          final Predicate<RecoveryStatus> shouldCancel =
              new Predicate<RecoveryStatus>() {
                @Override
                public boolean apply(@Nullable RecoveryStatus status) {
                  return status.sourceNode().equals(sourceNode) == false;
                }
              };
          if (recoveryTarget.cancelRecoveriesForShard(
              indexShard.shardId(), "recovery source node changed", shouldCancel)) {
            logger.debug(
                "[{}][{}] removing shard (recovery source changed), current [{}], global [{}])",
                shardRouting.index(),
                shardRouting.id(),
                currentRoutingEntry,
                shardRouting);
            // closing the shard will also cancel any ongoing recovery.
            indexService.removeShard(
                shardRouting.id(), "removing shard (recovery source node changed)");
            shardHasBeenRemoved = true;
          }
        }
        if (shardHasBeenRemoved == false
            && (shardRouting.equals(indexShard.routingEntry()) == false
                || shardRouting.version() > indexShard.routingEntry().version())) {
          if (shardRouting.primary()
              && indexShard.routingEntry().primary() == false
              && shardRouting.initializing()
              && indexShard.allowsPrimaryPromotion() == false) {
            logger.debug("{} reinitialize shard on primary promotion", indexShard.shardId());
            indexService.removeShard(shardId, "promoted to primary");
          } else {
            // if we happen to remove the shardRouting by id above we don't need to jump in here!
            indexShard.updateRoutingEntry(
                shardRouting, event.state().blocks().disableStatePersistence() == false);
          }
        }
      }

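       // finally, if the routing entry is initializing on this node, create the shard and start
       // its recovery via applyInitializingShard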
      if (shardRouting.initializing()) {
        applyInitializingShard(event.state(), indexMetaData, shardRouting);
      }
    }
  }