@Override
public void onFailedEngine(final ShardId shardId, final String reason, final @Nullable Throwable failure) {
    ShardRouting shardRouting = null;
    final IndexService indexService = indicesService.indexService(shardId.index().name());
    if (indexService != null) {
        IndexShard indexShard = indexService.shard(shardId.id());
        if (indexShard != null) {
            shardRouting = indexShard.routingEntry();
        }
    }
    if (shardRouting == null) {
        logger.warn("[{}][{}] engine failed, but can't find index shard. failure reason: [{}]", failure,
                shardId.index().name(), shardId.id(), reason);
        return;
    }
    final ShardRouting fShardRouting = shardRouting;
    // hand the cleanup off to the generic thread pool so the failing engine thread is never blocked
    threadPool.generic().execute(new Runnable() {
        @Override
        public void run() {
            // serialize against other cluster-state-driven shard mutations on the same mutex
            synchronized (mutex) {
                failAndRemoveShard(fShardRouting, indexService, true, "engine failure, reason [" + reason + "]", failure);
            }
        }
    });
}
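/*
 * Illustrative sketch (not part of the original class): onFailedEngine above dispatches the
 * fail-and-remove work to a generic thread pool and serializes it behind a shared mutex, so an
 * engine failure never blocks the reporting thread and never races with cluster-state updates.
 * A minimal, self-contained version of that pattern using only JDK types might look like the
 * following; FailureHandlerSketch, failAndRemove, and the single mutex object are assumptions
 * made for illustration, not names from the original code.
 */
class FailureHandlerSketch {
    private final java.util.concurrent.ExecutorService genericPool =
            java.util.concurrent.Executors.newCachedThreadPool();
    private final Object mutex = new Object(); // stands in for the lock guarding shard state changes

    void onFailure(final String shardDescription, final Throwable failure) {
        // run the expensive cleanup off-thread, but under the shared lock
        genericPool.execute(new Runnable() {
            @Override
            public void run() {
                synchronized (mutex) {
                    failAndRemove(shardDescription, failure); // hypothetical cleanup hook
                }
            }
        });
    }

    void failAndRemove(String shardDescription, Throwable failure) {
        System.out.println("failing " + shardDescription + ": " + failure);
    }
}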
private void applyNewOrUpdatedShards(final ClusterChangedEvent event) {
    if (!indicesService.changesAllowed()) {
        return;
    }

    RoutingTable routingTable = event.state().routingTable();
    RoutingNodes.RoutingNodeIterator routingNode =
            event.state().readOnlyRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());

    if (routingNode == null) {
        failedShards.clear();
        return;
    }

    DiscoveryNodes nodes = event.state().nodes();

    for (final ShardRouting shardRouting : routingNode) {
        final IndexService indexService = indicesService.indexService(shardRouting.index());
        if (indexService == null) {
            // got deleted on us, ignore
            continue;
        }
        final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index());
        if (indexMetaData == null) {
            // the index got deleted from the metadata; we will clean it up later in the apply-deleted method call
            continue;
        }

        final int shardId = shardRouting.id();

        if (!indexService.hasShard(shardId) && shardRouting.started()) {
            if (failedShards.containsKey(shardRouting.shardId())) {
                // the shard already failed here once; remind the master instead of failing it again
                if (nodes.masterNode() != null) {
                    shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(),
                            "master " + nodes.masterNode() + " marked shard as started, but shard has previously failed. resending shard failure.",
                            nodes.masterNode());
                }
            } else {
                // the master thinks we are started, but we don't have this shard at all, mark it as failed
                sendFailShard(shardRouting, indexMetaData.getIndexUUID(),
                        "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed",
                        null);
            }
            continue;
        }

        IndexShard indexShard = indexService.shard(shardId);
        if (indexShard != null) {
            ShardRouting currentRoutingEntry = indexShard.routingEntry();
            // if the current and global routing are both initializing but are still not the same, it's a
            // different "shard" being allocated. For example: a shard that recovers from one node and now
            // needs to recover to another node, or a replica allocated and then allocating a primary
            // because the primary failed on another node
            boolean shardHasBeenRemoved = false;
            if (currentRoutingEntry.initializing() && shardRouting.initializing() && !currentRoutingEntry.equals(shardRouting)) {
                logger.debug("[{}][{}] removing shard (different instance of it allocated on this node, current [{}], global [{}])",
                        shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
                // closing the shard will also cancel any ongoing recovery.
                indexService.removeShard(shardRouting.id(), "removing shard (different instance of it allocated on this node)");
                shardHasBeenRemoved = true;
            } else if (isPeerRecovery(shardRouting)) {
                final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
                // check if there is an existing recovery running and, if its source node no longer matches
                // the routing, cancel the recovery so it can restart from the new source
                final Predicate<RecoveryStatus> shouldCancel = new Predicate<RecoveryStatus>() {
                    @Override
                    public boolean apply(@Nullable RecoveryStatus status) {
                        return status.sourceNode().equals(sourceNode) == false;
                    }
                };
                if (recoveryTarget.cancelRecoveriesForShard(indexShard.shardId(), "recovery source node changed", shouldCancel)) {
                    logger.debug("[{}][{}] removing shard (recovery source changed), current [{}], global [{}])",
                            shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
                    // closing the shard will also cancel any ongoing recovery.
                    indexService.removeShard(shardRouting.id(), "removing shard (recovery source node changed)");
                    shardHasBeenRemoved = true;
                }
            }
            if (shardHasBeenRemoved == false
                    && (shardRouting.equals(indexShard.routingEntry()) == false || shardRouting.version() > indexShard.routingEntry().version())) {
                if (shardRouting.primary() && indexShard.routingEntry().primary() == false
                        && shardRouting.initializing() && indexShard.allowsPrimaryPromotion() == false) {
                    logger.debug("{} reinitialize shard on primary promotion", indexShard.shardId());
                    indexService.removeShard(shardId, "promoted to primary");
                } else {
                    // if we happened to remove the shard by id above, we don't need to jump in here!
                    indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
                }
            }
        }

        if (shardRouting.initializing()) {
            applyInitializingShard(event.state(), indexMetaData, shardRouting);
        }
    }
}
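/*
 * Illustrative sketch (not part of the original class): cancelRecoveriesForShard above takes a
 * Predicate so the caller cancels only the recoveries whose source node no longer matches the
 * routing table, leaving valid recoveries untouched. A minimal stand-in for that selective
 * cancellation, assuming a simple map of ongoing recoveries (the Recovery type, the map, and
 * cancel() are hypothetical names introduced here):
 */
class RecoveryCancellationSketch {
    static class Recovery {
        final String sourceNode;
        Recovery(String sourceNode) { this.sourceNode = sourceNode; }
        void cancel(String reason) { System.out.println("cancelled (" + reason + ")"); }
    }

    private final java.util.Map<String, Recovery> ongoing = new java.util.HashMap<String, Recovery>();

    /** Cancels every recovery matched by the predicate; returns true if anything was cancelled. */
    boolean cancelRecoveries(String reason, com.google.common.base.Predicate<Recovery> shouldCancel) {
        boolean cancelled = false;
        java.util.Iterator<Recovery> it = ongoing.values().iterator();
        while (it.hasNext()) {
            Recovery recovery = it.next();
            if (shouldCancel.apply(recovery)) {
                recovery.cancel(reason);
                it.remove();
                cancelled = true;
            }
        }
        return cancelled;
    }
}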
private void applyMappings(ClusterChangedEvent event) {
    // go over and update mappings
    for (IndexMetaData indexMetaData : event.state().metaData()) {
        if (!indicesService.hasIndex(indexMetaData.index())) {
            // we only create / update here
            continue;
        }
        List<String> typesToRefresh = Lists.newArrayList();
        String index = indexMetaData.index();
        IndexService indexService = indicesService.indexService(index);
        if (indexService == null) {
            // got deleted on us, ignore (closing the node)
            return;
        }
        try {
            MapperService mapperService = indexService.mapperService();
            // first, go over and update the _default_ mapping (if it exists)
            if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
                boolean requireRefresh = processMapping(index, mapperService, MapperService.DEFAULT_MAPPING,
                        indexMetaData.mapping(MapperService.DEFAULT_MAPPING).source());
                if (requireRefresh) {
                    typesToRefresh.add(MapperService.DEFAULT_MAPPING);
                }
            }
            // go over and add the relevant mappings (or update them)
            for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
                MappingMetaData mappingMd = cursor.value;
                String mappingType = mappingMd.type();
                CompressedXContent mappingSource = mappingMd.source();
                if (mappingType.equals(MapperService.DEFAULT_MAPPING)) {
                    // we processed _default_ first
                    continue;
                }
                boolean requireRefresh = processMapping(index, mapperService, mappingType, mappingSource);
                if (requireRefresh) {
                    typesToRefresh.add(mappingType);
                }
            }
            if (!typesToRefresh.isEmpty() && sendRefreshMapping) {
                nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
                        new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.indexUUID(),
                                typesToRefresh.toArray(new String[typesToRefresh.size()]),
                                event.state().nodes().localNodeId()));
            }
        } catch (Throwable t) {
            // if we failed to apply the mappings anywhere, we need to fail the shards for this index.
            // note that we safeguard by processing the mappings on the master, or on the node the
            // mapping was introduced on, so this failure typically means a wrong node-level
            // configuration or something similar
            for (IndexShard indexShard : indexService) {
                ShardRouting shardRouting = indexShard.routingEntry();
                failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t);
            }
        }
    }
}
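/*
 * Illustrative sketch (not part of the original class): applyMappings above applies the
 * _default_ mapping before any concrete type, collects every type whose applied mapping
 * required a refresh, and then sends one notification for the whole batch instead of one per
 * type. A self-contained version of that collect-then-notify shape; processType and
 * notifyMaster are hypothetical stand-ins for processMapping and nodeMappingRefresh.
 */
class MappingRefreshSketch {
    static final String DEFAULT_TYPE = "_default_";

    void apply(String index, java.util.Map<String, String> clusterMappings) {
        java.util.List<String> typesToRefresh = new java.util.ArrayList<String>();
        // the default mapping must be applied before any concrete type that inherits from it
        if (clusterMappings.containsKey(DEFAULT_TYPE)
                && processType(index, DEFAULT_TYPE, clusterMappings.get(DEFAULT_TYPE))) {
            typesToRefresh.add(DEFAULT_TYPE);
        }
        for (java.util.Map.Entry<String, String> entry : clusterMappings.entrySet()) {
            if (DEFAULT_TYPE.equals(entry.getKey())) {
                continue; // already processed first
            }
            if (processType(index, entry.getKey(), entry.getValue())) {
                typesToRefresh.add(entry.getKey());
            }
        }
        if (!typesToRefresh.isEmpty()) {
            notifyMaster(index, typesToRefresh); // one request for the whole batch, not one per type
        }
    }

    boolean processType(String index, String type, String source) {
        return false; // stand-in: returns true when the applied mapping still differs and needs a refresh
    }

    void notifyMaster(String index, java.util.List<String> types) {
        System.out.println("refresh " + index + " " + types);
    }
}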