/** Cleans dangling indices if they are already allocated on the provided meta data. */
void cleanupAllocatedDangledIndices(MetaData metaData) {
    for (String danglingIndex : danglingIndices.keySet()) {
        if (metaData.hasIndex(danglingIndex)) {
            logger.debug("[{}] no longer dangling (created), removing from dangling list", danglingIndex);
            danglingIndices.remove(danglingIndex);
        }
    }
}
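/*
 * A minimal sketch of the state cleanupAllocatedDangledIndices() relies on: removing from
 * danglingIndices while iterating its key set is only safe if the map is a concurrent map.
 * The declaration below is an assumption consistent with that usage (ConcurrentCollections
 * is the Elasticsearch utility for building concurrent maps), not necessarily the original
 * field declaration.
 */
private final Map<String, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap();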
public void validate(RoutingTableValidation validation, MetaData metaData) {
    if (!metaData.hasIndex(index())) {
        validation.addIndexFailure(index(), "exists in routing table but not in metadata");
        return;
    }
    IndexMetaData indexMetaData = metaData.index(index());
    for (String failure : validate(indexMetaData)) {
        validation.addIndexFailure(index(), failure);
    }
}
@Override
protected void resolveRequest(ClusterState state, InternalRequest request,
                              ActionListener<IndexResponse> indexResponseActionListener) {
    MetaData metaData = clusterService.state().metaData();
    MappingMetaData mappingMd = null;
    if (metaData.hasIndex(request.concreteIndex())) {
        mappingMd = metaData.index(request.concreteIndex()).mappingOrDefault(request.request().type());
    }
    request.request().process(metaData, mappingMd, allowIdGeneration, request.concreteIndex());
}
/**
 * Finds new dangling indices by iterating over the indices and trying to find indices that have
 * state on disk, but are not part of the provided meta data, or not detected as dangled already.
 */
Map<String, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
    final Set<String> indices;
    try {
        indices = nodeEnv.findAllIndices();
    } catch (Throwable e) {
        logger.warn("failed to list dangling indices", e);
        return ImmutableMap.of();
    }
    Map<String, IndexMetaData> newIndices = Maps.newHashMap();
    for (String indexName : indices) {
        if (metaData.hasIndex(indexName) == false && danglingIndices.containsKey(indexName) == false) {
            try {
                IndexMetaData indexMetaData = metaStateService.loadIndexState(indexName);
                if (indexMetaData != null) {
                    logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", indexName);
                    if (!indexMetaData.index().equals(indexName)) {
                        logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index());
                        indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build();
                    }
                    newIndices.put(indexName, indexMetaData);
                } else {
                    logger.debug("[{}] dangling index directory detected, but no state found", indexName);
                }
            } catch (Throwable t) {
                logger.warn("[{}] failed to load index state for detected dangled index", t, indexName);
            }
        }
    }
    return newIndices;
}
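/*
 * A hedged sketch of how the two dangling-index helpers above might be driven together on a
 * cluster state update: first drop entries that the new metadata now covers, then pick up any
 * newly detected on-disk indices. processDanglingIndices() is an illustrative wrapper name,
 * not confirmed by the snippets above.
 */
void processDanglingIndices(MetaData metaData) {
    if (nodeEnv.hasNodeFile() == false) {
        return; // nothing on disk to reconcile
    }
    cleanupAllocatedDangledIndices(metaData);
    danglingIndices.putAll(findNewDanglingIndices(metaData));
}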
@Override
public void clusterChanged(ClusterChangedEvent event) {
    if (event.state().blocks().disableStatePersistence()) {
        // reset the current metadata, we need to start fresh...
        this.currentMetaData = null;
        return;
    }
    MetaData newMetaData = event.state().metaData();
    // delete indices that were there before, but are deleted now
    // we need to do it so they won't be detected as dangling
    if (currentMetaData != null) {
        // only delete indices when we already received a state (currentMetaData != null)
        for (IndexMetaData current : currentMetaData) {
            if (!newMetaData.hasIndex(current.index())) {
                logger.debug("[{}] deleting index that is no longer part of the metadata (indices: [{}])", current.index(), newMetaData.indices().keys());
                if (nodeEnv.hasNodeFile()) {
                    FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
                }
                try {
                    nodeIndexDeletedAction.nodeIndexStoreDeleted(event.state(), current.index(), event.state().nodes().localNodeId());
                } catch (Exception e) {
                    logger.debug("[{}] failed to notify master on local index store deletion", e, current.index());
                }
            }
        }
    }
    currentMetaData = newMetaData;
}
@Override
protected void doExecute(final IndexRequest indexRequest, final ActionListener<IndexResponse> listener) {
    final ClusterState clusterState = clusterService.state();
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);
    try {
        MetaData metaData = clusterState.metaData();
        indexRequest.index(clusterState.metaData().concreteSingleIndex(indexRequest.index(), indexRequest.indicesOptions()));
        MappingMetaData mappingMd = null;
        if (metaData.hasIndex(indexRequest.index())) {
            mappingMd = metaData.index(indexRequest.index()).mappingOrDefault(indexRequest.type());
        }
        indexRequest.process(metaData, indexRequest.index(), mappingMd, allowIdGeneration);
    } catch (Throwable e) {
        logger.error(e.getMessage(), e);
        listener.onFailure(e);
        return;
    }
    // the counter starts at 1 for the primary response; it is incremented before optional
    // replica work is dispatched, and the listener fires once it drops back to 0
    final AtomicInteger counter = new AtomicInteger(1);
    final IndexResponse response = new IndexResponse();
    transportLeaderShardIndexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
        @Override
        public void onResponse(IndexResponse indexResponse) {
            int quorumShards = indexResponse.getQuorumShards();
            response.setIndex(indexRequest.index())
                    .setType(indexRequest.type())
                    .setId(indexRequest.id())
                    .setVersion(indexResponse.getVersion())
                    .setQuorumShards(quorumShards);
            ShardId shardId = clusterService.operationRouting()
                    .indexShards(clusterService.state(), indexRequest.index(), indexRequest.type(), indexRequest.id(), indexRequest.routing())
                    .shardId();
            if (quorumShards < 0) {
                response.setFailure(new IndexActionFailure(shardId, "quorum not reached"));
            } else if (quorumShards > 0) {
                counter.incrementAndGet();
                IndexReplicaShardRequest indexReplicaShardRequest = new IndexReplicaShardRequest(shardId, indexRequest);
                transportReplicaShardIndexAction.execute(indexReplicaShardRequest,
                        new ActionListener<TransportReplicaShardOperationAction.ReplicaOperationResponse>() {
                            @Override
                            public void onResponse(TransportReplicaShardOperationAction.ReplicaOperationResponse replicaOperationResponse) {
                                response.addReplicaResponses(replicaOperationResponse.responses());
                                if (counter.decrementAndGet() == 0) {
                                    listener.onResponse(response);
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                logger.error(e.getMessage(), e);
                                listener.onFailure(e);
                            }
                        });
            }
            if (counter.decrementAndGet() == 0) {
                listener.onResponse(response);
            }
        }

        @Override
        public void onFailure(Throwable e) {
            logger.error(e.getMessage(), e);
            listener.onFailure(e);
        }
    });
}
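/*
 * The fan-out/fan-in completion pattern used above, shown in isolation: seed the counter
 * with 1 for the primary path, incrementAndGet() before dispatching optional extra work,
 * and complete once decrementAndGet() reaches 0. A minimal self-contained sketch; the
 * Callback interface and all names here are illustrative, not part of the class above.
 */
import java.util.concurrent.atomic.AtomicInteger;

public class FanInExample {
    interface Callback {
        void done();
    }

    static void run(boolean hasReplicaWork, final Callback onAllDone) {
        final AtomicInteger pending = new AtomicInteger(1); // 1 == the primary path itself
        if (hasReplicaWork) {
            pending.incrementAndGet(); // register the extra branch before it starts
            new Thread(new Runnable() {
                @Override
                public void run() {
                    // ... replica work would happen here ...
                    if (pending.decrementAndGet() == 0) {
                        onAllDone.done();
                    }
                }
            }).start();
        }
        if (pending.decrementAndGet() == 0) { // primary path finished
            onAllDone.done();
        }
    }
}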
private void executeBulk(final BulkRequest bulkRequest, final long startTime,
                         final ActionListener<BulkResponse> listener,
                         final AtomicArray<BulkItemResponse> responses) {
    final ClusterState clusterState = clusterService.state();
    // TODO use timeout to wait here if its blocked...
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);
    final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver);
    MetaData metaData = clusterState.metaData();
    for (int i = 0; i < bulkRequest.requests.size(); i++) {
        ActionRequest request = bulkRequest.requests.get(i);
        if (request instanceof DocumentRequest) {
            DocumentRequest req = (DocumentRequest) request;
            if (addFailureIfIndexIsUnavailable(req, bulkRequest, responses, i, concreteIndices, metaData)) {
                continue;
            }
            String concreteIndex = concreteIndices.resolveIfAbsent(req);
            if (request instanceof IndexRequest) {
                IndexRequest indexRequest = (IndexRequest) request;
                MappingMetaData mappingMd = null;
                if (metaData.hasIndex(concreteIndex)) {
                    mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
                }
                try {
                    indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
                } catch (ElasticsearchParseException | RoutingMissingException e) {
                    BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
                    BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
                    responses.set(i, bulkItemResponse);
                    // make sure the request gets never processed again
                    bulkRequest.requests.set(i, null);
                }
            } else {
                concreteIndices.resolveIfAbsent(req);
                req.routing(clusterState.metaData().resolveIndexRouting(req.parent(), req.routing(), req.index()));
            }
        }
    }
    // first, go over all the requests and create a ShardId -> Operations mapping
    Map<ShardId, List<BulkItemRequest>> requestsByShard = new HashMap<>();
    for (int i = 0; i < bulkRequest.requests.size(); i++) {
        ActionRequest request = bulkRequest.requests.get(i);
        if (request instanceof IndexRequest) {
            IndexRequest indexRequest = (IndexRequest) request;
            String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index());
            ShardId shardId = clusterService.operationRouting()
                    .indexShards(clusterState, concreteIndex, indexRequest.type(), indexRequest.id(), indexRequest.routing())
                    .shardId();
            List<BulkItemRequest> list = requestsByShard.get(shardId);
            if (list == null) {
                list = new ArrayList<>();
                requestsByShard.put(shardId, list);
            }
            list.add(new BulkItemRequest(i, request));
        } else if (request instanceof DeleteRequest) {
            DeleteRequest deleteRequest = (DeleteRequest) request;
            String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index());
            MappingMetaData mappingMd = clusterState.metaData().index(concreteIndex).mappingOrDefault(deleteRequest.type());
            if (mappingMd != null && mappingMd.routing().required() && deleteRequest.routing() == null) {
                // if routing is required, and no routing on the delete request, we need to broadcast it....
                GroupShardsIterator groupShards = clusterService.operationRouting().broadcastDeleteShards(clusterState, concreteIndex);
                for (ShardIterator shardIt : groupShards) {
                    List<BulkItemRequest> list = requestsByShard.get(shardIt.shardId());
                    if (list == null) {
                        list = new ArrayList<>();
                        requestsByShard.put(shardIt.shardId(), list);
                    }
                    list.add(new BulkItemRequest(i, deleteRequest));
                }
            } else {
                ShardId shardId = clusterService.operationRouting()
                        .indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing())
                        .shardId();
                List<BulkItemRequest> list = requestsByShard.get(shardId);
                if (list == null) {
                    list = new ArrayList<>();
                    requestsByShard.put(shardId, list);
                }
                list.add(new BulkItemRequest(i, request));
            }
        } else if (request instanceof UpdateRequest) {
            UpdateRequest updateRequest = (UpdateRequest) request;
            String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index());
            MappingMetaData mappingMd = clusterState.metaData().index(concreteIndex).mappingOrDefault(updateRequest.type());
            if (mappingMd != null && mappingMd.routing().required() && updateRequest.routing() == null) {
                BulkItemResponse.Failure failure = new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(),
                        new IllegalArgumentException("routing is required for this item"));
                responses.set(i, new BulkItemResponse(i, updateRequest.type(), failure));
                continue;
            }
            ShardId shardId = clusterService.operationRouting()
                    .indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing())
                    .shardId();
            List<BulkItemRequest> list = requestsByShard.get(shardId);
            if (list == null) {
                list = new ArrayList<>();
                requestsByShard.put(shardId, list);
            }
            list.add(new BulkItemRequest(i, request));
        }
    }
    if (requestsByShard.isEmpty()) {
        listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
        return;
    }
    final AtomicInteger counter = new AtomicInteger(requestsByShard.size());
    for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
        final ShardId shardId = entry.getKey();
        final List<BulkItemRequest> requests = entry.getValue();
        BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(),
                requests.toArray(new BulkItemRequest[requests.size()]));
        bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
        bulkShardRequest.timeout(bulkRequest.timeout());
        shardBulkAction.execute(bulkShardRequest, new ActionListener<BulkShardResponse>() {
            @Override
            public void onResponse(BulkShardResponse bulkShardResponse) {
                for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) {
                    // we may have no response if item failed
                    if (bulkItemResponse.getResponse() != null) {
                        bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo());
                    }
                    responses.set(bulkItemResponse.getItemId(), bulkItemResponse);
                }
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable e) {
                // create failures for all relevant requests
                for (BulkItemRequest request : requests) {
                    if (request.request() instanceof IndexRequest) {
                        IndexRequest indexRequest = (IndexRequest) request.request();
                        responses.set(request.id(), new BulkItemResponse(request.id(),
                                indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
                                new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(indexRequest.index()),
                                        indexRequest.type(), indexRequest.id(), e)));
                    } else if (request.request() instanceof DeleteRequest) {
                        DeleteRequest deleteRequest = (DeleteRequest) request.request();
                        responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
                                new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(deleteRequest.index()),
                                        deleteRequest.type(), deleteRequest.id(), e)));
                    } else if (request.request() instanceof UpdateRequest) {
                        UpdateRequest updateRequest = (UpdateRequest) request.request();
                        responses.set(request.id(), new BulkItemResponse(request.id(), "update",
                                new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(updateRequest.index()),
                                        updateRequest.type(), updateRequest.id(), e)));
                    }
                }
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            private void finishHim() {
                listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
            }
        });
    }
}
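/*
 * A hedged caller-side sketch of how a bulk request reaching executeBulk() above might be
 * built and submitted through the 1.x-era Java client API. The "idx"/"type" names and the
 * client variable are illustrative assumptions, not taken from the code above.
 */
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("idx", "type", "1").source("{\"field\":1}"));
bulkRequest.add(new DeleteRequest("idx", "type", "2"));
client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
    @Override
    public void onResponse(BulkResponse bulkResponse) {
        // item-level failures do not fail the whole bulk; inspect each item
        if (bulkResponse.hasFailures()) {
            logger.warn(bulkResponse.buildFailureMessage());
        }
    }

    @Override
    public void onFailure(Throwable e) {
        logger.error("bulk failed", e);
    }
});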
@Override
public void clusterChanged(ClusterChangedEvent event) {
    if (event.state().blocks().disableStatePersistence()) {
        // reset the current metadata, we need to start fresh...
        this.currentMetaData = null;
        return;
    }
    MetaData newMetaData = event.state().metaData();
    // we don't check if metaData changed, since we might be called several times and we need to check dangling...
    boolean success = true;
    // only applied to master node, writing the global and index level states
    if (event.state().nodes().localNode().masterNode()) {
        // check if the global state changed?
        if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) {
            try {
                writeGlobalState("changed", newMetaData, currentMetaData);
            } catch (Exception e) {
                success = false;
            }
        }
        // check and write changes in indices
        for (IndexMetaData indexMetaData : newMetaData) {
            String writeReason = null;
            IndexMetaData currentIndexMetaData;
            if (currentMetaData == null) {
                // a new event..., check from the state stored
                currentIndexMetaData = loadIndex(indexMetaData.index());
            } else {
                currentIndexMetaData = currentMetaData.index(indexMetaData.index());
            }
            if (currentIndexMetaData == null) {
                writeReason = "freshly created";
            } else if (currentIndexMetaData.version() != indexMetaData.version()) {
                writeReason = "version changed from [" + currentIndexMetaData.version() + "] to [" + indexMetaData.version() + "]";
            }
            // we update the writeReason only if we really need to write it
            if (writeReason == null) {
                continue;
            }
            try {
                writeIndex(writeReason, indexMetaData, currentIndexMetaData);
            } catch (Exception e) {
                success = false;
            }
        }
    }
    // delete indices that were there before, but are deleted now
    // we need to do it so they won't be detected as dangling
    if (nodeEnv.hasNodeFile()) {
        if (currentMetaData != null) {
            // only delete indices when we already received a state (currentMetaData != null)
            // and we had a go at processing dangling indices at least once
            // this will also delete the _state of the index itself
            for (IndexMetaData current : currentMetaData) {
                if (danglingIndices.containsKey(current.index())) {
                    continue;
                }
                if (!newMetaData.hasIndex(current.index())) {
                    logger.debug("[{}] deleting index that is no longer part of the metadata (indices: [{}])", current.index(), newMetaData.indices().keySet());
                    FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
                }
            }
        }
    }
    // handle dangling indices, we handle those for all nodes that have a node file (data or master)
    if (nodeEnv.hasNodeFile()) {
        if (danglingTimeout.millis() >= 0) {
            synchronized (danglingMutex) {
                for (String danglingIndex : danglingIndices.keySet()) {
                    if (newMetaData.hasIndex(danglingIndex)) {
                        logger.debug("[{}] no longer dangling (created), removing", danglingIndex);
                        DanglingIndex removed = danglingIndices.remove(danglingIndex);
                        removed.future.cancel(false);
                    }
                }
                // delete indices that are no longer part of the metadata
                try {
                    for (String indexName : nodeEnv.findAllIndices()) {
                        // if we have the index on the metadata, don't delete it
                        if (newMetaData.hasIndex(indexName)) {
                            continue;
                        }
                        if (danglingIndices.containsKey(indexName)) {
                            // already dangling, continue
                            continue;
                        }
                        IndexMetaData indexMetaData = loadIndex(indexName);
                        if (indexMetaData != null) {
                            if (danglingTimeout.millis() == 0) {
                                logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, timeout set to 0, deleting now", indexName);
                                FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(indexName)));
                            } else {
                                logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, scheduling to delete in [{}], auto import to cluster state [{}]", indexName, danglingTimeout, autoImportDangled);
                                danglingIndices.put(indexName, new DanglingIndex(indexName,
                                        threadPool.schedule(danglingTimeout, ThreadPool.Names.SAME, new RemoveDanglingIndex(indexName))));
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.warn("failed to find dangling indices", e);
                }
            }
        }
        if (autoImportDangled.shouldImport() && !danglingIndices.isEmpty()) {
            final List<IndexMetaData> dangled = Lists.newArrayList();
            for (String indexName : danglingIndices.keySet()) {
                IndexMetaData indexMetaData = loadIndex(indexName);
                if (indexMetaData == null) {
                    logger.debug("failed to find state for dangling index [{}]", indexName);
                    continue;
                }
                // we might have someone copying over an index, renaming the directory, handle that
                if (!indexMetaData.index().equals(indexName)) {
                    logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index());
                    indexMetaData = IndexMetaData.newIndexMetaDataBuilder(indexMetaData).index(indexName).build();
                }
                if (autoImportDangled == AutoImportDangledState.CLOSED) {
                    indexMetaData = IndexMetaData.newIndexMetaDataBuilder(indexMetaData).state(IndexMetaData.State.CLOSE).build();
                }
                if (indexMetaData != null) {
                    dangled.add(indexMetaData);
                }
            }
            IndexMetaData[] dangledIndices = dangled.toArray(new IndexMetaData[dangled.size()]);
            try {
                allocateDangledIndices.allocateDangled(dangledIndices, new LocalAllocateDangledIndices.Listener() {
                    @Override
                    public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
                        logger.trace("allocated dangled");
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        logger.info("failed to send allocated dangled", e);
                    }
                });
            } catch (Exception e) {
                logger.warn("failed to send allocate dangled", e);
            }
        }
    }
    if (success) {
        currentMetaData = newMetaData;
    }
}
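/*
 * A sketch of the scheduled deletion task referenced above. The RemoveDanglingIndex name
 * comes from the threadPool.schedule(...) call in clusterChanged(); the body shown here is
 * an assumption about what such a task plausibly does (re-check under the dangling mutex,
 * then delete), not a verbatim copy of the original class.
 */
class RemoveDanglingIndex implements Runnable {
    private final String index;

    RemoveDanglingIndex(String index) {
        this.index = index;
    }

    @Override
    public void run() {
        synchronized (danglingMutex) {
            // the index may have been imported or re-created while the timeout was pending
            DanglingIndex remove = danglingIndices.remove(index);
            if (remove == null) {
                return;
            }
            logger.info("[{}] deleting dangling index", index);
            FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(index)));
        }
    }
}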