private AsyncSingleAction(Request request, ActionListener<Response> listener) {
    this.listener = listener;

    ClusterState clusterState = clusterService.state();
    nodes = clusterState.nodes();
    ClusterBlockException blockException = checkGlobalBlock(clusterState);
    if (blockException != null) {
        throw blockException;
    }

    String concreteSingleIndex;
    if (resolveIndex(request)) {
        concreteSingleIndex = clusterState.metaData().concreteSingleIndex(request.index(), request.indicesOptions());
    } else {
        concreteSingleIndex = request.index();
    }
    this.internalRequest = new InternalRequest(request, concreteSingleIndex);

    blockException = checkRequestBlock(clusterState, internalRequest);
    if (blockException != null) {
        throw blockException;
    }

    this.shardsIt = shards(clusterState, internalRequest);
}
@Override
public void messageReceived(Request request, final TransportChannel channel) throws Exception {
    // we just send back a response, no need to fork a listener
    request.listenerThreaded(false);
    // we don't spawn, so if we get a request with no threading, change it to single threaded
    if (request.operationThreading() == BroadcastOperationThreading.NO_THREADS) {
        request.operationThreading(BroadcastOperationThreading.SINGLE_THREAD);
    }
    execute(request, new ActionListener<Response>() {
        @Override
        public void onResponse(Response response) {
            try {
                channel.sendResponse(response);
            } catch (Throwable e) {
                onFailure(e);
            }
        }

        @Override
        public void onFailure(Throwable e) {
            try {
                channel.sendResponse(e);
            } catch (Exception e1) {
                logger.warn("Failed to send response", e1);
            }
        }
    });
}
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
    // no need to have a threaded listener since we just send back a response
    request.listenerThreaded(false);
    // if we have a local operation, execute it on a thread since we don't spawn
    request.operationThreaded(true);
    execute(request, new ActionListener<Response>() {
        @Override
        public void onResponse(Response result) {
            try {
                channel.sendResponse(result);
            } catch (Exception e) {
                onFailure(e);
            }
        }

        @Override
        public void onFailure(Throwable e) {
            try {
                channel.sendResponse(e);
            } catch (Exception e1) {
                logger.warn("Failed to send response for " + transportAction, e1);
            }
        }
    });
}
protected AsyncBroadcastAction(Request request, ActionListener<Response> listener) {
    this.request = request;
    this.listener = listener;

    clusterState = clusterService.state();
    ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
    if (blockException != null) {
        throw blockException;
    }

    // update to concrete indices
    String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices());
    blockException = checkRequestBlock(clusterState, request, concreteIndices);
    if (blockException != null) {
        throw blockException;
    }

    nodes = clusterState.nodes();
    logger.trace("resolving shards based on cluster state version [{}]", clusterState.version());
    shardsIts = shards(clusterState, request, concreteIndices);
    expectedOps = shardsIts.size();

    shardsResponses = new AtomicReferenceArray<Object>(expectedOps);
}
AsyncShardOperationAction(Request request, ActionListener<Response> listener) {
    this.request = request;
    this.listener = listener;

    if (request.replicationType() != ReplicationType.DEFAULT) {
        replicationType = request.replicationType();
    } else {
        replicationType = defaultReplicationType;
    }
}
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    request = newRequest();
    request.readFrom(in);
    if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
        shardId = ShardId.readShardId(in);
    } else {
        // older nodes will send the concrete index as part of the request
        shardId = new ShardId(request.index(), in.readVInt());
    }
}
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    if (out.getVersion().before(Version.V_1_4_0)) {
        // older nodes expect the concrete index as part of the request
        request.index(shardId.getIndex());
    }
    request.writeTo(out);
    if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
        shardId.writeTo(out);
    } else {
        out.writeVInt(shardId.id());
    }
}
@Override
public void messageReceived(Request request, final TransportChannel channel) throws Exception {
    // we just send back a response, no need to fork a listener
    request.listenerThreaded(false);
    execute(request, new ActionListener<Response>() {
        @Override
        public void onResponse(Response response) {
            try {
                channel.sendResponse(response);
            } catch (Throwable e) {
                onFailure(e);
            }
        }

        @Override
        public void onFailure(Throwable e) {
            try {
                channel.sendResponse(e);
            } catch (Exception e1) {
                logger.warn("Failed to send response", e1);
            }
        }
    });
}
public void start() {
    if (shardsIts.size() == 0) {
        // no shards
        try {
            listener.onResponse(newResponse(request, new AtomicReferenceArray(0), clusterState));
        } catch (Throwable e) {
            listener.onFailure(e);
        }
        return;
    }
    request.beforeStart();
    // count the local operations, and perform the non local ones
    int shardIndex = -1;
    for (final ShardIterator shardIt : shardsIts) {
        shardIndex++;
        final ShardRouting shard = shardIt.nextOrNull();
        if (shard != null) {
            performOperation(shardIt, shard, shardIndex);
        } else {
            // really, no shards active in this group
            onOperation(null, shardIt, shardIndex, new NoShardAvailableActionException(shardIt.shardId()));
        }
    }
}
void retry(final @Nullable Throwable failure) {
    if (observer.isTimedOut()) {
        // we are running as a last attempt after a timeout has happened. don't retry
        Throwable listenFailure = failure;
        if (listenFailure == null) {
            if (shardIt == null) {
                listenFailure = new UnavailableShardsException(new ShardId(request.concreteIndex(), -1),
                        "Timeout waiting for [{}], request: {}", request.timeout(), actionName);
            } else {
                listenFailure = new UnavailableShardsException(shardIt.shardId(),
                        "[{}] shardIt, [{}] active : Timeout waiting for [{}], request: {}",
                        shardIt.size(), shardIt.sizeActive(), request.timeout(), actionName);
            }
        }
        listener.onFailure(listenFailure);
        return;
    }

    observer.waitForNextChange(new ClusterStateObserver.Listener() {
        @Override
        public void onNewClusterState(ClusterState state) {
            doStart();
        }

        @Override
        public void onClusterServiceClose() {
            listener.onFailure(new NodeClosedException(nodes.localNode()));
        }

        @Override
        public void onTimeout(TimeValue timeout) {
            // just to be on the safe side, see if we can start it now?
            doStart();
        }
    }, request.timeout());
}
@Override
public void execute(Request request, ActionListener<Response> listener) {
    // since the callback is async, we typically can get called from within an event in the cluster service
    // or something similar, so make sure we are threaded so we won't block it.
    request.listenerThreaded(true);
    super.execute(request, listener);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    indicesLevelRequest.writeTo(out);
    int size = shards.size();
    out.writeVInt(size);
    for (int i = 0; i < size; i++) {
        shards.get(i).writeTo(out);
    }
    out.writeString(nodeId);
}
public IndicesOptions indicesOptions() {
    return indicesLevelRequest.indicesOptions();
}
@Override
public String[] indices() {
    return request.indices();
}
/**
 * Returns <tt>true</tt> if the operation has started to be performed on the primary (or is done).
 */
public boolean start(final boolean fromClusterEvent) throws ElasticSearchException {
    final ClusterState clusterState = clusterService.state();
    nodes = clusterState.nodes();
    try {
        ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
        if (blockException != null) {
            if (blockException.retryable()) {
                retry(fromClusterEvent, blockException);
                return false;
            } else {
                throw blockException;
            }
        }
        // check if we need to execute, and if not, return
        if (!resolveRequest(clusterState, request, listener)) {
            return true;
        }
        blockException = checkRequestBlock(clusterState, request);
        if (blockException != null) {
            if (blockException.retryable()) {
                retry(fromClusterEvent, blockException);
                return false;
            } else {
                throw blockException;
            }
        }
        shardIt = shards(clusterState, request);
    } catch (Exception e) {
        listener.onFailure(e);
        return true;
    }

    // no shardIt, might be in the case between index gateway recovery and shardIt initialization
    if (shardIt.size() == 0) {
        retry(fromClusterEvent, null);
        return false;
    }

    boolean foundPrimary = false;
    ShardRouting shardX;
    while ((shardX = shardIt.nextOrNull()) != null) {
        final ShardRouting shard = shardX;
        // we only deal with primary shardIt here...
        if (!shard.primary()) {
            continue;
        }
        if (!shard.active() || !nodes.nodeExists(shard.currentNodeId())) {
            retry(fromClusterEvent, null);
            return false;
        }

        // check here for consistency
        if (checkWriteConsistency) {
            WriteConsistencyLevel consistencyLevel = defaultWriteConsistencyLevel;
            if (request.consistencyLevel() != WriteConsistencyLevel.DEFAULT) {
                consistencyLevel = request.consistencyLevel();
            }
            int requiredNumber = 1;
            if (consistencyLevel == WriteConsistencyLevel.QUORUM && shardIt.size() > 2) {
                // only for more than 2 in the number of shardIt it makes sense, otherwise its 1 shard
                // with 1 replica, quorum is 1 (which is what it is initialized to)
                requiredNumber = (shardIt.size() / 2) + 1;
            } else if (consistencyLevel == WriteConsistencyLevel.ALL) {
                requiredNumber = shardIt.size();
            }

            if (shardIt.sizeActive() < requiredNumber) {
                retry(fromClusterEvent, null);
                return false;
            }
        }

        if (!primaryOperationStarted.compareAndSet(false, true)) {
            return true;
        }

        foundPrimary = true;
        if (shard.currentNodeId().equals(nodes.localNodeId())) {
            if (request.operationThreaded()) {
                request.beforeLocalFork();
                threadPool.executor(executor).execute(new Runnable() {
                    @Override
                    public void run() {
                        performOnPrimary(shard.id(), fromClusterEvent, shard, clusterState);
                    }
                });
            } else {
                performOnPrimary(shard.id(), fromClusterEvent, shard, clusterState);
            }
        } else {
            DiscoveryNode node = nodes.get(shard.currentNodeId());
            transportService.sendRequest(node, transportAction, request, transportOptions, new BaseTransportResponseHandler<Response>() {

                @Override
                public Response newInstance() {
                    return newResponseInstance();
                }

                @Override
                public String executor() {
                    return ThreadPool.Names.SAME;
                }

                @Override
                public void handleResponse(Response response) {
                    listener.onResponse(response);
                }

                @Override
                public void handleException(TransportException exp) {
                    // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
                    if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
                            retryPrimaryException(exp)) {
                        primaryOperationStarted.set(false);
                        // we already marked it as started when we executed it (removed the listener) so pass false
                        // to re-add to the cluster listener
                        retry(false, null);
                    } else {
                        listener.onFailure(exp);
                    }
                }
            });
        }
        break;
    }
    // we should never get here, but here we go
    if (!foundPrimary) {
        retry(fromClusterEvent, null);
        return false;
    }
    return true;
}
public String[] indices() {
    return indicesLevelRequest.indices();
}
public void start() {
    this.observer = new ClusterStateObserver(clusterService, request.timeout(), logger);
    doStart();
}
protected void doStart() {
    nodes = observer.observedState().nodes();
    try {
        ClusterBlockException blockException = checkGlobalBlock(observer.observedState());
        if (blockException != null) {
            if (blockException.retryable()) {
                retry(blockException);
                return;
            } else {
                throw blockException;
            }
        }
        request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
        // check if we need to execute, and if not, return
        if (!resolveRequest(observer.observedState(), request, listener)) {
            listener.onFailure(new IllegalStateException(LoggerMessageFormat.format("{} request {} could not be resolved",
                    new ShardId(request.index, request.shardId), actionName)));
            return;
        }
        blockException = checkRequestBlock(observer.observedState(), request);
        if (blockException != null) {
            if (blockException.retryable()) {
                retry(blockException);
                return;
            } else {
                throw blockException;
            }
        }
        shardIt = shards(observer.observedState(), request);
    } catch (Throwable e) {
        listener.onFailure(e);
        return;
    }

    // no shardIt, might be in the case between index gateway recovery and shardIt initialization
    if (shardIt.size() == 0) {
        retry(null);
        return;
    }

    // this transport only makes sense with an iterator that returns a single shard routing (like primary)
    assert shardIt.size() == 1;

    ShardRouting shard = shardIt.nextOrNull();
    assert shard != null;

    if (!shard.active()) {
        retry(null);
        return;
    }

    request.shardId = shardIt.shardId().id();
    DiscoveryNode node = nodes.get(shard.currentNodeId());
    transportService.sendRequest(node, shardActionName, request, transportOptions(), new BaseTransportResponseHandler<Response>() {

        @Override
        public Response newInstance() {
            return newResponse();
        }

        @Override
        public String executor() {
            return ThreadPool.Names.SAME;
        }

        @Override
        public void handleResponse(Response response) {
            listener.onResponse(response);
        }

        @Override
        public void handleException(TransportException exp) {
            Throwable cause = exp.unwrapCause();
            // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
            if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                    retryOnFailure(exp)) {
                retry(cause);
            } else {
                listener.onFailure(exp);
            }
        }
    });
}
void retry(boolean fromClusterEvent, @Nullable final Throwable failure) {
    if (!fromClusterEvent) {
        // make it a threaded operation so we fork on the discovery listener thread
        request.beforeLocalFork();
        request.operationThreaded(true);
        clusterService.add(request.timeout(), new TimeoutClusterStateListener() {
            @Override
            public void postAdded() {
                if (start(true)) {
                    // if we managed to start and perform the operation on the primary, we can remove this listener
                    clusterService.remove(this);
                }
            }

            @Override
            public void onClose() {
                clusterService.remove(this);
                listener.onFailure(new NodeClosedException(nodes.localNode()));
            }

            @Override
            public void clusterChanged(ClusterChangedEvent event) {
                if (start(true)) {
                    // if we managed to start and perform the operation on the primary, we can remove this listener
                    clusterService.remove(this);
                }
            }

            @Override
            public void onTimeout(TimeValue timeValue) {
                // just to be on the safe side, see if we can start it now?
                if (start(true)) {
                    clusterService.remove(this);
                    return;
                }
                clusterService.remove(this);
                Throwable listenerFailure = failure;
                if (listenerFailure == null) {
                    if (shardIt == null) {
                        listenerFailure = new UnavailableShardsException(null,
                                "no available shards: Timeout waiting for [" + timeValue + "], request: " + request.toString());
                    } else {
                        listenerFailure = new UnavailableShardsException(shardIt.shardId(),
                                "[" + shardIt.size() + "] shardIt, [" + shardIt.sizeActive() + "] active : Timeout waiting for ["
                                        + timeValue + "], request: " + request.toString());
                    }
                }
                listener.onFailure(listenerFailure);
            }
        });
    }
}
protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) {
    return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.concreteIndex());
}
void performOnReplica(final PrimaryResponse<Response, ReplicaRequest> response, final AtomicInteger counter, final ShardRouting shard, String nodeId) {
    // if we don't have that node, it means that it might have failed and will be created again, in
    // this case, we don't have to do the operation, and just let it failover
    if (!nodes.nodeExists(nodeId)) {
        if (counter.decrementAndGet() == 0) {
            listener.onResponse(response.response());
        }
        return;
    }

    final ReplicaOperationRequest shardRequest = new ReplicaOperationRequest(shardIt.shardId().id(), response.replicaRequest());
    if (!nodeId.equals(nodes.localNodeId())) {
        DiscoveryNode node = nodes.get(nodeId);
        transportService.sendRequest(node, transportReplicaAction, shardRequest, transportOptions, new VoidTransportResponseHandler(ThreadPool.Names.SAME) {
            @Override
            public void handleResponse(VoidStreamable vResponse) {
                finishIfPossible();
            }

            @Override
            public void handleException(TransportException exp) {
                if (!ignoreReplicaException(exp.unwrapCause())) {
                    logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), exp);
                    shardStateAction.shardFailed(shard, "Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(exp) + "]");
                }
                finishIfPossible();
            }

            private void finishIfPossible() {
                if (counter.decrementAndGet() == 0) {
                    listener.onResponse(response.response());
                }
            }
        });
    } else {
        if (request.operationThreaded()) {
            request.beforeLocalFork();
            threadPool.executor(executor).execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        shardOperationOnReplica(shardRequest);
                    } catch (Exception e) {
                        if (!ignoreReplicaException(e)) {
                            logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
                            shardStateAction.shardFailed(shard, "Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
                        }
                    }
                    if (counter.decrementAndGet() == 0) {
                        listener.onResponse(response.response());
                    }
                }
            });
        } else {
            try {
                shardOperationOnReplica(shardRequest);
            } catch (Exception e) {
                if (!ignoreReplicaException(e)) {
                    logger.warn("Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
                    shardStateAction.shardFailed(shard, "Failed to perform [" + transportAction + "] on replica, message [" + detailedMessage(e) + "]");
                }
            }
            if (counter.decrementAndGet() == 0) {
                listener.onResponse(response.response());
            }
        }
    }
}
@Override
public IndicesOptions indicesOptions() {
    return request.indicesOptions();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
    out.writeVInt(shardId);
    request.writeTo(out);
}
public void start() {
    if (shardsIts.size() == 0) {
        // no shards
        try {
            listener.onResponse(newResponse(request, new AtomicReferenceArray(0), clusterState));
        } catch (Throwable e) {
            listener.onFailure(e);
        }
    }
    request.beforeStart();
    // count the local operations, and perform the non local ones
    int localOperations = 0;
    int shardIndex = -1;
    for (final ShardIterator shardIt : shardsIts) {
        shardIndex++;
        final ShardRouting shard = shardIt.firstOrNull();
        if (shard != null) {
            if (shard.currentNodeId().equals(nodes.localNodeId())) {
                localOperations++;
            } else {
                // do the remote operation here, the localAsync flag is not relevant
                performOperation(shardIt, shardIndex, true);
            }
        } else {
            // really, no shards active in this group
            onOperation(null, shardIt, shardIndex, new NoShardAvailableActionException(shardIt.shardId()));
        }
    }
    // we have local operations, perform them now
    if (localOperations > 0) {
        if (request.operationThreading() == BroadcastOperationThreading.SINGLE_THREAD) {
            request.beforeLocalFork();
            threadPool.executor(executor).execute(new Runnable() {
                @Override
                public void run() {
                    int shardIndex = -1;
                    for (final ShardIterator shardIt : shardsIts) {
                        shardIndex++;
                        final ShardRouting shard = shardIt.firstOrNull();
                        if (shard != null) {
                            if (shard.currentNodeId().equals(nodes.localNodeId())) {
                                performOperation(shardIt, shardIndex, false);
                            }
                        }
                    }
                }
            });
        } else {
            boolean localAsync = request.operationThreading() == BroadcastOperationThreading.THREAD_PER_SHARD;
            if (localAsync) {
                request.beforeLocalFork();
            }
            shardIndex = -1;
            for (final ShardIterator shardIt : shardsIts) {
                shardIndex++;
                final ShardRouting shard = shardIt.firstOrNull();
                if (shard != null) {
                    if (shard.currentNodeId().equals(nodes.localNodeId())) {
                        performOperation(shardIt, shardIndex, localAsync);
                    }
                }
            }
        }
    }
}
private void innerExecute(final Request request, final ActionListener<Response> listener, final boolean retrying) {
    final ClusterState clusterState = clusterService.state();
    final DiscoveryNodes nodes = clusterState.nodes();
    if (nodes.localNodeMaster() || localExecute(request)) {
        // check for block, if blocked, retry, else, execute locally
        final ClusterBlockException blockException = checkBlock(request, clusterState);
        if (blockException != null) {
            if (!blockException.retryable()) {
                listener.onFailure(blockException);
                return;
            }
            clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
                @Override
                public void postAdded() {
                    ClusterBlockException blockException = checkBlock(request, clusterService.state());
                    if (blockException == null || !blockException.retryable()) {
                        clusterService.remove(this);
                        innerExecute(request, listener, false);
                    }
                }

                @Override
                public void onClose() {
                    clusterService.remove(this);
                    listener.onFailure(blockException);
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                    clusterService.remove(this);
                    listener.onFailure(blockException);
                }

                @Override
                public void clusterChanged(ClusterChangedEvent event) {
                    ClusterBlockException blockException = checkBlock(request, event.state());
                    if (blockException == null || !blockException.retryable()) {
                        clusterService.remove(this);
                        innerExecute(request, listener, false);
                    }
                }
            });
        } else {
            threadPool.executor(executor).execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        masterOperation(request, clusterState, listener);
                    } catch (Throwable e) {
                        listener.onFailure(e);
                    }
                }
            });
        }
    } else {
        if (nodes.masterNode() == null) {
            if (retrying) {
                listener.onFailure(new MasterNotDiscoveredException());
            } else {
                clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
                    @Override
                    public void postAdded() {
                        ClusterState clusterStateV2 = clusterService.state();
                        if (clusterStateV2.nodes().masterNodeId() != null) {
                            // now we have a master, try and execute it...
                            clusterService.remove(this);
                            innerExecute(request, listener, true);
                        }
                    }

                    @Override
                    public void onClose() {
                        clusterService.remove(this);
                        listener.onFailure(new NodeClosedException(nodes.localNode()));
                    }

                    @Override
                    public void onTimeout(TimeValue timeout) {
                        clusterService.remove(this);
                        listener.onFailure(new MasterNotDiscoveredException("waited for [" + timeout + "]"));
                    }

                    @Override
                    public void clusterChanged(ClusterChangedEvent event) {
                        if (event.nodesDelta().masterNodeChanged()) {
                            clusterService.remove(this);
                            innerExecute(request, listener, true);
                        }
                    }
                });
            }
            return;
        }
        processBeforeDelegationToMaster(request, clusterState);
        transportService.sendRequest(nodes.masterNode(), transportAction, request, new BaseTransportResponseHandler<Response>() {
            @Override
            public Response newInstance() {
                return newResponse();
            }

            @Override
            public void handleResponse(Response response) {
                listener.onResponse(response);
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }

            @Override
            public void handleException(final TransportException exp) {
                if (exp.unwrapCause() instanceof ConnectTransportException) {
                    // we want to retry here a bit to see if a new master is elected
                    clusterService.add(request.masterNodeTimeout(), new TimeoutClusterStateListener() {
                        @Override
                        public void postAdded() {
                            ClusterState clusterStateV2 = clusterService.state();
                            if (!clusterState.nodes().masterNodeId().equals(clusterStateV2.nodes().masterNodeId())) {
                                // master changes while adding the listener, try here
                                clusterService.remove(this);
                                innerExecute(request, listener, false);
                            }
                        }

                        @Override
                        public void onClose() {
                            clusterService.remove(this);
                            listener.onFailure(new NodeClosedException(nodes.localNode()));
                        }

                        @Override
                        public void onTimeout(TimeValue timeout) {
                            clusterService.remove(this);
                            listener.onFailure(new MasterNotDiscoveredException());
                        }

                        @Override
                        public void clusterChanged(ClusterChangedEvent event) {
                            if (event.nodesDelta().masterNodeChanged()) {
                                clusterService.remove(this);
                                innerExecute(request, listener, false);
                            }
                        }
                    });
                } else {
                    listener.onFailure(exp);
                }
            }
        });
    }
}
/**
 * Resolves the request. By default, this simply sets the concrete index (if it's an aliased one). If
 * resolving implies a different execution, return <tt>false</tt> here to indicate that this request
 * should not continue to be executed.
 */
protected boolean resolveRequest(ClusterState state, Request request, ActionListener<Response> listener) {
    request.index(state.metaData().concreteIndex(request.index()));
    return true;
}
@Override
public void readFrom(StreamInput in) throws IOException {
    shardId = in.readVInt();
    request = newRequestInstance();
    request.readFrom(in);
}