@Override
public void messageReceived(final Request request, final TransportChannel channel)
    throws Exception {
  // no need to have a threaded listener since we just send back a response
  request.listenerThreaded(false);
  // if we have a local operation, execute it on a thread since we don't spawn
  request.operationThreaded(true);
  execute(
      request,
      new ActionListener<Response>() {
        @Override
        public void onResponse(Response result) {
          try {
            channel.sendResponse(result);
          } catch (Exception e) {
            onFailure(e);
          }
        }

        @Override
        public void onFailure(Throwable e) {
          try {
            channel.sendResponse(e);
          } catch (Exception e1) {
            logger.warn("Failed to send response for " + transportAction, e1);
          }
        }
      });
}
AsyncShardOperationAction(Request request, ActionListener<Response> listener) {
  this.request = request;
  this.listener = listener;

  if (request.replicationType() != ReplicationType.DEFAULT) {
    replicationType = request.replicationType();
  } else {
    replicationType = defaultReplicationType;
  }
}
@Override
public void writeTo(StreamOutput out) throws IOException {
  super.writeTo(out);
  indicesLevelRequest.writeTo(out);
  int size = shards.size();
  out.writeVInt(size);
  for (int i = 0; i < size; i++) {
    shards.get(i).writeTo(out);
  }
  out.writeString(nodeId);
}
public IndicesOptions indicesOptions() {
  return indicesLevelRequest.indicesOptions();
}

public String[] indices() {
  return indicesLevelRequest.indices();
}
void performOnReplica(
    final PrimaryResponse<Response, ReplicaRequest> response,
    final AtomicInteger counter,
    final ShardRouting shard,
    String nodeId) {
  // if we don't have that node, it means that it might have failed and will be created again;
  // in this case, we don't have to do the operation, and just let it fail over
  if (!nodes.nodeExists(nodeId)) {
    if (counter.decrementAndGet() == 0) {
      listener.onResponse(response.response());
    }
    return;
  }

  final ReplicaOperationRequest shardRequest =
      new ReplicaOperationRequest(shardIt.shardId().id(), response.replicaRequest());
  if (!nodeId.equals(nodes.localNodeId())) {
    DiscoveryNode node = nodes.get(nodeId);
    transportService.sendRequest(
        node,
        transportReplicaAction,
        shardRequest,
        transportOptions,
        new VoidTransportResponseHandler(ThreadPool.Names.SAME) {
          @Override
          public void handleResponse(VoidStreamable vResponse) {
            finishIfPossible();
          }

          @Override
          public void handleException(TransportException exp) {
            if (!ignoreReplicaException(exp.unwrapCause())) {
              logger.warn(
                  "Failed to perform " + transportAction + " on replica " + shardIt.shardId(),
                  exp);
              shardStateAction.shardFailed(
                  shard,
                  "Failed to perform ["
                      + transportAction
                      + "] on replica, message ["
                      + detailedMessage(exp)
                      + "]");
            }
            finishIfPossible();
          }

          private void finishIfPossible() {
            if (counter.decrementAndGet() == 0) {
              listener.onResponse(response.response());
            }
          }
        });
  } else {
    if (request.operationThreaded()) {
      request.beforeLocalFork();
      threadPool
          .executor(executor)
          .execute(
              new Runnable() {
                @Override
                public void run() {
                  try {
                    shardOperationOnReplica(shardRequest);
                  } catch (Exception e) {
                    if (!ignoreReplicaException(e)) {
                      logger.warn(
                          "Failed to perform "
                              + transportAction
                              + " on replica "
                              + shardIt.shardId(),
                          e);
                      shardStateAction.shardFailed(
                          shard,
                          "Failed to perform ["
                              + transportAction
                              + "] on replica, message ["
                              + detailedMessage(e)
                              + "]");
                    }
                  }
                  if (counter.decrementAndGet() == 0) {
                    listener.onResponse(response.response());
                  }
                }
              });
    } else {
      try {
        shardOperationOnReplica(shardRequest);
      } catch (Exception e) {
        if (!ignoreReplicaException(e)) {
          logger.warn(
              "Failed to perform " + transportAction + " on replica " + shardIt.shardId(), e);
          shardStateAction.shardFailed(
              shard,
              "Failed to perform ["
                  + transportAction
                  + "] on replica, message ["
                  + detailedMessage(e)
                  + "]");
        }
      }
      if (counter.decrementAndGet() == 0) {
        listener.onResponse(response.response());
      }
    }
  }
}
void retry(boolean fromClusterEvent, @Nullable final Throwable failure) {
  if (!fromClusterEvent) {
    // make it a threaded operation so we fork on the discovery listener thread
    request.beforeLocalFork();
    request.operationThreaded(true);
    clusterService.add(
        request.timeout(),
        new TimeoutClusterStateListener() {
          @Override
          public void postAdded() {
            if (start(true)) {
              // if we managed to start and perform the operation on the primary, we can remove
              // this listener
              clusterService.remove(this);
            }
          }

          @Override
          public void onClose() {
            clusterService.remove(this);
            listener.onFailure(new NodeClosedException(nodes.localNode()));
          }

          @Override
          public void clusterChanged(ClusterChangedEvent event) {
            if (start(true)) {
              // if we managed to start and perform the operation on the primary, we can remove
              // this listener
              clusterService.remove(this);
            }
          }

          @Override
          public void onTimeout(TimeValue timeValue) {
            // just to be on the safe side, see if we can start it now
            if (start(true)) {
              clusterService.remove(this);
              return;
            }
            clusterService.remove(this);
            Throwable listenerFailure = failure;
            if (listenerFailure == null) {
              if (shardIt == null) {
                listenerFailure =
                    new UnavailableShardsException(
                        null,
                        "no available shards: Timeout waiting for ["
                            + timeValue
                            + "], request: "
                            + request.toString());
              } else {
                listenerFailure =
                    new UnavailableShardsException(
                        shardIt.shardId(),
                        "["
                            + shardIt.size()
                            + "] shardIt, ["
                            + shardIt.sizeActive()
                            + "] active : Timeout waiting for ["
                            + timeValue
                            + "], request: "
                            + request.toString());
              }
            }
            listener.onFailure(listenerFailure);
          }
        });
  }
}
/** Returns <tt>true</tt> if the action started to be performed on the primary (or is done). */
public boolean start(final boolean fromClusterEvent) throws ElasticSearchException {
  final ClusterState clusterState = clusterService.state();
  nodes = clusterState.nodes();
  try {
    ClusterBlockException blockException = checkGlobalBlock(clusterState, request);
    if (blockException != null) {
      if (blockException.retryable()) {
        retry(fromClusterEvent, blockException);
        return false;
      } else {
        throw blockException;
      }
    }
    // check if we need to execute, and if not, return
    if (!resolveRequest(clusterState, request, listener)) {
      return true;
    }
    blockException = checkRequestBlock(clusterState, request);
    if (blockException != null) {
      if (blockException.retryable()) {
        retry(fromClusterEvent, blockException);
        return false;
      } else {
        throw blockException;
      }
    }
    shardIt = shards(clusterState, request);
  } catch (Exception e) {
    listener.onFailure(e);
    return true;
  }

  // no shards in the iterator, might be in the case between index gateway recovery and shardIt
  // initialization
  if (shardIt.size() == 0) {
    retry(fromClusterEvent, null);
    return false;
  }

  boolean foundPrimary = false;
  ShardRouting shardX;
  while ((shardX = shardIt.nextOrNull()) != null) {
    final ShardRouting shard = shardX;
    // we only deal with the primary shard here...
    if (!shard.primary()) {
      continue;
    }
    if (!shard.active() || !nodes.nodeExists(shard.currentNodeId())) {
      retry(fromClusterEvent, null);
      return false;
    }

    // check here for consistency
    if (checkWriteConsistency) {
      WriteConsistencyLevel consistencyLevel = defaultWriteConsistencyLevel;
      if (request.consistencyLevel() != WriteConsistencyLevel.DEFAULT) {
        consistencyLevel = request.consistencyLevel();
      }
      int requiredNumber = 1;
      if (consistencyLevel == WriteConsistencyLevel.QUORUM && shardIt.size() > 2) {
        // quorum only makes sense with more than 2 shard copies; otherwise it's 1 shard with
        // 1 replica, and the quorum is 1 (which is what requiredNumber is initialized to)
        requiredNumber = (shardIt.size() / 2) + 1;
      } else if (consistencyLevel == WriteConsistencyLevel.ALL) {
        requiredNumber = shardIt.size();
      }

      if (shardIt.sizeActive() < requiredNumber) {
        retry(fromClusterEvent, null);
        return false;
      }
    }

    if (!primaryOperationStarted.compareAndSet(false, true)) {
      return true;
    }

    foundPrimary = true;
    if (shard.currentNodeId().equals(nodes.localNodeId())) {
      if (request.operationThreaded()) {
        request.beforeLocalFork();
        threadPool
            .executor(executor)
            .execute(
                new Runnable() {
                  @Override
                  public void run() {
                    performOnPrimary(shard.id(), fromClusterEvent, shard, clusterState);
                  }
                });
      } else {
        performOnPrimary(shard.id(), fromClusterEvent, shard, clusterState);
      }
    } else {
      DiscoveryNode node = nodes.get(shard.currentNodeId());
      transportService.sendRequest(
          node,
          transportAction,
          request,
          transportOptions,
          new BaseTransportResponseHandler<Response>() {
            @Override
            public Response newInstance() {
              return newResponseInstance();
            }

            @Override
            public String executor() {
              return ThreadPool.Names.SAME;
            }

            @Override
            public void handleResponse(Response response) {
              listener.onResponse(response);
            }

            @Override
            public void handleException(TransportException exp) {
              // if we got disconnected from the node, or the node / shard is not in the right
              // state (being closed)
              if (exp.unwrapCause() instanceof ConnectTransportException
                  || exp.unwrapCause() instanceof NodeClosedException
                  || retryPrimaryException(exp)) {
                primaryOperationStarted.set(false);
                // we already marked it as started when we executed it (removed the listener),
                // so pass false to re-add to the cluster listener
                retry(false, null);
              } else {
                listener.onFailure(exp);
              }
            }
          });
    }
    break;
  }
  // we should never get here, but here we go
  if (!foundPrimary) {
    retry(fromClusterEvent, null);
    return false;
  }
  return true;
}
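// Illustrative sketch, not part of the original class: the write-consistency arithmetic from the
// checkWriteConsistency block above, pulled into a hypothetical helper for clarity. With one
// primary and one replica (2 copies total), QUORUM degenerates to 1; from 3 copies upward it is a
// strict majority; ALL requires every copy to be active.
static int requiredActiveShardCopies(WriteConsistencyLevel consistencyLevel, int shardCopies) {
  if (consistencyLevel == WriteConsistencyLevel.QUORUM && shardCopies > 2) {
    return (shardCopies / 2) + 1; // e.g. 3 copies -> 2, 5 copies -> 3
  }
  if (consistencyLevel == WriteConsistencyLevel.ALL) {
    return shardCopies; // every copy, including the primary
  }
  return 1; // ONE, or QUORUM with at most 2 copies
}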
@Override
public void writeTo(StreamOutput out) throws IOException {
  out.writeVInt(shardId);
  request.writeTo(out);
}
@Override
public void readFrom(StreamInput in) throws IOException {
  shardId = in.readVInt();
  request = newRequestInstance();
  request.readFrom(in);
}
/**
 * Resolves the request. By default, this simply sets the concrete index (if the request targets
 * an aliased one). If resolving implies a different execution path, return <tt>false</tt> here to
 * indicate that this request should not continue to be executed.
 */
protected boolean resolveRequest(
    ClusterState state, Request request, ActionListener<Response> listener) {
  request.index(state.metaData().concreteIndex(request.index()));
  return true;
}
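// Hypothetical subclass override (a sketch, not from the original source) showing the contract
// described in the javadoc above: resolve the concrete index as the default does, then return
// false when the request has already been answered and the normal shard-level execution should
// not continue. shouldShortCircuit(...) is an assumed helper, not part of the base class.
@Override
protected boolean resolveRequest(
    ClusterState state, Request request, ActionListener<Response> listener) {
  request.index(state.metaData().concreteIndex(request.index()));
  if (shouldShortCircuit(request)) {
    // we answer the listener ourselves, so tell the caller not to keep executing this request
    listener.onResponse(newResponseInstance());
    return false;
  }
  return true;
}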