private UpdateResult shardUpdateOperation(
        ClusterState clusterState,
        BulkShardRequest bulkShardRequest,
        UpdateRequest updateRequest,
        IndexShard indexShard) {
    UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard);
    switch (translate.operation()) {
        case UPSERT:
        case INDEX:
            IndexRequest indexRequest = translate.action();
            try {
                WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, false);
                return new UpdateResult(translate, indexRequest, result);
            } catch (Throwable t) {
                t = ExceptionsHelper.unwrapCause(t);
                boolean retry = false;
                if (t instanceof VersionConflictEngineException
                        || (t instanceof DocumentAlreadyExistsException
                                && translate.operation() == UpdateHelper.Operation.UPSERT)) {
                    retry = true;
                }
                return new UpdateResult(translate, indexRequest, retry, t, null);
            }
        case DELETE:
            DeleteRequest deleteRequest = translate.action();
            try {
                WriteResult result = shardDeleteOperation(bulkShardRequest, deleteRequest, indexShard);
                return new UpdateResult(translate, deleteRequest, result);
            } catch (Throwable t) {
                t = ExceptionsHelper.unwrapCause(t);
                boolean retry = false;
                if (t instanceof VersionConflictEngineException) {
                    retry = true;
                }
                return new UpdateResult(translate, deleteRequest, retry, t, null);
            }
        case NONE:
            UpdateResponse updateResponse = translate.action();
            indexShard.indexingService().noopUpdate(updateRequest.type());
            return new UpdateResult(translate, updateResponse);
        default:
            throw new IllegalStateException("Illegal update operation " + translate.operation());
    }
}
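
/**
 * Executes a bulk shard request on the primary. Items are processed in order: index and delete
 * requests are applied directly, while update requests are translated via
 * {@link #shardUpdateOperation} and retried up to {@code retryOnConflict} times on version
 * conflicts. Per-item failures are recorded as item responses; if the whole request must be
 * retried on the primary, previously applied versions are restored before rethrowing.
 */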
@Override
protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(
        ClusterState clusterState, PrimaryOperationRequest shardRequest) {
    final BulkShardRequest request = shardRequest.request;
    final IndexService indexService = indicesService.indexServiceSafe(request.index());
    final IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());

    long[] preVersions = new long[request.items().length];
    VersionType[] preVersionTypes = new VersionType[request.items().length];
    Translog.Location location = null;
    for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
        BulkItemRequest item = request.items()[requestIndex];
        if (item.request() instanceof IndexRequest) {
            IndexRequest indexRequest = (IndexRequest) item.request();
            preVersions[requestIndex] = indexRequest.version();
            preVersionTypes[requestIndex] = indexRequest.versionType();
            try {
                WriteResult<IndexResponse> result =
                        shardIndexOperation(request, indexRequest, clusterState, indexShard, true);
                location = locationToSync(location, result.location);
                // add the response
                IndexResponse indexResponse = result.response();
                setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
            } catch (Throwable e) {
                // rethrow the failure if we are going to retry on the primary and let the parent handle it
                if (retryPrimaryException(e)) {
                    // restore updated versions...
                    for (int j = 0; j < requestIndex; j++) {
                        applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
                    }
                    throw (ElasticsearchException) e;
                }
                if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
                    logger.trace("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest);
                } else {
                    logger.debug("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest);
                }
                // if it's a conflict failure and we already executed the request on the primary (and we execute it
                // again due to primary relocation, or because only up to N bulk items were processed before the
                // shard got closed), just reuse the response from the successful execution
                if (item.getPrimaryResponse() != null && isConflictException(e)) {
                    setResponse(item, item.getPrimaryResponse());
                } else {
                    setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
                            new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
                }
            }
        } else if (item.request() instanceof DeleteRequest) {
            DeleteRequest deleteRequest = (DeleteRequest) item.request();
            preVersions[requestIndex] = deleteRequest.version();
            preVersionTypes[requestIndex] = deleteRequest.versionType();
            try {
                // add the response
                final WriteResult<DeleteResponse> writeResult = shardDeleteOperation(request, deleteRequest, indexShard);
                DeleteResponse deleteResponse = writeResult.response();
                location = locationToSync(location, writeResult.location);
                setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
            } catch (Throwable e) {
                // rethrow the failure if we are going to retry on the primary and let the parent handle it
                if (retryPrimaryException(e)) {
                    // restore updated versions...
                    for (int j = 0; j < requestIndex; j++) {
                        applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
                    }
                    throw (ElasticsearchException) e;
                }
                if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
                    logger.trace("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest);
                } else {
                    logger.debug("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest);
                }
                // if it's a conflict failure and we already executed the request on the primary, just reuse the
                // response from the successful execution (see the index case above)
                if (item.getPrimaryResponse() != null && isConflictException(e)) {
                    setResponse(item, item.getPrimaryResponse());
                } else {
                    setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
                            new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
                }
            }
        } else if (item.request() instanceof UpdateRequest) {
            UpdateRequest updateRequest = (UpdateRequest) item.request();
            preVersions[requestIndex] = updateRequest.version();
            preVersionTypes[requestIndex] = updateRequest.versionType();
            // We need to do the requested retries plus the initial attempt. We don't use
            // "< 1 + retry_on_conflict" because retry_on_conflict may be Integer.MAX_VALUE
            for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
                UpdateResult updateResult;
                try {
                    updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard);
                } catch (Throwable t) {
                    updateResult = new UpdateResult(null, null, false, t, null);
                }
                if (updateResult.success()) {
                    if (updateResult.writeResult != null) {
                        location = locationToSync(location, updateResult.writeResult.location);
                    }
                    switch (updateResult.result.operation()) {
                        case UPSERT:
                        case INDEX:
                            WriteResult<IndexResponse> result = updateResult.writeResult;
                            IndexRequest indexRequest = updateResult.request();
                            BytesReference indexSourceAsBytes = indexRequest.source();
                            // add the response
                            IndexResponse indexResponse = result.response();
                            UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(),
                                    indexResponse.getIndex(), indexResponse.getType(), indexResponse.getId(),
                                    indexResponse.getVersion(), indexResponse.isCreated());
                            if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
                                Tuple<XContentType, Map<String, Object>> sourceAndContent =
                                        XContentHelper.convertToMap(indexSourceAsBytes, true);
                                updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest,
                                        shardRequest.request.index(), indexResponse.getVersion(),
                                        sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
                            }
                            item = request.items()[requestIndex] =
                                    new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
                            setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
                            break;
                        case DELETE:
                            WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
                            DeleteResponse response = writeResult.response();
                            DeleteRequest deleteRequest = updateResult.request();
                            updateResponse = new UpdateResponse(response.getShardInfo(), response.getIndex(),
                                    response.getType(), response.getId(), response.getVersion(), false);
                            updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest,
                                    shardRequest.request.index(), response.getVersion(),
                                    updateResult.result.updatedSourceAsMap(),
                                    updateResult.result.updateSourceContentType(), null));
                            // Replace the update request with the translated delete request to execute on the replica.
                            item = request.items()[requestIndex] =
                                    new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
                            setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
                            break;
                        case NONE:
                            setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResult.noopResult));
                            item.setIgnoreOnReplica(); // no need to go to the replica
                            break;
                    }
                    // NOTE: Breaking out of the retry_on_conflict loop!
                    break;
                } else if (updateResult.failure()) {
                    Throwable t = updateResult.error;
                    if (updateResult.retry) {
                        // updateAttemptsCount is 0-based and marks the current attempt; if it equals
                        // retryOnConflict, this was the last iteration
                        if (updateAttemptsCount >= updateRequest.retryOnConflict()) {
                            setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
                                    new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
                        }
                    } else {
                        // rethrow the failure if we are going to retry on the primary and let the parent handle it
                        if (retryPrimaryException(t)) {
                            // restore updated versions...
                            for (int j = 0; j < requestIndex; j++) {
                                applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
                            }
                            throw (ElasticsearchException) t;
                        }
                        // if it's a conflict failure and we already executed the request on the primary, just reuse
                        // the response from the successful execution (see the index case above)
                        if (item.getPrimaryResponse() != null && isConflictException(t)) {
                            setResponse(item, item.getPrimaryResponse());
                        } else if (updateResult.result == null) {
                            setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
                                    new BulkItemResponse.Failure(shardRequest.request.index(), updateRequest.type(), updateRequest.id(), t)));
                        } else {
                            switch (updateResult.result.operation()) {
                                case UPSERT:
                                case INDEX:
                                    IndexRequest indexRequest = updateResult.request();
                                    if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
                                        logger.trace("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest);
                                    } else {
                                        logger.debug("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest);
                                    }
                                    setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
                                            new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t)));
                                    break;
                                case DELETE:
                                    DeleteRequest deleteRequest = updateResult.request();
                                    if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
                                        logger.trace("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest);
                                    } else {
                                        logger.debug("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest);
                                    }
                                    setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
                                            new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t)));
                                    break;
                            }
                        }
                        // NOTE: Breaking out of the retry_on_conflict loop!
                        break;
                    }
                }
            }
        } else {
            throw new IllegalStateException("Unexpected index operation: " + item.request());
        }

        assert item.getPrimaryResponse() != null;
        assert preVersionTypes[requestIndex] != null;
    }

    processAfter(request.refresh(), indexShard, location);

    BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
    BulkItemRequest[] items = request.items();
    for (int i = 0; i < items.length; i++) {
        responses[i] = items[i].getPrimaryResponse();
    }
    return new Tuple<>(new BulkShardResponse(shardRequest.shardId, responses), shardRequest.request);
}
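
/**
 * Executes a single update on the shard: the request is translated via {@code updateHelper}
 * into an index, delete, or no-op and dispatched to the corresponding single-document action.
 * On a version conflict (or, for an upsert, a document-already-exists failure), the operation
 * is re-run on the same executor until {@code retryOnConflict} retries have been used up.
 */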
protected void shardOperation(
        final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount)
        throws ElasticsearchException {
    final UpdateHelper.Result result = updateHelper.prepare(request);
    switch (result.operation()) {
        case UPSERT:
            IndexRequest upsertRequest = result.action();
            // we fetch it from the index request so we don't generate the bytes twice; it's already done
            // in the index request
            final BytesReference upsertSourceBytes = upsertRequest.source();
            indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {
                @Override
                public void onResponse(IndexResponse response) {
                    UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(),
                            response.getId(), response.getVersion(), response.isCreated());
                    if (request.fields() != null && request.fields().length > 0) {
                        Tuple<XContentType, Map<String, Object>> sourceAndContent =
                                XContentHelper.convertToMap(upsertSourceBytes, true);
                        update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(),
                                sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
                    } else {
                        update.setGetResult(null);
                    }
                    listener.onResponse(update);
                }

                @Override
                public void onFailure(Throwable e) {
                    e = ExceptionsHelper.unwrapCause(e);
                    if (e instanceof VersionConflictEngineException || e instanceof DocumentAlreadyExistsException) {
                        if (retryCount < request.retryOnConflict()) {
                            threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
                                @Override
                                protected void doRun() {
                                    shardOperation(request, listener, retryCount + 1);
                                }
                            });
                            return;
                        }
                    }
                    listener.onFailure(e);
                }
            });
            break;
        case INDEX:
            IndexRequest indexRequest = result.action();
            // we fetch it from the index request so we don't generate the bytes twice; it's already done
            // in the index request
            final BytesReference indexSourceBytes = indexRequest.source();
            indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
                @Override
                public void onResponse(IndexResponse response) {
                    UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(),
                            response.getId(), response.getVersion(), response.isCreated());
                    update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(),
                            result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
                    listener.onResponse(update);
                }

                @Override
                public void onFailure(Throwable e) {
                    e = ExceptionsHelper.unwrapCause(e);
                    if (e instanceof VersionConflictEngineException) {
                        if (retryCount < request.retryOnConflict()) {
                            threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
                                @Override
                                protected void doRun() {
                                    shardOperation(request, listener, retryCount + 1);
                                }
                            });
                            return;
                        }
                    }
                    listener.onFailure(e);
                }
            });
            break;
        case DELETE:
            DeleteRequest deleteRequest = result.action();
            deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {
                @Override
                public void onResponse(DeleteResponse response) {
                    UpdateResponse update = new UpdateResponse(response.getIndex(), response.getType(),
                            response.getId(), response.getVersion(), false);
                    update.setGetResult(updateHelper.extractGetResult(request, response.getVersion(),
                            result.updatedSourceAsMap(), result.updateSourceContentType(), null));
                    listener.onResponse(update);
                }

                @Override
                public void onFailure(Throwable e) {
                    e = ExceptionsHelper.unwrapCause(e);
                    if (e instanceof VersionConflictEngineException) {
                        if (retryCount < request.retryOnConflict()) {
                            threadPool.executor(executor()).execute(new ActionRunnable<UpdateResponse>(listener) {
                                @Override
                                protected void doRun() {
                                    shardOperation(request, listener, retryCount + 1);
                                }
                            });
                            return;
                        }
                    }
                    listener.onFailure(e);
                }
            });
            break;
        case NONE:
            UpdateResponse update = result.action();
            listener.onResponse(update);
            indicesService.indexService(request.index())
                    .shard(request.shardId())
                    .indexingService()
                    .noopUpdate(request.type());
            break;
        default:
            throw new ElasticsearchIllegalStateException("Illegal operation " + result.operation());
    }
}
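
// Illustrative sketch (not part of the original source): how a caller might set retryOnConflict,
// the value that drives the retry loops in shardOperation() and shardOperationOnPrimary() above.
// The index name, type, id, and partial document are made-up placeholders; the builder methods
// used are assumed to be the standard UpdateRequest client API.
//
//     UpdateRequest updateRequest = new UpdateRequest("my_index", "my_type", "1")
//             .doc("{\"counter\": 2}")   // partial document to merge into the existing source
//             .retryOnConflict(3);        // allow up to 3 retries on version conflicts
//     client.update(updateRequest).actionGet();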