/**
 * Executes the given bulk request synchronously and converts failures into exceptions.
 *
 * @param esBulk the prepared bulk request to run
 * @throws BulkUpdatePartialFailureException when some commands succeeded and some failed
 * @throws ElasticsearchException when every command in the bulk failed
 */
@Override
public void executeESBulkRequest(BulkRequestBuilder esBulk)
    throws ElasticsearchException, BulkUpdatePartialFailureException {
  BulkResponse bulkResponse = esBulk.execute().actionGet();
  if (!bulkResponse.hasFailures()) {
    return;
  }
  int failureCount = 0;
  boolean anySucceeded = false;
  for (BulkItemResponse item : bulkResponse.getItems()) {
    if (item.isFailed()) {
      failureCount++;
    } else {
      anySucceeded = true;
    }
  }
  if (anySucceeded) {
    // Mixed outcome: some commands were applied, others were not.
    throw new BulkUpdatePartialFailureException(bulkResponse.buildFailureMessage(), failureCount);
  }
  // Every command in the bulk failed.
  throw new ElasticsearchException(
      "Failed to completely execute ES index bulk update for "
          + failureCount
          + " commands: "
          + bulkResponse.buildFailureMessage());
}
/**
 * Serializes this response's state to the stream.
 *
 * <p>The field order here must mirror {@code readFrom} exactly: shardId, totalHits,
 * updated, bulk-response count, each bulk item, then the optional failure message.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
  super.writeTo(out);
  out.writeVInt(shardId);
  out.writeVInt(totalHits);
  out.writeVInt(updated);
  // Length prefix lets readFrom size the array before reading the items.
  out.writeVInt(bulkResponses.length);
  for (BulkItemResponse response : bulkResponses) {
    response.writeTo(out);
  }
  out.writeOptionalString(failedShardExceptionMessage);
}
/**
 * Resolves queued entities (if a resolver is configured) and indexes all pending tasks
 * in a single bulk request.
 *
 * <p>Item failures are counted and logged as a warning (the index may then be out of
 * sync with the source of truth); they do not abort processing.
 */
private void index(final QueuedTaskExecutor executor) {
  final Esi4JBatchedEntityResolver entityResolver = executor.getEntityResolver();
  if (entityResolver != null) {
    // Give the resolver a chance to batch-load entities referenced by the queued tasks.
    entityResolver.resolveEntities(_tasks);
  }
  final BulkResponseWrapper response = executor
      .getTaskProcessor()
      .getIndex()
      .executeBulk(
          new Esi4JOperation<ListenableActionFuture<BulkResponse>>() {
            @Override
            public ListenableActionFuture<BulkResponse> execute(
                final Client client, final String indexName, final OperationContext helper) {
              final BulkRequestBuilder bulk = client.prepareBulk();
              // Tasks may be null slots (presumably cleared/deduplicated upstream —
              // TODO confirm); skip those.
              for (final Esi4JEntityTask _task : _tasks) {
                if (_task != null) {
                  _task.addToBulk(client, bulk, indexName, helper);
                }
              }
              final ListenableActionFuture<BulkResponse> response = bulk.execute();
              return response;
            }
          })
      .actionGet();
  // Count failed items; successes are not tracked individually.
  int failed = 0;
  for (final BulkItemResponse item : response.getBulkResponse()) {
    if (item.isFailed()) {
      failed++;
    }
  }
  if (failed > 0) {
    log.warn("failed to index " + failed + " items. index might be out of sync");
  }
  if (log.isDebugEnabled()) {
    final int indexed = response.getBulkResponse().getItems().length - failed;
    log.debug("finished bulk indexing " + indexed + " items");
  }
}
/** Verifies that bulk upsert-style updates succeed and report shard info per item. */
@Test
public void testBulk_withUpdateItems() throws Exception {
  prepareIndex(1);

  // Queue ten upsert updates against the same index/type.
  final BulkRequestBuilder bulk = client().prepareBulk();
  for (int id = 0; id < 10; id++) {
    bulk.add(
        client()
            .prepareUpdate("idx", "type", Integer.toString(id))
            .setDoc("{}")
            .setDocAsUpsert(true));
  }

  // Every item must succeed and carry shard info on its response.
  final BulkResponse response = bulk.get();
  for (BulkItemResponse item : response) {
    assertThat(item.isFailed(), equalTo(false));
    assertShardInfo(item.getResponse());
  }
}
/**
 * Indexes all given queries in a single bulk request.
 *
 * @param queries the index operations to execute together
 * @throws ElasticsearchException if any bulk item failed; the exception carries an
 *     id-to-failure-message map of the failed documents
 */
@Override
public void bulkIndex(List<IndexQuery> queries) {
  BulkRequestBuilder bulk = client.prepareBulk();
  for (IndexQuery query : queries) {
    bulk.add(prepareIndex(query));
  }
  BulkResponse response = bulk.execute().actionGet();
  if (!response.hasFailures()) {
    return;
  }
  // Collect id -> failure message for every failed item so callers can inspect them.
  Map<String, String> failedDocuments = new HashMap<String, String>();
  for (BulkItemResponse item : response.getItems()) {
    if (item.isFailed()) {
      failedDocuments.put(item.getId(), item.getFailureMessage());
    }
  }
  throw new ElasticsearchException(
      "Bulk indexing has failures. Use ElasticsearchException.getFailedDocuments() for detailed messages ["
          + failedDocuments
          + "]",
      failedDocuments);
}
/**
 * Updates the specified objects via a single bulk request.
 *
 * <p>Objects without an id are skipped with a warning; objects that fail to serialize
 * are likewise skipped best-effort. When {@code enableVersioning} is set, each index
 * request carries the object's version so stale updates are rejected by the server.
 *
 * @return the id's of the failed objects (e.g. due to versioning)
 */
public Collection<Integer> bulkUpdate(
    Collection<T> objects, String indexName, boolean refresh, boolean enableVersioning) {
  // now using bulk API instead of feeding each doc separate with feedDoc
  BulkRequestBuilder brb = client.prepareBulk();
  // this works differently then the direct call to refresh!? maybe refresh is not async?
  // brb.setRefresh(refresh);
  for (T o : objects) {
    if (o.getId() == null) {
      logger.warn("Skipped object without id when bulkUpdate:" + o);
      continue;
    }
    try {
      XContentBuilder source = createDoc(o);
      IndexRequest indexReq =
          Requests.indexRequest(indexName).type(getIndexType()).id(o.getId()).source(source);
      // Optimistic concurrency: server rejects the write if the stored version differs.
      if (enableVersioning) indexReq.version(o.getVersion());
      brb.add(indexReq);
    } catch (IOException ex) {
      // Best-effort: one unserializable object must not abort the whole bulk.
      logger.warn("Cannot add object:" + o + " to bulkIndexing action." + ex.getMessage());
    }
  }
  if (brb.numberOfActions() > 0) {
    BulkResponse rsp = brb.execute().actionGet();
    if (rsp.hasFailures()) {
      List<Integer> list = new ArrayList<Integer>(rsp.items().length);
      for (BulkItemResponse br : rsp.items()) {
        if (br.isFailed()) {
          // logger.info("Error:" + br.failureMessage());
          // NOTE(review): itemId() is the item's position in the bulk, which may not be
          // the object's id as the Javadoc suggests — confirm against callers.
          list.add(br.itemId());
        }
      }
      return list;
    }
    // NOTE(review): refresh only happens when the bulk had no failures — confirm intended.
    if (refresh) refresh(indexName);
  }
  return Collections.emptyList();
}
/**
 * Deserializes this response's state from the stream.
 *
 * <p>The field order here must mirror {@code writeTo} exactly: shardId, totalHits,
 * updated, bulk-response count, each bulk item, then the optional failure message.
 */
@Override
public void readFrom(StreamInput in) throws IOException {
  super.readFrom(in);
  shardId = in.readVInt();
  totalHits = in.readVInt();
  updated = in.readVInt();
  // The length prefix written by writeTo sizes the array before reading the items.
  bulkResponses = new BulkItemResponse[in.readVInt()];
  for (int i = 0; i < bulkResponses.length; i++) {
    bulkResponses[i] = BulkItemResponse.readBulkItem(in);
  }
  failedShardExceptionMessage = in.readOptionalString();
}
/**
 * Logs the failures contained in a bulk response.
 *
 * <p>Always emits one summary warning built from the response's failure message;
 * per-item details are only logged at debug level to avoid flooding the log for
 * large bulk requests.
 *
 * @param response a BulkResponse that may contain failed items
 */
private void processBulkResponseFailure(BulkResponse response) {
  // Fixed grammar in the warning ("There was failures" -> "There were failures").
  logger.warn("There were failures when executing bulk : " + response.buildFailureMessage());
  if (!logger.isDebugEnabled()) return;
  for (BulkItemResponse item : response.getItems()) {
    if (item.isFailed()) {
      logger.debug(
          "Error {} occurred on index {}, type {}, id {} for {} operation ",
          item.getFailureMessage(),
          item.getIndex(),
          item.getType(),
          item.getId(),
          item.getOpType());
    }
  }
}
/**
 * Executes the bulk request synchronously, logging a warning for each failed item.
 *
 * <p>An empty request is a no-op; successful items are not logged.
 */
protected void executeBulkRequest(BulkRequestBuilder bulkRequest) {
  if (bulkRequest.numberOfActions() == 0) {
    return;
  }
  BulkResponse result = bulkRequest.execute().actionGet();
  if (!result.hasFailures()) {
    return;
  }
  // One warning per failed item; successes are skipped.
  for (BulkItemResponse item : result) {
    if (item.isFailed()) {
      LOG.warning(
          String.format(
              "Unable to save Entity %s in %s/%s, cause: %s",
              item.getId(), item.getIndex(), item.getType(), item.getFailureMessage()));
    }
  }
}
/**
 * Callback invoked when a bulk request completes: releases the concurrency permit,
 * counts the processed items, and logs an error for each failed item.
 */
@Override
public void onResponse(BulkResponse response) {
  // Free the in-flight slot before doing any bookkeeping.
  semaphore.release();
  final BulkItemResponse[] items = response.getItems();
  counter.addAndGet(items.length);
  for (BulkItemResponse item : items) {
    if (!item.isFailed()) {
      continue;
    }
    LOGGER.error(
        "index [{}], type [{}], id [{}], message [{}]",
        item.getIndex(),
        item.getType(),
        item.getId(),
        item.getFailureMessage());
  }
}
/** Verifies bulk indexing followed by bulk deletion, checking shard info on each item. */
@Test
public void testBulk_withIndexAndDeleteItems() throws Exception {
  prepareIndex(1);

  // Phase 1: bulk-index ten documents with auto-generated ids.
  final BulkRequestBuilder indexBulk = client().prepareBulk();
  for (int i = 0; i < 10; i++) {
    indexBulk.add(client().prepareIndex("idx", "type").setSource("{}"));
  }
  final BulkResponse indexResponse = indexBulk.get();

  // Phase 2: verify each indexed item and queue a delete for it.
  final BulkRequestBuilder deleteBulk = client().prepareBulk();
  for (BulkItemResponse item : indexResponse) {
    assertThat(item.isFailed(), equalTo(false));
    assertShardInfo(item.getResponse());
    deleteBulk.add(client().prepareDelete("idx", "type", item.getId()));
  }

  // Phase 3: all deletes must succeed as well.
  final BulkResponse deleteResponse = deleteBulk.get();
  for (BulkItemResponse item : deleteResponse) {
    assertThat(item.isFailed(), equalTo(false));
    assertShardInfo(item.getResponse());
  }
}
/**
 * Verifies the in-flight-requests circuit breaker: a bulk large enough to exceed the
 * configured byte limit must fail, either by throwing {@code CircuitBreakingException}
 * directly or by marking every bulk item as failed with that cause.
 */
public void testLimitsRequestSize() throws Exception {
  ByteSizeValue inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB);
  if (noopBreakerUsed()) {
    logger.info("--> noop breakers used, skipping test");
    return;
  }

  internalCluster().ensureAtLeastNumDataNodes(2);

  NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
  List<NodeStats> dataNodeStats = new ArrayList<>();
  for (NodeStats stat : nodeStats.getNodes()) {
    if (stat.getNode().isDataNode()) {
      dataNodeStats.add(stat);
    }
  }

  assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
  Collections.shuffle(dataNodeStats, random());

  // send bulk request from source node to target node later. The sole shard is bound to the
  // target node.
  NodeStats targetNode = dataNodeStats.get(0);
  NodeStats sourceNode = dataNodeStats.get(1);

  assertAcked(
      prepareCreate("index")
          .setSettings(
              Settings.builder()
                  .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                  .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                  .put("index.routing.allocation.include._name", targetNode.getNode().getName())
                  .put(
                      EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                      EnableAllocationDecider.Rebalance.NONE)));

  Client client = client(sourceNode.getNode().getName());

  // we use the limit size as a (very) rough indication on how many requests we should sent to hit
  // the limit
  int numRequests = inFlightRequestsLimit.bytesAsInt();
  BulkRequest bulkRequest = new BulkRequest();
  for (int i = 0; i < numRequests; i++) {
    IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i));
    indexRequest.source("field", "value", "num", i);
    bulkRequest.add(indexRequest);
  }

  Settings limitSettings =
      Settings.builder()
          .put(
              HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING
                  .getKey(),
              inFlightRequestsLimit)
          .build();

  assertAcked(
      client().admin().cluster().prepareUpdateSettings().setTransientSettings(limitSettings));

  // can either fail directly with an exception or the response contains exceptions (depending on
  // client)
  try {
    BulkResponse response = client.bulk(bulkRequest).actionGet();
    if (!response.hasFailures()) {
      fail("Should have thrown CircuitBreakingException");
    } else {
      // each item must have failed with CircuitBreakingException
      for (BulkItemResponse bulkItemResponse : response) {
        Throwable cause = ExceptionsHelper.unwrapCause(bulkItemResponse.getFailure().getCause());
        assertThat(cause, instanceOf(CircuitBreakingException.class));
        assertEquals(
            ((CircuitBreakingException) cause).getByteLimit(), inFlightRequestsLimit.bytes());
      }
    }
  } catch (CircuitBreakingException ex) {
    assertEquals(ex.getByteLimit(), inFlightRequestsLimit.bytes());
  }
}