@Override
 public void executeESBulkRequest(BulkRequestBuilder esBulk)
     throws ElasticsearchException, BulkUpdatePartialFailureException {
   BulkResponse response = esBulk.execute().actionGet();
   if (!response.hasFailures()) {
     return;
   }
   // Tally failed items and remember whether at least one item succeeded.
   int failureCount = 0;
   boolean anySucceeded = false;
   for (BulkItemResponse item : response.getItems()) {
     if (item.isFailed()) {
       failureCount++;
     } else {
       anySucceeded = true;
     }
   }
   if (anySucceeded) {
     // Mixed outcome: report a partial failure together with the failed-item count.
     throw new BulkUpdatePartialFailureException(response.buildFailureMessage(), failureCount);
   }
   // Every item failed: treat the whole bulk request as a hard failure.
   throw new ElasticsearchException(
       "Failed to completely execute ES index bulk update for "
           + failureCount
           + " commands: "
           + response.buildFailureMessage());
 }
Example 2
0
 /**
  * Executes the given bulk request and fails fast if any item failed.
  *
  * @param builder the bulk request to execute
  * @param errorMessage format string used as the exception-message prefix on failure
  * @param errorMessageArgs arguments for {@code errorMessage}
  * @return the successful bulk response
  * @throws IllegalStateException if the response reports any item failure
  */
 public static BulkResponse executeBulkRequest(
     BulkRequestBuilder builder, String errorMessage, Object... errorMessageArgs) {
   BulkResponse result = builder.get();
   if (!result.hasFailures()) {
     return result;
   }
   // do not use Preconditions as the message is expensive to generate (see
   // buildFailureMessage())
   throw new IllegalStateException(
       format(errorMessage, errorMessageArgs) + ": " + result.buildFailureMessage());
 }
Example 3
0
 /**
  * Executes the pending bulk request, if any, and hands back a builder for the next batch.
  *
  * @param bulkRequest the builder holding queued actions (may be empty)
  * @return a fresh builder after a successful flush, or the untouched builder when empty
  * @throws RuntimeException if the bulk response reports any failures
  */
 protected BulkRequestBuilder sendAndCheck(BulkRequestBuilder bulkRequest) {
   // Nothing queued: return the builder unchanged so the caller keeps batching into it.
   if (bulkRequest.numberOfActions() == 0) {
     return bulkRequest;
   }
   BulkResponse bulkResponse = bulkRequest.execute().actionGet();
   if (bulkResponse.hasFailures()) {
     // Surface the aggregated failure message built from all failed items.
     throw new RuntimeException(bulkResponse.buildFailureMessage());
   }
   // Flush succeeded: give the caller a brand-new builder to start the next batch.
   return opalSearchService.getClient().prepareBulk();
 }
  /**
   * Logs bulk-execution failures: a summary warning always, plus one debug line per
   * failed item when debug logging is enabled.
   *
   * @param response a BulkResponse that reported failures
   */
  private void processBulkResponseFailure(BulkResponse response) {
    // Parameterized logging (the debug call below already uses "{}" placeholders, so
    // this logger supports them); also fixes the "There was failures" grammar.
    logger.warn("There were failures when executing bulk : {}", response.buildFailureMessage());

    // Per-item details are only worth iterating over at debug level.
    if (!logger.isDebugEnabled()) return;

    for (BulkItemResponse item : response.getItems()) {
      if (item.isFailed()) {
        logger.debug(
            "Error {} occurred on index {}, type {}, id {} for {} operation ",
            item.getFailureMessage(),
            item.getIndex(),
            item.getType(),
            item.getId(),
            item.getOpType());
      }
    }
  }
  /** Bulk-indexes the given products into {@code index} and verifies the resulting doc count. */
  static void indexProducts(List<Map<String, Object>> products, String index, Node node)
      throws Exception {
    long countBefore = getCurrentDocumentCount(index, node);

    // One index operation per product, keyed by its "ProductId" field.
    BulkRequest bulk = new BulkRequest();
    for (Map<String, Object> product : products) {
      IndexRequest request = new IndexRequest(index, "product", (String) product.get("ProductId"));
      request.source(product);
      bulk.add(request);
    }

    BulkResponse response = node.client().bulk(bulk).actionGet();
    if (response.hasFailures()) {
      Assert.fail("Error in creating products: " + response.buildFailureMessage());
    }

    // Refresh so the newly indexed documents are visible before counting.
    refreshIndex(index, node);
    assertDocumentCountAfterIndexing(index, products.size() + countBefore, node);
  }
    /**
     * Consumer loop for the RabbitMQ river: (re)connects to the broker, declares the
     * configured queue/exchange/binding, then turns deliveries into Elasticsearch bulk
     * requests, ack'ing messages once handled. Runs until {@code closed} is set.
     */
    @Override
    public void run() {
      // Outer loop: one iteration per (re)connection attempt to the broker.
      while (true) {
        if (closed) {
          break;
        }
        try {
          connection = connectionFactory.newConnection(rabbitAddresses);
          channel = connection.createChannel();
        } catch (Exception e) {
          if (!closed) {
            logger.warn("failed to created a connection / channel", e);
          } else {
            continue;
          }
          cleanup(0, "failed to connect");
          try {
            // Back off before the next connection attempt.
            Thread.sleep(5000);
          } catch (InterruptedException e1) {
            // ignore, if we are closing, we will exit later
          }
          // NOTE(review): after a failed connect we still fall through to the
          // queue-setup block below with a possibly null/stale channel; the
          // try/catch there absorbs the resulting failure and retries — confirm
          // this fall-through is intended rather than a `continue`.
        }

        QueueingConsumer consumer = null;
        // define the queue
        try {
          if (rabbitQueueDeclare) {
            // only declare the queue if we should
            channel.queueDeclare(
                rabbitQueue /*queue*/,
                rabbitQueueDurable /*durable*/,
                false /*exclusive*/,
                rabbitQueueAutoDelete /*autoDelete*/,
                rabbitQueueArgs /*extra args*/);
          }
          if (rabbitExchangeDeclare) {
            // only declare the exchange if we should
            channel.exchangeDeclare(
                rabbitExchange /*exchange*/, rabbitExchangeType /*type*/, rabbitExchangeDurable);
          }

          // Bound prefetch so unacked deliveries don't pile up beyond the QoS window.
          channel.basicQos(
              rabbitQosPrefetchSize /*qos_prefetch_size*/,
              rabbitQosPrefetchCount /*qos_prefetch_count*/,
              false);

          if (rabbitQueueBind) {
            // only bind queue if we should
            channel.queueBind(
                rabbitQueue /*queue*/,
                rabbitExchange /*exchange*/,
                rabbitRoutingKey /*routingKey*/);
          }
          // Manual acks (noAck=false): messages are ack'ed only after processing below.
          consumer = new QueueingConsumer(channel);
          channel.basicConsume(rabbitQueue /*queue*/, false /*noAck*/, consumer);
        } catch (Exception e) {
          if (!closed) {
            // NOTE(review): (message, throwable, args) ordering — presumably the old
            // Elasticsearch ESLogger signature, not plain SLF4J; verify against the
            // logger in use.
            logger.warn("failed to create queue [{}]", e, rabbitQueue);
          }
          cleanup(0, "failed to create queue");
          continue;
        }

        // now use the queue to listen for messages
        while (true) {
          if (closed) {
            break;
          }
          QueueingConsumer.Delivery task;
          try {
            // Blocks until a delivery arrives (or the channel/connection dies).
            task = consumer.nextDelivery();
          } catch (Exception e) {
            if (!closed) {
              logger.error("failed to get next message, reconnecting...", e);
            }
            cleanup(0, "failed to get message");
            break;
          }

          if (task != null && task.getBody() != null) {
            // Delivery tags of every message folded into the current bulk; ack'ed
            // together once the bulk has been handed to Elasticsearch.
            final List<Long> deliveryTags = Lists.newArrayList();

            BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();

            try {
              processBody(task, bulkRequestBuilder);
            } catch (Exception e) {
              // Unparseable message: ack it anyway so it is not redelivered forever.
              logger.warn(
                  "failed to parse request for delivery tag [{}], ack'ing...",
                  e,
                  task.getEnvelope().getDeliveryTag());
              try {
                channel.basicAck(task.getEnvelope().getDeliveryTag(), false);
              } catch (IOException e1) {
                logger.warn("failed to ack [{}]", e1, task.getEnvelope().getDeliveryTag());
              }
              continue;
            }

            deliveryTags.add(task.getEnvelope().getDeliveryTag());

            if (bulkRequestBuilder.numberOfActions() < bulkSize) {
              // try and spin some more of those without timeout, so we have a bigger bulk (bounded
              // by the bulk size)
              try {
                while ((task = consumer.nextDelivery(bulkTimeout.millis())) != null) {
                  try {
                    processBody(task, bulkRequestBuilder);
                    deliveryTags.add(task.getEnvelope().getDeliveryTag());
                  } catch (Throwable e) {
                    // Same policy as above: ack unparseable messages to drop them.
                    logger.warn(
                        "failed to parse request for delivery tag [{}], ack'ing...",
                        e,
                        task.getEnvelope().getDeliveryTag());
                    try {
                      channel.basicAck(task.getEnvelope().getDeliveryTag(), false);
                    } catch (Exception e1) {
                      logger.warn(
                          "failed to ack on failure [{}]", e1, task.getEnvelope().getDeliveryTag());
                    }
                  }
                  if (bulkRequestBuilder.numberOfActions() >= bulkSize) {
                    break;
                  }
                }
              } catch (InterruptedException e) {
                // NOTE(review): interrupt is swallowed when not closing; the flag is
                // not restored — confirm that is intentional.
                if (closed) {
                  break;
                }
              } catch (ShutdownSignalException sse) {
                logger.warn(
                    "Received a shutdown signal! initiatedByApplication: [{}], hard error: [{}]",
                    sse,
                    sse.isInitiatedByApplication(),
                    sse.isHardError());
                if (!closed && sse.isInitiatedByApplication()) {
                  logger.error("failed to get next message, reconnecting...", sse);
                }
                cleanup(0, "failed to get message");
                break;
              }
            }

            if (logger.isTraceEnabled()) {
              logger.trace(
                  "executing bulk with [{}] actions", bulkRequestBuilder.numberOfActions());
            }

            // if we have no bulk actions we might have processed custom commands, so ack them
            if (ordered || bulkRequestBuilder.numberOfActions() == 0) {
              // Ordered path: execute the bulk synchronously, then ack every tag.
              try {
                if (bulkRequestBuilder.numberOfActions() > 0) {
                  BulkResponse response = bulkRequestBuilder.execute().actionGet();
                  if (response.hasFailures()) {
                    // TODO write to exception queue?
                    logger.warn("failed to execute: " + response.buildFailureMessage());
                  }
                }
              } catch (Exception e) {
                logger.warn("failed to execute bulk", e);
              }
              for (Long deliveryTag : deliveryTags) {
                try {
                  channel.basicAck(deliveryTag, false);
                } catch (Exception e1) {
                  logger.warn("failed to ack [{}]", e1, deliveryTag);
                }
              }
            } else {
              // Unordered path: execute asynchronously; acks happen in the response
              // callback, and on failure the tags are deliberately left unacked.
              if (bulkRequestBuilder.numberOfActions() > 0) {
                bulkRequestBuilder.execute(
                    new ActionListener<BulkResponse>() {
                      @Override
                      public void onResponse(BulkResponse response) {
                        if (response.hasFailures()) {
                          // TODO write to exception queue?
                          logger.warn("failed to execute: " + response.buildFailureMessage());
                        }
                        for (Long deliveryTag : deliveryTags) {
                          try {
                            channel.basicAck(deliveryTag, false);
                          } catch (Exception e1) {
                            logger.warn("failed to ack [{}]", e1, deliveryTag);
                          }
                        }
                      }

                      @Override
                      public void onFailure(Throwable e) {
                        logger.warn(
                            "failed to execute bulk for delivery tags [{}], not ack'ing",
                            e,
                            deliveryTags);
                      }
                    });
              }
            }
          }
        }
      }
      cleanup(0, "closing river");
    }
 /**
  * Bulk-listener callback: fails hard when any item in the response failed, otherwise
  * accumulates the number of indexed documents.
  *
  * @throws RuntimeException carrying the aggregated failure message on any item failure
  */
 public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
   if (!response.hasFailures()) {
     // All items succeeded: count them toward the running total.
     indexedDocumentCount.addAndGet(response.getItems().length);
     return;
   }
   throw new RuntimeException(response.buildFailureMessage());
 }
    /**
     * Long-polling consumer loop: pulls messages from SQS and indexes/deletes the
     * referenced documents in Elasticsearch via bulk requests until {@code closed}.
     *
     * <p>Each message must carry {@code _id} and {@code _type}; an optional
     * {@code _index} overrides the default index, and a missing or non-object
     * {@code _data} is interpreted as a delete.
     */
    public void run() {
      ObjectReader dataReader = mapper.reader(new TypeReference<Map<String, Object>>() {});
      // Clamp the long-polling interval to the range SQS supports (0..20 seconds).
      int interval =
          (LONGPOLLING_INTERVAL < 0 || LONGPOLLING_INTERVAL > 20)
              ? DEFAULT_LONGPOLLING_INTERVAL
              : LONGPOLLING_INTERVAL;

      while (!closed) {
        // pull messages from SQS
        if (DEBUG) logger.info("Waiting {}s for messages...", interval);

        List<JsonNode> msgs = pullMessages(interval);

        try {
          BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();

          for (JsonNode msg : msgs) {
            if (msg.has("_id") && msg.has("_type")) {
              String id = msg.get("_id").textValue();
              String type = msg.get("_type").textValue();
              // Support for dynamic indexes
              String indexName = msg.has("_index") ? msg.get("_index").textValue() : INDEX;

              JsonNode dataNode = msg.get("_data");
              if (dataNode != null && dataNode.isObject()) {
                // Message carries a document body: index it.
                Map<String, Object> data = dataReader.readValue(dataNode);
                bulkRequestBuilder.add(
                    client.prepareIndex(indexName, type, id).setSource(data).request());
              } else {
                // No document body: treat the message as a delete.
                bulkRequestBuilder.add(client.prepareDelete(indexName, type, id).request());
              }
            }
          }

          if (bulkRequestBuilder.numberOfActions() > 0) {
            BulkResponse response = bulkRequestBuilder.execute().actionGet();
            if (response.hasFailures()) {
              logger.warn(
                  "Bulk operation completed with errors: " + response.buildFailureMessage());
            }
            idleCount = 0;
          } else {
            idleCount++;
            if (DEBUG) logger.info("No new messages. {}", idleCount);
            // no tasks in queue => throttle down pull requests
            if (SLEEP > 0 && idleCount >= 3) {
              try {
                if (DEBUG) logger.info("Queue is empty. Sleeping for {}s", interval);
                // 1000L avoids int overflow for large SLEEP values.
                Thread.sleep(SLEEP * 1000L);
              } catch (InterruptedException e) {
                // Restore the interrupt flag so the thread's owner can observe it.
                Thread.currentThread().interrupt();
                if (closed) {
                  if (DEBUG) logger.info("Done.");
                  break;
                }
              }
            }
          }
        } catch (Exception e) {
          // Pass the throwable as the last argument without a "{}" placeholder so
          // SLF4J logs the full stack trace instead of consuming it as a parameter.
          logger.error("Bulk index operation failed", e);
        }
      }
    }