@Override
  public void index(List<Article> list) {
    if (!list.isEmpty()) {
      try {
        BulkRequestBuilder bulkRequest = transportClient.prepareBulk();

        for (Article article : list) {
          XContentBuilder builder =
              XContentFactory.jsonBuilder()
                  .startObject()
                  .field(TITLE, article.getTitle())
                  .field(SUMMARY, HtmlHelper.filterHtml(article.getSummary()))
                  .endObject();
          bulkRequest.add(
              transportClient
                  .prepareIndex(INDICE, TYPE, String.valueOf(article.getId()))
                  .setSource(builder));
        }

        BulkResponse bulkResponse = bulkRequest.execute().actionGet();
        if (bulkResponse.hasFailures()) {
          logger.debug("bulkResponse.hasFailures()");
        } else {
          logger.debug("bulk index ok!");
        }
      } catch (IOException e) {
        logger.error("failed to build bulk index request", e);
      }
    }
  }
 @Override
 public void executeESBulkRequest(BulkRequestBuilder esBulk)
     throws ElasticsearchException, BulkUpdatePartialFailureException {
   BulkResponse response = esBulk.execute().actionGet();
   if (response.hasFailures()) {
     boolean containsSuccess = false;
     int numOfFailures = 0;
     for (BulkItemResponse bir : response.getItems()) {
       if (!bir.isFailed()) {
         containsSuccess = true;
       } else {
         numOfFailures++;
       }
     }
     if (containsSuccess) {
       throw new BulkUpdatePartialFailureException(response.buildFailureMessage(), numOfFailures);
     } else {
       throw new ElasticsearchException(
           "Failed to completely execute ES index bulk update for "
               + numOfFailures
               + " commands: "
               + response.buildFailureMessage());
     }
   }
 }
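The two exception types let callers treat partial and total failure differently. A minimal caller sketch, assuming the surrounding class exposes the method above (the indexer and bulk names are illustrative):

 // Partial failure: some commands were applied, so log and move on.
 // Total failure: nothing useful was applied, so propagate.
 try {
   indexer.executeESBulkRequest(bulk);
 } catch (BulkUpdatePartialFailureException partial) {
   logger.warn("Some bulk commands failed, the rest were applied: " + partial.getMessage());
 } catch (ElasticsearchException total) {
   throw total; // let the caller decide how to recover
 }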
  /**
   * Index all the resources in a Jena Model to ES
   *
   * @param model the model to index
   * @param bulkRequest a BulkRequestBuilder
   * @param getPropLabel if set to true all URI property values will be indexed as their label. The
   *     label is taken as the value of one of the properties set in {@link #uriDescriptionList}.
   */
  private void addModelToES(Model model, BulkRequestBuilder bulkRequest, boolean getPropLabel) {
    long startTime = System.currentTimeMillis();
    long bulkLength = 0;
    HashSet<Property> properties = new HashSet<Property>();

    StmtIterator it = model.listStatements();
    while (it.hasNext()) {
      Statement st = it.nextStatement();
      Property prop = st.getPredicate();
      String property = prop.toString();

      if (rdfPropList.isEmpty()
          || (isWhitePropList && rdfPropList.contains(property))
          || (!isWhitePropList && !rdfPropList.contains(property))
          || (normalizeProp.containsKey(property))) {
        properties.add(prop);
      }
    }

    ResIterator resIt = model.listSubjects();

    while (resIt.hasNext()) {
      Resource rs = resIt.nextResource();
      Map<String, ArrayList<String>> jsonMap = getJsonMap(rs, properties, model, getPropLabel);

      bulkRequest.add(
          client.prepareIndex(indexName, typeName, rs.toString()).setSource(mapToString(jsonMap)));
      bulkLength++;

      // Execute the bulk every DEFAULT_BULK_SIZE requests
      if (bulkLength % EEASettings.DEFAULT_BULK_SIZE == 0) {
        BulkResponse bulkResponse = bulkRequest.execute().actionGet();
        // After executing, flush the BulkRequestBuilder.
        bulkRequest = client.prepareBulk();

        if (bulkResponse.hasFailures()) {
          processBulkResponseFailure(bulkResponse);
        }
      }
    }

    // Execute remaining requests
    if (bulkRequest.numberOfActions() > 0) {
      BulkResponse response = bulkRequest.execute().actionGet();
      // Handle failure by iterating through each bulk response item
      if (response.hasFailures()) {
        processBulkResponseFailure(response);
      }
    }

    // Show time taken to index the documents
    logger.info(
        "Indexed {} documents on {}/{} in {} seconds",
        bulkLength,
        indexName,
        typeName,
        (System.currentTimeMillis() - startTime) / 1000.0);
  }
Example #4
 protected BulkRequestBuilder sendAndCheck(BulkRequestBuilder bulkRequest) {
   if (bulkRequest.numberOfActions() > 0) {
     BulkResponse bulkResponse = bulkRequest.execute().actionGet();
     if (bulkResponse.hasFailures()) {
       // process failures by iterating through each bulk response item
       throw new RuntimeException(bulkResponse.buildFailureMessage());
     }
     return opalSearchService.getClient().prepareBulk();
   }
   return bulkRequest;
 }
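Because sendAndCheck hands back a fresh builder after a flush, callers must reassign on every call. A minimal usage sketch (the ids collection and the flush threshold of 100 are illustrative):

 BulkRequestBuilder bulk = opalSearchService.getClient().prepareBulk();
 for (String id : ids) {
   bulk.add(opalSearchService.getClient().prepareIndex("index", "type", id).setSource("{}"));
   if (bulk.numberOfActions() >= 100) {
     bulk = sendAndCheck(bulk); // flushes and returns an empty builder
   }
 }
 sendAndCheck(bulk); // flush whatever is left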
Example #5
 public static BulkResponse executeBulkRequest(
     BulkRequestBuilder builder, String errorMessage, Object... errorMessageArgs) {
   BulkResponse bulkResponse = builder.get();
   if (bulkResponse.hasFailures()) {
     // do not use Preconditions as the message is expensive to generate (see
     // buildFailureMessage())
     throw new IllegalStateException(
         format(errorMessage, errorMessageArgs) + ": " + bulkResponse.buildFailureMessage());
   }
   return bulkResponse;
 }
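buildFailureMessage() is only invoked on the failure path, which is why the eager Preconditions check was avoided. A minimal usage sketch (the index, type, and id names are illustrative; format follows String.format conventions):

 BulkResponse rsp = executeBulkRequest(
     client.prepareBulk().add(client.prepareIndex("idx", "doc", "1").setSource("field", "value")),
     "bulk indexing into %s failed", "idx");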
 protected void executeBulkRequest(BulkRequestBuilder bulkRequest) {
   if (bulkRequest.numberOfActions() == 0) return;
   BulkResponse bulkResponse = bulkRequest.execute().actionGet();
   if (!bulkResponse.hasFailures()) return;
   for (BulkItemResponse response : bulkResponse) {
     if (!response.isFailed()) continue;
     LOG.warning(
         String.format(
             "Unable to save Entity %s in %s/%s, cause: %s",
             response.getId(),
             response.getIndex(),
             response.getType(),
             response.getFailureMessage()));
   }
 }
Example #7
  public void testVersioningWithBulk() {
    createIndex("test");
    ensureGreen();

    BulkResponse bulkResponse =
        client()
            .prepareBulk()
            .add(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1"))
            .execute()
            .actionGet();
    assertThat(bulkResponse.hasFailures(), equalTo(false));
    assertThat(bulkResponse.getItems().length, equalTo(1));
    IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
    assertThat(indexResponse.getVersion(), equalTo(1L));
  }
Example #8
    @Override
    public void onResponse(BulkResponse response) {
      semaphore.release();
      counter.addAndGet(response.getItems().length);

      for (BulkItemResponse item : response.getItems()) {
        if (item.isFailed()) {
          LOGGER.error(
              "index [{}], type [{}], id [{}], message [{}]",
              item.getIndex(),
              item.getType(),
              item.getId(),
              item.getFailureMessage());
        }
      }
    }
  /**
   * This method processes failures by iterating through each bulk response item
   *
   * @param response a BulkResponse
   */
  private void processBulkResponseFailure(BulkResponse response) {
    logger.warn("There was failures when executing bulk : " + response.buildFailureMessage());

    if (!logger.isDebugEnabled()) return;

    for (BulkItemResponse item : response.getItems()) {
      if (item.isFailed()) {
        logger.debug(
            "Error {} occurred on index {}, type {}, id {} for {} operation ",
            item.getFailureMessage(),
            item.getIndex(),
            item.getType(),
            item.getId(),
            item.getOpType());
      }
    }
  }
 public void testConflictingDynamicMappingsBulk() {
   // we don't use indexRandom because the order of requests is important here
   createIndex("index");
   client().prepareIndex("index", "type", "1").setSource("foo", 3).get();
   BulkResponse bulkResponse =
       client()
           .prepareBulk()
           .add(client().prepareIndex("index", "type", "1").setSource("foo", 3))
           .get();
   assertFalse(bulkResponse.hasFailures());
   bulkResponse =
       client()
           .prepareBulk()
           .add(client().prepareIndex("index", "type", "2").setSource("foo", "bar"))
           .get();
   assertTrue(bulkResponse.hasFailures());
 }
  static void indexProducts(List<Map<String, Object>> products, String index, Node node)
      throws Exception {
    long currentCount = getCurrentDocumentCount(index, node);
    BulkRequest bulkRequest = new BulkRequest();
    for (Map<String, Object> product : products) {
      IndexRequest indexRequest =
          new IndexRequest(index, "product", (String) product.get("ProductId"));
      indexRequest.source(product);
      bulkRequest.add(indexRequest);
    }
    BulkResponse response = node.client().bulk(bulkRequest).actionGet();
    if (response.hasFailures()) {
      Assert.fail("Error in creating products: " + response.buildFailureMessage());
    }

    refreshIndex(index, node);
    assertDocumentCountAfterIndexing(index, products.size() + currentCount, node);
  }
 @Override
 public void bulkIndex(List<IndexQuery> queries) {
   BulkRequestBuilder bulkRequest = client.prepareBulk();
   for (IndexQuery query : queries) {
     bulkRequest.add(prepareIndex(query));
   }
   BulkResponse bulkResponse = bulkRequest.execute().actionGet();
   if (bulkResponse.hasFailures()) {
     Map<String, String> failedDocuments = new HashMap<String, String>();
     for (BulkItemResponse item : bulkResponse.getItems()) {
       if (item.isFailed()) failedDocuments.put(item.getId(), item.getFailureMessage());
     }
     throw new ElasticsearchException(
         "Bulk indexing has failures. Use ElasticsearchException.getFailedDocuments() for detailed messages ["
             + failedDocuments
             + "]",
         failedDocuments);
   }
 }
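A minimal caller sketch, assuming the thrown ElasticsearchException is the custom variant that carries the map (its getFailedDocuments() accessor is implied by the message above):

 try {
   bulkIndex(queries);
 } catch (ElasticsearchException e) {
   // log each failed document id with its failure message
   for (Map.Entry<String, String> failed : e.getFailedDocuments().entrySet()) {
     logger.warn("Document " + failed.getKey() + " failed: " + failed.getValue());
   }
 }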
Example #13
  /**
   * Updates the specified objects
   *
   * @return the bulk item positions of the failed actions (e.g. failed due to versioning)
   */
  public Collection<Integer> bulkUpdate(
      Collection<T> objects, String indexName, boolean refresh, boolean enableVersioning) {
    // now using bulk API instead of feeding each doc separate with feedDoc
    BulkRequestBuilder brb = client.prepareBulk();
    // this works differently than the direct call to refresh!? maybe refresh is not async?
    //        brb.setRefresh(refresh);
    for (T o : objects) {
      if (o.getId() == null) {
        logger.warn("Skipped object without id when bulkUpdate:" + o);
        continue;
      }

      try {
        XContentBuilder source = createDoc(o);
        IndexRequest indexReq =
            Requests.indexRequest(indexName).type(getIndexType()).id(o.getId()).source(source);

        if (enableVersioning) indexReq.version(o.getVersion());

        brb.add(indexReq);
      } catch (IOException ex) {
        logger.warn("Cannot add object:" + o + " to bulkIndexing action." + ex.getMessage());
      }
    }
    if (brb.numberOfActions() > 0) {
      BulkResponse rsp = brb.execute().actionGet();
      if (rsp.hasFailures()) {
        List<Integer> list = new ArrayList<Integer>(rsp.items().length);
        for (BulkItemResponse br : rsp.items()) {
          if (br.isFailed()) {
            //                        logger.info("Error:" + br.failureMessage());
            list.add(br.itemId());
          }
        }
        return list;
      }
      if (refresh) refresh(indexName);
    }

    return Collections.emptyList();
  }
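A minimal caller sketch mapping the returned bulk positions back to the input order. It assumes no object was skipped above (a null id or serialization failure would shift the positions); MyEntity, entities, and dao are hypothetical:

 List<MyEntity> batch = new ArrayList<MyEntity>(entities);
 Collection<Integer> failedPositions = dao.bulkUpdate(batch, "myindex", true, true);
 for (Integer pos : failedPositions) {
   logger.warn("Version conflict on object " + batch.get(pos).getId() + ", retry later");
 }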
  @Test
  public void testVersioningWithBulk() {
    try {
      client.admin().indices().prepareDelete("test").execute().actionGet();
    } catch (IndexMissingException e) {
      // it's ok, the index may not exist yet
    }
    client.admin().indices().prepareCreate("test").execute().actionGet();
    client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();

    BulkResponse bulkResponse =
        client
            .prepareBulk()
            .add(client.prepareIndex("test", "type", "1").setSource("field1", "value1_1"))
            .execute()
            .actionGet();
    assertThat(bulkResponse.hasFailures(), equalTo(false));
    assertThat(bulkResponse.getItems().length, equalTo(1));
    IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
    assertThat(indexResponse.version(), equalTo(1L));
  }
Example #15
 /*
  * Deleting a document that does not exist does not raise an error.
  * Indexing an id that already exists overwrites the document.
  */
 @Test
 public void bulkTest() throws IOException {
   long start = System.currentTimeMillis();
   Client client = ClientTemplate.getInstance("139.129.48.57", 9300);
   BulkRequestBuilder bulkRequest = client.prepareBulk();
   bulkRequest.add(new DeleteRequest("missxu", "user", "1"));
   bulkRequest.add(
       new IndexRequest("missxu", "user", "3")
           .source(
               XContentFactory.jsonBuilder()
                   .startObject()
                   .field("name", "姓名:新增") // 不存在,就新增,不会再更新
                   .field("age", 112)
                   .endObject()));
   bulkRequest.add(new DeleteRequest("missxu", "user", "2"));
   BulkResponse bulkResponse = bulkRequest.get();
   if (bulkResponse.hasFailures()) {
     System.out.println("------有错误--------");
   }
   ClientTemplate.close(client);
   long end = System.currentTimeMillis();
   System.out.println("创建索引花费时间:" + (end - start) + "ms");
 }
  @Test
  public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exception {
    // Indexing into a should succeed, because the field mapping for field 'field' is defined in the
    // test mapping.
    client()
        .admin()
        .indices()
        .preparePutTemplate("template1")
        .setTemplate("a*")
        .setOrder(0)
        .addMapping("test", "field", "type=string")
        .addAlias(new Alias("alias1").filter(termFilter("field", "value")))
        .get();
    // Indexing into b should succeed, because the field mapping for field 'field' is defined in the
    // _default_ mapping and the test type exists.
    client()
        .admin()
        .indices()
        .preparePutTemplate("template2")
        .setTemplate("b*")
        .setOrder(0)
        .addMapping("_default_", "field", "type=string")
        .addMapping("test")
        .addAlias(new Alias("alias2").filter(termFilter("field", "value")))
        .get();
    // Indexing into c should succeed, because the field mapping for field 'field' is defined in the
    // _default_ mapping.
    client()
        .admin()
        .indices()
        .preparePutTemplate("template3")
        .setTemplate("c*")
        .setOrder(0)
        .addMapping("_default_", "field", "type=string")
        .addAlias(new Alias("alias3").filter(termFilter("field", "value")))
        .get();
    // Indexing into d index should fail, since there is no field with name 'field' in the mapping
    client()
        .admin()
        .indices()
        .preparePutTemplate("template4")
        .setTemplate("d*")
        .setOrder(0)
        .addAlias(new Alias("alias4").filter(termFilter("field", "value")))
        .get();

    client().prepareIndex("a1", "test", "test").setSource("{}").get();
    BulkResponse response =
        client().prepareBulk().add(new IndexRequest("a2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(false));
    assertThat(response.getItems()[0].isFailed(), equalTo(false));
    assertThat(response.getItems()[0].getIndex(), equalTo("a2"));
    assertThat(response.getItems()[0].getType(), equalTo("test"));
    assertThat(response.getItems()[0].getId(), equalTo("test"));
    assertThat(response.getItems()[0].getVersion(), equalTo(1L));

    client().prepareIndex("b1", "test", "test").setSource("{}").get();
    response =
        client().prepareBulk().add(new IndexRequest("b2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(false));
    assertThat(response.getItems()[0].isFailed(), equalTo(false));
    assertThat(response.getItems()[0].getIndex(), equalTo("b2"));
    assertThat(response.getItems()[0].getType(), equalTo("test"));
    assertThat(response.getItems()[0].getId(), equalTo("test"));
    assertThat(response.getItems()[0].getVersion(), equalTo(1L));

    client().prepareIndex("c1", "test", "test").setSource("{}").get();
    response =
        client().prepareBulk().add(new IndexRequest("c2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(false));
    assertThat(response.getItems()[0].isFailed(), equalTo(false));
    assertThat(response.getItems()[0].getIndex(), equalTo("c2"));
    assertThat(response.getItems()[0].getType(), equalTo("test"));
    assertThat(response.getItems()[0].getId(), equalTo("test"));
    assertThat(response.getItems()[0].getVersion(), equalTo(1L));

    try {
      client().prepareIndex("d1", "test", "test").setSource("{}").get();
      fail();
    } catch (Exception e) {
      assertThat(
          ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchIllegalArgumentException.class));
      assertThat(e.getMessage(), containsString("failed to parse filter for alias [alias4]"));
    }
    response =
        client().prepareBulk().add(new IndexRequest("d2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(true));
    assertThat(response.getItems()[0].isFailed(), equalTo(true));
    assertThat(
        response.getItems()[0].getFailureMessage(),
        containsString("failed to parse filter for alias [alias4]"));
  }
 public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
   if (response.hasFailures()) {
     throw new RuntimeException(response.buildFailureMessage());
   }
   indexedDocumentCount.addAndGet(response.getItems().length);
 }
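This afterBulk callback has the shape of a BulkProcessor.Listener method; a minimal wiring sketch, assuming the surrounding class provides client and indexedDocumentCount (the flush thresholds are illustrative):

 BulkProcessor processor =
     BulkProcessor.builder(
             client,
             new BulkProcessor.Listener() {
               @Override
               public void beforeBulk(long executionId, BulkRequest request) {}

               @Override
               public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                 if (response.hasFailures()) {
                   throw new RuntimeException(response.buildFailureMessage());
                 }
                 indexedDocumentCount.addAndGet(response.getItems().length);
               }

               @Override
               public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                 throw new RuntimeException(failure);
               }
             })
         .setBulkActions(1000) // flush every 1000 actions
         .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) // or every 5 MB
         .build();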
  public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exception {
    // Indexing into a should succeed, because the field mapping for field 'field' is defined in the
    // test mapping.
    client()
        .admin()
        .indices()
        .preparePutTemplate("template1")
        .setTemplate("a*")
        .setOrder(0)
        .addMapping("test", "field", "type=text")
        .addAlias(new Alias("alias1").filter(termQuery("field", "value")))
        .get();
    // Indexing into b should succeed, because the field mapping for field 'field' is defined in the
    // _default_ mapping and the test type exists.
    client()
        .admin()
        .indices()
        .preparePutTemplate("template2")
        .setTemplate("b*")
        .setOrder(0)
        .addMapping("_default_", "field", "type=text")
        .addMapping("test")
        .addAlias(new Alias("alias2").filter(termQuery("field", "value")))
        .get();
    // Indexing into c should succeed, because the field mapping for field 'field' is defined in the
    // _default_ mapping.
    client()
        .admin()
        .indices()
        .preparePutTemplate("template3")
        .setTemplate("c*")
        .setOrder(0)
        .addMapping("_default_", "field", "type=text")
        .addAlias(new Alias("alias3").filter(termQuery("field", "value")))
        .get();
    // Indexing into d index should fail, since there is no field with name 'field' in the mapping
    client()
        .admin()
        .indices()
        .preparePutTemplate("template4")
        .setTemplate("d*")
        .setOrder(0)
        .addAlias(new Alias("alias4").filter(termQuery("field", "value")))
        .get();

    client().prepareIndex("a1", "test", "test").setSource("{}").get();
    BulkResponse response =
        client().prepareBulk().add(new IndexRequest("a2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(false));
    assertThat(response.getItems()[0].isFailed(), equalTo(false));
    assertThat(response.getItems()[0].getIndex(), equalTo("a2"));
    assertThat(response.getItems()[0].getType(), equalTo("test"));
    assertThat(response.getItems()[0].getId(), equalTo("test"));
    assertThat(response.getItems()[0].getVersion(), equalTo(1L));

    client().prepareIndex("b1", "test", "test").setSource("{}").get();
    response =
        client().prepareBulk().add(new IndexRequest("b2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(false));
    assertThat(response.getItems()[0].isFailed(), equalTo(false));
    assertThat(response.getItems()[0].getIndex(), equalTo("b2"));
    assertThat(response.getItems()[0].getType(), equalTo("test"));
    assertThat(response.getItems()[0].getId(), equalTo("test"));
    assertThat(response.getItems()[0].getVersion(), equalTo(1L));

    client().prepareIndex("c1", "test", "test").setSource("{}").get();
    response =
        client().prepareBulk().add(new IndexRequest("c2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(false));
    assertThat(response.getItems()[0].isFailed(), equalTo(false));
    assertThat(response.getItems()[0].getIndex(), equalTo("c2"));
    assertThat(response.getItems()[0].getType(), equalTo("test"));
    assertThat(response.getItems()[0].getId(), equalTo("test"));
    assertThat(response.getItems()[0].getVersion(), equalTo(1L));

    // Before 2.0 alias filters were parsed at alias creation time, in order
    // for filters to work correctly ES required that fields mentioned in those
    // filters exist in the mapping.
    // From 2.0 and higher alias filters are parsed at request time and therefore
    // fields mentioned in filters don't need to exist in the mapping.
    // So the aliases defined in the index template for this index will not fail
    // even though the fields referenced in the alias filters don't exist yet, and indexing into
    // an index that doesn't exist yet will succeed.
    client().prepareIndex("d1", "test", "test").setSource("{}").get();

    response =
        client().prepareBulk().add(new IndexRequest("d2", "test", "test").source("{}")).get();
    assertThat(response.hasFailures(), is(false));
    assertThat(response.getItems()[0].isFailed(), equalTo(false));
    assertThat(response.getItems()[0].getId(), equalTo("test"));
    assertThat(response.getItems()[0].getVersion(), equalTo(1L));
  }
  public void handleBefore(Annotation annotation, Object instance, Map<String, Object> context)
      throws Exception {
    ElasticsearchBulkRequest elasticsearchBulkRequest = (ElasticsearchBulkRequest) annotation;

    InputStream input = null;
    ByteArrayOutputStream output = null;

    try {
      // Get a Client for the node
      Client client = client(context, elasticsearchBulkRequest.nodeName());

      // Load file as byte array
      input = getClass().getResourceAsStream(elasticsearchBulkRequest.dataFile());
      if (input == null) {
        input =
            Thread.currentThread()
                .getContextClassLoader()
                .getResourceAsStream(elasticsearchBulkRequest.dataFile());
      }
      if (input == null) {
        throw new IllegalArgumentException(
            "Bulk file " + elasticsearchBulkRequest.dataFile() + " not found!");
      }
      output = new ByteArrayOutputStream();

      byte[] buffer = new byte[512 * 1024];
      int read;
      // write only the bytes actually read, not the whole buffer
      while ((read = input.read(buffer)) > 0) {
        output.write(buffer, 0, read);
      }

      buffer = output.toByteArray();

      // Execute the BulkRequest
      BulkResponse response =
          client
              .prepareBulk()
              .add(
                  buffer,
                  0,
                  buffer.length,
                  elasticsearchBulkRequest.defaultIndexName(),
                  elasticsearchBulkRequest.defaultTypeName())
              .setRefresh(true)
              .execute()
              .actionGet();

      LOGGER.info(
          String.format(
              "Bulk request for data file '%s' executed in %d ms with %sfailures",
              elasticsearchBulkRequest.dataFile(),
              response.getTookInMillis(),
              response.hasFailures() ? "" : "no "));
    } finally {
      try {
        if (output != null) {
          output.close();
        }
      } catch (Exception e) {
        // ignore failures while closing the stream
      }
      try {
        if (input != null) {
          input.close();
        }
      } catch (Exception e) {
        // ignore failures while closing the stream
      }
    }
  }
    @Override
    public void run() {
      while (true) {
        if (closed) {
          break;
        }
        try {
          connection = connectionFactory.newConnection(rabbitAddresses);
          channel = connection.createChannel();
        } catch (Exception e) {
          if (!closed) {
            logger.warn("failed to created a connection / channel", e);
          } else {
            continue;
          }
          cleanup(0, "failed to connect");
          try {
            Thread.sleep(5000);
          } catch (InterruptedException e1) {
            // ignore, if we are closing, we will exit later
          }
        }

        QueueingConsumer consumer = null;
        // define the queue
        try {
          if (rabbitQueueDeclare) {
            // only declare the queue if we should
            channel.queueDeclare(
                rabbitQueue /*queue*/,
                rabbitQueueDurable /*durable*/,
                false /*exclusive*/,
                rabbitQueueAutoDelete /*autoDelete*/,
                rabbitQueueArgs /*extra args*/);
          }
          if (rabbitExchangeDeclare) {
            // only declare the exchange if we should
            channel.exchangeDeclare(
                rabbitExchange /*exchange*/, rabbitExchangeType /*type*/, rabbitExchangeDurable);
          }

          channel.basicQos(
              rabbitQosPrefetchSize /*qos_prefetch_size*/,
              rabbitQosPrefetchCount /*qos_prefetch_count*/,
              false);

          if (rabbitQueueBind) {
            // only bind queue if we should
            channel.queueBind(
                rabbitQueue /*queue*/,
                rabbitExchange /*exchange*/,
                rabbitRoutingKey /*routingKey*/);
          }
          consumer = new QueueingConsumer(channel);
          channel.basicConsume(rabbitQueue /*queue*/, false /*noAck*/, consumer);
        } catch (Exception e) {
          if (!closed) {
            logger.warn("failed to create queue [{}]", e, rabbitQueue);
          }
          cleanup(0, "failed to create queue");
          continue;
        }

        // now use the queue to listen for messages
        while (true) {
          if (closed) {
            break;
          }
          QueueingConsumer.Delivery task;
          try {
            task = consumer.nextDelivery();
          } catch (Exception e) {
            if (!closed) {
              logger.error("failed to get next message, reconnecting...", e);
            }
            cleanup(0, "failed to get message");
            break;
          }

          if (task != null && task.getBody() != null) {
            final List<Long> deliveryTags = Lists.newArrayList();

            BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();

            try {
              processBody(task, bulkRequestBuilder);
            } catch (Exception e) {
              logger.warn(
                  "failed to parse request for delivery tag [{}], ack'ing...",
                  e,
                  task.getEnvelope().getDeliveryTag());
              try {
                channel.basicAck(task.getEnvelope().getDeliveryTag(), false);
              } catch (IOException e1) {
                logger.warn("failed to ack [{}]", e1, task.getEnvelope().getDeliveryTag());
              }
              continue;
            }

            deliveryTags.add(task.getEnvelope().getDeliveryTag());

            if (bulkRequestBuilder.numberOfActions() < bulkSize) {
              // try to drain more deliveries (with a short timeout) so we build a bigger bulk,
              // bounded by the bulk size
              try {
                while ((task = consumer.nextDelivery(bulkTimeout.millis())) != null) {
                  try {
                    processBody(task, bulkRequestBuilder);
                    deliveryTags.add(task.getEnvelope().getDeliveryTag());
                  } catch (Throwable e) {
                    logger.warn(
                        "failed to parse request for delivery tag [{}], ack'ing...",
                        e,
                        task.getEnvelope().getDeliveryTag());
                    try {
                      channel.basicAck(task.getEnvelope().getDeliveryTag(), false);
                    } catch (Exception e1) {
                      logger.warn(
                          "failed to ack on failure [{}]", e1, task.getEnvelope().getDeliveryTag());
                    }
                  }
                  if (bulkRequestBuilder.numberOfActions() >= bulkSize) {
                    break;
                  }
                }
              } catch (InterruptedException e) {
                if (closed) {
                  break;
                }
              } catch (ShutdownSignalException sse) {
                logger.warn(
                    "Received a shutdown signal! initiatedByApplication: [{}], hard error: [{}]",
                    sse,
                    sse.isInitiatedByApplication(),
                    sse.isHardError());
                if (!closed && sse.isInitiatedByApplication()) {
                  logger.error("failed to get next message, reconnecting...", sse);
                }
                cleanup(0, "failed to get message");
                break;
              }
            }

            if (logger.isTraceEnabled()) {
              logger.trace(
                  "executing bulk with [{}] actions", bulkRequestBuilder.numberOfActions());
            }

            // if we have no bulk actions we might have processed custom commands, so ack them
            if (ordered || bulkRequestBuilder.numberOfActions() == 0) {
              try {
                if (bulkRequestBuilder.numberOfActions() > 0) {
                  BulkResponse response = bulkRequestBuilder.execute().actionGet();
                  if (response.hasFailures()) {
                    // TODO write to exception queue?
                    logger.warn("failed to execute: " + response.buildFailureMessage());
                  }
                }
              } catch (Exception e) {
                logger.warn("failed to execute bulk", e);
              }
              for (Long deliveryTag : deliveryTags) {
                try {
                  channel.basicAck(deliveryTag, false);
                } catch (Exception e1) {
                  logger.warn("failed to ack [{}]", e1, deliveryTag);
                }
              }
            } else {
              if (bulkRequestBuilder.numberOfActions() > 0) {
                bulkRequestBuilder.execute(
                    new ActionListener<BulkResponse>() {
                      @Override
                      public void onResponse(BulkResponse response) {
                        if (response.hasFailures()) {
                          // TODO write to exception queue?
                          logger.warn("failed to execute: " + response.buildFailureMessage());
                        }
                        for (Long deliveryTag : deliveryTags) {
                          try {
                            channel.basicAck(deliveryTag, false);
                          } catch (Exception e1) {
                            logger.warn("failed to ack [{}]", e1, deliveryTag);
                          }
                        }
                      }

                      @Override
                      public void onFailure(Throwable e) {
                        logger.warn(
                            "failed to execute bulk for delivery tags [{}], not ack'ing",
                            e,
                            deliveryTags);
                      }
                    });
              }
            }
          }
        }
      }
      cleanup(0, "closing river");
    }
  public static void main(String[] args) throws Exception {
    Natives.tryMlockall();
    Settings settings =
        settingsBuilder()
            .put("refresh_interval", "-1")
            .put(SETTING_NUMBER_OF_SHARDS, 1)
            .put(SETTING_NUMBER_OF_REPLICAS, 0)
            .build();

    String clusterName = TermsAggregationSearchAndIndexingBenchmark.class.getSimpleName();
    nodes = new InternalNode[1];
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] =
          (InternalNode)
              nodeBuilder()
                  .settings(settingsBuilder().put(settings).put("name", "node1"))
                  .clusterName(clusterName)
                  .node();
    }
    Client client = nodes[0].client();

    client
        .admin()
        .cluster()
        .prepareHealth(indexName)
        .setWaitForGreenStatus()
        .setTimeout("10s")
        .execute()
        .actionGet();
    try {
      client
          .admin()
          .indices()
          .prepareCreate(indexName)
          .addMapping(typeName, generateMapping("eager", "lazy"))
          .get();
      Thread.sleep(5000);

      long startTime = System.currentTimeMillis();
      ObjectOpenHashSet<String> uniqueTerms = ObjectOpenHashSet.newInstance();
      for (int i = 0; i < NUMBER_OF_TERMS; i++) {
        boolean added;
        do {
          added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
        } while (!added);
      }
      String[] sValues = uniqueTerms.toArray(String.class);
      long ITERS = COUNT / BATCH;
      long i = 1;
      int counter = 0;
      for (; i <= ITERS; i++) {
        BulkRequestBuilder request = client.prepareBulk();
        for (int j = 0; j < BATCH; j++) {
          counter++;

          XContentBuilder builder = jsonBuilder().startObject();
          builder.field("id", Integer.toString(counter));
          final String sValue = sValues[counter % sValues.length];
          builder.field("s_value", sValue);
          builder.field("s_value_dv", sValue);

          for (String field : new String[] {"sm_value", "sm_value_dv"}) {
            builder.startArray(field);
            for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
              builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
            }
            builder.endArray();
          }

          request.add(
              Requests.indexRequest(indexName)
                  .type("type1")
                  .id(Integer.toString(counter))
                  .source(builder));
        }
        BulkResponse response = request.execute().actionGet();
        if (response.hasFailures()) {
          System.err.println("--> failures...");
        }
        if (((i * BATCH) % 10000) == 0) {
          System.out.println("--> Indexed " + (i * BATCH));
        }
      }

      System.out.println(
          "--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
    } catch (IndexAlreadyExistsException e) {
      System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
      ClusterHealthResponse clusterHealthResponse =
          client
              .admin()
              .cluster()
              .prepareHealth(indexName)
              .setWaitForGreenStatus()
              .setTimeout("10m")
              .execute()
              .actionGet();
      if (clusterHealthResponse.isTimedOut()) {
        System.err.println("--> Timed out waiting for cluster health");
      }
    }
    client
        .admin()
        .indices()
        .preparePutMapping(indexName)
        .setType(typeName)
        .setSource(generateMapping("lazy", "lazy"))
        .get();
    client.admin().indices().prepareRefresh().execute().actionGet();
    System.out.println(
        "--> Number of docs in index: "
            + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());

    String[] nodeIds = new String[nodes.length];
    for (int i = 0; i < nodeIds.length; i++) {
      nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
    }

    List<TestRun> testRuns = new ArrayList<>();
    testRuns.add(new TestRun("Regular field ordinals", "eager", "lazy", "s_value", "ordinals"));
    testRuns.add(
        new TestRun("Docvalues field ordinals", "lazy", "eager", "s_value_dv", "ordinals"));
    testRuns.add(
        new TestRun(
            "Regular field global ordinals", "eager_global_ordinals", "lazy", "s_value", null));
    testRuns.add(
        new TestRun("Docvalues field global", "lazy", "eager_global_ordinals", "s_value_dv", null));

    List<TestResult> testResults = new ArrayList<>();
    for (TestRun testRun : testRuns) {
      client
          .admin()
          .indices()
          .preparePutMapping(indexName)
          .setType(typeName)
          .setSource(
              generateMapping(testRun.indexedFieldEagerLoading, testRun.docValuesEagerLoading))
          .get();
      client.admin().indices().prepareClearCache(indexName).setFieldDataCache(true).get();
      SearchThread searchThread =
          new SearchThread(client, testRun.termsAggsField, testRun.termsAggsExecutionHint);
      RefreshThread refreshThread = new RefreshThread(client);
      System.out.println("--> Running '" + testRun.name + "' round...");
      new Thread(refreshThread).start();
      new Thread(searchThread).start();
      Thread.sleep(2 * 60 * 1000);
      refreshThread.stop();
      searchThread.stop();

      System.out.println("--> Avg refresh time: " + refreshThread.avgRefreshTime + " ms");
      System.out.println("--> Avg query time: " + searchThread.avgQueryTime + " ms");

      ClusterStatsResponse clusterStatsResponse =
          client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
      System.out.println(
          "--> Heap used: " + clusterStatsResponse.getNodesStats().getJvm().getHeapUsed());
      ByteSizeValue fieldDataMemoryUsed =
          clusterStatsResponse.getIndicesStats().getFieldData().getMemorySize();
      System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
      testResults.add(
          new TestResult(
              testRun.name,
              refreshThread.avgRefreshTime,
              searchThread.avgQueryTime,
              fieldDataMemoryUsed));
    }

    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");
    System.out.format(
        Locale.ENGLISH,
        "%30s%18s%15s%15s\n",
        "name",
        "avg refresh time",
        "avg query time",
        "fieldata size");
    for (TestResult testResult : testResults) {
      System.out.format(
          Locale.ENGLISH,
          "%30s%18s%15s%15s\n",
          testResult.name,
          testResult.avgRefreshTime,
          testResult.avgQueryTime,
          testResult.fieldDataSizeInMemory);
    }
    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");

    client.close();
    for (InternalNode node : nodes) {
      node.close();
    }
  }
  public void testLimitsRequestSize() throws Exception {
    ByteSizeValue inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB);
    if (noopBreakerUsed()) {
      logger.info("--> noop breakers used, skipping test");
      return;
    }

    internalCluster().ensureAtLeastNumDataNodes(2);

    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
      if (stat.getNode().isDataNode()) {
        dataNodeStats.add(stat);
      }
    }

    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());

    // send bulk request from source node to target node later. The sole shard is bound to the
    // target node.
    NodeStats targetNode = dataNodeStats.get(0);
    NodeStats sourceNode = dataNodeStats.get(1);

    assertAcked(
        prepareCreate("index")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                    .put("index.routing.allocation.include._name", targetNode.getNode().getName())
                    .put(
                        EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                        EnableAllocationDecider.Rebalance.NONE)));

    Client client = client(sourceNode.getNode().getName());

    // we use the limit size as a (very) rough indication of how many requests we should send to
    // hit the limit
    int numRequests = inFlightRequestsLimit.bytesAsInt();
    BulkRequest bulkRequest = new BulkRequest();
    for (int i = 0; i < numRequests; i++) {
      IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i));
      indexRequest.source("field", "value", "num", i);
      bulkRequest.add(indexRequest);
    }

    Settings limitSettings =
        Settings.builder()
            .put(
                HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING
                    .getKey(),
                inFlightRequestsLimit)
            .build();

    assertAcked(
        client().admin().cluster().prepareUpdateSettings().setTransientSettings(limitSettings));

    // can either fail directly with an exception or the response contains exceptions (depending on
    // client)
    try {
      BulkResponse response = client.bulk(bulkRequest).actionGet();
      if (!response.hasFailures()) {
        fail("Should have thrown CircuitBreakingException");
      } else {
        // each item must have failed with CircuitBreakingException
        for (BulkItemResponse bulkItemResponse : response) {
          Throwable cause = ExceptionsHelper.unwrapCause(bulkItemResponse.getFailure().getCause());
          assertThat(cause, instanceOf(CircuitBreakingException.class));
          assertEquals(
              ((CircuitBreakingException) cause).getByteLimit(), inFlightRequestsLimit.bytes());
        }
      }
    } catch (CircuitBreakingException ex) {
      assertEquals(ex.getByteLimit(), inFlightRequestsLimit.bytes());
    }
  }
    public void run() {
      String id = null; // document id
      String type = null; // document type
      String indexName = null; // document index
      Map<String, Object> data = null; // document data for indexing
      ObjectReader dataReader = mapper.reader(new TypeReference<Map<String, Object>>() {});
      int interval =
          (LONGPOLLING_INTERVAL < 0 || LONGPOLLING_INTERVAL > 20)
              ? DEFAULT_LONGPOLLING_INTERVAL
              : LONGPOLLING_INTERVAL;

      while (!closed) {
        // pull messages from SQS
        if (DEBUG) logger.info("Waiting {}s for messages...", interval);

        List<JsonNode> msgs = pullMessages(interval);

        try {
          BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();

          for (JsonNode msg : msgs) {
            if (msg.has("_id") && msg.has("_type")) {
              id = msg.get("_id").textValue();
              type = msg.get("_type").textValue();
              // Support for dynamic indexes
              indexName = msg.has("_index") ? msg.get("_index").textValue() : INDEX;

              JsonNode dataNode = msg.get("_data");
              if (dataNode != null && dataNode.isObject()) {
                data = dataReader.readValue(msg.get("_data"));
                bulkRequestBuilder.add(
                    client.prepareIndex(indexName, type, id).setSource(data).request());
              } else {
                bulkRequestBuilder.add(client.prepareDelete(indexName, type, id).request());
              }
            }
          }

          if (bulkRequestBuilder.numberOfActions() > 0) {
            BulkResponse response = bulkRequestBuilder.execute().actionGet();
            if (response.hasFailures()) {
              logger.warn(
                  "Bulk operation completed with errors: " + response.buildFailureMessage());
            }
            idleCount = 0;
          } else {
            idleCount++;
            if (DEBUG) logger.info("No new messages. {}", idleCount);
            // no tasks in queue => throttle down pull requests
            if (SLEEP > 0 && idleCount >= 3) {
              try {
                if (DEBUG) logger.info("Queue is empty. Sleeping for {}s", interval);
                Thread.sleep(SLEEP * 1000);
              } catch (InterruptedException e) {
                if (closed) {
                  if (DEBUG) logger.info("Done.");
                  break;
                }
              }
            }
          }
        } catch (Exception e) {
          logger.error("Bulk index operation failed {}", e);
          continue;
        }
      }
    }