@Override
 public BulkNodeClient stopBulk(String index) throws IOException {
   // Nothing to restore when metrics are disabled or the index never entered bulk mode.
   if (metric == null || !metric.isBulk(index)) {
     return this;
   }
   // Put back the refresh interval that was recorded when bulk mode was started,
   // then forget the bulk-mode bookkeeping for this index.
   ClientHelper.updateIndexSetting(
       client, index, "refresh_interval", metric.getStopBulkRefreshIntervals().get(index));
   metric.removeBulk(index);
   return this;
 }
 @Override
 public BulkNodeClient startBulk(String index, long startRefreshInterval, long stopRefreshIterval)
     throws IOException {
   // Skip when metrics are disabled (no place to track state) or bulk mode is already on.
   if (metric == null || metric.isBulk(index)) {
     return this;
   }
   // Record both intervals so stopBulk(index) can later restore the "stop" value,
   // then switch the index to the bulk-friendly refresh interval.
   metric.setupBulk(index, startRefreshInterval, stopRefreshIterval);
   ClientHelper.updateIndexSetting(client, index, "refresh_interval", startRefreshInterval);
   return this;
 }
 @Override
 public synchronized void shutdown() {
   // Best-effort teardown: close the bulk processor, leave bulk mode on all
   // tracked indices, then close the client. Errors are logged, never rethrown.
   try {
     if (bulkProcessor != null) {
       logger.debug("closing bulk processor...");
       bulkProcessor.close();
     }
     boolean hasBulkIndices =
         metric != null && metric.indices() != null && !metric.indices().isEmpty();
     if (hasBulkIndices) {
       logger.debug("stopping bulk mode for indices {}...", metric.indices());
       // Iterate over a snapshot: stopBulk() removes entries from metric's index set.
       for (String bulkIndex : ImmutableSet.copyOf(metric.indices())) {
         stopBulk(bulkIndex);
       }
     }
     logger.debug("shutting down...");
     client.close();
     logger.debug("shutting down completed");
   } catch (Exception e) {
     logger.error(e.getMessage(), e);
   }
 }
 @Override
 public BulkNodeClient bulkDelete(DeleteRequest deleteRequest) {
   // Hand the delete request to the bulk processor; refuses work once poisoned.
   if (closed) {
     throw new ElasticsearchIllegalStateException("client is closed");
   }
   boolean metricsEnabled = metric != null;
   try {
     if (metricsEnabled) {
       metric.getCurrentIngest().inc();
     }
     bulkProcessor.add(deleteRequest);
   } catch (Exception e) {
     // A failure to enqueue poisons the client: remember the cause and stop accepting work.
     throwable = e;
     closed = true;
     logger.error("bulk add of delete failed: " + e.getMessage(), e);
   } finally {
     if (metricsEnabled) {
       metric.getCurrentIngest().dec();
     }
   }
   return this;
 }
 @Override
 public BulkNodeClient index(String index, String type, String id, String source) {
   // Enqueue an index (upsert, create=false) request on the bulk processor.
   if (closed) {
     throw new ElasticsearchIllegalStateException("client is closed");
   }
   boolean metricsEnabled = metric != null;
   try {
     if (metricsEnabled) {
       metric.getCurrentIngest().inc();
     }
     IndexRequest request = new IndexRequest(index);
     request.type(type).id(id).create(false).source(source);
     bulkProcessor.add(request);
   } catch (Exception e) {
     // A failure to enqueue poisons the client: remember the cause and stop accepting work.
     throwable = e;
     closed = true;
     logger.error("bulk add of index request failed: " + e.getMessage(), e);
   } finally {
     if (metricsEnabled) {
       metric.getCurrentIngest().dec();
     }
   }
   return this;
 }
  /**
   * Attaches this helper to the given node {@code Client}, starts metrics collection if not
   * already running, and builds the {@code BulkProcessor} that batches index/delete requests.
   *
   * <p>The processor is limited to {@code maxActionsPerBulkRequest} actions per bulk request,
   * {@code maxConcurrentBulkRequests} concurrent requests, the configured flush interval, and
   * optionally {@code maxVolume} bytes per request. The client becomes usable
   * ({@code closed = false}) only after the cluster reaches at least YELLOW health within
   * 30 seconds; otherwise it stays closed.
   *
   * @param client the Elasticsearch node client to operate on
   * @return this client, for call chaining
   */
  @Override
  public BulkNodeClient newClient(Client client) {
    this.client = client;
    if (metric == null) {
      this.metric = new Metric();
      metric.start();
    }
    BulkProcessor.Listener listener =
        new BulkProcessor.Listener() {
          @Override
          public void beforeBulk(long executionId, BulkRequest request) {
            metric.getCurrentIngest().inc();
            long concurrent = metric.getCurrentIngest().count();
            int n = request.numberOfActions();
            metric.getSubmitted().inc(n);
            metric.getCurrentIngestNumDocs().inc(n);
            metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
            logger.debug(
                "before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
                executionId,
                request.numberOfActions(),
                request.estimatedSizeInBytes(),
                concurrent);
          }

          @Override
          public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            metric.getCurrentIngest().dec();
            long concurrent = metric.getCurrentIngest().count();
            // Optimistically count every item as succeeded, then move the
            // per-item failures over to the failed counter in the loop below.
            metric.getSucceeded().inc(response.getItems().length);
            metric.getFailed().inc(0);
            metric.getTotalIngest().inc(response.getTookInMillis());
            int n = 0;
            for (BulkItemResponse itemResponse : response.getItems()) {
              if (itemResponse.isFailed()) {
                n++;
                metric.getSucceeded().dec(1);
                metric.getFailed().inc(1);
              }
            }
            logger.debug(
                "after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests",
                executionId,
                metric.getSucceeded().count(),
                metric.getFailed().count(),
                response.getTook().millis(),
                concurrent);
            if (n > 0) {
              logger.error(
                  "bulk [{}] failed with {} failed items, failure message = {}",
                  executionId,
                  n,
                  response.buildFailureMessage());
            } else {
              // Only a fully successful bulk retires its docs from the in-flight gauge.
              metric.getCurrentIngestNumDocs().dec(response.getItems().length);
            }
          }

          @Override
          public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            // A whole-request failure poisons the client: record the cause and
            // stop accepting further work.
            metric.getCurrentIngest().dec();
            throwable = failure;
            closed = true;
            logger.error("after bulk [" + executionId + "] error", failure);
          }
        };
    BulkProcessor.Builder builder =
        BulkProcessor.builder(client, listener)
            // Compensate for BulkProcessor's off-by-one: it flushes only after the
            // action count is exceeded, so configure one less to keep each bulk
            // request at exactly maxActionsPerBulkRequest actions.
            .setBulkActions(maxActionsPerBulkRequest - 1)
            .setConcurrentRequests(maxConcurrentBulkRequests)
            .setFlushInterval(flushInterval);
    if (maxVolume != null) {
      builder.setBulkSize(maxVolume);
    }
    this.bulkProcessor = builder.build();
    try {
      // Refuse to operate against a RED cluster; 30s is the health-check budget.
      waitForCluster(ClusterHealthStatus.YELLOW, TimeValue.timeValueSeconds(30));
      closed = false;
    } catch (IOException e) {
      logger.error(e.getMessage(), e);
      closed = true;
    }
    return this;
  }