public static void main(String[] args) throws Exception {
    // Benchmark entry point: measures how fast a replica shard recovers (file copy +
    // translog replay) while documents keep being indexed in the background.
    System.setProperty("es.logger.prefix", "");
    Natives.tryMlockall();

    // Single-shard index, no replicas initially (a replica is added later to trigger
    // recovery). Disk-threshold allocation is disabled so a small benchmark disk does
    // not prevent shard placement; local transport keeps everything in-process.
    Settings settings =
        settingsBuilder()
            .put("gateway.type", "local")
            .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, "false")
            .put(SETTING_NUMBER_OF_SHARDS, 1)
            .put(SETTING_NUMBER_OF_REPLICAS, 0)
            .put(TransportModule.TRANSPORT_TYPE_KEY, "local")
            .build();

    String clusterName = ReplicaRecoveryBenchmark.class.getSimpleName();
    Node node1 =
        nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings)).node();

    final ESLogger logger = ESLoggerFactory.getLogger("benchmark");

    final Client client1 = node1.client();
    // Enable TRACE logging for recovery internals so per-phase progress is visible.
    client1
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings("logger.indices.recovery: TRACE")
        .get();
    final BackgroundIndexer indexer =
        new BackgroundIndexer(
            INDEX_NAME, TYPE_NAME, client1, 0, CONCURRENT_INDEXERS, false, new Random());
    indexer.setMinFieldSize(10);
    indexer.setMaxFieldSize(150);
    try {
      client1.admin().indices().prepareDelete(INDEX_NAME).get();
    } catch (IndexMissingException ignored) {
      // Expected on a fresh cluster: there is nothing to delete yet.
    }
    client1.admin().indices().prepareCreate(INDEX_NAME).get();
    // Index the first half of the docs and flush, so recovery copies them as segment
    // files; the second half stays in the translog and is recovered by op replay.
    indexer.start(DOC_COUNT / 2);
    while (indexer.totalIndexedDocs() < DOC_COUNT / 2) {
      Thread.sleep(5000);
      logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
    }
    client1.admin().indices().prepareFlush().get();
    indexer.continueIndexing(DOC_COUNT / 2);
    while (indexer.totalIndexedDocs() < DOC_COUNT) {
      Thread.sleep(5000);
      logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
    }

    logger.info("--> starting another node and allocating a shard on it");

    Node node2 =
        nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings)).node();

    client1
        .admin()
        .indices()
        .prepareUpdateSettings(INDEX_NAME)
        .setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1")
        .get();

    // Shutdown signal for the stats thread. It is set BEFORE the thread is
    // interrupted (see the end of this method), so a wake-up always observes it.
    final AtomicBoolean end = new AtomicBoolean(false);

    // Background thread that logs indexing and recovery throughput every 5 seconds:
    // docs/s, recovered MB/s, and replayed translog ops/s since the previous sample.
    final Thread backgroundLogger =
        new Thread(
            new Runnable() {

              long lastTime = System.currentTimeMillis();
              long lastDocs = indexer.totalIndexedDocs();
              long lastBytes = 0;
              long lastTranslogOps = 0;

              @Override
              public void run() {
                while (true) {
                  try {
                    Thread.sleep(5000);
                  } catch (InterruptedException ignored) {
                    // The main thread interrupts us only after setting `end`; restore
                    // the interrupt status and fall through to the end-check below.
                    Thread.currentThread().interrupt();
                  }
                  if (end.get()) {
                    return;
                  }
                  long currentTime = System.currentTimeMillis();
                  long currentDocs = indexer.totalIndexedDocs();
                  // Only active recoveries are of interest; a completed recovery
                  // would report stale cumulative numbers.
                  RecoveryResponse recoveryResponse =
                      client1
                          .admin()
                          .indices()
                          .prepareRecoveries(INDEX_NAME)
                          .setActiveOnly(true)
                          .get();
                  List<ShardRecoveryResponse> indexRecoveries =
                      recoveryResponse.shardResponses().get(INDEX_NAME);
                  long translogOps;
                  long bytes;
                  if (indexRecoveries.size() > 0) {
                    translogOps =
                        indexRecoveries.get(0).recoveryState().getTranslog().recoveredOperations();
                    bytes =
                        indexRecoveries.get(0).recoveryState().getIndex().recoveredBytes();
                  } else {
                    // No active recovery: reset the baselines so the next recovery's
                    // first sample is not computed against the previous one's totals.
                    bytes = lastBytes = 0;
                    translogOps = lastTranslogOps = 0;
                  }
                  float seconds = (currentTime - lastTime) / 1000.0F;
                  logger.info(
                      "--> indexed [{}];[{}] doc/s, recovered [{}] MB/s , translog ops [{}]/s ",
                      currentDocs,
                      (currentDocs - lastDocs) / seconds,
                      (bytes - lastBytes) / 1024.0F / 1024F / seconds,
                      (translogOps - lastTranslogOps) / seconds);
                  lastBytes = bytes;
                  lastTranslogOps = translogOps;
                  lastTime = currentTime;
                  lastDocs = currentDocs;
                }
              }
            });

    backgroundLogger.start();

    client1.admin().cluster().prepareHealth().setWaitForGreenStatus().get();

    logger.info("--> green. starting relocation cycles");

    long startDocIndexed = indexer.totalIndexedDocs();
    // Keep indexing (effectively unbounded) while the recovery cycles run.
    indexer.continueIndexing(DOC_COUNT * 50);

    long totalRecoveryTime = 0;
    long startTime = System.currentTimeMillis();
    long[] recoveryTimes = new long[3];
    for (int iteration = 0; iteration < 3; iteration++) {
      // Drop the replica, then re-add it: each cycle forces a full replica recovery,
      // which we time from the settings change until the cluster is green again.
      logger.info("--> removing replicas");
      client1
          .admin()
          .indices()
          .prepareUpdateSettings(INDEX_NAME)
          .setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 0")
          .get();
      logger.info("--> adding replica again");
      long recoveryStart = System.currentTimeMillis();
      client1
          .admin()
          .indices()
          .prepareUpdateSettings(INDEX_NAME)
          .setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1")
          .get();
      client1
          .admin()
          .cluster()
          .prepareHealth(INDEX_NAME)
          .setWaitForGreenStatus()
          .setTimeout("15m")
          .get();
      long recoveryTime = System.currentTimeMillis() - recoveryStart;
      totalRecoveryTime += recoveryTime;
      recoveryTimes[iteration] = recoveryTime;
      logger.info("--> recovery done in [{}]", new TimeValue(recoveryTime));

      // sleep some to let things clean up
      Thread.sleep(10000);
    }

    long endDocIndexed = indexer.totalIndexedDocs();
    long totalTime = System.currentTimeMillis() - startTime;
    indexer.stop();

    // Set the flag first, then interrupt: the stats thread re-checks `end` after
    // every wake-up, so this ordering guarantees it exits.
    end.set(true);

    backgroundLogger.interrupt();

    backgroundLogger.join();

    logger.info(
        "average doc/s [{}], average relocation time [{}], taking [{}], [{}], [{}]",
        (endDocIndexed - startDocIndexed) * 1000.0 / totalTime,
        new TimeValue(totalRecoveryTime / 3),
        TimeValue.timeValueMillis(recoveryTimes[0]),
        TimeValue.timeValueMillis(recoveryTimes[1]),
        TimeValue.timeValueMillis(recoveryTimes[2]));

    client1.close();
    node1.close();
    node2.close();
  }
  public static void main(String[] args) throws Exception {
    // Benchmark entry point: compares terms-aggregation query time, refresh time and
    // fielddata memory usage across ordinal/global-ordinal loading strategies, for
    // both indexed (s_value) and doc-values (s_value_dv) fields.
    Natives.tryMlockall();
    // Refresh is disabled during bulk indexing; each test run refreshes explicitly.
    Settings settings =
        settingsBuilder()
            .put("refresh_interval", "-1")
            .put(SETTING_NUMBER_OF_SHARDS, 1)
            .put(SETTING_NUMBER_OF_REPLICAS, 0)
            .build();

    String clusterName = TermsAggregationSearchAndIndexingBenchmark.class.getSimpleName();
    nodes = new InternalNode[1];
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] =
          (InternalNode)
              nodeBuilder()
                  .settings(settingsBuilder().put(settings).put("name", "node1"))
                  .clusterName(clusterName)
                  .node();
    }
    Client client = nodes[0].client();

    // NOTE(review): on a fresh cluster the index does not exist yet, so this health
    // call can only time out after 10s — presumably intentional as a warm-up/settle
    // wait before the create-or-reuse decision below; confirm.
    client
        .admin()
        .cluster()
        .prepareHealth(indexName)
        .setWaitForGreenStatus()
        .setTimeout("10s")
        .execute()
        .actionGet();
    try {
      client
          .admin()
          .indices()
          .prepareCreate(indexName)
          .addMapping(typeName, generateMapping("eager", "lazy"))
          .get();
      Thread.sleep(5000);

      long startTime = System.currentTimeMillis();
      // Build a pool of NUMBER_OF_TERMS distinct random terms; the do/while retries
      // until the set accepts the candidate, guaranteeing uniqueness.
      ObjectOpenHashSet<String> uniqueTerms = ObjectOpenHashSet.newInstance();
      for (int i = 0; i < NUMBER_OF_TERMS; i++) {
        boolean added;
        do {
          added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
        } while (!added);
      }
      String[] sValues = uniqueTerms.toArray(String.class);
      long ITERS = COUNT / BATCH;
      long i = 1;
      int counter = 0;
      for (; i <= ITERS; i++) {
        BulkRequestBuilder request = client.prepareBulk();
        for (int j = 0; j < BATCH; j++) {
          counter++;

          // Each doc carries the same term in an indexed field and a doc-values
          // field, plus two multi-valued variants, so all strategies see equal data.
          XContentBuilder builder = jsonBuilder().startObject();
          builder.field("id", Integer.toString(counter));
          final String sValue = sValues[counter % sValues.length];
          builder.field("s_value", sValue);
          builder.field("s_value_dv", sValue);

          for (String field : new String[] {"sm_value", "sm_value_dv"}) {
            builder.startArray(field);
            for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
              builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
            }
            builder.endArray();
          }

          // Use typeName (the type the mapping was created for above) rather than a
          // hard-coded literal, so mapping and documents cannot drift apart.
          request.add(
              Requests.indexRequest(indexName)
                  .type(typeName)
                  .id(Integer.toString(counter))
                  .source(builder));
        }
        BulkResponse response = request.execute().actionGet();
        if (response.hasFailures()) {
          // Include the per-item failure details instead of a bare marker line.
          System.err.println("--> failures: " + response.buildFailureMessage());
        }
        if (((i * BATCH) % 10000) == 0) {
          System.out.println("--> Indexed " + (i * BATCH));
        }
      }

      System.out.println(
          "--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
    } catch (IndexAlreadyExistsException e) {
      // Reuse the data from a previous run; just wait until the index is usable.
      System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
      ClusterHealthResponse clusterHealthResponse =
          client
              .admin()
              .cluster()
              .prepareHealth(indexName)
              .setWaitForGreenStatus()
              .setTimeout("10m")
              .execute()
              .actionGet();
      if (clusterHealthResponse.isTimedOut()) {
        System.err.println("--> Timed out waiting for cluster health");
      }
    }
    // Reset both fields to lazy loading as the neutral baseline before the runs.
    client
        .admin()
        .indices()
        .preparePutMapping(indexName)
        .setType(typeName)
        .setSource(generateMapping("lazy", "lazy"))
        .get();
    client.admin().indices().prepareRefresh().execute().actionGet();
    System.out.println(
        "--> Number of docs in index: "
            + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());

    String[] nodeIds = new String[nodes.length];
    for (int i = 0; i < nodeIds.length; i++) {
      nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
    }

    // One TestRun per loading strategy: (name, indexed-field loading, doc-values
    // loading, aggregation field, execution hint).
    List<TestRun> testRuns = new ArrayList<>();
    testRuns.add(new TestRun("Regular field ordinals", "eager", "lazy", "s_value", "ordinals"));
    testRuns.add(
        new TestRun("Docvalues field ordinals", "lazy", "eager", "s_value_dv", "ordinals"));
    testRuns.add(
        new TestRun(
            "Regular field global ordinals", "eager_global_ordinals", "lazy", "s_value", null));
    testRuns.add(
        new TestRun("Docvalues field global", "lazy", "eager_global_ordinals", "s_value_dv", null));

    List<TestResult> testResults = new ArrayList<>();
    for (TestRun testRun : testRuns) {
      // Apply this run's loading strategy and clear fielddata so nothing cached from
      // the previous run skews memory or timing numbers.
      client
          .admin()
          .indices()
          .preparePutMapping(indexName)
          .setType(typeName)
          .setSource(
              generateMapping(testRun.indexedFieldEagerLoading, testRun.docValuesEagerLoading))
          .get();
      client.admin().indices().prepareClearCache(indexName).setFieldDataCache(true).get();
      SearchThread searchThread =
          new SearchThread(client, testRun.termsAggsField, testRun.termsAggsExecutionHint);
      RefreshThread refreshThread = new RefreshThread(client);
      System.out.println("--> Running '" + testRun.name + "' round...");
      // Run concurrent refresh + search load for 2 minutes, then stop both.
      new Thread(refreshThread).start();
      new Thread(searchThread).start();
      Thread.sleep(2 * 60 * 1000);
      refreshThread.stop();
      searchThread.stop();

      System.out.println("--> Avg refresh time: " + refreshThread.avgRefreshTime + " ms");
      System.out.println("--> Avg query time: " + searchThread.avgQueryTime + " ms");

      ClusterStatsResponse clusterStateResponse =
          client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
      System.out.println(
          "--> Heap used: " + clusterStateResponse.getNodesStats().getJvm().getHeapUsed());
      ByteSizeValue fieldDataMemoryUsed =
          clusterStateResponse.getIndicesStats().getFieldData().getMemorySize();
      System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
      testResults.add(
          new TestResult(
              testRun.name,
              refreshThread.avgRefreshTime,
              searchThread.avgQueryTime,
              fieldDataMemoryUsed));
    }

    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");
    System.out.format(
        Locale.ENGLISH,
        "%30s%18s%15s%15s\n",
        "name",
        "avg refresh time",
        "avg query time",
        "fieldata size");
    for (TestResult testResult : testResults) {
      System.out.format(
          Locale.ENGLISH,
          "%30s%18s%15s%15s\n",
          testResult.name,
          testResult.avgRefreshTime,
          testResult.avgQueryTime,
          testResult.fieldDataSizeInMemory);
    }
    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");

    client.close();
    for (InternalNode node : nodes) {
      node.close();
    }
  }