Code Example #1
File: RestTable.java Project: jp96/elasticsearch
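This helper renders one cell of a _cat-style table. Null stays null; ByteSizeValue, SizeValue, and TimeValue cells honor the request's "bytes", "size", and "time" resolution parameters (for example, ?bytes=b prints a raw byte count), falling back to the value's own toString() when no resolution is given; every other type is rendered via toString().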
 private static String renderValue(RestRequest request, Object value) {
   if (value == null) {
     return null;
   }
   if (value instanceof ByteSizeValue) {
     ByteSizeValue v = (ByteSizeValue) value;
     String resolution = request.param("bytes");
     if ("b".equals(resolution)) {
       return Long.toString(v.bytes());
     } else if ("k".equals(resolution)) {
       return Long.toString(v.kb());
     } else if ("m".equals(resolution)) {
       return Long.toString(v.mb());
     } else if ("g".equals(resolution)) {
       return Long.toString(v.gb());
     } else if ("t".equals(resolution)) {
       return Long.toString(v.tb());
     } else if ("p".equals(resolution)) {
       return Long.toString(v.pb());
     } else {
       return v.toString();
     }
   }
   if (value instanceof SizeValue) {
     SizeValue v = (SizeValue) value;
     String resolution = request.param("size");
     if ("b".equals(resolution)) {
       return Long.toString(v.singles());
     } else if ("k".equals(resolution)) {
       return Long.toString(v.kilo());
     } else if ("m".equals(resolution)) {
       return Long.toString(v.mega());
     } else if ("g".equals(resolution)) {
       return Long.toString(v.giga());
     } else if ("t".equals(resolution)) {
       return Long.toString(v.tera());
     } else if ("p".equals(resolution)) {
       return Long.toString(v.peta());
     } else {
       return v.toString();
     }
   }
   if (value instanceof TimeValue) {
     TimeValue v = (TimeValue) value;
     String resolution = request.param("time");
     if ("ms".equals(resolution)) {
       return Long.toString(v.millis());
     } else if ("s".equals(resolution)) {
       return Long.toString(v.seconds());
     } else if ("m".equals(resolution)) {
       return Long.toString(v.minutes());
     } else if ("h".equals(resolution)) {
       return Long.toString(v.hours());
     } else {
       return v.toString();
     }
   }
    // Add additional built-in data points we can render based on request parameters?
   return value.toString();
 }
Code Example #2
File: ThreadPool.java Project: mehiel/elasticsearch
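Deserializes a thread-pool info entry from the transport stream: fixed fields first, then keepAlive and queueSize guarded by boolean presence flags, then three booleans consumed solely to stay wire-compatible (the removed waitTime and rejected settings, and the queue type).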
 @Override
 public void readFrom(StreamInput in) throws IOException {
   name = in.readString();
   type = in.readString();
   min = in.readInt();
   max = in.readInt();
   if (in.readBoolean()) {
     keepAlive = TimeValue.readTimeValue(in);
   }
   if (in.readBoolean()) {
     queueSize = SizeValue.readSizeValue(in);
   }
   in.readBoolean(); // here to conform with removed waitTime
   in.readBoolean(); // here to conform with removed rejected setting
   in.readBoolean(); // here to conform with queue type
 }
Code Example #3
File: ThreadPool.java Project: mehiel/elasticsearch
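Serializes the same info to XContent: min and max are emitted only when set (-1 means unset), and the optional keepAlive and queueSize only when present.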
 @Override
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
   builder.startObject(name, XContentBuilder.FieldCaseConversion.NONE);
   builder.field(Fields.TYPE, type);
   if (min != -1) {
     builder.field(Fields.MIN, min);
   }
   if (max != -1) {
     builder.field(Fields.MAX, max);
   }
   if (keepAlive != null) {
     builder.field(Fields.KEEP_ALIVE, keepAlive.toString());
   }
   if (queueSize != null) {
     builder.field(Fields.QUEUE_SIZE, queueSize.toString());
   }
   builder.endObject();
   return builder;
 }
Code Example #4
File: ThreadPool.java Project: mehiel/elasticsearch
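The write-side mirror of readFrom above: the same fields in the same order, a presence flag before each optional value, and three false placeholders matching the booleans the reader skips.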
 @Override
 public void writeTo(StreamOutput out) throws IOException {
   out.writeString(name);
   out.writeString(type);
   out.writeInt(min);
   out.writeInt(max);
   if (keepAlive == null) {
     out.writeBoolean(false);
   } else {
     out.writeBoolean(true);
     keepAlive.writeTo(out);
   }
   if (queueSize == null) {
     out.writeBoolean(false);
   } else {
     out.writeBoolean(true);
     queueSize.writeTo(out);
   }
   out.writeBoolean(false); // here to conform with removed waitTime
   out.writeBoolean(false); // here to conform with removed rejected setting
   out.writeBoolean(false); // here to conform with queue type
 }
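The readFrom/writeTo pair only works because both sides agree exactly on field order and presence flags; a single missed boolean desynchronizes the stream. Below is a minimal, self-contained sketch of that presence-flag pattern using plain java.io streams as a stand-in for Elasticsearch's StreamInput/StreamOutput (an assumption made for illustration; the real classes add var-length encodings and richer helpers):

import java.io.*;

class OptionalFieldDemo {
  // Writer: a boolean marker says whether the optional value follows.
  static void writeOptionalLong(DataOutputStream out, Long value) throws IOException {
    if (value == null) {
      out.writeBoolean(false); // absent
    } else {
      out.writeBoolean(true); // present
      out.writeLong(value);
    }
  }

  // Reader: must mirror the writer exactly.
  static Long readOptionalLong(DataInputStream in) throws IOException {
    return in.readBoolean() ? in.readLong() : null;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    writeOptionalLong(out, 42L);
    writeOptionalLong(out, null);
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(readOptionalLong(in)); // 42
    System.out.println(readOptionalLong(in)); // null
  }
}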
Code Example #5
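A standalone Lucene micro-benchmark (no file name given in the listing): it indexes one million documents, each with a _uid string field and a matching _version doc value, then runs two threads that each perform 100k random Versions.loadVersion lookups against a near-real-time reader and check that every returned version equals the document's uid.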
  public static void main(String[] args) throws Exception {

    FSDirectory dir = FSDirectory.open(new File("work/test"));
    IndexWriter writer =
        new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    final int NUMBER_OF_THREADS = 2;
    final long INDEX_COUNT = SizeValue.parseSizeValue("1m").singles();
    final long SCAN_COUNT = SizeValue.parseSizeValue("100k").singles();
    final long startUid = 1000000;

    long LIMIT = startUid + INDEX_COUNT;
    StopWatch watch = new StopWatch().start();
    System.out.println("Indexing " + INDEX_COUNT + " docs...");
    for (long i = startUid; i < LIMIT; i++) {
      Document doc = new Document();
      doc.add(new StringField("_uid", Long.toString(i), Store.NO));
      doc.add(new NumericDocValuesField("_version", i));
      writer.addDocument(doc);
    }
    System.out.println("Done indexing, took " + watch.stop().lastTaskTime());

    final IndexReader reader = DirectoryReader.open(writer, true);

    final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
    Thread[] threads = new Thread[NUMBER_OF_THREADS];
    for (int i = 0; i < threads.length; i++) {
      threads[i] =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  try {
                    for (long i = 0; i < SCAN_COUNT; i++) {
                      // pick a random uid in [startUid, startUid + INDEX_COUNT);
                      // nextLong(bound) cannot go negative
                      long id = startUid + ThreadLocalRandom.current().nextLong(INDEX_COUNT);
                      final long version =
                          Versions.loadVersion(reader, new Term("_uid", Long.toString(id)));
                      if (version != id) {
                        System.err.println("wrong id...");
                        break;
                      }
                    }
                  } catch (Exception e) {
                    e.printStackTrace();
                  } finally {
                    latch.countDown();
                  }
                }
              });
    }

    watch = new StopWatch().start();
    for (int i = 0; i < threads.length; i++) {
      threads[i].start();
    }
    latch.await();
    watch.stop();
    System.out.println(
        "Scanned in "
            + watch.totalTime()
            + " TP Seconds "
            + ((SCAN_COUNT * NUMBER_OF_THREADS) / watch.totalTime().secondsFrac()));
  }
Code Example #6
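ChildSearchAndIndexingBenchmark exercises parent/child search under concurrent indexing: an IndexThread keeps re-indexing child documents (accumulating deleted docs), while a SearchThread measures the average latency of has_child queries, both with a term filter and with match_all, and reports heap usage, until a key press stops both threads.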
public class ChildSearchAndIndexingBenchmark {

  static int PARENT_COUNT = (int) SizeValue.parseSizeValue("1m").singles();
  static int NUM_CHILDREN_PER_PARENT = 12;
  static int QUERY_VALUE_RATIO_PER_PARENT = 3;
  static int QUERY_COUNT = 50;
  static String indexName = "test";
  static Random random = new Random();

  public static void main(String[] args) throws Exception {
    Settings settings =
        settingsBuilder()
            .put("refresh_interval", "-1")
            .put(SETTING_NUMBER_OF_SHARDS, 1)
            .put(SETTING_NUMBER_OF_REPLICAS, 0)
            .build();

    String clusterName = ChildSearchAndIndexingBenchmark.class.getSimpleName();
    Node node1 =
        nodeBuilder()
            .settings(settingsBuilder().put(settings).put("name", "node1"))
            .clusterName(clusterName)
            .node();
    Client client = node1.client();

    client
        .admin()
        .cluster()
        .prepareHealth(indexName)
        .setWaitForGreenStatus()
        .setTimeout("10s")
        .execute()
        .actionGet();
    try {
      client.admin().indices().create(createIndexRequest(indexName)).actionGet();
      client
          .admin()
          .indices()
          .preparePutMapping(indexName)
          .setType("child")
          .setSource(
              XContentFactory.jsonBuilder()
                  .startObject()
                  .startObject("child")
                  .startObject("_parent")
                  .field("type", "parent")
                  .endObject()
                  .endObject()
                  .endObject())
          .execute()
          .actionGet();
      Thread.sleep(5000);

      long startTime = System.currentTimeMillis();
      ParentChildIndexGenerator generator =
          new ParentChildIndexGenerator(
              client, PARENT_COUNT, NUM_CHILDREN_PER_PARENT, QUERY_VALUE_RATIO_PER_PARENT);
      generator.index();
      System.out.println(
          "--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
    } catch (IndexAlreadyExistsException e) {
      System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
      ClusterHealthResponse clusterHealthResponse =
          client
              .admin()
              .cluster()
              .prepareHealth(indexName)
              .setWaitForGreenStatus()
              .setTimeout("10m")
              .execute()
              .actionGet();
      if (clusterHealthResponse.isTimedOut()) {
        System.err.println("--> Timed out waiting for cluster health");
      }
    }
    client.admin().indices().prepareRefresh().execute().actionGet();
    System.out.println(
        "--> Number of docs in index: "
            + client
                .prepareSearch()
                .setSize(0)
                .setQuery(matchAllQuery())
                .execute()
                .actionGet()
                .getHits()
                .totalHits());

    SearchThread searchThread = new SearchThread(client);
    new Thread(searchThread).start();
    IndexThread indexThread = new IndexThread(client);
    new Thread(indexThread).start();

    System.in.read();

    indexThread.stop();
    searchThread.stop();
    client.close();
    node1.close();
  }

  static class IndexThread implements Runnable {

    private final Client client;
    private volatile boolean run = true;

    IndexThread(Client client) {
      this.client = client;
    }

    @Override
    public void run() {
      while (run) {
        int childIdLimit = PARENT_COUNT * NUM_CHILDREN_PER_PARENT;
        for (int childId = 1; run && childId < childIdLimit; ) {
          try {
            for (int j = 0; j < 8; j++) {
              GetResponse getResponse =
                  client
                      .prepareGet(indexName, "child", String.valueOf(++childId))
                      .setFields("_source", "_parent")
                      .setRouting("1") // Doesn't matter what value, since there is only one shard
                      .get();
              client
                  .prepareIndex(indexName, "child", Integer.toString(childId) + "_" + j)
                  .setParent(getResponse.getField("_parent").getValue().toString())
                  .setSource(getResponse.getSource())
                  .get();
            }
            client.admin().indices().prepareRefresh(indexName).execute().actionGet();
            Thread.sleep(1000);
            if (childId % 500 == 0) {
              NodesStatsResponse statsResponse =
                  client
                      .admin()
                      .cluster()
                      .prepareNodesStats()
                      .clear()
                      .setIndices(true)
                      .execute()
                      .actionGet();
              System.out.println(
                  "Deleted docs: " + statsResponse.getAt(0).getIndices().getDocs().getDeleted());
            }
          } catch (Throwable e) {
            e.printStackTrace();
          }
        }
      }
    }

    public void stop() {
      run = false;
    }
  }

  static class SearchThread implements Runnable {

    private final Client client;
    private final int numValues;
    private volatile boolean run = true;

    SearchThread(Client client) {
      this.client = client;
      this.numValues = NUM_CHILDREN_PER_PARENT / QUERY_VALUE_RATIO_PER_PARENT;
    }

    @Override
    public void run() {
      while (run) {
        try {
          long totalQueryTime = 0;
          for (int j = 0; j < QUERY_COUNT; j++) {
            SearchResponse searchResponse =
                client
                    .prepareSearch(indexName)
                    .setQuery(
                        boolQuery()
                            .must(matchAllQuery())
                            .filter(
                                hasChildQuery(
                                    "child",
                                    termQuery("field2", "value" + random.nextInt(numValues)))))
                    .execute()
                    .actionGet();
            if (searchResponse.getFailedShards() > 0) {
              System.err.println(
                  "Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
            }
            totalQueryTime += searchResponse.getTookInMillis();
          }
          System.out.println(
              "--> has_child filter with term filter Query Avg: "
                  + (totalQueryTime / QUERY_COUNT)
                  + "ms");

          totalQueryTime = 0;
          for (int j = 1; j <= QUERY_COUNT; j++) {
            SearchResponse searchResponse =
                client
                    .prepareSearch(indexName)
                    .setQuery(
                        boolQuery()
                            .must(matchAllQuery())
                            .filter(hasChildQuery("child", matchAllQuery())))
                    .execute()
                    .actionGet();
            if (searchResponse.getFailedShards() > 0) {
              System.err.println(
                  "Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
            }
            totalQueryTime += searchResponse.getTookInMillis();
          }
          System.out.println(
              "--> has_child filter with match_all child query, Query Avg: "
                  + (totalQueryTime / QUERY_COUNT)
                  + "ms");

          NodesStatsResponse statsResponse =
              client.admin().cluster().prepareNodesStats().setJvm(true).execute().actionGet();
          System.out.println(
              "--> Committed heap size: "
                  + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
          System.out.println(
              "--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
          Thread.sleep(1000);
        } catch (Throwable e) {
          e.printStackTrace();
        }
      }
    }

    public void stop() {
      run = false;
    }
  }
}
Code Example #7
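ReplicaRecoveryBenchmark measures replica recovery while indexing continues: it seeds an index on one node, brings up a second node, then three times drops and re-adds the replica, while a background thread polls the recovery API to log indexing rate, recovered MB/s, and replayed translog ops/s, finishing with per-cycle recovery times.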
public class ReplicaRecoveryBenchmark {

  private static final String INDEX_NAME = "index";
  private static final String TYPE_NAME = "type";

  static int DOC_COUNT = (int) SizeValue.parseSizeValue("40k").singles();
  static int CONCURRENT_INDEXERS = 2;

  public static void main(String[] args) throws Exception {
    System.setProperty("es.logger.prefix", "");
    Natives.tryMlockall();

    Settings settings =
        settingsBuilder()
            .put("gateway.type", "local")
            .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, "false")
            .put(SETTING_NUMBER_OF_SHARDS, 1)
            .put(SETTING_NUMBER_OF_REPLICAS, 0)
            .put(TransportModule.TRANSPORT_TYPE_KEY, "local")
            .build();

    String clusterName = ReplicaRecoveryBenchmark.class.getSimpleName();
    Node node1 =
        nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings)).node();

    final ESLogger logger = ESLoggerFactory.getLogger("benchmark");

    final Client client1 = node1.client();
    client1
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings("logger.indices.recovery: TRACE")
        .get();
    final BackgroundIndexer indexer =
        new BackgroundIndexer(
            INDEX_NAME, TYPE_NAME, client1, 0, CONCURRENT_INDEXERS, false, new Random());
    indexer.setMinFieldSize(10);
    indexer.setMaxFieldSize(150);
    try {
      client1.admin().indices().prepareDelete(INDEX_NAME).get();
    } catch (IndexMissingException e) {
      // the index may not exist yet; nothing to delete
    }
    client1.admin().indices().prepareCreate(INDEX_NAME).get();
    indexer.start(DOC_COUNT / 2);
    while (indexer.totalIndexedDocs() < DOC_COUNT / 2) {
      Thread.sleep(5000);
      logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
    }
    client1.admin().indices().prepareFlush().get();
    indexer.continueIndexing(DOC_COUNT / 2);
    while (indexer.totalIndexedDocs() < DOC_COUNT) {
      Thread.sleep(5000);
      logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
    }

    logger.info("--> starting another node and allocating a shard on it");

    Node node2 =
        nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings)).node();

    client1
        .admin()
        .indices()
        .prepareUpdateSettings(INDEX_NAME)
        .setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1")
        .get();

    final AtomicBoolean end = new AtomicBoolean(false);

    final Thread backgroundLogger =
        new Thread(
            new Runnable() {

              long lastTime = System.currentTimeMillis();
              long lastDocs = indexer.totalIndexedDocs();
              long lastBytes = 0;
              long lastTranslogOps = 0;

              @Override
              public void run() {
                while (true) {
                  try {
                    Thread.sleep(5000);
                  } catch (InterruptedException e) {
                    // interrupted on shutdown; fall through to the end-flag check
                  }
                  if (end.get()) {
                    return;
                  }
                  long currentTime = System.currentTimeMillis();
                  long currentDocs = indexer.totalIndexedDocs();
                  RecoveryResponse recoveryResponse =
                      client1
                          .admin()
                          .indices()
                          .prepareRecoveries(INDEX_NAME)
                          .setActiveOnly(true)
                          .get();
                  List<ShardRecoveryResponse> indexRecoveries =
                      recoveryResponse.shardResponses().get(INDEX_NAME);
                  long translogOps;
                  long bytes;
                  if (indexRecoveries.size() > 0) {
                    translogOps =
                        indexRecoveries.get(0).recoveryState().getTranslog().recoveredOperations();
                    bytes =
                        recoveryResponse
                            .shardResponses()
                            .get(INDEX_NAME)
                            .get(0)
                            .recoveryState()
                            .getIndex()
                            .recoveredBytes();
                  } else {
                    bytes = lastBytes = 0;
                    translogOps = lastTranslogOps = 0;
                  }
                  float seconds = (currentTime - lastTime) / 1000.0F;
                  logger.info(
                      "--> indexed [{}];[{}] doc/s, recovered [{}] MB/s , translog ops [{}]/s ",
                      currentDocs,
                      (currentDocs - lastDocs) / seconds,
                      (bytes - lastBytes) / 1024.0F / 1024F / seconds,
                      (translogOps - lastTranslogOps) / seconds);
                  lastBytes = bytes;
                  lastTranslogOps = translogOps;
                  lastTime = currentTime;
                  lastDocs = currentDocs;
                }
              }
            });

    backgroundLogger.start();

    client1.admin().cluster().prepareHealth().setWaitForGreenStatus().get();

    logger.info("--> green. starting relocation cycles");

    long startDocIndexed = indexer.totalIndexedDocs();
    indexer.continueIndexing(DOC_COUNT * 50);

    long totalRecoveryTime = 0;
    long startTime = System.currentTimeMillis();
    long[] recoveryTimes = new long[3];
    for (int iteration = 0; iteration < 3; iteration++) {
      logger.info("--> removing replicas");
      client1
          .admin()
          .indices()
          .prepareUpdateSettings(INDEX_NAME)
          .setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 0")
          .get();
      logger.info("--> adding replica again");
      long recoveryStart = System.currentTimeMillis();
      client1
          .admin()
          .indices()
          .prepareUpdateSettings(INDEX_NAME)
          .setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1")
          .get();
      client1
          .admin()
          .cluster()
          .prepareHealth(INDEX_NAME)
          .setWaitForGreenStatus()
          .setTimeout("15m")
          .get();
      long recoveryTime = System.currentTimeMillis() - recoveryStart;
      totalRecoveryTime += recoveryTime;
      recoveryTimes[iteration] = recoveryTime;
      logger.info("--> recovery done in [{}]", new TimeValue(recoveryTime));

      // sleep some to let things clean up
      Thread.sleep(10000);
    }

    long endDocIndexed = indexer.totalIndexedDocs();
    long totalTime = System.currentTimeMillis() - startTime;
    indexer.stop();

    end.set(true);

    backgroundLogger.interrupt();

    backgroundLogger.join();

    logger.info(
        "average doc/s [{}], average relocation time [{}], taking [{}], [{}], [{}]",
        (endDocIndexed - startDocIndexed) * 1000.0 / totalTime,
        new TimeValue(totalRecoveryTime / 3),
        TimeValue.timeValueMillis(recoveryTimes[0]),
        TimeValue.timeValueMillis(recoveryTimes[1]),
        TimeValue.timeValueMillis(recoveryTimes[2]));

    client1.close();
    node1.close();
    node2.close();
  }
}
Code Example #8
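A hash-map counting micro-benchmark: the same workload (5m increment operations over 20 distinct keys, 10 iterations) is run against HPPC's ObjectIntOpenHashMap and IntIntOpenHashMap, object-entry variants, HashMap, and IdentityHashMap. The printed labels appear to keep the names of the Trove classes the benchmark was originally written against.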
  public static void main(String[] args) {

    int NUMBER_OF_KEYS = (int) SizeValue.parseSizeValue("20").singles();
    int STRING_SIZE = 5;
    long PUT_OPERATIONS = SizeValue.parseSizeValue("5m").singles();
    long ITERATIONS = 10;
    boolean REUSE = true;

    String[] values = new String[NUMBER_OF_KEYS];
    for (int i = 0; i < values.length; i++) {
      values[i] = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), STRING_SIZE);
    }

    StopWatch stopWatch;

    stopWatch = new StopWatch().start();
    ObjectIntOpenHashMap<String> map = new ObjectIntOpenHashMap<>();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        map.clear();
      } else {
        map = new ObjectIntOpenHashMap<>();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        map.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
      }
    }
    map.clear();
    map = null;

    stopWatch.stop();
    System.out.println(
        "TObjectIntHashMap: "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");

    stopWatch = new StopWatch().start();
    // TObjectIntCustomHashMap<String> iMap =
    //     new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
    ObjectIntOpenHashMap<String> iMap = new ObjectIntOpenHashMap<>();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        iMap.clear();
      } else {
        iMap = new ObjectIntOpenHashMap<>();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
      }
    }
    stopWatch.stop();
    System.out.println(
        "TObjectIntCustomHashMap(StringIdentity): "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");
    iMap.clear();
    iMap = null;

    stopWatch = new StopWatch().start();
    iMap = new ObjectIntOpenHashMap<>();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        iMap.clear();
      } else {
        iMap = new ObjectIntOpenHashMap<>();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
      }
    }
    stopWatch.stop();
    System.out.println(
        "TObjectIntCustomHashMap(PureIdentity): "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");
    iMap.clear();
    iMap = null;

    // now test with THashMap
    stopWatch = new StopWatch().start();
    ObjectObjectOpenHashMap<String, StringEntry> tMap = new ObjectObjectOpenHashMap<>();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        tMap.clear();
      } else {
        tMap = new ObjectObjectOpenHashMap<>();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        String key = values[(int) (i % NUMBER_OF_KEYS)];
        StringEntry stringEntry = tMap.get(key);
        if (stringEntry == null) {
          stringEntry = new StringEntry(key, 1);
          tMap.put(key, stringEntry);
        } else {
          stringEntry.counter++;
        }
      }
    }

    tMap.clear();
    tMap = null;

    stopWatch.stop();
    System.out.println(
        "THashMap: "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");

    stopWatch = new StopWatch().start();
    HashMap<String, StringEntry> hMap = new HashMap<>();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        hMap.clear();
      } else {
        hMap = new HashMap<>();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        String key = values[(int) (i % NUMBER_OF_KEYS)];
        StringEntry stringEntry = hMap.get(key);
        if (stringEntry == null) {
          stringEntry = new StringEntry(key, 1);
          hMap.put(key, stringEntry);
        } else {
          stringEntry.counter++;
        }
      }
    }

    hMap.clear();
    hMap = null;

    stopWatch.stop();
    System.out.println(
        "HashMap: "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");

    stopWatch = new StopWatch().start();
    IdentityHashMap<String, StringEntry> ihMap = new IdentityHashMap<>();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        ihMap.clear();
      } else {
        ihMap = new IdentityHashMap<>();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        String key = values[(int) (i % NUMBER_OF_KEYS)];
        StringEntry stringEntry = ihMap.get(key);
        if (stringEntry == null) {
          stringEntry = new StringEntry(key, 1);
          ihMap.put(key, stringEntry);
        } else {
          stringEntry.counter++;
        }
      }
    }
    stopWatch.stop();
    System.out.println(
        "IdentityHashMap: "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");

    ihMap.clear();
    ihMap = null;

    int[] iValues = new int[NUMBER_OF_KEYS];
    for (int i = 0; i < values.length; i++) {
      iValues[i] = ThreadLocalRandom.current().nextInt();
    }

    stopWatch = new StopWatch().start();
    IntIntOpenHashMap intMap = new IntIntOpenHashMap();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        intMap.clear();
      } else {
        intMap = new IntIntOpenHashMap();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        int key = iValues[(int) (i % NUMBER_OF_KEYS)];
        intMap.addTo(key, 1);
      }
    }
    stopWatch.stop();
    System.out.println(
        "TIntIntHashMap: "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");

    intMap.clear();
    intMap = null;

    // now test with THashMap
    stopWatch = new StopWatch().start();
    IntObjectOpenHashMap<IntEntry> tIntMap = new IntObjectOpenHashMap<>();
    for (long iter = 0; iter < ITERATIONS; iter++) {
      if (REUSE) {
        tIntMap.clear();
      } else {
        tIntMap = new IntObjectOpenHashMap<>();
      }
      for (long i = 0; i < PUT_OPERATIONS; i++) {
        int key = iValues[(int) (i % NUMBER_OF_KEYS)];
        IntEntry intEntry = tIntMap.get(key);
        if (intEntry == null) {
          intEntry = new IntEntry(key, 1);
          tIntMap.put(key, intEntry);
        } else {
          intEntry.counter++;
        }
      }
    }

    tIntMap.clear();
    tIntMap = null;

    stopWatch.stop();
    System.out.println(
        "TIntObjectHashMap: "
            + stopWatch.totalTime()
            + ", "
            + stopWatch.totalTime().millisFrac() / ITERATIONS
            + "ms");
  }
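Every object-map loop above repeats the same get-then-insert-or-increment dance around a mutable entry. As an aside (not part of the original benchmark), on any modern JDK the same counting collapses into Map.merge; a minimal, self-contained sketch:

import java.util.HashMap;
import java.util.Map;

class CountingSketch {
  public static void main(String[] args) {
    Map<String, Integer> counts = new HashMap<>();
    for (String v : new String[] {"a", "b", "a", "c", "a"}) {
      counts.merge(v, 1, Integer::sum); // insert 1, or add 1 to the existing count
    }
    System.out.println(counts); // {a=3, b=1, c=1}
  }
}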
Code Example #9
File: ThreadPool.java Project: mehiel/elasticsearch
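rebuild creates or updates one named thread pool from settings. The "same" executor is never changed; for "cached", "fixed", and "scaling" pools, if only mutable parameters changed (keep_alive, pool size, min/max) the existing executor is adjusted in place and re-wrapped with a fresh Info, defaults are inherited from the previous pool where sensible, and an unknown type raises ElasticSearchIllegalArgumentException.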
  private ExecutorHolder rebuild(
      String name,
      ExecutorHolder previousExecutorHolder,
      @Nullable Settings settings,
      Settings defaultSettings) {
    if (Names.SAME.equals(name)) {
      // Don't allow changing the "same" thread executor
      return previousExecutorHolder;
    }
    if (settings == null) {
      settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
    }
    Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null;
    String type =
        settings.get(
            "type", previousInfo != null ? previousInfo.getType() : defaultSettings.get("type"));
    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);
    if ("same".equals(type)) {
      if (previousExecutorHolder != null) {
        logger.debug("updating thread_pool [{}], type [{}]", name, type);
      } else {
        logger.debug("creating thread_pool [{}], type [{}]", name, type);
      }
      return new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(name, type));
    } else if ("cached".equals(type)) {
      TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
      if (previousExecutorHolder != null) {
        if ("cached".equals(previousInfo.getType())) {
          TimeValue updatedKeepAlive =
              settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
          if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
            logger.debug(
                "updating thread_pool [{}], type [{}], keep_alive [{}]",
                name,
                type,
                updatedKeepAlive);
            ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
            return new ExecutorHolder(
                previousExecutorHolder.executor,
                new Info(name, type, -1, -1, updatedKeepAlive, null));
          }
          return previousExecutorHolder;
        }
        if (previousInfo.getKeepAlive() != null) {
          defaultKeepAlive = previousInfo.getKeepAlive();
        }
      }
      TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
      if (previousExecutorHolder != null) {
        logger.debug(
            "updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
      } else {
        logger.debug(
            "creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
      }
      Executor executor =
          EsExecutors.newCached(keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, -1, -1, keepAlive, null));
    } else if ("fixed".equals(type)) {
      int defaultSize =
          defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
      SizeValue defaultQueueSize =
          defaultSettings.getAsSize("queue", defaultSettings.getAsSize("queue_size", null));

      if (previousExecutorHolder != null) {
        if ("fixed".equals(previousInfo.getType())) {
          SizeValue updatedQueueSize =
              settings.getAsSize(
                  "capacity",
                  settings.getAsSize(
                      "queue", settings.getAsSize("queue_size", previousInfo.getQueueSize())));
          if (Objects.equal(previousInfo.getQueueSize(), updatedQueueSize)) {
            int updatedSize = settings.getAsInt("size", previousInfo.getMax());
            if (previousInfo.getMax() != updatedSize) {
              logger.debug(
                  "updating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
                  name,
                  type,
                  updatedSize,
                  updatedQueueSize);
              ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedSize);
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setMaximumPoolSize(updatedSize);
              return new ExecutorHolder(
                  previousExecutorHolder.executor,
                  new Info(name, type, updatedSize, updatedSize, null, updatedQueueSize));
            }
            return previousExecutorHolder;
          }
        }
        if (previousInfo.getMax() >= 0) {
          defaultSize = previousInfo.getMax();
        }
        defaultQueueSize = previousInfo.getQueueSize();
      }

      int size = settings.getAsInt("size", defaultSize);
      SizeValue queueSize =
          settings.getAsSize(
              "capacity",
              settings.getAsSize("queue", settings.getAsSize("queue_size", defaultQueueSize)));
      logger.debug(
          "creating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
          name,
          type,
          size,
          queueSize);
      Executor executor =
          EsExecutors.newFixed(
              size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));
    } else if ("scaling".equals(type)) {
      TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
      int defaultMin = defaultSettings.getAsInt("min", 1);
      int defaultSize =
          defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
      if (previousExecutorHolder != null) {
        if ("scaling".equals(previousInfo.getType())) {
          TimeValue updatedKeepAlive =
              settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
          int updatedMin = settings.getAsInt("min", previousInfo.getMin());
          int updatedSize =
              settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax()));
          if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)
              || previousInfo.getMin() != updatedMin
              || previousInfo.getMax() != updatedSize) {
            logger.debug(
                "updating thread_pool [{}], type [{}], keep_alive [{}]",
                name,
                type,
                updatedKeepAlive);
            if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
            }
            if (previousInfo.getMin() != updatedMin) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedMin);
            }
            if (previousInfo.getMax() != updatedSize) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setMaximumPoolSize(updatedSize);
            }
            return new ExecutorHolder(
                previousExecutorHolder.executor,
                new Info(name, type, updatedMin, updatedSize, updatedKeepAlive, null));
          }
          return previousExecutorHolder;
        }
        if (previousInfo.getKeepAlive() != null) {
          defaultKeepAlive = previousInfo.getKeepAlive();
        }
        if (previousInfo.getMin() >= 0) {
          defaultMin = previousInfo.getMin();
        }
        if (previousInfo.getMax() >= 0) {
          defaultSize = previousInfo.getMax();
        }
      }
      TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
      int min = settings.getAsInt("min", defaultMin);
      int size = settings.getAsInt("max", settings.getAsInt("size", defaultSize));
      if (previousExecutorHolder != null) {
        logger.debug(
            "updating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
            name,
            type,
            min,
            size,
            keepAlive);
      } else {
        logger.debug(
            "creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
            name,
            type,
            min,
            size,
            keepAlive);
      }
      Executor executor =
          EsExecutors.newScaling(
              min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null));
    }
    throw new ElasticSearchIllegalArgumentException(
        "No type found [" + type + "], for [" + name + "]");
  }

Code Example #10
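A standalone concurrent index/delete stress test: ten threads issue 10k operations each against a shared space of 10,000 ids (one delete for every ten indexes), after which every id is probed repeatedly for duplicate search hits, for flip-flopping existence, and for unstable version numbers.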
  public static void main(String[] args) throws Exception {

    Settings settings = settingsBuilder().put("gateway.type", "none").build();

    Node node1 = nodeBuilder().settings(settings).node();
    Node node2 = nodeBuilder().settings(settings).node();
    final Node client = nodeBuilder().settings(settings).client(true).node();

    final int NUMBER_OF_DOCS = 10000;
    final int NUMBER_OF_THREADS = 10;
    final long NUMBER_OF_ITERATIONS = SizeValue.parseSizeValue("10k").singles();
    final long DELETE_EVERY = 10;

    final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
    Thread[] threads = new Thread[NUMBER_OF_THREADS];
    for (int i = 0; i < threads.length; i++) {
      threads[i] =
          new Thread() {
            @Override
            public void run() {
              try {
                for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
                  if ((i % DELETE_EVERY) == 0) {
                    client
                        .client()
                        .prepareDelete(
                            "test",
                            "type1",
                            Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
                        .execute()
                        .actionGet();
                  } else {
                    client
                        .client()
                        .prepareIndex(
                            "test",
                            "type1",
                            Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
                        .setSource("field1", "value1")
                        .execute()
                        .actionGet();
                  }
                }
              } finally {
                latch.countDown();
              }
            }
          };
    }

    for (Thread thread : threads) {
      thread.start();
    }

    latch.await();
    System.out.println("done indexing, verifying docs");
    client.client().admin().indices().prepareRefresh().execute().actionGet();
    for (int i = 0; i < NUMBER_OF_DOCS; i++) {
      String id = Integer.toString(i);
      for (int j = 0; j < 5; j++) {
        SearchResponse response =
            client
                .client()
                .prepareSearch()
                .setQuery(QueryBuilders.termQuery("_id", id))
                .execute()
                .actionGet();
        if (response.getHits().totalHits() > 1) {
          System.err.println("[" + i + "] FAIL, HITS [" + response.getHits().totalHits() + "]");
        }
      }
      GetResponse getResponse =
          client.client().prepareGet("test", "type1", id).execute().actionGet();
      if (getResponse.exists()) {
        long version = getResponse.version();
        for (int j = 0; j < 5; j++) {
          getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
          if (!getResponse.exists()) {
            System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
            break;
          }
          if (version != getResponse.version()) {
            System.err.println(
                "["
                    + i
                    + "] FAIL, DIFFERENT VERSIONS: ["
                    + version
                    + "], ["
                    + getResponse.version()
                    + "]");
            break;
          }
        }
      } else {
        for (int j = 0; j < 5; j++) {
          getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
          if (getResponse.exists()) {
            System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
            break;
          }
        }
      }
    }
    System.out.println("done.");

    client.close();
    node1.close();
    node2.close();
  }

Code Example #11
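TermsAggregationSearchAndIndexingBenchmark compares terms aggregations on an indexed field versus a doc-values field under different ordinal-loading strategies: for each TestRun it rewrites the mapping, clears the fielddata cache, runs a refresh thread and a search thread side by side for two minutes, then tabulates average refresh time, average query time, and fielddata memory in a summary table.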
public class TermsAggregationSearchAndIndexingBenchmark {

  static String indexName = "test";
  static String typeName = "type1";
  static Random random = new Random();

  static long COUNT = SizeValue.parseSizeValue("2m").singles();
  static int BATCH = 1000;
  static int NUMBER_OF_TERMS = (int) SizeValue.parseSizeValue("100k").singles();
  static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
  static int STRING_TERM_SIZE = 5;

  static InternalNode[] nodes;

  public static void main(String[] args) throws Exception {
    Natives.tryMlockall();
    Settings settings =
        settingsBuilder()
            .put("refresh_interval", "-1")
            .put(SETTING_NUMBER_OF_SHARDS, 1)
            .put(SETTING_NUMBER_OF_REPLICAS, 0)
            .build();

    String clusterName = TermsAggregationSearchAndIndexingBenchmark.class.getSimpleName();
    nodes = new InternalNode[1];
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] =
          (InternalNode)
              nodeBuilder()
                  .settings(settingsBuilder().put(settings).put("name", "node1"))
                  .clusterName(clusterName)
                  .node();
    }
    Client client = nodes[0].client();

    client
        .admin()
        .cluster()
        .prepareHealth(indexName)
        .setWaitForGreenStatus()
        .setTimeout("10s")
        .execute()
        .actionGet();
    try {
      client
          .admin()
          .indices()
          .prepareCreate(indexName)
          .addMapping(typeName, generateMapping("eager", "lazy"))
          .get();
      Thread.sleep(5000);

      long startTime = System.currentTimeMillis();
      ObjectOpenHashSet<String> uniqueTerms = ObjectOpenHashSet.newInstance();
      for (int i = 0; i < NUMBER_OF_TERMS; i++) {
        boolean added;
        do {
          added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
        } while (!added);
      }
      String[] sValues = uniqueTerms.toArray(String.class);
      long ITERS = COUNT / BATCH;
      long i = 1;
      int counter = 0;
      for (; i <= ITERS; i++) {
        BulkRequestBuilder request = client.prepareBulk();
        for (int j = 0; j < BATCH; j++) {
          counter++;

          XContentBuilder builder = jsonBuilder().startObject();
          builder.field("id", Integer.toString(counter));
          final String sValue = sValues[counter % sValues.length];
          builder.field("s_value", sValue);
          builder.field("s_value_dv", sValue);

          for (String field : new String[] {"sm_value", "sm_value_dv"}) {
            builder.startArray(field);
            for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
              builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
            }
            builder.endArray();
          }

          request.add(
              Requests.indexRequest(indexName)
                  .type("type1")
                  .id(Integer.toString(counter))
                  .source(builder));
        }
        BulkResponse response = request.execute().actionGet();
        if (response.hasFailures()) {
          System.err.println("--> failures...");
        }
        if (((i * BATCH) % 10000) == 0) {
          System.out.println("--> Indexed " + (i * BATCH));
        }
      }

      System.out.println(
          "--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
    } catch (IndexAlreadyExistsException e) {
      System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
      ClusterHealthResponse clusterHealthResponse =
          client
              .admin()
              .cluster()
              .prepareHealth(indexName)
              .setWaitForGreenStatus()
              .setTimeout("10m")
              .execute()
              .actionGet();
      if (clusterHealthResponse.isTimedOut()) {
        System.err.println("--> Timed out waiting for cluster health");
      }
    }
    client
        .admin()
        .indices()
        .preparePutMapping(indexName)
        .setType(typeName)
        .setSource(generateMapping("lazy", "lazy"))
        .get();
    client.admin().indices().prepareRefresh().execute().actionGet();
    System.out.println(
        "--> Number of docs in index: "
            + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());

    String[] nodeIds = new String[nodes.length];
    for (int i = 0; i < nodeIds.length; i++) {
      nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
    }

    List<TestRun> testRuns = new ArrayList<>();
    testRuns.add(new TestRun("Regular field ordinals", "eager", "lazy", "s_value", "ordinals"));
    testRuns.add(
        new TestRun("Docvalues field ordinals", "lazy", "eager", "s_value_dv", "ordinals"));
    testRuns.add(
        new TestRun(
            "Regular field global ordinals", "eager_global_ordinals", "lazy", "s_value", null));
    testRuns.add(
        new TestRun("Docvalues field global", "lazy", "eager_global_ordinals", "s_value_dv", null));

    List<TestResult> testResults = new ArrayList<>();
    for (TestRun testRun : testRuns) {
      client
          .admin()
          .indices()
          .preparePutMapping(indexName)
          .setType(typeName)
          .setSource(
              generateMapping(testRun.indexedFieldEagerLoading, testRun.docValuesEagerLoading))
          .get();
      client.admin().indices().prepareClearCache(indexName).setFieldDataCache(true).get();
      SearchThread searchThread =
          new SearchThread(client, testRun.termsAggsField, testRun.termsAggsExecutionHint);
      RefreshThread refreshThread = new RefreshThread(client);
      System.out.println("--> Running '" + testRun.name + "' round...");
      new Thread(refreshThread).start();
      new Thread(searchThread).start();
      Thread.sleep(2 * 60 * 1000);
      refreshThread.stop();
      searchThread.stop();

      System.out.println("--> Avg refresh time: " + refreshThread.avgRefreshTime + " ms");
      System.out.println("--> Avg query time: " + searchThread.avgQueryTime + " ms");

      ClusterStatsResponse clusterStateResponse =
          client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
      System.out.println(
          "--> Heap used: " + clusterStateResponse.getNodesStats().getJvm().getHeapUsed());
      ByteSizeValue fieldDataMemoryUsed =
          clusterStateResponse.getIndicesStats().getFieldData().getMemorySize();
      System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
      testResults.add(
          new TestResult(
              testRun.name,
              refreshThread.avgRefreshTime,
              searchThread.avgQueryTime,
              fieldDataMemoryUsed));
    }

    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");
    System.out.format(
        Locale.ENGLISH,
        "%30s%18s%15s%15s\n",
        "name",
        "avg refresh time",
        "avg query time",
        "fieldata size");
    for (TestResult testResult : testResults) {
      System.out.format(
          Locale.ENGLISH,
          "%30s%18s%15s%15s\n",
          testResult.name,
          testResult.avgRefreshTime,
          testResult.avgQueryTime,
          testResult.fieldDataSizeInMemory);
    }
    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");

    client.close();
    for (InternalNode node : nodes) {
      node.close();
    }
  }

  static class RefreshThread implements Runnable {

    private final Client client;
    private volatile boolean run = true;
    private volatile boolean stopped = false;
    private volatile long avgRefreshTime = 0;

    RefreshThread(Client client) throws IOException {
      this.client = client;
    }

    @Override
    public void run() {
      long totalRefreshTime = 0;
      int numExecutedRefreshed = 0;
      while (run) {
        long docIdLimit = COUNT;
        for (long docId = 1; run && docId < docIdLimit; ) {
          try {
            for (int j = 0; j < 8; j++) {
              GetResponse getResponse =
                  client.prepareGet(indexName, "type1", String.valueOf(++docId)).get();
              client
                  .prepareIndex(indexName, "type1", getResponse.getId())
                  .setSource(getResponse.getSource())
                  .get();
            }
            long startTime = System.currentTimeMillis();
            client.admin().indices().prepareRefresh(indexName).execute().actionGet();
            totalRefreshTime += System.currentTimeMillis() - startTime;
            numExecutedRefreshed++;
            Thread.sleep(500);
          } catch (Throwable e) {
            e.printStackTrace();
          }
        }
      }
      avgRefreshTime = totalRefreshTime / numExecutedRefreshed;
      stopped = true;
    }

    public void stop() throws InterruptedException {
      run = false;
      while (!stopped) {
        Thread.sleep(100);
      }
    }
  }

  private static class TestRun {

    final String name;
    final String indexedFieldEagerLoading;
    final String docValuesEagerLoading;
    final String termsAggsField;
    final String termsAggsExecutionHint;

    private TestRun(
        String name,
        String indexedFieldEagerLoading,
        String docValuesEagerLoading,
        String termsAggsField,
        String termsAggsExecutionHint) {
      this.name = name;
      this.indexedFieldEagerLoading = indexedFieldEagerLoading;
      this.docValuesEagerLoading = docValuesEagerLoading;
      this.termsAggsField = termsAggsField;
      this.termsAggsExecutionHint = termsAggsExecutionHint;
    }
  }

  private static class TestResult {

    final String name;
    final TimeValue avgRefreshTime;
    final TimeValue avgQueryTime;
    final ByteSizeValue fieldDataSizeInMemory;

    private TestResult(
        String name, long avgRefreshTime, long avgQueryTime, ByteSizeValue fieldDataSizeInMemory) {
      this.name = name;
      this.avgRefreshTime = TimeValue.timeValueMillis(avgRefreshTime);
      this.avgQueryTime = TimeValue.timeValueMillis(avgQueryTime);
      this.fieldDataSizeInMemory = fieldDataSizeInMemory;
    }
  }

  static class SearchThread implements Runnable {

    private final Client client;
    private final String field;
    private final String executionHint;
    private volatile boolean run = true;
    private volatile boolean stopped = false;
    private volatile long avgQueryTime = 0;

    SearchThread(Client client, String field, String executionHint) {
      this.client = client;
      this.field = field;
      this.executionHint = executionHint;
    }

    @Override
    public void run() {
      long totalQueryTime = 0;
      int numExecutedQueries = 0;
      while (run) {
        try {
          SearchResponse searchResponse =
              Method.AGGREGATION
                  .addTermsAgg(
                      client
                          .prepareSearch()
                          .setSearchType(SearchType.COUNT)
                          .setQuery(matchAllQuery()),
                      "test",
                      field,
                      executionHint)
                  .execute()
                  .actionGet();
          if (searchResponse.getHits().totalHits() != COUNT) {
            System.err.println("--> mismatch on hits");
          }
          totalQueryTime += searchResponse.getTookInMillis();
          numExecutedQueries++;
        } catch (Throwable e) {
          e.printStackTrace();
        }
      }
      avgQueryTime = totalQueryTime / numExecutedQueries;
      stopped = true;
    }

    public void stop() throws InterruptedException {
      run = false;
      while (!stopped) {
        Thread.sleep(100);
      }
    }
  }

  private static XContentBuilder generateMapping(String loading1, String loading2)
      throws IOException {
    return jsonBuilder()
        .startObject()
        .startObject("type1")
        .startObject("properties")
        .startObject("s_value")
        .field("type", "string")
        .field("index", "not_analyzed")
        .startObject("fielddata")
        .field("loading", loading1)
        .endObject()
        .endObject()
        .startObject("s_value_dv")
        .field("type", "string")
        .field("index", "no")
        .startObject("fielddata")
        .field("loading", loading2)
        .field("format", "doc_values")
        .endObject()
        .endObject()
        .endObject()
        .endObject()
        .endObject();
  }
}