@Test
  public void delete_active_rules_from_profile() throws Exception {
    esSetup
        .client()
        .prepareBulk()
        // On profile 1
        .add(
            Requests.indexRequest()
                .index("rules")
                .type("active_rule")
                .parent("1")
                .source(testFileAsString("delete_from_profile/active_rule25.json")))
        .add(
            Requests.indexRequest()
                .index("rules")
                .type("active_rule")
                .parent("3")
                .source(testFileAsString("delete_from_profile/active_rule2702.json")))
        // On profile 2
        .add(
            Requests.indexRequest()
                .index("rules")
                .type("active_rule")
                .parent("2")
                .source(testFileAsString("delete_from_profile/active_rule523.json")))
        .setRefresh(true)
        .execute()
        .actionGet();

    esActiveRule.deleteActiveRulesFromProfile(1);
    assertThat(esSetup.exists("rules", "active_rule", "25")).isFalse();
    assertThat(esSetup.exists("rules", "active_rule", "2702")).isFalse();
    assertThat(esSetup.exists("rules", "active_rule", "523")).isTrue();
  }
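
All of the snippets on this page build on the same fluent builder returned by
Requests.indexRequest(). A minimal sketch of the core pattern, assuming an
Elasticsearch 1.x Client named client and a JSON source string json (the index,
type, and id values are illustrative, not taken from the snippets):

  IndexResponse response =
      client
          .index(Requests.indexRequest("myindex").type("mytype").id("1").source(json))
          .actionGet();
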
 @Override
 public void handleRequest(final InteractiveRequest request, final InteractiveChannel channel) {
   String index = request.paramAsString("index");
   String type = request.paramAsString("type");
   String id = request.paramAsString("id");
   try {
     if (index == null) {
       channel.sendResponse(TYPE, new IllegalArgumentException("index is null"));
       return;
     }
     if (type == null) {
       channel.sendResponse(TYPE, new IllegalArgumentException("type is null"));
       return;
     }
     if (id == null) {
       channel.sendResponse(TYPE, new IllegalArgumentException("id is null"));
       return;
     }
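      // all required parameters are present; build the index request from the "data" payload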
     IndexRequest indexRequest =
         Requests.indexRequest(index)
             .type(type)
             .id(id)
             .source((Map<String, Object>) request.asMap().get("data"));
     add(indexRequest, channel);
   } catch (IOException ex) {
     try {
       channel.sendResponse(TYPE, ex);
     } catch (IOException ex1) {
       logger.error("error while sending exception");
     }
   }
 }
    @Override
    public void run() {
      while (!closed) {
        try {
          for (int i = 0; i < simpleNumber; i++) {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();

            builder.field(fieldName, i);
            builder.endObject();
            currentRequest.add(
                Requests.indexRequest(indexName)
                    .type(typeName)
                    .id(UUID.randomUUID().toString())
                    .create(true)
                    .source(builder));
            processBulkIfNeeded();
          }
          if (currentRequest.numberOfActions() > 0) {
            currentRequest.execute().get();
            currentRequest = client.prepareBulk();
          }
          delay();
        } catch (Exception e) {
          logger.error(e.getMessage(), e);
          closed = true;
        }
        if (closed) {
          return;
        }
      }
    }
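
The processBulkIfNeeded() helper called above is not part of this snippet; a plausible
sketch, assuming a bulkSize threshold field (both the helper body and the field name are
assumptions):

  private void processBulkIfNeeded() throws Exception {
    // flush once the pending bulk request grows past the assumed threshold
    if (currentRequest.numberOfActions() >= bulkSize) {
      currentRequest.execute().actionGet();
      currentRequest = client.prepareBulk();
    }
  }
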
 @Override
 public void run() {
   try {
     barrier1.await();
     barrier2.await();
     for (; counter < max; counter++) {
       Client client = client(counter);
       long id = idGenerator.incrementAndGet();
       client
           .index(
               Requests.indexRequest()
                   .index("test")
                   .type("type1")
                   .id(Long.toString(id))
                   .source(
                       XContentFactory.jsonBuilder()
                           .startObject()
                           .field("num", id % fieldNumLimit)
                           .endObject()))
           .actionGet();
     }
     System.out.println("Indexer [" + id + "]: Done");
   } catch (Exception e) {
     System.err.println("Failed to index:");
     e.printStackTrace();
   } finally {
     latch.countDown();
   }
 }
    @Override
    public void run() {
      while (!closed) {
        try {
          for (int i = 0; i < simpleNumber; i++) {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();

            builder.field(fieldName, i);
            builder.endObject();
            bulkProcessor.add(
                Requests.indexRequest(indexName)
                    .type(typeName)
                    .id(UUID.randomUUID().toString())
                    .create(true)
                    .source(builder));
          }
          // in this case we force the bulk flush, but it should seldom be needed
          bulkProcessor.flush();
          delay();
        } catch (Exception e) {
          logger.error(e.getMessage(), e);
          closed = true;
        }
        if (closed) {
          return;
        }
      }
    }
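
Unlike the previous runnable, which flushes its own BulkRequestBuilder, this variant
hands batching off to a BulkProcessor. A minimal construction sketch against the ES 1.x
API (the no-op listener and the thresholds are illustrative choices):

  BulkProcessor bulkProcessor =
      BulkProcessor.builder(
              client,
              new BulkProcessor.Listener() {
                @Override
                public void beforeBulk(long executionId, BulkRequest request) {}

                @Override
                public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}

                @Override
                public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
              })
          .setBulkActions(1000)
          .setConcurrentRequests(1)
          .build();
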
  private IndexRequest prepareInsert(
      DocTableInfo tableInfo, ShardUpsertRequest request, ShardUpsertRequest.Item item)
      throws IOException {
    List<GeneratedReferenceInfo> generatedReferencesWithValue = new ArrayList<>();
    BytesReference source;
    if (request.isRawSourceInsert()) {
      assert item.insertValues().length > 0 : "empty insert values array";
      source = new BytesArray((BytesRef) item.insertValues()[0]);
    } else {
      XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
      for (int i = 0; i < item.insertValues().length; i++) {
        Reference ref = request.insertColumns()[i];
        if (ref.info().granularity() == RowGranularity.DOC) {
          // don't include values for partitions in the _source
          // ideally columns with partition granularity shouldn't be part of the request
          builder.field(ref.ident().columnIdent().fqn(), item.insertValues()[i]);
          if (ref.info() instanceof GeneratedReferenceInfo) {
            generatedReferencesWithValue.add((GeneratedReferenceInfo) ref.info());
          }
        }
      }
      source = builder.bytes();
    }

    int generatedColumnSize = 0;
    for (GeneratedReferenceInfo generatedReferenceInfo : tableInfo.generatedColumns()) {
      if (!tableInfo.partitionedByColumns().contains(generatedReferenceInfo)) {
        generatedColumnSize++;
      }
    }

    int numMissingGeneratedColumns = generatedColumnSize - generatedReferencesWithValue.size();
    if (numMissingGeneratedColumns > 0
        || (generatedReferencesWithValue.size() > 0 && request.validateGeneratedColumns())) {
      // we need to evaluate some generated column expressions
      Map<String, Object> sourceMap =
          processGeneratedColumnsOnInsert(
              tableInfo,
              request.insertColumns(),
              item.insertValues(),
              request.isRawSourceInsert(),
              request.validateGeneratedColumns());
      source = XContentFactory.jsonBuilder().map(sourceMap).bytes();
    }

    IndexRequest indexRequest =
        Requests.indexRequest(request.index())
            .type(request.type())
            .id(item.id())
            .routing(request.routing())
            .source(source)
            .create(!request.overwriteDuplicates())
            .operationThreaded(false);
    if (logger.isTraceEnabled()) {
      logger.trace(
          "Inserting document with id {}, source: {}", item.id(), indexRequest.source().toUtf8());
    }
    return indexRequest;
  }
 @Test
 public void should_delete_from_integer_ids() throws Exception {
   esSetup
       .client()
       .prepareBulk()
       .add(
           Requests.indexRequest()
               .index("rules")
               .type("active_rule")
               .parent("1")
               .source(testFileAsString("delete_from_profile/active_rule25.json")))
       .add(
           Requests.indexRequest()
               .index("rules")
               .type("active_rule")
               .parent("3")
               .source(testFileAsString("delete_from_profile/active_rule2702.json")))
       .setRefresh(true)
       .execute()
       .actionGet();
   esActiveRule.deleteActiveRules(ImmutableList.of(25, 2702));
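    // Assumed follow-up assertions, mirroring the first test above (not in the original snippet):
    assertThat(esSetup.exists("rules", "active_rule", "25")).isFalse();
    assertThat(esSetup.exists("rules", "active_rule", "2702")).isFalse();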
 }
  @Test
  public void testCopyHeadersRequest() {
    Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
    Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
    usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
    Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
    Map<String, String> transportContext =
        Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();

    Map<String, String> expectedHeaders = new HashMap<>();
    expectedHeaders.putAll(transportHeaders);
    expectedHeaders.putAll(copiedHeaders);

    Map<String, String> expectedContext = new HashMap<>();
    expectedContext.putAll(transportContext);
    expectedContext.putAll(restContext);

    Client client =
        client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders);

    SearchRequest searchRequest = Requests.searchRequest();
    putHeaders(searchRequest, transportHeaders);
    putContext(searchRequest, transportContext);
    assertHeaders(searchRequest, transportHeaders);
    client.search(searchRequest);
    assertHeaders(searchRequest, expectedHeaders);
    assertContext(searchRequest, expectedContext);

    GetRequest getRequest = Requests.getRequest("index");
    putHeaders(getRequest, transportHeaders);
    putContext(getRequest, transportContext);
    assertHeaders(getRequest, transportHeaders);
    client.get(getRequest);
    assertHeaders(getRequest, expectedHeaders);
    assertContext(getRequest, expectedContext);

    IndexRequest indexRequest = Requests.indexRequest();
    putHeaders(indexRequest, transportHeaders);
    putContext(indexRequest, transportContext);
    assertHeaders(indexRequest, transportHeaders);
    client.index(indexRequest);
    assertHeaders(indexRequest, expectedHeaders);
    assertContext(indexRequest, expectedContext);
  }
  /**
   * Updates the specified objects.
   *
   * @return the ids of the failed objects (e.g. due to versioning)
   */
  public Collection<Integer> bulkUpdate(
      Collection<T> objects, String indexName, boolean refresh, boolean enableVersioning) {
    // now using the bulk API instead of feeding each doc separately with feedDoc
    BulkRequestBuilder brb = client.prepareBulk();
    // this works differently than the direct call to refresh!? maybe refresh is not async?
    //        brb.setRefresh(refresh);
    for (T o : objects) {
      if (o.getId() == null) {
        logger.warn("Skipped object without id when bulkUpdate:" + o);
        continue;
      }

      try {
        XContentBuilder source = createDoc(o);
        IndexRequest indexReq =
            Requests.indexRequest(indexName).type(getIndexType()).id(o.getId()).source(source);

        if (enableVersioning) indexReq.version(o.getVersion());

        brb.add(indexReq);
      } catch (IOException ex) {
        logger.warn("Cannot add object:" + o + " to bulkIndexing action." + ex.getMessage());
      }
    }
    if (brb.numberOfActions() > 0) {
      BulkResponse rsp = brb.execute().actionGet();
      if (rsp.hasFailures()) {
        List<Integer> list = new ArrayList<Integer>(rsp.items().length);
        for (BulkItemResponse br : rsp.items()) {
          if (br.isFailed()) {
            //                        logger.info("Error:" + br.failureMessage());
            list.add(br.itemId());
          }
        }
        return list;
      }
      if (refresh) refresh(indexName);
    }

    return Collections.emptyList();
  }
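
A hypothetical call site (dao and entities are assumed names); with enableVersioning set,
failed items are typically version conflicts and can be retried:

  Collection<Integer> failedIds = dao.bulkUpdate(entities, "myindex", true, true);
  if (!failedIds.isEmpty()) {
    logger.warn("bulkUpdate reported " + failedIds.size() + " failed items");
  }
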
  @Test
  @Slow
  public void testSnapshotOperations() throws Exception {
    startNode("server1", getClassDefaultSettings());

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // Translog tests

    logger.info("Creating index [{}]", "test");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    // create a mapping
    PutMappingResponse putMappingResponse =
        client("server1")
            .admin()
            .indices()
            .preparePutMapping("test")
            .setType("type1")
            .setSource(mappingSource())
            .execute()
            .actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

    // verify that mapping is there
    ClusterStateResponse clusterState =
        client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    // create two and delete the first
    logger.info("Indexing #1");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    logger.info("Indexing #2");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Deleting #1");
    client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    // do it again, it should be a no op
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (only translog should be populated)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    // verify that mapping is there
    clusterState = client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    logger.info("Getting #1, should not exists");
    GetResponse getResponse =
        client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));

    // Now flush and add some data (so we have index recovery as well)
    logger.info(
        "Flushing, so we have actual content in the index files (#2 should be in the index)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Indexing #3, so we have something in the translog as well");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test")))
        .actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
    FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info(
        "Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Deleting the index");
    client("server1").admin().indices().delete(deleteIndexRequest("test")).actionGet();
  }
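
The source(...) helper used in the assertions above is not shown; since
getSourceAsString() is compared for exact string equality, it presumably builds the very
JSON that gets indexed, along these lines (a reconstruction, not the original helper):

  private String source(String id, String nameValue) {
    return "{ \"type1\" : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
  }
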
  public static void main(String[] args) throws Exception {
    Natives.tryMlockall();
    Settings settings =
        settingsBuilder()
            .put("refresh_interval", "-1")
            .put(SETTING_NUMBER_OF_SHARDS, 1)
            .put(SETTING_NUMBER_OF_REPLICAS, 0)
            .build();

    String clusterName = TermsAggregationSearchAndIndexingBenchmark.class.getSimpleName();
    nodes = new InternalNode[1];
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] =
          (InternalNode)
              nodeBuilder()
                  .settings(settingsBuilder().put(settings).put("name", "node1"))
                  .clusterName(clusterName)
                  .node();
    }
    Client client = nodes[0].client();

    client
        .admin()
        .cluster()
        .prepareHealth(indexName)
        .setWaitForGreenStatus()
        .setTimeout("10s")
        .execute()
        .actionGet();
    try {
      client
          .admin()
          .indices()
          .prepareCreate(indexName)
          .addMapping(typeName, generateMapping("eager", "lazy"))
          .get();
      Thread.sleep(5000);

      long startTime = System.currentTimeMillis();
      ObjectOpenHashSet<String> uniqueTerms = ObjectOpenHashSet.newInstance();
      for (int i = 0; i < NUMBER_OF_TERMS; i++) {
        boolean added;
        do {
          added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
        } while (!added);
      }
      String[] sValues = uniqueTerms.toArray(String.class);
      long ITERS = COUNT / BATCH;
      long i = 1;
      int counter = 0;
      for (; i <= ITERS; i++) {
        BulkRequestBuilder request = client.prepareBulk();
        for (int j = 0; j < BATCH; j++) {
          counter++;

          XContentBuilder builder = jsonBuilder().startObject();
          builder.field("id", Integer.toString(counter));
          final String sValue = sValues[counter % sValues.length];
          builder.field("s_value", sValue);
          builder.field("s_value_dv", sValue);

          for (String field : new String[] {"sm_value", "sm_value_dv"}) {
            builder.startArray(field);
            for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
              builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
            }
            builder.endArray();
          }

          request.add(
              Requests.indexRequest(indexName)
                  .type("type1")
                  .id(Integer.toString(counter))
                  .source(builder));
        }
        BulkResponse response = request.execute().actionGet();
        if (response.hasFailures()) {
          System.err.println("--> failures...");
        }
        if (((i * BATCH) % 10000) == 0) {
          System.out.println("--> Indexed " + (i * BATCH));
        }
      }

      System.out.println(
          "--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
    } catch (IndexAlreadyExistsException e) {
      System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
      ClusterHealthResponse clusterHealthResponse =
          client
              .admin()
              .cluster()
              .prepareHealth(indexName)
              .setWaitForGreenStatus()
              .setTimeout("10m")
              .execute()
              .actionGet();
      if (clusterHealthResponse.isTimedOut()) {
        System.err.println("--> Timed out waiting for cluster health");
      }
    }
    client
        .admin()
        .indices()
        .preparePutMapping(indexName)
        .setType(typeName)
        .setSource(generateMapping("lazy", "lazy"))
        .get();
    client.admin().indices().prepareRefresh().execute().actionGet();
    System.out.println(
        "--> Number of docs in index: "
            + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());

    String[] nodeIds = new String[nodes.length];
    for (int i = 0; i < nodeIds.length; i++) {
      nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
    }

    List<TestRun> testRuns = new ArrayList<>();
    testRuns.add(new TestRun("Regular field ordinals", "eager", "lazy", "s_value", "ordinals"));
    testRuns.add(
        new TestRun("Docvalues field ordinals", "lazy", "eager", "s_value_dv", "ordinals"));
    testRuns.add(
        new TestRun(
            "Regular field global ordinals", "eager_global_ordinals", "lazy", "s_value", null));
    testRuns.add(
        new TestRun("Docvalues field global", "lazy", "eager_global_ordinals", "s_value_dv", null));

    List<TestResult> testResults = new ArrayList<>();
    for (TestRun testRun : testRuns) {
      client
          .admin()
          .indices()
          .preparePutMapping(indexName)
          .setType(typeName)
          .setSource(
              generateMapping(testRun.indexedFieldEagerLoading, testRun.docValuesEagerLoading))
          .get();
      client.admin().indices().prepareClearCache(indexName).setFieldDataCache(true).get();
      SearchThread searchThread =
          new SearchThread(client, testRun.termsAggsField, testRun.termsAggsExecutionHint);
      RefreshThread refreshThread = new RefreshThread(client);
      System.out.println("--> Running '" + testRun.name + "' round...");
      new Thread(refreshThread).start();
      new Thread(searchThread).start();
      Thread.sleep(2 * 60 * 1000);
      refreshThread.stop();
      searchThread.stop();

      System.out.println("--> Avg refresh time: " + refreshThread.avgRefreshTime + " ms");
      System.out.println("--> Avg query time: " + searchThread.avgQueryTime + " ms");

      ClusterStatsResponse clusterStatsResponse =
          client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
      System.out.println(
          "--> Heap used: " + clusterStatsResponse.getNodesStats().getJvm().getHeapUsed());
      ByteSizeValue fieldDataMemoryUsed =
          clusterStatsResponse.getIndicesStats().getFieldData().getMemorySize();
      System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
      testResults.add(
          new TestResult(
              testRun.name,
              refreshThread.avgRefreshTime,
              searchThread.avgQueryTime,
              fieldDataMemoryUsed));
    }

    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");
    System.out.format(
        Locale.ENGLISH,
        "%30s%18s%15s%15s\n",
        "name",
        "avg refresh time",
        "avg query time",
        "fieldata size");
    for (TestResult testResult : testResults) {
      System.out.format(
          Locale.ENGLISH,
          "%30s%18s%15s%15s\n",
          testResult.name,
          testResult.avgRefreshTime,
          testResult.avgQueryTime,
          testResult.fieldDataSizeInMemory);
    }
    System.out.println(
        "----------------------------------------- SUMMARY ----------------------------------------------");

    client.close();
    for (InternalNode node : nodes) {
      node.close();
    }
  }
  /**
   * Prepares an update request by converting it into an index or delete request or an update
   * response (no action).
   */
  @SuppressWarnings("unchecked")
  protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult getResult) {
    long getDateNS = System.nanoTime();
    if (!getResult.isExists()) {
      if (request.upsertRequest() == null && !request.docAsUpsert()) {
        throw new DocumentMissingException(shardId, request.type(), request.id());
      }
      IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
      TimeValue ttl = indexRequest.ttl();
      if (request.scriptedUpsert() && request.script() != null) {
        // Run the script to perform the create logic
        IndexRequest upsert = request.upsertRequest();
        Map<String, Object> upsertDoc = upsert.sourceAsMap();
        Map<String, Object> ctx = new HashMap<>(2);
        // Tell the script that this is a create and not an update
        ctx.put("op", "create");
        ctx.put("_source", upsertDoc);
        ctx = executeScript(request.script, ctx);
        // Allow the script to set TTL using ctx._ttl
        if (ttl == null) {
          ttl = getTTLFromScriptContext(ctx);
        }

        // Allow the script to abort the create by setting "op" to "none"
        String scriptOpChoice = (String) ctx.get("op");

        // Only valid options for an upsert script are "create"
        // (the default) or "none", meaning abort upsert
        if (!"create".equals(scriptOpChoice)) {
          if (!"none".equals(scriptOpChoice)) {
            logger.warn(
                "Used upsert operation [{}] for script [{}], doing nothing...",
                scriptOpChoice,
                request.script.getScript());
          }
          UpdateResponse update =
              new UpdateResponse(
                  shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);
          update.setGetResult(getResult);
          return new Result(update, Operation.NONE, upsertDoc, XContentType.JSON);
        }
        indexRequest.source((Map) ctx.get("_source"));
      }

      indexRequest
          .index(request.index())
          .type(request.type())
          .id(request.id())
          // it has to be a "create"!
          .create(true)
          .ttl(ttl)
          .refresh(request.refresh())
          .routing(request.routing())
          .parent(request.parent())
          .consistencyLevel(request.consistencyLevel());
      if (request.versionType() != VersionType.INTERNAL) {
        // in all but the internal versioning mode, we want to create the new document using the
        // given version.
        indexRequest.version(request.version()).versionType(request.versionType());
      }
      return new Result(indexRequest, Operation.UPSERT, null, null);
    }

    long updateVersion = getResult.getVersion();

    if (request.versionType() != VersionType.INTERNAL) {
      assert request.versionType() == VersionType.FORCE;
      updateVersion = request.version(); // remember, match_any is excluded by the conflict test
    }

    if (getResult.internalSourceRef() == null) {
      // no source, we can't do anything, throw a failure...
      throw new DocumentSourceMissingException(shardId, request.type(), request.id());
    }

    Tuple<XContentType, Map<String, Object>> sourceAndContent =
        XContentHelper.convertToMap(getResult.internalSourceRef(), true);
    String operation = null;
    String timestamp = null;
    TimeValue ttl = null;
    final Map<String, Object> updatedSourceAsMap;
    final XContentType updateSourceContentType = sourceAndContent.v1();
    String routing =
        getResult.getFields().containsKey(RoutingFieldMapper.NAME)
            ? getResult.field(RoutingFieldMapper.NAME).getValue().toString()
            : null;
    String parent =
        getResult.getFields().containsKey(ParentFieldMapper.NAME)
            ? getResult.field(ParentFieldMapper.NAME).getValue().toString()
            : null;

    if (request.script() == null && request.doc() != null) {
      IndexRequest indexRequest = request.doc();
      updatedSourceAsMap = sourceAndContent.v2();
      if (indexRequest.ttl() != null) {
        ttl = indexRequest.ttl();
      }
      timestamp = indexRequest.timestamp();
      if (indexRequest.routing() != null) {
        routing = indexRequest.routing();
      }
      if (indexRequest.parent() != null) {
        parent = indexRequest.parent();
      }
      boolean noop =
          !XContentHelper.update(
              updatedSourceAsMap, indexRequest.sourceAsMap(), request.detectNoop());
      // noop could still be true even if detectNoop isn't, because update detects empty maps as
      // noops. BUT we can only actually turn the update into a noop if detectNoop is true, to
      // preserve backwards compatibility and to handle cases where users are repopulating
      // multi-fields or adding synonyms, etc.
      if (request.detectNoop() && noop) {
        operation = "none";
      }
    } else {
      Map<String, Object> ctx = new HashMap<>(16);
      Long originalTtl =
          getResult.getFields().containsKey(TTLFieldMapper.NAME)
              ? (Long) getResult.field(TTLFieldMapper.NAME).getValue()
              : null;
      Long originalTimestamp =
          getResult.getFields().containsKey(TimestampFieldMapper.NAME)
              ? (Long) getResult.field(TimestampFieldMapper.NAME).getValue()
              : null;
      ctx.put("_index", getResult.getIndex());
      ctx.put("_type", getResult.getType());
      ctx.put("_id", getResult.getId());
      ctx.put("_version", getResult.getVersion());
      ctx.put("_routing", routing);
      ctx.put("_parent", parent);
      ctx.put("_timestamp", originalTimestamp);
      ctx.put("_ttl", originalTtl);
      ctx.put("_source", sourceAndContent.v2());

      ctx = executeScript(request.script, ctx);

      operation = (String) ctx.get("op");

      Object fetchedTimestamp = ctx.get("_timestamp");
      if (fetchedTimestamp != null) {
        timestamp = fetchedTimestamp.toString();
      } else if (originalTimestamp != null) {
        // No timestamp has been given in the update script, so we keep the previous timestamp if
        // there is one
        timestamp = originalTimestamp.toString();
      }

      ttl = getTTLFromScriptContext(ctx);

      updatedSourceAsMap = (Map<String, Object>) ctx.get("_source");
    }

    // apply script to update the source
    // No TTL has been given in the update script so we keep previous TTL value if there is one
    if (ttl == null) {
      Long ttlAsLong =
          getResult.getFields().containsKey(TTLFieldMapper.NAME)
              ? (Long) getResult.field(TTLFieldMapper.NAME).getValue()
              : null;
      if (ttlAsLong != null) {
        ttl =
            new TimeValue(
                ttlAsLong
                    - TimeValue.nsecToMSec(
                        System.nanoTime()
                            - getDateNS)); // It is an approximation of exact TTL value, could be
                                           // improved
      }
    }

    if (operation == null || "index".equals(operation)) {
      final IndexRequest indexRequest =
          Requests.indexRequest(request.index())
              .type(request.type())
              .id(request.id())
              .routing(routing)
              .parent(parent)
              .source(updatedSourceAsMap, updateSourceContentType)
              .version(updateVersion)
              .versionType(request.versionType())
              .consistencyLevel(request.consistencyLevel())
              .timestamp(timestamp)
              .ttl(ttl)
              .refresh(request.refresh());
      return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType);
    } else if ("delete".equals(operation)) {
      DeleteRequest deleteRequest =
          Requests.deleteRequest(request.index())
              .type(request.type())
              .id(request.id())
              .routing(routing)
              .parent(parent)
              .version(updateVersion)
              .versionType(request.versionType())
              .consistencyLevel(request.consistencyLevel());
      return new Result(
          deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType);
    } else if ("none".equals(operation)) {
      UpdateResponse update =
          new UpdateResponse(
              shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);
      update.setGetResult(
          extractGetResult(
              request,
              request.index(),
              getResult.getVersion(),
              updatedSourceAsMap,
              updateSourceContentType,
              getResult.internalSourceRef()));
      return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
    } else {
      logger.warn(
          "Used update operation [{}] for script [{}], doing nothing...",
          operation,
          request.script.getScript());
      UpdateResponse update =
          new UpdateResponse(
              shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);
      return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
    }
  }
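
The ctx map above is the contract an update script works against: _source is mutable in
place, and setting ctx.op steers the outcome ("index" by default, "delete", or "none";
anything else is logged and treated as a no-op). For example, a script along the lines of
"if (ctx._source.counter > 10) { ctx.op = 'delete' }" turns the update into a delete once
a threshold is crossed (an illustrative script, not taken from the source).
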
  protected void shardOperation(
      final UpdateRequest request,
      final ActionListener<UpdateResponse> listener,
      final int retryCount)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());

    long getDate = System.currentTimeMillis();
    final GetResult getResult =
        indexShard
            .getService()
            .get(
                request.type(),
                request.id(),
                new String[] {
                  SourceFieldMapper.NAME,
                  RoutingFieldMapper.NAME,
                  ParentFieldMapper.NAME,
                  TTLFieldMapper.NAME
                },
                true);

    // no doc, what to do, what to do...
    if (!getResult.isExists()) {
      if (request.upsertRequest() == null) {
        listener.onFailure(
            new DocumentMissingException(
                new ShardId(request.index(), request.shardId()), request.type(), request.id()));
        return;
      }
      final IndexRequest indexRequest = request.upsertRequest();
      indexRequest
          .index(request.index())
          .type(request.type())
          .id(request.id())
          // it has to be a "create"!
          .create(true)
          .routing(request.routing())
          .percolate(request.percolate())
          .refresh(request.refresh())
          .replicationType(request.replicationType())
          .consistencyLevel(request.consistencyLevel());
      indexRequest.operationThreaded(false);
      // we fetch it from the index request so we don't generate the bytes twice, it's already
      // done in the index request
      final BytesReference updateSourceBytes = indexRequest.source();
      indexAction.execute(
          indexRequest,
          new ActionListener<IndexResponse>() {
            @Override
            public void onResponse(IndexResponse response) {
              UpdateResponse update =
                  new UpdateResponse(
                      response.getIndex(),
                      response.getType(),
                      response.getId(),
                      response.getVersion());
              update.setMatches(response.getMatches());
              if (request.fields() != null && request.fields().length > 0) {
                Tuple<XContentType, Map<String, Object>> sourceAndContent =
                    XContentHelper.convertToMap(updateSourceBytes, true);
                update.setGetResult(
                    extractGetResult(
                        request,
                        response.getVersion(),
                        sourceAndContent.v2(),
                        sourceAndContent.v1(),
                        updateSourceBytes));
              } else {
                update.setGetResult(null);
              }
              listener.onResponse(update);
            }

            @Override
            public void onFailure(Throwable e) {
              e = ExceptionsHelper.unwrapCause(e);
              if (e instanceof VersionConflictEngineException
                  || e instanceof DocumentAlreadyExistsException) {
                if (retryCount < request.retryOnConflict()) {
                  threadPool
                      .executor(executor())
                      .execute(
                          new Runnable() {
                            @Override
                            public void run() {
                              shardOperation(request, listener, retryCount + 1);
                            }
                          });
                  return;
                }
              }
              listener.onFailure(e);
            }
          });
      return;
    }

    if (getResult.internalSourceRef() == null) {
      // no source, we can't do anything, throw a failure...
      listener.onFailure(
          new DocumentSourceMissingException(
              new ShardId(request.index(), request.shardId()), request.type(), request.id()));
      return;
    }

    Tuple<XContentType, Map<String, Object>> sourceAndContent =
        XContentHelper.convertToMap(getResult.internalSourceRef(), true);
    String operation = null;
    String timestamp = null;
    Long ttl = null;
    Object fetchedTTL = null;
    final Map<String, Object> updatedSourceAsMap;
    final XContentType updateSourceContentType = sourceAndContent.v1();
    String routing =
        getResult.getFields().containsKey(RoutingFieldMapper.NAME)
            ? getResult.field(RoutingFieldMapper.NAME).getValue().toString()
            : null;
    String parent =
        getResult.getFields().containsKey(ParentFieldMapper.NAME)
            ? getResult.field(ParentFieldMapper.NAME).getValue().toString()
            : null;

    if (request.script() == null && request.doc() != null) {
      IndexRequest indexRequest = request.doc();
      updatedSourceAsMap = sourceAndContent.v2();
      if (indexRequest.ttl() > 0) {
        ttl = indexRequest.ttl();
      }
      timestamp = indexRequest.timestamp();
      if (indexRequest.routing() != null) {
        routing = indexRequest.routing();
      }
      if (indexRequest.parent() != null) {
        parent = indexRequest.parent();
      }
      XContentHelper.update(updatedSourceAsMap, indexRequest.sourceAsMap());
    } else {
      Map<String, Object> ctx = new HashMap<String, Object>(2);
      ctx.put("_source", sourceAndContent.v2());

      try {
        ExecutableScript script =
            scriptService.executable(request.scriptLang, request.script, request.scriptParams);
        script.setNextVar("ctx", ctx);
        script.run();
        // we need to unwrap the ctx...
        ctx = (Map<String, Object>) script.unwrap(ctx);
      } catch (Exception e) {
        throw new ElasticSearchIllegalArgumentException("failed to execute script", e);
      }

      operation = (String) ctx.get("op");
      timestamp = (String) ctx.get("_timestamp");
      fetchedTTL = ctx.get("_ttl");
      if (fetchedTTL != null) {
        if (fetchedTTL instanceof Number) {
          ttl = ((Number) fetchedTTL).longValue();
        } else {
          ttl = TimeValue.parseTimeValue((String) fetchedTTL, null).millis();
        }
      }

      updatedSourceAsMap = (Map<String, Object>) ctx.get("_source");
    }

    // apply script to update the source
    // No TTL has been given in the update script so we keep previous TTL value if there is one
    if (ttl == null) {
      ttl =
          getResult.getFields().containsKey(TTLFieldMapper.NAME)
              ? (Long) getResult.field(TTLFieldMapper.NAME).getValue()
              : null;
      if (ttl != null) {
        ttl =
            ttl
                - (System.currentTimeMillis()
                    - getDate); // It is an approximation of exact TTL value, could be improved
      }
    }

    // TODO: external version type, does it make sense here? does not seem like it...

    if (operation == null || "index".equals(operation)) {
      final IndexRequest indexRequest =
          Requests.indexRequest(request.index())
              .type(request.type())
              .id(request.id())
              .routing(routing)
              .parent(parent)
              .source(updatedSourceAsMap, updateSourceContentType)
              .version(getResult.getVersion())
              .replicationType(request.replicationType())
              .consistencyLevel(request.consistencyLevel())
              .timestamp(timestamp)
              .ttl(ttl)
              .percolate(request.percolate())
              .refresh(request.refresh());
      indexRequest.operationThreaded(false);
      // we fetch it from the index request so we don't generate the bytes twice, it's already
      // done in the index request
      final BytesReference updateSourceBytes = indexRequest.source();
      indexAction.execute(
          indexRequest,
          new ActionListener<IndexResponse>() {
            @Override
            public void onResponse(IndexResponse response) {
              UpdateResponse update =
                  new UpdateResponse(
                      response.getIndex(),
                      response.getType(),
                      response.getId(),
                      response.getVersion());
              update.setMatches(response.getMatches());
              update.setGetResult(
                  extractGetResult(
                      request,
                      response.getVersion(),
                      updatedSourceAsMap,
                      updateSourceContentType,
                      updateSourceBytes));
              listener.onResponse(update);
            }

            @Override
            public void onFailure(Throwable e) {
              e = ExceptionsHelper.unwrapCause(e);
              if (e instanceof VersionConflictEngineException) {
                if (retryCount < request.retryOnConflict()) {
                  threadPool
                      .executor(executor())
                      .execute(
                          new Runnable() {
                            @Override
                            public void run() {
                              shardOperation(request, listener, retryCount + 1);
                            }
                          });
                  return;
                }
              }
              listener.onFailure(e);
            }
          });
    } else if ("delete".equals(operation)) {
      DeleteRequest deleteRequest =
          Requests.deleteRequest(request.index())
              .type(request.type())
              .id(request.id())
              .routing(routing)
              .parent(parent)
              .version(getResult.getVersion())
              .replicationType(request.replicationType())
              .consistencyLevel(request.consistencyLevel());
      deleteRequest.operationThreaded(false);
      deleteAction.execute(
          deleteRequest,
          new ActionListener<DeleteResponse>() {
            @Override
            public void onResponse(DeleteResponse response) {
              UpdateResponse update =
                  new UpdateResponse(
                      response.getIndex(),
                      response.getType(),
                      response.getId(),
                      response.getVersion());
              update.setGetResult(
                  extractGetResult(
                      request,
                      response.getVersion(),
                      updatedSourceAsMap,
                      updateSourceContentType,
                      null));
              listener.onResponse(update);
            }

            @Override
            public void onFailure(Throwable e) {
              e = ExceptionsHelper.unwrapCause(e);
              if (e instanceof VersionConflictEngineException) {
                if (retryCount < request.retryOnConflict()) {
                  threadPool
                      .executor(executor())
                      .execute(
                          new Runnable() {
                            @Override
                            public void run() {
                              shardOperation(request, listener, retryCount + 1);
                            }
                          });
                  return;
                }
              }
              listener.onFailure(e);
            }
          });
    } else if ("none".equals(operation)) {
      UpdateResponse update =
          new UpdateResponse(
              getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion());
      update.setGetResult(
          extractGetResult(
              request, getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, null));
      listener.onResponse(update);
    } else {
      logger.warn(
          "Used update operation [{}] for script [{}], doing nothing...",
          operation,
          request.script);
      listener.onResponse(
          new UpdateResponse(
              getResult.getIndex(),
              getResult.getType(),
              getResult.getId(),
              getResult.getVersion()));
    }
  }
  /**
   * Prepares an update request by converting it into an index request.
   *
   * <p>TODO: detect a NOOP and return an update response if true
   */
  @SuppressWarnings("unchecked")
  public IndexRequest prepareUpdate(
      DocTableInfo tableInfo,
      ShardUpsertRequest request,
      ShardUpsertRequest.Item item,
      ShardId shardId)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardId.id());
    final GetResult getResult =
        indexShard
            .getService()
            .get(
                request.type(),
                item.id(),
                new String[] {RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME},
                true,
                item.version(),
                VersionType.INTERNAL,
                FetchSourceContext.FETCH_SOURCE,
                false);

    if (!getResult.isExists()) {
      throw new DocumentMissingException(
          new ShardId(request.index(), request.shardId()), request.type(), item.id());
    }

    if (getResult.internalSourceRef() == null) {
      // no source, we can't do anything, throw a failure...
      throw new DocumentSourceMissingException(
          new ShardId(request.index(), request.shardId()), request.type(), item.id());
    }

    Tuple<XContentType, Map<String, Object>> sourceAndContent =
        XContentHelper.convertToMap(getResult.internalSourceRef(), true);
    final Map<String, Object> updatedSourceAsMap;
    final XContentType updateSourceContentType = sourceAndContent.v1();
    String routing =
        getResult.getFields().containsKey(RoutingFieldMapper.NAME)
            ? getResult.field(RoutingFieldMapper.NAME).getValue().toString()
            : null;
    String parent =
        getResult.getFields().containsKey(ParentFieldMapper.NAME)
            ? getResult.field(ParentFieldMapper.NAME).getValue().toString()
            : null;

    updatedSourceAsMap = sourceAndContent.v2();

    SymbolToFieldExtractorContext ctx =
        new SymbolToFieldExtractorContext(functions, item.insertValues());

    Map<String, Object> pathsToUpdate = new LinkedHashMap<>();
    Map<String, Object> updatedGeneratedColumns = new LinkedHashMap<>();
    for (int i = 0; i < request.updateColumns().length; i++) {
      /*
       * NOTE: mapping isn't applied. So if an insert was done using the ES REST endpoint, the
       * data might be returned in the wrong format (date as string instead of long).
       */
      String columnPath = request.updateColumns()[i];
      Object value =
          SYMBOL_TO_FIELD_EXTRACTOR.convert(item.updateAssignments()[i], ctx).extract(getResult);
      ReferenceInfo referenceInfo = tableInfo.getReferenceInfo(ColumnIdent.fromPath(columnPath));
      if (referenceInfo instanceof GeneratedReferenceInfo) {
        updatedGeneratedColumns.put(columnPath, value);

      } else {
        pathsToUpdate.put(columnPath, value);
      }
    }

    processGeneratedColumns(
        tableInfo,
        pathsToUpdate,
        updatedGeneratedColumns,
        request.validateGeneratedColumns(),
        getResult);

    updateSourceByPaths(updatedSourceAsMap, pathsToUpdate);

    final IndexRequest indexRequest =
        Requests.indexRequest(request.index())
            .type(request.type())
            .id(item.id())
            .routing(routing)
            .parent(parent)
            .source(updatedSourceAsMap, updateSourceContentType)
            .version(getResult.getVersion());
    indexRequest.operationThreaded(false);
    return indexRequest;
  }