@Test
  public void testRenameAttribute() throws Throwable {
    logger.debug("Start testRenameAttribute");
    try {
      logger.debug("Create river {}", getRiver());
      // Script renames "score" to "score2" on every document flowing through the river.
      String script = "ctx.document.score2 = ctx.document.score; delete ctx.document.score;";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      // Give the river time to pick up the insert from the oplog.
      Thread.sleep(wait);
      String id = dbObject.get("_id").toString();
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));

      SearchResponse sr =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(fieldQuery("_id", id))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", sr.toString());
      long totalHits = sr.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      // The indexed document must carry the renamed "score2" attribute.
      assertThat(sr.getHits().getHits()[0].sourceAsMap().containsKey("score2"), equalTo(true));
      mongoCollection.remove(dbObject);
    } catch (Throwable t) {
      // logger.error(msg, t) already records the full stack trace; the extra
      // printStackTrace() call was redundant stderr noise.
      logger.error("testRenameAttribute failed.", t);
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }
 @Override
 public T get() throws InterruptedException, ExecutionException {
   try {
     // Bound the unbounded get() with the configured timeout.
     return in.get(timeout, unit);
   } catch (TimeoutException e) {
     // Wrap the TimeoutException itself as the cause. The previous code used
     // e.getCause(), but a plain timeout has no cause, so callers received an
     // ExecutionException with a null cause and lost the timeout details.
     throw new ExecutionException(e);
   }
 }
  @Test
  public void testIgnoreScript() throws Throwable {
    logger.debug("Start testIgnoreScript");
    try {
      logger.debug("Create river {}", getRiver());
      // Setting ctx.ignore instructs the river to skip indexing the document entirely.
      String script = "ctx.ignore = true;";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      // Give the river time to pick up the insert from the oplog.
      Thread.sleep(wait);
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));
      // The document must NOT have been indexed because the script ignored it.
      CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
      logger.info("Document count: {}", countResponse.getCount());
      assertThat(countResponse.getCount(), equalTo(0L));

      mongoCollection.remove(dbObject);

    } catch (Throwable t) {
      // logger.error(msg, t) already records the full stack trace; the extra
      // printStackTrace() call was redundant stderr noise.
      logger.error("testIgnoreScript failed.", t);
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }
  @Test
  public void testListBenchmarks() throws Exception {
    // Submit a benchmark whose queries block on a latch so it can be observed mid-flight.
    SearchRequest searchRequest = prepareBlockingScriptQuery();
    final BenchmarkRequest request =
        BenchmarkTestUtil.randomRequest(
            client(), indices, numExecutorNodes, competitionSettingsMap, searchRequest);
    logger.info(
        "--> Submitting benchmark - competitors [{}] iterations [{}]",
        request.competitors().size(),
        request.settings().iterations());

    final ActionFuture<BenchmarkResponse> future = client().bench(request);
    try {
      // Wait until at least one benchmark query is executing before asking for status.
      waitForQuery.await();
      final BenchmarkStatusResponse statusResponse =
          client().prepareBenchStatus().execute().actionGet();
      waitForTestLatch.countDown();
      assertThat(statusResponse.benchmarkResponses().size(), equalTo(1));
      for (BenchmarkResponse benchmarkResponse : statusResponse.benchmarkResponses()) {
        assertThat(benchmarkResponse.benchmarkName(), equalTo(BENCHMARK_NAME));
        assertThat(benchmarkResponse.state(), equalTo(BenchmarkResponse.State.RUNNING));
        assertFalse(benchmarkResponse.hasErrors());

        for (CompetitionResult result : benchmarkResponse.competitionResults().values()) {
          assertThat(result.nodeResults().size(), lessThanOrEqualTo(numExecutorNodes));
          validateCompetitionResult(
              result, competitionSettingsMap.get(result.competitionName()), false);
        }
      }

    } finally {
      // Always release any queries still blocked on the latch so the benchmark can finish.
      if (waitForTestLatch.getCount() == 1) {
        waitForTestLatch.countDown();
      }
      client().prepareAbortBench(BENCHMARK_NAME).get();
      // Confirm that there are no active benchmarks in the cluster
      assertThat(
          client().prepareBenchStatus().execute().actionGet().totalActiveBenchmarks(), equalTo(0));
      assertThat(waitForTestLatch.getCount(), is(0L));
    }
    // Confirm that benchmark was indeed aborted
    assertThat(
        future.get().state(),
        isOneOf(BenchmarkResponse.State.ABORTED, BenchmarkResponse.State.COMPLETE));
  }
 @BeforeClass
 public void createDatabase() {
   logger.debug("createDatabase {}", getDatabase());
   try {
     mongoDB = getMongo().getDB(getDatabase());
     mongoDB.setWriteConcern(WriteConcern.REPLICAS_SAFE);
     super.createRiver(
         "/test/elasticsearch/plugin/river/mongodb/gridfs/test-gridfs-mongodb-river.json");
     ActionFuture<IndicesExistsResponse> response =
         getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
     assertThat(response.actionGet().isExists(), equalTo(true));
     logger.info("Start createCollection");
     mongoCollection = mongoDB.createCollection(getCollection(), null);
     Assert.assertNotNull(mongoCollection);
   } catch (Throwable t) {
     logger.error("createDatabase failed.", t);
     // Fail fast instead of swallowing the error: a silently-failed @BeforeClass
     // would let every test run against a broken fixture and fail later with
     // confusing symptoms. The cause is preserved for diagnosis.
     throw new RuntimeException("createDatabase failed.", t);
   }
 }
  @Test
  public void testAbortBenchmark() throws Exception {
    final int iters =
        between(1, 3); // we run this more than once to make sure metadata is cleaned up properly
    for (int i = 0; i < iters; i++) {
      // Submit a benchmark whose queries block on a latch so it cannot finish on its own.
      SearchRequest searchRequest = prepareBlockingScriptQuery();
      final BenchmarkRequest request =
          BenchmarkTestUtil.randomRequest(
              client(), indices, numExecutorNodes, competitionSettingsMap, searchRequest);
      request.settings().iterations(Integer.MAX_VALUE, true); // massive amount of iterations
      logger.info(
          "--> Submitting benchmark - competitors [{}] iterations [{}]",
          request.competitors().size(),
          request.settings().iterations());
      boolean aborted = false;
      final ActionFuture<BenchmarkResponse> benchmarkResponse = client().bench(request);
      try {
        // Wait until a benchmark query is actually executing before aborting.
        waitForQuery.await();
        final AbortBenchmarkResponse abortResponse =
            client().prepareAbortBench(BENCHMARK_NAME).get();
        aborted = true;
        // Confirm that the benchmark was actually aborted and did not finish on its own
        assertAcked(abortResponse);
        // Confirm that there are no active benchmarks in the cluster
        final BenchmarkStatusResponse statusResponse =
            client().prepareBenchStatus().execute().actionGet();
        waitForTestLatch.countDown(); // let the queries go - we already aborted and got the status
        assertThat(statusResponse.totalActiveBenchmarks(), equalTo(0));

        // Confirm that benchmark was indeed aborted
        assertThat(benchmarkResponse.get().state(), is(BenchmarkResponse.State.ABORTED));

      } finally {
        // Always release blocked queries, and abort the benchmark if we never got that far.
        if (waitForTestLatch.getCount() == 1) {
          waitForTestLatch.countDown();
        }
        if (!aborted) {
          client().prepareAbortBench(BENCHMARK_NAME).get();
        }
        assertThat(waitForTestLatch.getCount(), is(0L));
      }
    }
  }
 @Override
 public T actionGet(TimeValue timeout) throws ElasticsearchException {
   // Pass-through to the wrapped future with the caller-supplied timeout.
   return in.actionGet(timeout);
 }
 @Override
 public T get(long timeout, TimeUnit unit)
     throws InterruptedException, ExecutionException, TimeoutException {
   // Pass-through to the wrapped future with the caller-supplied timeout.
   return in.get(timeout, unit);
 }
  @Test
  public void initialImport() throws Throwable {
    logger.debug("Start InitialImport");
    try {
      createDatabase();

      // Insert one document BEFORE the river exists; it must be picked up by the
      // river's initial import rather than by oplog tailing.
      DBObject dbObject1 = new BasicDBObject(ImmutableMap.of("name", "Richard"));
      WriteResult result1 = mongoCollection.insert(dbObject1);
      logger.info("WriteResult: {}", result1.toString());
      Thread.sleep(wait);

      // Make sure we're starting out with the river not setup
      GetResponse statusResponse =
          getNode()
              .client()
              .prepareGet("_river", river, MongoDBRiver.STATUS_ID)
              .execute()
              .actionGet();
      Assert.assertFalse(
          statusResponse.isExists(),
          "Expected no river but found one "
              + XContentMapValues.extractValue(
                  MongoDBRiver.TYPE + "." + MongoDBRiver.STATUS_FIELD,
                  statusResponse.getSourceAsMap()));

      // Setup the river
      createRiver();
      Thread.sleep(wait);

      // Check that it did an initial import successfully
      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));
      Assert.assertEquals(
          Status.RUNNING, MongoDBRiverHelper.getRiverStatus(getNode().client(), river));
      assertThat(
          getNode().client().count(countRequest(getIndex())).actionGet().getCount(), equalTo(1L));

      // Check that it syncs the oplog
      DBObject dbObject2 = new BasicDBObject(ImmutableMap.of("name", "Ben"));
      WriteResult result2 = mongoCollection.insert(dbObject2);
      logger.info("WriteResult: {}", result2.toString());
      Thread.sleep(wait);

      refreshIndex();
      Assert.assertEquals(
          Status.RUNNING, MongoDBRiverHelper.getRiverStatus(getNode().client(), river));
      assertThat(
          getNode().client().count(countRequest(getIndex())).actionGet().getCount(), equalTo(2L));

      // Check that a delete is synced from the oplog as well
      mongoCollection.remove(dbObject1, WriteConcern.REPLICAS_SAFE);

      Thread.sleep(wait);
      refreshIndex();
      assertThat(
          getNode().client().count(countRequest(getIndex())).actionGet().getCount(), equalTo(1L));

    } catch (Throwable t) {
      // logger.error(msg, t) already records the full stack trace; the extra
      // printStackTrace() call was redundant stderr noise.
      logger.error("InitialImport failed.", t);
      throw t;
    } finally {
      cleanUp();
    }
  }
 @Override
 public boolean isDone() {
   // Pass-through to the wrapped future.
   return in.isDone();
 }
 @Override
 public boolean isCancelled() {
   // Pass-through to the wrapped future.
   return in.isCancelled();
 }
 @Override
 public boolean cancel(boolean mayInterruptIfRunning) {
   // Pass-through to the wrapped future.
   return in.cancel(mayInterruptIfRunning);
 }
 @Override
 public Throwable getRootFailure() {
   // Pass-through to the wrapped future.
   return in.getRootFailure();
 }
  @Test
  public void testAbortByPattern() throws Exception {
    final int iters =
        between(1, 3); // we run this more than once to make sure metadata is cleaned up properly
    for (int i = 0; i < iters; i++) {
      List<BenchmarkRequest> requests = new ArrayList<>();
      List<ActionFuture<BenchmarkResponse>> responses = new ArrayList<>();

      // Submit several uniquely-named benchmarks whose queries block on a latch.
      SearchRequest searchRequest = prepareBlockingScriptQuery();
      final int benches = between(1, 3);
      String[] names = new String[benches];
      for (int k = 0; k < benches; k++) {
        final BenchmarkRequest request =
            BenchmarkTestUtil.randomRequest(
                client(), indices, numExecutorNodes, competitionSettingsMap, searchRequest);
        request.settings().iterations(Integer.MAX_VALUE, true); // massive amount of iterations
        names[k] = BENCHMARK_NAME + Integer.toString(k);
        request.benchmarkName(names[k]);
        requests.add(request);
        logger.info(
            "--> Submitting benchmark - competitors [{}] iterations [{}]",
            request.competitors().size(),
            request.settings().iterations());
      }

      boolean aborted = false;
      for (BenchmarkRequest r : requests) {
        final ActionFuture<BenchmarkResponse> benchmarkResponse = client().bench(r);
        responses.add(benchmarkResponse);
      }
      try {
        waitForQuery.await();
        if (benches > 1) {
          // Wait until every submitted benchmark is visible through the status API.
          awaitBusy(
              new Predicate<Object>() {
                @Override
                public boolean apply(Object input) {
                  return client().prepareBenchStatus().get().benchmarkResponses().size() == benches;
                }
              });
        }
        // Abort via a randomly-chosen pattern set; the non-matching "bad" patterns
        // must be harmless when mixed with matching ones.
        final String badPatternA = "*z";
        final String badPatternB = "xxx";
        final String[] patterns;
        switch (getRandom().nextInt(3)) {
          case 0:
            patterns = new String[] {"*"};
            break;
          case 1:
            patterns = new String[] {BENCHMARK_NAME_WILDCARD, badPatternA, badPatternB};
            break;
          case 2:
            patterns = names;
            break;
          default:
            patterns = new String[] {BENCHMARK_NAME_WILDCARD};
        }
        final AbortBenchmarkResponse abortResponse = client().prepareAbortBench(patterns).get();
        aborted = true;
        assertAcked(abortResponse);

        // Confirm that there are no active benchmarks in the cluster
        final BenchmarkStatusResponse statusResponse =
            client().prepareBenchStatus().execute().actionGet();
        waitForTestLatch.countDown(); // let the queries go - we already aborted and got the status
        assertThat(statusResponse.totalActiveBenchmarks(), equalTo(0));

        // Confirm that benchmark was indeed aborted
        for (ActionFuture<BenchmarkResponse> r : responses) {
          assertThat(r.get().state(), is(BenchmarkResponse.State.ABORTED));
        }
      } finally {
        // Always release blocked queries, and abort if we never got that far.
        // NOTE(review): the benchmarks were named BENCHMARK_NAME + k, so aborting by
        // the bare BENCHMARK_NAME may not match them — confirm the intended pattern.
        if (waitForTestLatch.getCount() == 1) {
          waitForTestLatch.countDown();
        }
        if (!aborted) {
          client().prepareAbortBench(BENCHMARK_NAME).get();
        }
        assertThat(waitForTestLatch.getCount(), is(0L));
      }
    }
  }
 @Override
 public T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException {
   // Pass-through to the wrapped future with the caller-supplied timeout.
   return in.actionGet(timeout, unit);
 }
 @Override
 public T actionGet(long timeoutMillis) throws ElasticsearchException {
   // Pass-through to the wrapped future with a millisecond timeout.
   return in.actionGet(timeoutMillis);
 }
  // Indexes two documents with different dates, searches with a custom distance-decay
  // score function anchored at "2013-05-28", and asserts the resulting hit order:
  // doc "1" (2013-05-26) before doc "2" (2013-05-27).
  // NOTE(review): ordering implies the custom scorer ranks 2013-05-26 higher for this
  // reference date/offset — confirm against CustomDistanceScoreBuilder's semantics.
  public void testPlugin() throws Exception {
    // Create the test index with explicit mappings: "test" as text, "num1" as date.
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .addMapping(
            "type1",
            jsonBuilder()
                .startObject()
                .startObject("type1")
                .startObject("properties")
                .startObject("test")
                .field("type", "text")
                .endObject()
                .startObject("num1")
                .field("type", "date")
                .endObject()
                .endObject()
                .endObject()
                .endObject())
        .execute()
        .actionGet();
    // Wait for the cluster to settle (at least yellow) before indexing.
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForYellowStatus()
        .execute()
        .actionGet();

    // Two documents, one day apart.
    client()
        .index(
            indexRequest("test")
                .type("type1")
                .id("1")
                .source(
                    jsonBuilder()
                        .startObject()
                        .field("test", "value")
                        .field("num1", "2013-05-26")
                        .endObject()))
        .actionGet();
    client()
        .index(
            indexRequest("test")
                .type("type1")
                .id("2")
                .source(
                    jsonBuilder()
                        .startObject()
                        .field("test", "value")
                        .field("num1", "2013-05-27")
                        .endObject()))
        .actionGet();

    // Refresh so both documents are visible to the search.
    client().admin().indices().prepareRefresh().execute().actionGet();
    // Custom decay function over "num1", reference point 2013-05-28, scale +1d.
    DecayFunctionBuilder<?> gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d");

    ActionFuture<SearchResponse> response =
        client()
            .search(
                searchRequest()
                    .searchType(SearchType.QUERY_THEN_FETCH)
                    .source(
                        searchSource()
                            .explain(false)
                            .query(functionScoreQuery(termQuery("test", "value"), gfb))));

    SearchResponse sr = response.actionGet();
    ElasticsearchAssertions.assertNoFailures(sr);
    SearchHits sh = sr.getHits();

    // Both documents match; the decay function determines the order.
    assertThat(sh.hits().length, equalTo(2));
    assertThat(sh.getAt(0).getId(), equalTo("1"));
    assertThat(sh.getAt(1).getId(), equalTo("2"));
  }