Code example #1
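Tests that an ip field with a not_analyzed string multi-field is mapped correctly, then indexes a document and counts matches on the sub-field a.b.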
  @Test
  public void testIpMultiField() throws Exception {
    assertAcked(
        client()
            .admin()
            .indices()
            .prepareCreate("my-index")
            .addMapping("my-type", createMappingSource("ip")));

    GetMappingsResponse getMappingsResponse =
        client().admin().indices().prepareGetMappings("my-index").get();
    MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
    assertThat(mappingMetaData, not(nullValue()));
    Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
    Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
    assertThat(aField.size(), equalTo(2));
    assertThat(aField.get("type").toString(), equalTo("ip"));
    assertThat(aField.get("fields"), notNullValue());

    Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
    assertThat(bField.size(), equalTo(2));
    assertThat(bField.get("type").toString(), equalTo("string"));
    assertThat(bField.get("index").toString(), equalTo("not_analyzed"));

    client()
        .prepareIndex("my-index", "my-type", "1")
        .setSource("a", "127.0.0.1")
        .setRefresh(true)
        .get();
    CountResponse countResponse =
        client().prepareCount("my-index").setQuery(matchQuery("a.b", "127.0.0.1")).get();
    assertThat(countResponse.getCount(), equalTo(1L));
  }
Code example #2
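Integration test for a Redis-backed river: messages published through Jedis should become searchable, which is verified by counting hits for a query_string query on the content field before and after each publish.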
  @Test
  public void connectRiverAndSendMessages() throws InterruptedException {

    Thread.sleep(1000);

    String field = "content";
    String msg = "sammy";

    logger.debug("Publishing message [channel={}, msg={}]", channel, msg);
    jedis.publish(channel, msg);

    Thread.sleep(1000);
    refreshIndex();

    QuerySourceBuilder builder = new QuerySourceBuilder();
    builder.setQuery(queryString(field + ":" + msg));

    logger.debug(
        "Counting [index={}, type={}, field={}, msg={}]",
        new Object[] {index, channel, field, msg});
    CountResponse resp =
        node.client().count(countRequest(index).types(channel).source(builder)).actionGet();
    assertEquals(1, resp.getCount());

    msg = "coldplay";
    // re-point the query at the new message before the next count
    builder.setQuery(queryString(field + ":" + msg));

    logger.debug(
        "Counting [index={}, type={}, field={}, msg={}]",
        new Object[] {index, channel, field, msg});
    resp = node.client().count(countRequest(index).types(channel).source(builder)).actionGet();
    assertEquals(0, resp.getCount());

    logger.debug("Publishing message [channel={}]", channel);
    jedis.publish(channel, msg);

    Thread.sleep(1000);
    refreshIndex();

    logger.debug(
        "Counting [index={}, type={}, field={}, msg={}]",
        new Object[] {index, channel, field, msg});
    resp = node.client().count(countRequest(index).types(channel).source(builder)).actionGet();
    assertEquals(1, resp.getCount());

    shutdown();
  }
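Code example #3
MongoDB river script test: with ctx.ignore = true every change is skipped, so after inserting a document into MongoDB the index exists but its document count stays at zero.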
  @Test
  public void testIgnoreScript() throws Throwable {
    logger.debug("Start testIgnoreScript");
    try {
      logger.debug("Create river {}", getRiver());
      String script = "ctx.ignore = true;";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      Thread.sleep(wait);
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));
      CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
      logger.info("Document count: {}", countResponse.getCount());
      assertThat(countResponse.getCount(), equalTo(0L));

      mongoCollection.remove(dbObject);

    } catch (Throwable t) {
      logger.error("testIgnoreScript failed.", t);
      t.printStackTrace();
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }
Code example #4
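Helper that refreshes all indices and returns the document count for the given index and type.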
  protected long getCount(final String index, final String type) {
    logger.debug("getCount()");

    esSetup.client().admin().indices().refresh(new RefreshRequest()).actionGet();

    final CountResponse count =
        esSetup.client().count(new CountRequest(index).types(type)).actionGet();

    return count.getCount();
  }
Code example #5
File: BaseIndex.java  Project: ihr/sonarqube
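Builds an IndexStat from the total number of documents in the index, using a match_all count restricted to the index type.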
 @Override
 public IndexStat getIndexStat() {
   CountRequestBuilder countRequest =
       client
           .prepareCount(this.getIndexName())
           .setTypes(this.getIndexType())
           .setQuery(QueryBuilders.matchAllQuery());
   CountResponse response = countRequest.get();
   return new IndexStat(getLastSynchronization(), response.getCount());
 }
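Code example #6
MongoDB river GridFS test: an HTML attachment stored in GridFS should be indexed exactly once, be findable both by _id and by full-text search, and disappear from the count after the file is removed.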
  @Test
  public void testImportAttachment() throws Exception {
    logger.debug("*** testImportAttachment ***");
    byte[] content =
        copyToBytesFromClasspath(
            "/test/elasticsearch/plugin/river/mongodb/gridfs/test-attachment.html");
    logger.debug("Content in bytes: {}", content.length);
    GridFS gridFS = new GridFS(mongoDB);
    GridFSInputFile in = gridFS.createFile(content);
    in.setFilename("test-attachment.html");
    in.setContentType("text/html");
    in.save();
    in.validate();

    String id = in.getId().toString();
    logger.debug("GridFS in: {}", in);
    logger.debug("Document created with id: {}", id);

    GridFSDBFile out = gridFS.findOne(in.getFilename());
    logger.debug("GridFS from findOne: {}", out);
    out = gridFS.findOne(new ObjectId(id));
    logger.debug("GridFS from findOne: {}", out);
    Assert.assertEquals(out.getId(), in.getId());

    Thread.sleep(wait);
    refreshIndex();

    CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
    logger.debug("Index total count: {}", countResponse.getCount());
    assertThat(countResponse.getCount(), equalTo(1L));

    countResponse =
        getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id", id))).actionGet();
    logger.debug("Index count for id {}: {}", id, countResponse.getCount());
    assertThat(countResponse.getCount(), equalTo(1L));

    SearchResponse response =
        getNode()
            .client()
            .prepareSearch(getIndex())
            .setQuery(QueryBuilders.queryString("Aliquam"))
            .execute()
            .actionGet();
    logger.debug("SearchResponse {}", response.toString());
    long totalHits = response.getHits().getTotalHits();
    logger.debug("TotalHits: {}", totalHits);
    assertThat(totalHits, equalTo(1L));

    gridFS.remove(new ObjectId(id));

    Thread.sleep(wait);
    refreshIndex();

    countResponse =
        getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id", id))).actionGet();
    logger.debug("Count after delete request: {}", countResponse.getCount());
    assertThat(countResponse.getCount(), equalTo(0L));
  }
Code example #7
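Indexes a random number of documents, each adding a new dynamic field to its type, checks the total count, and asserts that every field shows up in the mappings on all nodes.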
  @Test
  public void dynamicUpdates() throws Exception {
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
        .execute()
        .actionGet();
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute()
        .actionGet();

    int recCount = randomIntBetween(200, 600);
    int numberOfTypes = randomIntBetween(1, 5);
    List<IndexRequestBuilder> indexRequests = Lists.newArrayList();
    for (int rec = 0; rec < recCount; rec++) {
      String type = "type" + (rec % numberOfTypes);
      String fieldName = "field_" + type + "_" + rec;
      indexRequests.add(
          client()
              .prepareIndex("test", type, Integer.toString(rec))
              .setSource(fieldName, "some_value"));
    }
    indexRandom(true, indexRequests);

    logger.info("checking all the documents are there");
    RefreshResponse refreshResponse =
        client().admin().indices().prepareRefresh().execute().actionGet();
    assertThat(refreshResponse.getFailedShards(), equalTo(0));
    CountResponse response = client().prepareCount("test").execute().actionGet();
    assertThat(response.getCount(), equalTo((long) recCount));

    logger.info("checking all the fields are in the mappings");

    for (int rec = 0; rec < recCount; rec++) {
      String type = "type" + (rec % numberOfTypes);
      String fieldName = "field_" + type + "_" + rec;
      assertConcreteMappingsOnAll("test", type, fieldName);
    }
  }
Code example #8
File: EsIndexBoltTest.java  Project: kenshin233/storm
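Storm EsIndexBolt test: executing the bolt with a test tuple should ack the tuple and index exactly one document of the given _type.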
  @Test
  public void testEsIndexBolt() throws Exception {
    String index = "index1";
    String type = "type1";

    Tuple tuple = createTestTuple(index, type);

    bolt.execute(tuple);

    verify(outputCollector).ack(tuple);

    node.client().admin().indices().prepareRefresh(index).execute().actionGet();
    CountResponse resp =
        node.client()
            .prepareCount(index)
            .setQuery(new TermQueryBuilder("_type", type))
            .execute()
            .actionGet();

    Assert.assertEquals(1, resp.getCount());
  }
Code example #9
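Simpler dynamic-mapping variant: indexes 20 documents, each introducing a new field, and verifies the total count after a refresh.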
  @Test
  public void dynamicUpdates() throws Exception {

    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            ImmutableSettings.settingsBuilder()
                .put("index.number_of_shards", 2)
                .put("index.number_of_replicas", 0))
        .execute()
        .actionGet();
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute()
        .actionGet();

    long recCount = 20;
    for (int rec = 0; rec < recCount; rec++) {
      client()
          .prepareIndex("test", "type", "rec" + rec)
          .setSource("field" + rec, "some_value")
          .execute()
          .actionGet();
    }
    RefreshResponse refreshResponse =
        client().admin().indices().prepareRefresh().execute().actionGet();
    assertThat(refreshResponse.getFailedShards(), equalTo(0));
    logger.info("Searching");
    CountResponse response = client().prepareCount("test").execute().actionGet();
    assertThat(response.getCount(), equalTo(recCount));
  }
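Code example #10
MongoDB river script test for deletes: the script turns documents flagged with to_be_deleted into delete operations, so the document is first searchable by _id and the index count drops to zero once the flag is set.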
  @Test
  public void testDeleteDocument() throws Throwable {
    logger.debug("Start testDeleteDocument");
    try {
      logger.debug("Create river {}", getRiver());
      String script = "if (ctx.document.to_be_deleted == true) { ctx.operation = 'd' };";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      Thread.sleep(wait);
      String id = dbObject.get("_id").toString();
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));

      SearchResponse sr =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(fieldQuery("_id", id))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", sr.toString());
      long totalHits = sr.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      dbObject.put("to_be_deleted", Boolean.TRUE);
      mongoCollection.save(dbObject);

      Thread.sleep(wait);
      refreshIndex();

      CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
      logger.info("Document count: {}", countResponse.getCount());
      assertThat(countResponse.getCount(), equalTo(0L));

      mongoCollection.remove(dbObject);
    } catch (Throwable t) {
      logger.error("testDeleteDocument failed.", t);
      t.printStackTrace();
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }
Code example #11
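Counts all documents across the given indices with a match_all query.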
 public long countAll(String... indices) {
   CountResponse response =
       client.prepareCount(indices).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
   return response.getCount();
 }
Code example #12
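Dynamic-mapping test with a retry loop: after indexing, it reads the mappings from the cluster state, waits for pending cluster tasks when a field is missing, and re-runs the whole check if the cluster state version changed in the meantime.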
  @Test
  public void dynamicUpdates() throws Exception {
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            ImmutableSettings.settingsBuilder()
                .put("index.number_of_shards", 1)
                .put("index.number_of_replicas", 0))
        .execute()
        .actionGet();
    client()
        .admin()
        .cluster()
        .prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute()
        .actionGet();

    int recCount = randomIntBetween(200, 600);
    int numberOfTypes = randomIntBetween(1, 5);
    List<IndexRequestBuilder> indexRequests = Lists.newArrayList();
    for (int rec = 0; rec < recCount; rec++) {
      String type = "type" + (rec % numberOfTypes);
      String fieldName = "field_" + type + "_" + rec;
      indexRequests.add(
          client()
              .prepareIndex("test", type, Integer.toString(rec))
              .setSource(fieldName, "some_value"));
    }
    indexRandom(true, indexRequests);

    logger.info("checking all the documents are there");
    RefreshResponse refreshResponse =
        client().admin().indices().prepareRefresh().execute().actionGet();
    assertThat(refreshResponse.getFailedShards(), equalTo(0));
    CountResponse response = client().prepareCount("test").execute().actionGet();
    assertThat(response.getCount(), equalTo((long) recCount));

    logger.info("checking all the fields are in the mappings");

    reRunTest:
    while (true) {
      Map<String, String> typeToSource = Maps.newHashMap();
      ClusterState state = client().admin().cluster().prepareState().get().getState();
      for (ObjectObjectCursor<String, MappingMetaData> cursor :
          state.getMetaData().getIndices().get("test").getMappings()) {
        typeToSource.put(cursor.key, cursor.value.source().string());
      }
      for (int rec = 0; rec < recCount; rec++) {
        String type = "type" + (rec % numberOfTypes);
        String fieldName = "field_" + type + "_" + rec;
        fieldName = "\"" + fieldName + "\""; // quote it, so we make sure we catch the exact one
        if (!typeToSource.containsKey(type) || !typeToSource.get(type).contains(fieldName)) {
          client()
              .admin()
              .cluster()
              .prepareHealth()
              .setWaitForEvents(Priority.LANGUID)
              .execute()
              .actionGet();
          awaitBusy(
              new Predicate<Object>() {
                @Override
                public boolean apply(Object input) {
                  PendingClusterTasksResponse pendingTasks =
                      client().admin().cluster().preparePendingClusterTasks().get();
                  return pendingTasks.pendingTasks().isEmpty();
                }
              });
          client()
              .admin()
              .cluster()
              .prepareHealth()
              .setWaitForEvents(Priority.LANGUID)
              .execute()
              .actionGet();
          // its going to break, before we do, make sure that the cluster state hasn't changed on
          // us...
          ClusterState state2 = client().admin().cluster().prepareState().get().getState();
          if (state.version() != state2.version()) {
            logger.info(
                "not the same version, used for test {}, new one {}, re-running test, first wait for mapping to wait",
                state.version(),
                state2.version());
            continue reRunTest;
          }
          logger.info(
              "failing, type {}, field {}, mapping {}", type, fieldName, typeToSource.get(type));
          assertThat(typeToSource.get(type), containsString(fieldName));
        }
      }
      break;
    }
  }
Code example #13
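Default post-injection hook for the river integration tests: counts the documents in the test index and compares the result with the expected number.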
 /**
  * If you need to run specific tests, just override this method. By default, we check the number
  * of expected documents
  *
  * @param node Elasticsearch current node
  */
 protected void postInjectionTests(Node node) {
   CountResponse response = node.client().prepareCount("test").execute().actionGet();
   // We have consumed all messages. We can now check expected number of documents
   Assert.assertEquals(
       "Wrong number of documents found", expectedDocuments(), response.getCount());
 }
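Code example #14
Variant of the GridFS attachment test that verifies presence and deletion with a GET by id instead of a count on _id (the count-based assertions are left commented out).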
  @Test
  public void testImportAttachment() throws Exception {
    logger.debug("*** testImportAttachment ***");
    try {
      // createDatabase();
      byte[] content = copyToBytesFromClasspath(TEST_ATTACHMENT_HTML);
      logger.debug("Content in bytes: {}", content.length);
      GridFS gridFS = new GridFS(mongoDB);
      GridFSInputFile in = gridFS.createFile(content);
      in.setFilename("test-attachment.html");
      in.setContentType("text/html");
      in.save();
      in.validate();

      String id = in.getId().toString();
      logger.debug("GridFS in: {}", in);
      logger.debug("Document created with id: {}", id);

      GridFSDBFile out = gridFS.findOne(in.getFilename());
      logger.debug("GridFS from findOne: {}", out);
      out = gridFS.findOne(new ObjectId(id));
      logger.debug("GridFS from findOne: {}", out);
      Assert.assertEquals(out.getId(), in.getId());

      Thread.sleep(wait);
      refreshIndex();

      CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
      logger.debug("Index total count: {}", countResponse.getCount());
      assertThat(countResponse.getCount(), equalTo(1L));

      GetResponse getResponse = getNode().client().get(getRequest(getIndex()).id(id)).get();
      logger.debug("Get request for id {}: {}", id, getResponse.isExists());
      assertThat(getResponse.isExists(), equalTo(true));
      //            countResponse =
      // getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id",
      // id))).actionGet();
      //            logger.debug("Index count for id {}: {}", id, countResponse.getCount());
      //            assertThat(countResponse.getCount(), equalTo(1l));

      SearchResponse response =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(QueryBuilders.queryString("Aliquam"))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", response.toString());
      long totalHits = response.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      gridFS.remove(new ObjectId(id));

      Thread.sleep(wait);
      refreshIndex();

      getResponse = getNode().client().get(getRequest(getIndex()).id(id)).get();
      logger.debug("Get request for id {}: {}", id, getResponse.isExists());
      assertThat(getResponse.isExists(), equalTo(false));
      //            countResponse =
      // getNode().client().count(countRequest(getIndex()).query(fieldQuery("_id",
      // id))).actionGet();
      //            logger.debug("Count after delete request: {}", countResponse.getCount());
      //            assertThat(countResponse.getCount(), equalTo(0L));
    } catch (Throwable t) {
      logger.error("testImportAttachment failed.", t);
      Assert.fail("testImportAttachment failed", t);
    } finally {
      // cleanUp();
    }
  }
Code example #15
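Returns whether the cluster contains any documents at all, based on a cluster-wide count.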
 private boolean isAnyIndexPresent() {
   CountResponse numberOfElements = client.prepareCount().execute().actionGet();
   return numberOfElements.getCount() > 0;
 }
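Code example #16
Standalone full-cluster restart test: starts 40 nodes, creates 100 single-shard indices with two documents each, shuts everything down, starts the nodes again, waits for green health, and then verifies each index's document count with a match_all count.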
  public static void main(String[] args) throws Exception {
    final int NUM_NODES = 40;
    final int NUM_INDICES = 100;
    final int NUM_DOCS = 2;
    final int FLUSH_AFTER = 1;

    final Settings nodeSettings =
        Settings.settingsBuilder()
            .put("transport.netty.connections_per_node.low", 0)
            .put("transport.netty.connections_per_node.med", 0)
            .put("transport.netty.connections_per_node.high", 1)
            .build();

    final Settings indexSettings =
        Settings.settingsBuilder().put("index.number_of_shards", 1).build();

    List<Node> nodes = Lists.newArrayList();
    for (int i = 0; i < NUM_NODES; i++) {
      nodes.add(
          NodeBuilder.nodeBuilder()
              .settings(Settings.settingsBuilder().put(nodeSettings).put("name", "node" + i))
              .node());
    }
    Client client = nodes.get(0).client();

    for (int index = 0; index < NUM_INDICES; index++) {
      String indexName = "index_" + index;
      System.out.println("--> Processing index [" + indexName + "]...");
      client
          .admin()
          .indices()
          .prepareCreate(indexName)
          .setSettings(indexSettings)
          .execute()
          .actionGet();

      boolean flushed = false;
      for (int doc = 0; doc < NUM_DOCS; doc++) {
        if (!flushed && doc > FLUSH_AFTER) {
          flushed = true;
          client.admin().indices().prepareFlush(indexName).execute().actionGet();
        }
        client
            .prepareIndex(indexName, "type1", Integer.toString(doc))
            .setSource("field", "value" + doc)
            .execute()
            .actionGet();
      }
      System.out.println("--> DONE index [" + indexName + "]");
    }

    System.out.println("--> Initiating shutdown");
    for (Node node : nodes) {
      node.close();
    }

    System.out.println("--> Waiting for all nodes to be closed...");
    while (true) {
      boolean allAreClosed = true;
      for (Node node : nodes) {
        if (!node.isClosed()) {
          allAreClosed = false;
          break;
        }
      }
      if (allAreClosed) {
        break;
      }
      Thread.sleep(100);
    }
    System.out.println("Waiting a bit for node lock to really be released?");
    Thread.sleep(5000);
    System.out.println("--> All nodes are closed, starting back...");

    nodes = Lists.newArrayList();
    for (int i = 0; i < NUM_NODES; i++) {
      nodes.add(
          NodeBuilder.nodeBuilder()
              .settings(Settings.settingsBuilder().put(nodeSettings).put("name", "node" + i))
              .node());
    }
    client = nodes.get(0).client();

    System.out.println("--> Waiting for green status");
    while (true) {
      ClusterHealthResponse clusterHealth =
          client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
      if (clusterHealth.isTimedOut()) {
        System.err.println(
            "--> cluster health timed out..., active shards ["
                + clusterHealth.getActiveShards()
                + "]");
      } else {
        break;
      }
    }

    System.out.println("Verifying counts...");
    for (int index = 0; index < NUM_INDICES; index++) {
      String indexName = "index_" + index;
      CountResponse count =
          client
              .prepareCount(indexName)
              .setQuery(QueryBuilders.matchAllQuery())
              .execute()
              .actionGet();
      if (count.getCount() != NUM_DOCS) {
        System.err.println(
            "Wrong count value, expected ["
                + NUM_DOCS
                + "], got ["
                + count.getCount()
                + "] for index ["
                + indexName
                + "]");
      }
    }

    System.out.println("Test end");
    for (Node node : nodes) {
      node.close();
    }
  }
Code example #17
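RabbitMQ river integration test: purges the queue, pushes messages, starts a local node with the river, and polls the document count until the expected number of documents has been indexed or a timeout elapses.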
  @Test
  public void test_all_messages_are_consumed() throws Exception {

    // We try to connect to RabbitMQ.
    // If it's not launched, we don't fail the test but only log it
    try {
      ConnectionFactory cfconn = new ConnectionFactory();
      cfconn.setHost("localhost");
      cfconn.setPort(AMQP.PROTOCOL.PORT);
      Connection conn = cfconn.newConnection();

      Channel ch = conn.createChannel();
      ch.exchangeDeclare("elasticsearch", "direct", true);
      AMQP.Queue.DeclareOk queue = ch.queueDeclare("elasticsearch", true, false, false, null);

      // We purge the queue in case of something is remaining there
      ch.queuePurge("elasticsearch");

      pushMessages(ch);

      // We can now create our node and our river
      Settings settings =
          ImmutableSettings.settingsBuilder()
              .put("gateway.type", "none")
              .put("index.number_of_shards", 1)
              .put("index.number_of_replicas", 0)
              .put(nodeSettings())
              .build();
      node = NodeBuilder.nodeBuilder().local(true).settings(settings).node();

      // We first remove existing index if any
      try {
        node.client().admin().indices().prepareDelete(INDEX).execute().actionGet();
      } catch (IndexMissingException e) {
        // Index is missing? It's perfectly fine!
      }

      // Let's create an index for our docs and we will disable refresh
      node.client().admin().indices().prepareCreate(INDEX).execute().actionGet();

      node.client()
          .prepareIndex("_river", "test", "_meta")
          .setSource(river())
          .execute()
          .actionGet();

      // We need at some point to check if we have consumed the river
      int steps = timeout();
      long count = 0;

      while (true) {
        // We wait for one second
        Thread.sleep(1000);

        CountResponse response = node.client().prepareCount("test").execute().actionGet();
        count = response.getCount();

        steps--;
        if (steps < 0 || count == expectedDocuments()) {
          break;
        }
      }

      ch.close();
      conn.close();

      postInjectionTests(node);
    } catch (ConnectException e) {
      logger.warn(
          "RabbitMQ service is not launched on localhost:{}. Can not start Integration test. "
              + "Launch `rabbitmq-server`.",
          AMQP.PROTOCOL.PORT);
    }
  }