@Test
  @Slow
  public void testIndexActions() throws Exception {
    startNode("server1");

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    client("server1").admin().indices().create(createIndexRequest("test")).actionGet();

    closeNode("server1");

    startNode("server1");
    Thread.sleep(500); // fixed pause for the restarted node (see the health-wait sketch after this method)
    try {
      client("server1").admin().indices().create(createIndexRequest("test")).actionGet();
      assert false : "index should already exist";
    } catch (IndexAlreadyExistsException e) {
      // all is well
    }
  }
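
A note on the restart above: the fixed 500 ms sleep is timing-dependent. A minimal sketch of a more robust variant, assuming the same startNode/client harness helpers used throughout these snippets, waits on cluster health for the index instead:

    // Hedged sketch: wait for the restarted node to report the index healthy
    // instead of sleeping a fixed 500 ms. Uses only request builders that
    // already appear in these tests.
    startNode("server1");
    ClusterHealthResponse restartHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest("test").waitForYellowStatus())
            .actionGet();
    assertThat(restartHealth.isTimedOut(), equalTo(false));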
 @BeforeClass
 public void createNodes() throws Exception {
   Settings settings =
       settingsBuilder()
           .put("index.number_of_shards", 3)
           .put("index.number_of_replicas", 0)
           .build();
   startNode("server1", settings);
   startNode("server2", settings);
   client = getClient();
 }
 @Override
 protected void beforeClass() throws Exception {
    // The filter cache is cleaned periodically (default interval: 60s); shorten the
    // interval so the cleaner runs often instead of the test sleeping for 60s.
   startNode(
       "node1",
       ImmutableSettings.settingsBuilder().put("indices.cache.filter.clean_interval", "1ms"));
 }
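
With the 1 ms clean interval configured above, a test can poll for the cache to empty instead of sleeping. A minimal sketch of such a busy-wait helper; the helper name, poll interval, and timeout handling are assumptions, not part of the original harness:

    // Hypothetical busy-wait helper: poll a condition until it holds or the
    // timeout elapses, instead of a long fixed Thread.sleep.
    static boolean awaitBusy(java.util.concurrent.Callable<Boolean> condition, long timeoutMillis)
        throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      while (System.currentTimeMillis() < deadline) {
        if (condition.call()) {
          return true;
        }
        Thread.sleep(10); // short poll, made effective by the 1ms clean interval
      }
      return condition.call(); // one last check at the deadline
    }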
  @Test
  public void testClusterHealth() throws Exception {
    startNode("server1");
    createTestIndex();
    // The cluster health call should not be logged
    HttpClientResponse response = httpClient("server1").request("_cluster/health");
    assertThat((String) response.get("status"), equalTo("green"));

    Map<String, Object> data = createSearchQuery("user:kimchy");
    httpClient("server1").request("POST", "_search", data);
    // The POST /_search request, however, should be logged
    Map<String, Object> logJson =
        XContentFactory.xContent(XContentType.JSON)
            // skip the non-JSON prefix (first five characters) of the logged message
            .createParser(mockESLoggerFactory.getMessage().substring(5))
            .mapAndClose();
    assertThat((Integer) logJson.get("size"), greaterThan(100));
    assertThat(
        (String) logJson.get("data"),
        equalTo("{\"query\":{\"query_string\":{\"query\":\"user:kimchy\"}}}"));
  }
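
For context, a hypothetical sketch of what the createSearchQuery helper referenced above might build; its body is an assumption, chosen only to produce the JSON the assertion expects:

    // Hypothetical sketch of createSearchQuery. The structure mirrors the
    // logged body the test asserts on:
    // {"query":{"query_string":{"query":"user:kimchy"}}}
    private Map<String, Object> createSearchQuery(String queryString) {
      Map<String, Object> queryStringClause = new HashMap<String, Object>();
      queryStringClause.put("query", queryString);
      Map<String, Object> queryClause = new HashMap<String, Object>();
      queryClause.put("query_string", queryStringClause);
      Map<String, Object> body = new HashMap<String, Object>();
      body.put("query", queryClause);
      return body;
    }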
 @BeforeClass
 public void startNodes() throws Exception {
   startNode("node1", nodeSettings());
   startNode("node2", nodeSettings());
   client = getClient();
 }
  private void testLoad(boolean fullRecovery) {
    logger.info("Running with fullRecover [{}]", fullRecovery);

    startNode("server1");

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("--> creating test index ...");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("--> refreshing and checking count");
    client("server1").admin().indices().prepareRefresh().execute().actionGet();
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
        equalTo(0L));

    logger.info("--> indexing 1234 docs");
    for (long i = 0; i < 1234; i++) {
      client("server1")
          .prepareIndex("test", "type1", Long.toString(i))
          .setCreate(true) // use create: if recovery wrongly replays an operation, the duplicate create fails
          .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map())
          .execute()
          .actionGet();

      // snapshot every 11 docs so we get some actions going on in the gateway
      if ((i % 11) == 0) {
        client("server1").admin().indices().prepareGatewaySnapshot().execute().actionGet();
      }
      // flush every once in a while (every 55 docs), so we get different data
      if ((i % 55) == 0) {
        client("server1").admin().indices().prepareFlush().execute().actionGet();
      }
    }

    logger.info("--> refreshing and checking count");
    client("server1").admin().indices().prepareRefresh().execute().actionGet();
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
        equalTo(1234L));

    logger.info("--> closing the server");
    closeNode("server1");
    if (fullRecovery) {
      logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
      FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
      logger.info(
          "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    }

    startNode("server1");

    logger.info("--> running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("--> done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("--> checking count");
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(),
        equalTo(1234L));

    logger.info("--> checking reuse / recovery status");
    IndicesStatusResponse statusResponse =
        client("server1").admin().indices().prepareStatus().setRecovery(true).execute().actionGet();
    for (IndexShardStatus indexShardStatus : statusResponse.getIndex("test")) {
      for (ShardStatus shardStatus : indexShardStatus) {
        if (shardStatus.getShardRouting().primary()) {
          if (fullRecovery || !isPersistentStorage()) {
            assertThat(
                shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), equalTo(0L));
          } else {
            assertThat(
                shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(),
                greaterThan(
                    shardStatus.getGatewayRecoveryStatus().getIndexSize().bytes()
                        - 8196 /* segments file and others */));
          }
        }
      }
    }
  }
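
The setCreate(true) in the indexing loop above is what makes a faulty recovery observable: replaying an already-applied operation attempts to create an existing document and fails. A minimal sketch of that behavior, assuming the same client helper; the id and source values are illustrative:

    // Hedged sketch of the create semantics the test relies on: a second
    // create of the same id must fail rather than silently overwrite.
    client("server1")
        .prepareIndex("test", "type1", "42")
        .setCreate(true)
        .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value42").map())
        .execute()
        .actionGet();
    try {
      client("server1")
          .prepareIndex("test", "type1", "42")
          .setCreate(true)
          .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "other").map())
          .execute()
          .actionGet();
      assert false : "second create of the same id should have failed";
    } catch (Exception e) {
      // expected: creating an already-existing document is rejected
    }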
  @Test
  @Slow
  public void testSnapshotOperations() throws Exception {
    startNode("server1", getClassDefaultSettings());

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // Translog tests

    logger.info("Creating index [{}]", "test");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    // create a mapping
    PutMappingResponse putMappingResponse =
        client("server1")
            .admin()
            .indices()
            .preparePutMapping("test")
            .setType("type1")
            .setSource(mappingSource())
            .execute()
            .actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

    // verify that mapping is there
    ClusterStateResponse clusterState =
        client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    // create two and delete the first
    logger.info("Indexing #1");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    logger.info("Indexing #2");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Deleting #1");
    client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    // do it again, it should be a no op
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (only translog should be populated)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    // verify that mapping is there
    clusterState = client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    logger.info("Getting #1, should not exists");
    GetResponse getResponse =
        client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));

    // Now flush and add some data (so we have index recovery as well)
    logger.info(
        "Flushing, so we have actual content in the index files (#2 should be in the index)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Indexing #3, so we have something in the translog as well");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test")))
        .actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
    FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info(
        "Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Deleting the index");
    client("server1").admin().indices().delete(deleteIndexRequest("test")).actionGet();
  }
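
The get assertions above compare getSourceAsString() against a source(id, value) helper that is not shown. A hypothetical sketch of such a helper; the exact JSON shape is an assumption, since the test only requires that indexing and the later comparison use the same string:

    // Hypothetical source(id, value) helper matching the calls above.
    private String source(String id, String nameValue) {
      return "{ \"type1\" : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
    }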
 @BeforeClass
 public void startNodes() {
   startNode("node1");
   startNode("node2");
   client = client("node1");
 }
  @Test
  public void testBroadcastOperations() throws IOException {
    startNode("server1");

    client("server1").admin().indices().prepareCreate("test").execute().actionGet(5000);

    logger.info("Running Cluster Health");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    client("server1")
        .index(indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    FlushResponse flushResponse =
        client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    assertThat(flushResponse.totalShards(), equalTo(10));
    assertThat(flushResponse.successfulShards(), equalTo(5));
    assertThat(flushResponse.failedShards(), equalTo(0));
    client("server1")
        .index(indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();
    RefreshResponse refreshResponse =
        client("server1").admin().indices().refresh(refreshRequest("test")).actionGet();
    assertThat(refreshResponse.totalShards(), equalTo(10));
    assertThat(refreshResponse.successfulShards(), equalTo(5));
    assertThat(refreshResponse.failedShards(), equalTo(0));

    logger.info("Count");
    // check count
    for (int i = 0; i < 5; i++) {
      // test successful
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(termQuery("_type", "type1"))
                      .operationThreading(BroadcastOperationThreading.NO_THREADS))
              .actionGet();
      assertThat(countResponse.count(), equalTo(2L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(5));
      assertThat(countResponse.failedShards(), equalTo(0));
    }

    for (int i = 0; i < 5; i++) {
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(termQuery("_type", "type1"))
                      .operationThreading(BroadcastOperationThreading.SINGLE_THREAD))
              .actionGet();
      assertThat(countResponse.count(), equalTo(2L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(5));
      assertThat(countResponse.failedShards(), equalTo(0));
    }

    for (int i = 0; i < 5; i++) {
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(termQuery("_type", "type1"))
                      .operationThreading(BroadcastOperationThreading.THREAD_PER_SHARD))
              .actionGet();
      assertThat(countResponse.count(), equalTo(2L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(5));
      assertThat(countResponse.failedShards(), equalTo(0));
    }

    for (int i = 0; i < 5; i++) {
      // test the failure case (simply a query that can't be parsed)
      CountResponse countResponse =
          client("server1")
              .count(
                  countRequest("test")
                      .query(Unicode.fromStringAsBytes("{ term : { _type : \"type1 } }")))
              .actionGet();

      assertThat(countResponse.count(), equalTo(0L));
      assertThat(countResponse.totalShards(), equalTo(5));
      assertThat(countResponse.successfulShards(), equalTo(0));
      assertThat(countResponse.failedShards(), equalTo(5));
      for (ShardOperationFailedException exp : countResponse.shardFailures()) {
        assertThat(exp.reason(), containsString("QueryParsingException"));
      }
    }
  }
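
The three success-case count loops above differ only in the BroadcastOperationThreading mode. A hedged sketch of folding them into one helper, using only the builders already shown; the helper name is an assumption:

    // Hedged sketch: the three identical count loops collapsed into a single
    // helper parameterized by the broadcast threading mode.
    private void assertCountAcrossShards(BroadcastOperationThreading threading) {
      for (int i = 0; i < 5; i++) {
        CountResponse countResponse =
            client("server1")
                .count(
                    countRequest("test")
                        .query(termQuery("_type", "type1"))
                        .operationThreading(threading))
                .actionGet();
        assertThat(countResponse.count(), equalTo(2L));
        assertThat(countResponse.totalShards(), equalTo(5));
        assertThat(countResponse.successfulShards(), equalTo(5));
        assertThat(countResponse.failedShards(), equalTo(0));
      }
    }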
 @BeforeClass
 public void createNodes() throws Exception {
   startNode("server1");
   startNode("server2");
   client = getClient();
 }