@Test
  @SuppressWarnings("squid:S2925")
  public void testThatMappingFromTemplateIsApplied() throws Exception {
    registry.counter(name("test", "cache-evictions")).inc();
    reportAndRefresh();

    // the cluster state may not reflect the refreshed index immediately; give it a moment
    Thread.sleep(200);
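    // fetch a trimmed cluster state: node info plus the dated index only, no routing table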
    ClusterStateResponse clusterStateResponse =
        client()
            .admin()
            .cluster()
            .prepareState()
            .setRoutingTable(false)
            .setLocal(false)
            .setNodes(true)
            .setIndices(indexWithDate)
            .execute()
            .actionGet();

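    // the index template should have mapped the counter "name" field as a not_analyzed string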
    org.assertj.core.api.Assertions.assertThat(
            clusterStateResponse.getState().getMetaData().getIndices().containsKey(indexWithDate))
        .isTrue();
    IndexMetaData indexMetaData =
        clusterStateResponse.getState().getMetaData().getIndices().get(indexWithDate);
    org.assertj.core.api.Assertions.assertThat(indexMetaData.getMappings().containsKey("counter"))
        .isTrue();
    Map<String, Object> properties =
        getAsMap(indexMetaData.mapping("counter").sourceAsMap(), "properties");
    Map<String, Object> mapping = getAsMap(properties, "name");
    org.assertj.core.api.Assertions.assertThat(mapping).containsKey("index");
    org.assertj.core.api.Assertions.assertThat(mapping.get("index").toString())
        .isEqualTo("not_analyzed");
  }

  @Test
  public void testJustMasterNode() throws Exception {
    logger.info("--> cleaning nodes");

    logger.info("--> starting 1 master node non data");
    internalCluster()
        .startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());

    logger.info("--> create an index");
    client().admin().indices().prepareCreate("test").execute().actionGet();

    logger.info("--> closing master node");
    internalCluster().closeNonSharedNodes(false);

    logger.info("--> starting 1 master node non data again");
    internalCluster()
        .startNode(settingsBuilder().put("node.data", false).put("gateway.type", "local").build());

    logger.info("--> waiting for test index to be created");
    ClusterHealthResponse health =
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setIndices("test")
            .execute()
            .actionGet();
    assertThat(health.isTimedOut(), equalTo(false));

    logger.info("--> verify we have an index");
    ClusterStateResponse clusterStateResponse =
        client().admin().cluster().prepareState().setIndices("test").execute().actionGet();
    assertThat(clusterStateResponse.getState().metaData().hasIndex("test"), equalTo(true));
  }

Example #3

  private MoveAllocationCommand getAllocationCommand() {
    String fromNodeId = null;
    String toNodeId = null;
    ShardRouting shardToBeMoved = null;
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
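    // pick a data node that owns at least one shard as the source, and any other data node as the target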
    for (RoutingNode routingNode : clusterStateResponse.getState().getRoutingNodes()) {
      if (routingNode.node().isDataNode()) {
        if (fromNodeId == null && routingNode.numberOfOwningShards() > 0) {
          fromNodeId = routingNode.nodeId();
          shardToBeMoved = routingNode.get(randomInt(routingNode.size() - 1));
        } else {
          toNodeId = routingNode.nodeId();
        }

        if (toNodeId != null && fromNodeId != null) {
          break;
        }
      }
    }

    assertNotNull(fromNodeId);
    assertNotNull(toNodeId);
    assertNotNull(shardToBeMoved);

    logger.info(
        "==> going to move shard [{}] from [{}] to [{}]", shardToBeMoved, fromNodeId, toNodeId);
    return new MoveAllocationCommand(shardToBeMoved.shardId(), fromNodeId, toNodeId);
  }

  @Test
  @ElasticsearchIndexes(
      indexes = {
        @ElasticsearchIndex(
            indexName = "library",
            forceCreate = true,
            settings = {
              @ElasticsearchSetting(name = "number_of_shards", value = "2"),
              @ElasticsearchSetting(name = "number_of_replicas", value = "1")
            }),
        @ElasticsearchIndex(indexName = "people")
      })
  public void testElasticsearchSettings() {

    // Check custom settings on node
    NodesInfoResponse infoResponse =
        adminClient.cluster().prepareNodesInfo("node0").setSettings(true).execute().actionGet();

    Settings nodeSettings = infoResponse.getAt(0).getSettings();
    assertEquals("false", nodeSettings.get("http.enabled"));
    assertEquals("zone_one", nodeSettings.get("node.zone"));

    // Check custom settings on index
    ClusterStateResponse response = adminClient.cluster().prepareState().execute().actionGet();

    Settings indexSettings = response.getState().metaData().index("library").settings();
    assertEquals("2", indexSettings.get("index.number_of_shards"));
    assertEquals("1", indexSettings.get("index.number_of_replicas"));

    // Check default settings
    indexSettings = response.getState().metaData().index("people").settings();
    assertEquals("1", indexSettings.get("index.number_of_shards"));
    assertEquals("0", indexSettings.get("index.number_of_replicas"));
  }

Example #5

 @Override
 public boolean hasIndex(@NotNull ValueTable valueTable) {
   ClusterStateResponse resp =
       opalSearchService.getClient().admin().cluster().prepareState().execute().actionGet();
   ImmutableOpenMap<String, MappingMetaData> mappings =
       resp.getState().metaData().index(getName()).mappings();
   return mappings.containsKey(getIndex(valueTable).getIndexName());
 }

Example #6

  @Test
  public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            settingsBuilder()
                .put(
                    SETTING_NUMBER_OF_SHARDS,
                    between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
                .put(SETTING_NUMBER_OF_REPLICAS, 0))
        .get();
    ensureGreen();

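    // a dry run must be acknowledged but must not actually move the shard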
    MoveAllocationCommand moveAllocationCommand = getAllocationCommand();

    assertAcked(
        client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand));

    // Check only the master's (latest) cluster state: the dry run made no changes, so we
    // cannot guarantee that all nodes hold the same cluster state version. We only know
    // there was nothing to change, hence no ack is needed for this update.
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    boolean found = false;
    for (ShardRouting shardRouting :
        clusterStateResponse
            .getState()
            .getRoutingNodes()
            .routingNodeIter(moveAllocationCommand.fromNode())) {
      // the shard that we wanted to move is still on the same node, as we had dryRun flag
      if (shardRouting.shardId().equals(moveAllocationCommand.shardId())) {
        assertThat(shardRouting.started(), equalTo(true));
        found = true;
        break;
      }
    }
    assertThat(found, equalTo(true));

    for (ShardRouting shardRouting :
        clusterStateResponse
            .getState()
            .getRoutingNodes()
            .routingNodeIter(moveAllocationCommand.toNode())) {
      if (shardRouting.shardId().equals(moveAllocationCommand.shardId())) {
        fail(
            "shard ["
                + shardRouting
                + "] shouldn't be on node ["
                + moveAllocationCommand.toNode()
                + "]");
      }
    }
  }

  public void testFastCloseAfterCreateContinuesCreateAfterOpen() {
    logger.info("--> creating test index that cannot be allocated");
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            Settings.settingsBuilder()
                .put("index.routing.allocation.include.tag", "no_such_node")
                .build())
        .get();

    ClusterHealthResponse health =
        client().admin().cluster().prepareHealth("test").setWaitForNodes(">=2").get();
    assertThat(health.isTimedOut(), equalTo(false));
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));

    client().admin().indices().prepareClose("test").get();

    logger.info("--> updating test index settings to allow allocation");
    client()
        .admin()
        .indices()
        .prepareUpdateSettings("test")
        .setSettings(
            Settings.settingsBuilder().put("index.routing.allocation.include.tag", "").build())
        .get();

    client().admin().indices().prepareOpen("test").get();

    logger.info("--> waiting for green status");
    ensureGreen();

    NumShards numShards = getNumShards("test");

    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        stateResponse.getState().metaData().index("test").getState(),
        equalTo(IndexMetaData.State.OPEN));
    assertThat(
        stateResponse.getState().routingTable().index("test").shards().size(),
        equalTo(numShards.numPrimaries));
    assertThat(
        stateResponse
            .getState()
            .routingTable()
            .index("test")
            .shardsWithState(ShardRoutingState.STARTED)
            .size(),
        equalTo(numShards.totalNumShards));

    logger.info("--> indexing a simple document");
    client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
  }

  private Table buildTable(RestRequest request, ClusterStateResponse state) {
    Table table = getTableWithHeader(request);
    DiscoveryNodes nodes = state.getState().nodes();

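    // one row only: the elected master's id, host name, address and name, or dashes if no master is elected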
    table.startRow();
    DiscoveryNode master = nodes.get(nodes.masterNodeId());
    if (master == null) {
      table.addCell("-");
      table.addCell("-");
      table.addCell("-");
      table.addCell("-");
    } else {
      table.addCell(master.getId());
      table.addCell(master.getHostName());
      table.addCell(master.getHostAddress());
      table.addCell(master.getName());
    }
    table.endRow();

    return table;
  }

  @Test
  @Slow
  public void testSnapshotOperations() throws Exception {
    startNode("server1", getClassDefaultSettings());

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // Translog tests

    logger.info("Creating index [{}]", "test");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    // create a mapping
    PutMappingResponse putMappingResponse =
        client("server1")
            .admin()
            .indices()
            .preparePutMapping("test")
            .setType("type1")
            .setSource(mappingSource())
            .execute()
            .actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

    // verify that mapping is there
    ClusterStateResponse clusterState =
        client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    // create two and delete the first
    logger.info("Indexing #1");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    logger.info("Indexing #2");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();

    // snapshot the index to the gateway
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Deleting #1");
    client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet();

    // snapshot the index to the gateway again
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    // do it again, it should be a no op
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (only translog should be populated)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    // verify that mapping is there
    clusterState = client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    logger.info("Getting #1, should not exists");
    GetResponse getResponse =
        client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));

    // Now flush and add some data (so we have index recovery as well)
    logger.info(
        "Flushing, so we have actual content in the index files (#2 should be in the index)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Indexing #3, so we have something in the translog as well");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test")))
        .actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
    FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info(
        "Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Deleting the index");
    client("server1").admin().indices().delete(deleteIndexRequest("test")).actionGet();
  }

  private Table buildTable(
      RestRequest req,
      ClusterStateResponse state,
      NodesInfoResponse nodesInfo,
      NodesStatsResponse nodesStats) {
    boolean fullId = req.paramAsBoolean("full_id", false);

    DiscoveryNodes nodes = state.getState().nodes();
    String masterId = nodes.masterNodeId();
    Table table = getTableWithHeader(req);

    for (DiscoveryNode node : nodes) {
      NodeInfo info = nodesInfo.getNodesMap().get(node.id());
      NodeStats stats = nodesStats.getNodesMap().get(node.id());

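      // any of these can be null if the node reported no stats; every cell below is null-guarded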
      JvmInfo jvmInfo = info == null ? null : info.getJvm();
      JvmStats jvmStats = stats == null ? null : stats.getJvm();
      FsInfo fsInfo = stats == null ? null : stats.getFs();
      OsStats osStats = stats == null ? null : stats.getOs();
      ProcessStats processStats = stats == null ? null : stats.getProcess();
      NodeIndicesStats indicesStats = stats == null ? null : stats.getIndices();

      table.startRow();

      table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
      table.addCell(info == null ? null : info.getProcess().getId());
      table.addCell(node.getHostName());
      table.addCell(node.getHostAddress());
      if (node.address() instanceof InetSocketTransportAddress) {
        table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
      } else {
        table.addCell("-");
      }

      table.addCell(node.getVersion().number());
      table.addCell(info == null ? null : info.getBuild().shortHash());
      table.addCell(jvmInfo == null ? null : jvmInfo.version());
      table.addCell(fsInfo == null ? null : fsInfo.getTotal().getAvailable());
      table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsed());
      table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsedPercent());
      table.addCell(jvmInfo == null ? null : jvmInfo.getMem().getHeapMax());
      table.addCell(
          osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsed());
      table.addCell(
          osStats == null
              ? null
              : osStats.getMem() == null ? null : osStats.getMem().getUsedPercent());
      table.addCell(
          osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getTotal());
      table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors());
      table.addCell(
          processStats == null
              ? null
              : calculatePercentage(
                  processStats.getOpenFileDescriptors(), processStats.getMaxFileDescriptors()));
      table.addCell(processStats == null ? null : processStats.getMaxFileDescriptors());

      table.addCell(
          osStats == null ? null : String.format(Locale.ROOT, "%.2f", osStats.getLoadAverage()));
      table.addCell(jvmStats == null ? null : jvmStats.getUptime());
      table.addCell(node.clientNode() ? "c" : node.dataNode() ? "d" : "-");
      table.addCell(
          masterId == null
              ? "x"
              : masterId.equals(node.id()) ? "*" : node.masterNode() ? "m" : "-");
      table.addCell(node.name());

      CompletionStats completionStats =
          indicesStats == null ? null : indicesStats.getCompletion();
      table.addCell(completionStats == null ? null : completionStats.getSize());

      FieldDataStats fdStats = indicesStats == null ? null : indicesStats.getFieldData();
      table.addCell(fdStats == null ? null : fdStats.getMemorySize());
      table.addCell(fdStats == null ? null : fdStats.getEvictions());

      QueryCacheStats fcStats = indicesStats == null ? null : indicesStats.getQueryCache();
      table.addCell(fcStats == null ? null : fcStats.getMemorySize());
      table.addCell(fcStats == null ? null : fcStats.getEvictions());

      RequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getRequestCache();
      table.addCell(qcStats == null ? null : qcStats.getMemorySize());
      table.addCell(qcStats == null ? null : qcStats.getEvictions());
      table.addCell(qcStats == null ? null : qcStats.getHitCount());
      table.addCell(qcStats == null ? null : qcStats.getMissCount());

      FlushStats flushStats = indicesStats == null ? null : indicesStats.getFlush();
      table.addCell(flushStats == null ? null : flushStats.getTotal());
      table.addCell(flushStats == null ? null : flushStats.getTotalTime());

      GetStats getStats = indicesStats == null ? null : indicesStats.getGet();
      table.addCell(getStats == null ? null : getStats.current());
      table.addCell(getStats == null ? null : getStats.getTime());
      table.addCell(getStats == null ? null : getStats.getCount());
      table.addCell(getStats == null ? null : getStats.getExistsTime());
      table.addCell(getStats == null ? null : getStats.getExistsCount());
      table.addCell(getStats == null ? null : getStats.getMissingTime());
      table.addCell(getStats == null ? null : getStats.getMissingCount());

      IndexingStats indexingStats = indicesStats == null ? null : indicesStats.getIndexing();
      table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCurrent());
      table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteTime());
      table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCount());
      table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCurrent());
      table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexTime());
      table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCount());
      table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexFailedCount());

      MergeStats mergeStats = indicesStats == null ? null : indicesStats.getMerge();
      table.addCell(mergeStats == null ? null : mergeStats.getCurrent());
      table.addCell(mergeStats == null ? null : mergeStats.getCurrentNumDocs());
      table.addCell(mergeStats == null ? null : mergeStats.getCurrentSize());
      table.addCell(mergeStats == null ? null : mergeStats.getTotal());
      table.addCell(mergeStats == null ? null : mergeStats.getTotalNumDocs());
      table.addCell(mergeStats == null ? null : mergeStats.getTotalSize());
      table.addCell(mergeStats == null ? null : mergeStats.getTotalTime());

      PercolateStats percolateStats = indicesStats == null ? null : indicesStats.getPercolate();
      table.addCell(percolateStats == null ? null : percolateStats.getCurrent());
      table.addCell(percolateStats == null ? null : percolateStats.getMemorySize());
      table.addCell(percolateStats == null ? null : percolateStats.getNumQueries());
      table.addCell(percolateStats == null ? null : percolateStats.getTime());
      table.addCell(percolateStats == null ? null : percolateStats.getCount());

      RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh();
      table.addCell(refreshStats == null ? null : refreshStats.getTotal());
      table.addCell(refreshStats == null ? null : refreshStats.getTotalTime());

      ScriptStats scriptStats = stats == null ? null : stats.getScriptStats();
      table.addCell(scriptStats == null ? null : scriptStats.getCompilations());
      table.addCell(scriptStats == null ? null : scriptStats.getCacheEvictions());

      SearchStats searchStats = indicesStats == null ? null : indicesStats.getSearch();
      table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCurrent());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchTime());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCount());
      table.addCell(searchStats == null ? null : searchStats.getOpenContexts());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime());
      table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount());

      SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments();
      table.addCell(segmentsStats == null ? null : segmentsStats.getCount());
      table.addCell(segmentsStats == null ? null : segmentsStats.getMemory());
      table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory());
      table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMaxMemory());
      table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory());
      table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory());

      SuggestStats suggestStats = indicesStats == null ? null : indicesStats.getSuggest();
      table.addCell(suggestStats == null ? null : suggestStats.getCurrent());
      table.addCell(suggestStats == null ? null : suggestStats.getTime());
      table.addCell(suggestStats == null ? null : suggestStats.getCount());

      table.endRow();
    }

    return table;
  }

  private Table buildTable(
      RestRequest req,
      ClusterStateResponse state,
      NodesInfoResponse nodesInfo,
      NodesStatsResponse nodesStats) {
    final String[] threadPools = req.paramAsStringArray("thread_pool_patterns", new String[] {"*"});
    final DiscoveryNodes nodes = state.getState().nodes();
    final Table table = getTableWithHeader(req);

    // collect all thread pool names that we see across the nodes
    final Set<String> candidates = new HashSet<>();
    for (final NodeStats nodeStats : nodesStats.getNodes()) {
      for (final ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) {
        candidates.add(threadPoolStats.getName());
      }
    }

    // collect all thread pool names that match the specified thread pool patterns
    final Set<String> included = new HashSet<>();
    for (final String candidate : candidates) {
      if (Regex.simpleMatch(threadPools, candidate)) {
        included.add(candidate);
      }
    }

    for (final DiscoveryNode node : nodes) {
      final NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
      final NodeStats stats = nodesStats.getNodesMap().get(node.getId());

      final Map<String, ThreadPoolStats.Stats> poolThreadStats;
      final Map<String, ThreadPool.Info> poolThreadInfo;

      if (stats == null) {
        poolThreadStats = Collections.emptyMap();
        poolThreadInfo = Collections.emptyMap();
      } else {
        // we use a sorted map to ensure that thread pools are sorted by name
        poolThreadStats = new TreeMap<>();
        poolThreadInfo = new HashMap<>();

        ThreadPoolStats threadPoolStats = stats.getThreadPool();
        for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) {
          poolThreadStats.put(threadPoolStat.getName(), threadPoolStat);
        }
        if (info != null) {
          for (ThreadPool.Info threadPoolInfo : info.getThreadPool()) {
            poolThreadInfo.put(threadPoolInfo.getName(), threadPoolInfo);
          }
        }
      }
      for (Map.Entry<String, ThreadPoolStats.Stats> entry : poolThreadStats.entrySet()) {

        if (!included.contains(entry.getKey())) continue;

        table.startRow();

        table.addCell(node.getName());
        table.addCell(node.getId());
        table.addCell(node.getEphemeralId());
        table.addCell(info == null ? null : info.getProcess().getId());
        table.addCell(node.getHostName());
        table.addCell(node.getHostAddress());
        table.addCell(node.getAddress().address().getPort());
        final ThreadPoolStats.Stats poolStats = entry.getValue();
        final ThreadPool.Info poolInfo = poolThreadInfo.get(entry.getKey());

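        // sizing limits come from the pool info when present; otherwise the cells stay empty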
        Long maxQueueSize = null;
        String keepAlive = null;
        Integer minThreads = null;
        Integer maxThreads = null;

        if (poolInfo != null) {
          if (poolInfo.getQueueSize() != null) {
            maxQueueSize = poolInfo.getQueueSize().singles();
          }
          if (poolInfo.getKeepAlive() != null) {
            keepAlive = poolInfo.getKeepAlive().toString();
          }
          if (poolInfo.getMin() >= 0) {
            minThreads = poolInfo.getMin();
          }
          if (poolInfo.getMax() >= 0) {
            maxThreads = poolInfo.getMax();
          }
        }

        table.addCell(entry.getKey());
        table.addCell(poolInfo == null ? null : poolInfo.getThreadPoolType().getType());
        table.addCell(poolStats == null ? null : poolStats.getActive());
        table.addCell(poolStats == null ? null : poolStats.getThreads());
        table.addCell(poolStats == null ? null : poolStats.getQueue());
        table.addCell(maxQueueSize);
        table.addCell(poolStats == null ? null : poolStats.getRejected());
        table.addCell(poolStats == null ? null : poolStats.getLargest());
        table.addCell(poolStats == null ? null : poolStats.getCompleted());
        table.addCell(minThreads);
        table.addCell(maxThreads);
        table.addCell(keepAlive);

        table.endRow();
      }
    }

    return table;
  }

  public void testDisconnectsWhileRecovering() throws Exception {
    final String indexName = "test";
    final Settings nodeSettings =
        Settings.builder()
            .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms")
            .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "1s")
            .put(
                MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE_SETTING.getKey(),
                false) // restarted recoveries will delete temp files and write them again
            .build();
    // start a master node
    internalCluster().startNode(nodeSettings);

    final String blueNodeName =
        internalCluster()
            .startNode(Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build());
    final String redNodeName =
        internalCluster()
            .startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build());

    ClusterHealthResponse response =
        client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get();
    assertThat(response.isTimedOut(), is(false));

    client()
        .admin()
        .indices()
        .prepareCreate(indexName)
        .setSettings(
            Settings.builder()
                .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
        .get();

    List<IndexRequestBuilder> requests = new ArrayList<>();
    int numDocs = scaledRandomIntBetween(25, 250);
    for (int i = 0; i < numDocs; i++) {
      requests.add(client().prepareIndex(indexName, "type").setSource("{}"));
    }
    indexRandom(true, requests);
    ensureSearchable(indexName);

    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    final String blueNodeId =
        internalCluster().getInstance(ClusterService.class, blueNodeName).localNode().getId();

    assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty());

    SearchResponse searchResponse = client().prepareSearch(indexName).get();
    assertHitCount(searchResponse, numDocs);

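    // transport actions a peer recovery can be blocked on; one is chosen at random below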
    String[] recoveryActions =
        new String[] {
          PeerRecoverySourceService.Actions.START_RECOVERY,
          PeerRecoveryTargetService.Actions.FILES_INFO,
          PeerRecoveryTargetService.Actions.FILE_CHUNK,
          PeerRecoveryTargetService.Actions.CLEAN_FILES,
          // RecoveryTarget.Actions.TRANSLOG_OPS, <-- may not be sent if already flushed
          PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG,
          PeerRecoveryTargetService.Actions.FINALIZE
        };
    final String recoveryActionToBlock = randomFrom(recoveryActions);
    final boolean dropRequests = randomBoolean();
    logger.info(
        "--> will {} between blue & red on [{}]",
        dropRequests ? "drop requests" : "break connection",
        recoveryActionToBlock);

    MockTransportService blueMockTransportService =
        (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName);
    MockTransportService redMockTransportService =
        (MockTransportService) internalCluster().getInstance(TransportService.class, redNodeName);
    TransportService redTransportService =
        internalCluster().getInstance(TransportService.class, redNodeName);
    TransportService blueTransportService =
        internalCluster().getInstance(TransportService.class, blueNodeName);
    final CountDownLatch requestBlocked = new CountDownLatch(1);

    blueMockTransportService.addDelegate(
        redTransportService,
        new RecoveryActionBlocker(
            dropRequests,
            recoveryActionToBlock,
            blueMockTransportService.original(),
            requestBlocked));
    redMockTransportService.addDelegate(
        blueTransportService,
        new RecoveryActionBlocker(
            dropRequests,
            recoveryActionToBlock,
            redMockTransportService.original(),
            requestBlocked));

    logger.info("--> starting recovery from blue to red");
    client()
        .admin()
        .indices()
        .prepareUpdateSettings(indexName)
        .setSettings(
            Settings.builder()
                .put(
                    IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color",
                    "red,blue")
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))
        .get();

    requestBlocked.await();

    logger.info("--> stopping to block recovery");
    blueMockTransportService.clearAllRules();
    redMockTransportService.clearAllRules();

    ensureGreen();
    searchResponse = client(redNodeName).prepareSearch(indexName).setPreference("_local").get();
    assertHitCount(searchResponse, numDocs);
  }

  @Test
  public void testMetaWrittenWhenIndexIsClosed() throws Exception {
    String masterNode = startMasterNode();
    String redNodeDataPath = createTempDir().toString();
    String redNode = startDataNode("red", redNodeDataPath);
    String blueNode = startDataNode("blue");
    // create red_index, allocated only on the red node
    client()
        .admin()
        .cluster()
        .health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3"))
        .get();
    assertAcked(
        prepareCreate("red_index")
            .setSettings(
                Settings.builder()
                    .put("index.number_of_replicas", 0)
                    .put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")));
    index(
        "red_index",
        "doc",
        "1",
        jsonBuilder().startObject().field("text", "some text").endObject());

    ensureGreen();
    assertIndexNotInMetaState(blueNode, "red_index");
    assertIndexInMetaState(redNode, "red_index");
    assertIndexInMetaState(masterNode, "red_index");

    waitForConcreteMappingsOnAll("red_index", "doc", "text");
    client().admin().indices().prepareClose("red_index").get();
    // verify the cluster state records the index as closed
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        clusterStateResponse.getState().getMetaData().index("red_index").getState().name(),
        equalTo(IndexMetaData.State.CLOSE.name()));

    // restart master with empty data folder and maybe red node
    boolean restartRedNode = randomBoolean();
    // at this point the red_index data lives only on the red node
    if (restartRedNode) {
      stopNode(redNode);
    }
    ((InternalTestCluster) cluster()).stopCurrentMasterNode();
    masterNode = startMasterNode();
    if (restartRedNode) {
      redNode = startDataNode("red", redNodeDataPath);
    }

    ensureGreen("red_index");
    assertIndexNotInMetaState(blueNode, "red_index");
    assertIndexInMetaState(redNode, "red_index");
    assertIndexInMetaState(masterNode, "red_index");
    clusterStateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        clusterStateResponse.getState().getMetaData().index("red_index").getState().name(),
        equalTo(IndexMetaData.State.CLOSE.name()));

    // open the index again
    client().admin().indices().prepareOpen("red_index").get();
    clusterStateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        clusterStateResponse.getState().getMetaData().index("red_index").getState().name(),
        equalTo(IndexMetaData.State.OPEN.name()));
    // restart again
    ensureGreen();
    if (restartRedNode) {
      stopNode(redNode);
    }
    ((InternalTestCluster) cluster()).stopCurrentMasterNode();
    masterNode = startMasterNode();
    if (restartRedNode) {
      redNode = startDataNode("red", redNodeDataPath);
    }
    ensureGreen("red_index");
    assertIndexNotInMetaState(blueNode, "red_index");
    assertIndexInMetaState(redNode, "red_index");
    assertIndexInMetaState(masterNode, "red_index");
    clusterStateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        clusterStateResponse.getState().getMetaData().index("red_index").getState().name(),
        equalTo(IndexMetaData.State.OPEN.name()));
    assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists());
  }

  public void testShardActiveElseWhere() throws Exception {
    List<String> nodes = internalCluster().startNodesAsync(2).get();

    final String masterNode = internalCluster().getMasterName();
    final String nonMasterNode = nodes.get(0).equals(masterNode) ? nodes.get(1) : nodes.get(0);

    final String masterId = internalCluster().clusterService(masterNode).localNode().getId();
    final String nonMasterId = internalCluster().clusterService(nonMasterNode).localNode().getId();

    final int numShards = scaledRandomIntBetween(2, 10);
    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)));
    ensureGreen("test");

    waitNoPendingTasksOnAll();
    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    final Index index = stateResponse.getState().metaData().index("test").getIndex();
    RoutingNode routingNode = stateResponse.getState().getRoutingNodes().node(nonMasterId);
    final int[] node2Shards = new int[routingNode.numberOfOwningShards()];
    int i = 0;
    for (ShardRouting shardRouting : routingNode) {
      node2Shards[i] = shardRouting.shardId().id();
      i++;
    }
    logger.info("Node [{}] has shards: {}", nonMasterNode, Arrays.toString(node2Shards));

    // disable relocations when we do this, to make sure the shards are not relocated from node2
    // due to rebalancing, and delete its content
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(
            Settings.builder()
                .put(
                    EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                    EnableAllocationDecider.Rebalance.NONE))
        .get();
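    // publish a fake cluster state on the non-master node claiming every shard is started on the master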
    internalCluster()
        .getInstance(ClusterService.class, nonMasterNode)
        .submitStateUpdateTask(
            "test",
            new ClusterStateUpdateTask(Priority.IMMEDIATE) {
              @Override
              public ClusterState execute(ClusterState currentState) throws Exception {
                IndexRoutingTable.Builder indexRoutingTableBuilder =
                    IndexRoutingTable.builder(index);
                for (int i = 0; i < numShards; i++) {
                  indexRoutingTableBuilder.addIndexShard(
                      new IndexShardRoutingTable.Builder(new ShardId(index, i))
                          .addShard(
                              TestShardRouting.newShardRouting(
                                  "test", i, masterId, true, ShardRoutingState.STARTED))
                          .build());
                }
                return ClusterState.builder(currentState)
                    .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder).build())
                    .build();
              }

              @Override
              public boolean runOnlyOnMaster() {
                return false;
              }

              @Override
              public void onFailure(String source, Exception e) {}
            });
    waitNoPendingTasksOnAll();
    logger.info("Checking if shards aren't removed");
    for (int shard : node2Shards) {
      assertTrue(waitForShardDeletion(nonMasterNode, index, shard));
    }
  }

  @Test
  public void testNoMasterActions_writeMasterBlock() throws Exception {
    Settings settings =
        settingsBuilder()
            .put("discovery.type", "zen")
            .put("action.auto_create_index", false)
            .put("discovery.zen.minimum_master_nodes", 2)
            .put("discovery.zen.ping_timeout", "200ms")
            .put("discovery.initial_state_timeout", "500ms")
            .put(DiscoverySettings.NO_MASTER_BLOCK, "write")
            .build();

    internalCluster().startNode(settings);
    // start a second node, create indices, then stop a data node so the no-master block kicks in
    internalCluster().startNode(settings);
    prepareCreate("test1").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).get();
    prepareCreate("test2")
        .setSettings(
            IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .get();
    client().admin().cluster().prepareHealth("_all").setWaitForGreenStatus().get();
    client().prepareIndex("test1", "type1", "1").setSource("field", "value1").get();
    client().prepareIndex("test2", "type1", "1").setSource("field", "value1").get();
    refresh();

    ensureSearchable("test1", "test2");

    ClusterStateResponse clusterState = client().admin().cluster().prepareState().get();
    logger.info("Cluster state:\n" + clusterState.getState().prettyPrint());

    internalCluster().stopRandomDataNode();
    assertThat(
        awaitBusy(
            new Predicate<Object>() {
              public boolean apply(Object o) {
                ClusterState state =
                    client().admin().cluster().prepareState().setLocal(true).get().getState();
                return state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID);
              }
            }),
        equalTo(true));

    GetResponse getResponse = client().prepareGet("test1", "type1", "1").get();
    assertExists(getResponse);

    CountResponse countResponse = client().prepareCount("test1").get();
    assertHitCount(countResponse, 1L);

    SearchResponse searchResponse = client().prepareSearch("test1").get();
    assertHitCount(searchResponse, 1L);

    countResponse = client().prepareCount("test2").get();
    assertThat(countResponse.getTotalShards(), equalTo(2));
    assertThat(countResponse.getSuccessfulShards(), equalTo(1));

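    // reads still succeed under the write-only block; the writes below must fail with a ClusterBlockException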
    TimeValue timeout = TimeValue.timeValueMillis(200);
    long now = System.currentTimeMillis();
    try {
      client()
          .prepareUpdate("test1", "type1", "1")
          .setDoc("field", "value2")
          .setTimeout(timeout)
          .get();
      fail("Expected ClusterBlockException");
    } catch (ClusterBlockException e) {
      assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
      assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
    }

    now = System.currentTimeMillis();
    try {
      client()
          .prepareIndex("test1", "type1", "1")
          .setSource(XContentFactory.jsonBuilder().startObject().endObject())
          .setTimeout(timeout)
          .get();
      fail("Expected ClusterBlockException");
    } catch (ClusterBlockException e) {
      assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
      assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
    }

    internalCluster().startNode(settings);
    client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
  }

  @Test
  public void testTwoNodesSingleDoc() throws Exception {
    logger.info("--> cleaning nodes");

    logger.info("--> starting 2 nodes");
    internalCluster().startNode(settingsBuilder().put("gateway.type", "local").build());
    internalCluster().startNode(settingsBuilder().put("gateway.type", "local").build());

    logger.info("--> indexing a simple document");
    client()
        .prepareIndex("test", "type1", "1")
        .setSource("field1", "value1")
        .setRefresh(true)
        .execute()
        .actionGet();

    logger.info("--> waiting for green status");
    ClusterHealthResponse health =
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("2")
            .execute()
            .actionGet();
    assertThat(health.isTimedOut(), equalTo(false));

    logger.info("--> verify 1 doc in the index");
    for (int i = 0; i < 10; i++) {
      assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
    }

    logger.info("--> closing test index...");
    client().admin().indices().prepareClose("test").execute().actionGet();

    ClusterStateResponse stateResponse =
        client().admin().cluster().prepareState().execute().actionGet();
    assertThat(
        stateResponse.getState().metaData().index("test").state(),
        equalTo(IndexMetaData.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), nullValue());

    logger.info("--> opening the index...");
    client().admin().indices().prepareOpen("test").execute().actionGet();

    logger.info("--> waiting for green status");
    health =
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("2")
            .execute()
            .actionGet();
    assertThat(health.isTimedOut(), equalTo(false));

    logger.info("--> verify 1 doc in the index");
    assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
    for (int i = 0; i < 10; i++) {
      assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
    }
  }

  public void testSimpleOpenClose() {
    logger.info("--> creating test index");
    createIndex("test");

    logger.info("--> waiting for green status");
    ensureGreen();

    NumShards numShards = getNumShards("test");

    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        stateResponse.getState().metaData().index("test").getState(),
        equalTo(IndexMetaData.State.OPEN));
    assertThat(
        stateResponse.getState().routingTable().index("test").shards().size(),
        equalTo(numShards.numPrimaries));
    assertThat(
        stateResponse
            .getState()
            .routingTable()
            .index("test")
            .shardsWithState(ShardRoutingState.STARTED)
            .size(),
        equalTo(numShards.totalNumShards));

    logger.info("--> indexing a simple document");
    client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();

    logger.info("--> closing test index...");
    CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").get();
    assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));

    stateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        stateResponse.getState().metaData().index("test").getState(),
        equalTo(IndexMetaData.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), nullValue());

    logger.info("--> trying to index into a closed index ...");
    try {
      client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
      fail("expected IndexClosedException");
    } catch (IndexClosedException e) {
      // all is well
    }

    logger.info("--> opening index...");
    OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").get();
    assertThat(openIndexResponse.isAcknowledged(), equalTo(true));

    logger.info("--> waiting for green status");
    ensureGreen();

    stateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        stateResponse.getState().metaData().index("test").getState(),
        equalTo(IndexMetaData.State.OPEN));

    assertThat(
        stateResponse.getState().routingTable().index("test").shards().size(),
        equalTo(numShards.numPrimaries));
    assertThat(
        stateResponse
            .getState()
            .routingTable()
            .index("test")
            .shardsWithState(ShardRoutingState.STARTED)
            .size(),
        equalTo(numShards.totalNumShards));

    logger.info("--> indexing a simple document");
    client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
  }

  /**
   * Tests corruption that happens on the network layer, verifying that the primary does not get
   * affected by corruption on the way to the replica. The file on disk stays uncorrupted.
   */
  public void testCorruptionOnNetworkLayer() throws ExecutionException, InterruptedException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    if (cluster().numDataNodes() < 3) {
      internalCluster()
          .startNode(
              Settings.builder()
                  .put(Node.NODE_DATA_SETTING.getKey(), true)
                  .put(Node.NODE_MASTER_SETTING.getKey(), false));
    }
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
      if (stat.getNode().isDataNode()) {
        dataNodeStats.add(stat);
      }
    }

    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);

    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                    .put(
                        IndexMetaData.SETTING_NUMBER_OF_SHARDS,
                        between(1, 4)) // don't go overboard here, it must recover fast
                    // This does corrupt files on the replica, so we can't check:
                    .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                    .put(
                        "index.routing.allocation.include._name", primariesNode.getNode().getName())
                    .put(
                        EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                        EnableAllocationDecider.Rebalance.NONE)));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
      builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(
        client()
            .admin()
            .indices()
            .prepareFlush()
            .setForce(true)
            .setWaitIfOngoing(true)
            .execute()
            .actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final boolean truncate = randomBoolean();
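    // corrupt file chunks in flight: either truncate the chunk by one byte or flip a random byte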
    for (NodeStats dataNode : dataNodeStats) {
      MockTransportService mockTransportService =
          ((MockTransportService)
              internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
      mockTransportService.addDelegate(
          internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
          new MockTransportService.DelegateTransport(mockTransportService.original()) {

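            // intercept outgoing recovery file chunks addressed to the unlucky node
            // and corrupt them in flight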
            @Override
            public void sendRequest(
                DiscoveryNode node,
                long requestId,
                String action,
                TransportRequest request,
                TransportRequestOptions options)
                throws IOException, TransportException {
              if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
                RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                if (truncate && req.length() > 1) {
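                  // truncate: drop the last byte so the target receives less data
                  // than the chunk header advertises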
                  BytesRef bytesRef = req.content().toBytesRef();
                  BytesArray array =
                      new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
                  request =
                      new RecoveryFileChunkRequest(
                          req.recoveryId(),
                          req.shardId(),
                          req.metadata(),
                          req.position(),
                          array,
                          req.lastChunk(),
                          req.totalTranslogOps(),
                          req.sourceThrottleTimeInNanos());
                } else {
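                  // sanity check that toBytesRef() exposes the internal array rather
                  // than a copy; otherwise flipping a byte below would be a no-op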
                  assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes
                      : "no internal reference!!";
                  final byte[] array = req.content().toBytesRef().bytes;
                  int i = randomIntBetween(0, req.content().length() - 1);
                  array[i] = (byte) ~array[i]; // flip one byte in the content
                }
              }
              super.sendRequest(node, requestId, action, request, options);
            }
          });
    }

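    // add a replica and open allocation back up; the resulting recovery streams its
    // file chunks through the corrupting transport delegates installed above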
    Settings build =
        Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
            .put("index.routing.allocation.include._name", "*")
            .build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    ClusterHealthResponse actionGet =
        client()
            .admin()
            .cluster()
            .health(Requests.clusterHealthRequest("test").waitForGreenStatus())
            .actionGet();
    if (actionGet.isTimedOut()) {
      logger.info(
          "ensureGreen timed out, cluster state:\n{}\n{}",
          client().admin().cluster().prepareState().get().getState().prettyPrint(),
          client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
      assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
    }
    // we are green, so the primaries were not corrupted.
    // ensure that no shard is actually allocated on the unlucky node
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    for (IndexShardRoutingTable table :
        clusterStateResponse.getState().getRoutingTable().index("test")) {
      for (ShardRouting routing : table) {
        if (unluckyNode.getNode().getId().equals(routing.currentNodeId())) {
          assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
          assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
        }
      }
    }
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
      SearchResponse response = client().prepareSearch().setSize(numDocs).get();
      assertHitCount(response, numDocs);
    }
  }
  @Test
  public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
    String masterNode = startMasterNode();
    String redNodeDataPath = createTempDir().toString();
    String redNode = startDataNode("red", redNodeDataPath);
    // create red_index, allocated to the red node
    client()
        .admin()
        .cluster()
        .health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2"))
        .get();
    assertAcked(
        prepareCreate("red_index")
            .setSettings(
                Settings.builder()
                    .put("index.number_of_replicas", 0)
                    .put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")));
    index(
        "red_index",
        "doc",
        "1",
        jsonBuilder().startObject().field("text", "some text").endObject());

    logger.info("--> wait for green red_index");
    ensureGreen();
    logger.info("--> wait for meta state written for red_index");
    assertIndexInMetaState(redNode, "red_index");
    assertIndexInMetaState(masterNode, "red_index");

    waitForConcreteMappingsOnAll("red_index", "doc", "text");

    logger.info("--> close red_index");
    client().admin().indices().prepareClose("red_index").get();
    // verify the cluster state now reports the index as closed
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        clusterStateResponse.getState().getMetaData().index("red_index").getState().name(),
        equalTo(IndexMetaData.State.CLOSE.name()));

    logger.info("--> restart red node");
    stopNode(redNode);
    redNode = startDataNode("red", redNodeDataPath);
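    // update the mapping while the index is closed; this meta data change must still
    // be persisted on both the master and the data node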
    client()
        .admin()
        .indices()
        .preparePutMapping("red_index")
        .setType("doc")
        .setSource(
            jsonBuilder()
                .startObject()
                .startObject("properties")
                .startObject("integer_field")
                .field("type", "integer")
                .endObject()
                .endObject()
                .endObject())
        .get();

    GetMappingsResponse getMappingsResponse =
        client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get();
    assertNotNull(
        ((LinkedHashMap)
                (getMappingsResponse
                    .getMappings()
                    .get("red_index")
                    .get("doc")
                    .getSourceAsMap()
                    .get("properties")))
            .get("integer_field"));
    // restart the master node with an empty data folder
    ((InternalTestCluster) cluster()).stopCurrentMasterNode();
    masterNode = startMasterNode();

    ensureGreen("red_index");
    assertIndexInMetaState(redNode, "red_index");
    assertIndexInMetaState(masterNode, "red_index");
    clusterStateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        clusterStateResponse.getState().getMetaData().index("red_index").getState().name(),
        equalTo(IndexMetaData.State.CLOSE.name()));
    getMappingsResponse =
        client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get();
    assertNotNull(
        ((LinkedHashMap)
                (getMappingsResponse
                    .getMappings()
                    .get("red_index")
                    .get("doc")
                    .getSourceAsMap()
                    .get("properties")))
            .get("integer_field"));
  }
  @Test
  public void testSimpleOpenClose() throws Exception {
    logger.info("--> starting 2 nodes");
    internalCluster()
        .startNodesAsync(2, settingsBuilder().put("gateway.type", "local").build())
        .get();

    logger.info("--> creating test index");
    createIndex("test");

    NumShards test = getNumShards("test");

    logger.info("--> waiting for green status");
    ensureGreen();

    ClusterStateResponse stateResponse =
        client().admin().cluster().prepareState().execute().actionGet();
    assertThat(
        stateResponse.getState().metaData().index("test").state(),
        equalTo(IndexMetaData.State.OPEN));
    assertThat(
        stateResponse.getState().routingTable().index("test").shards().size(),
        equalTo(test.numPrimaries));
    assertThat(
        stateResponse
            .getState()
            .routingTable()
            .index("test")
            .shardsWithState(ShardRoutingState.STARTED)
            .size(),
        equalTo(test.totalNumShards));

    logger.info("--> indexing a simple document");
    client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();

    logger.info("--> closing test index...");
    client().admin().indices().prepareClose("test").execute().actionGet();

    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(
        stateResponse.getState().metaData().index("test").state(),
        equalTo(IndexMetaData.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), nullValue());

    logger.info("--> verifying that the state is green");
    ensureGreen();

    logger.info("--> trying to index into a closed index ...");
    try {
      client()
          .prepareIndex("test", "type1", "1")
          .setSource("field1", "value1")
          .setTimeout("1s")
          .execute()
          .actionGet();
      fail();
    } catch (IndexClosedException e) {
      // all is well
    }

    logger.info("--> creating another index (test2) by indexing into it");
    client()
        .prepareIndex("test2", "type1", "1")
        .setSource("field1", "value1")
        .execute()
        .actionGet();
    logger.info("--> verifying that the state is green");
    ensureGreen();

    logger.info("--> opening the first index again...");
    client().admin().indices().prepareOpen("test").execute().actionGet();

    logger.info("--> verifying that the state is green");
    ensureGreen();

    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(
        stateResponse.getState().metaData().index("test").state(),
        equalTo(IndexMetaData.State.OPEN));
    assertThat(
        stateResponse.getState().routingTable().index("test").shards().size(),
        equalTo(test.numPrimaries));
    assertThat(
        stateResponse
            .getState()
            .routingTable()
            .index("test")
            .shardsWithState(ShardRoutingState.STARTED)
            .size(),
        equalTo(test.totalNumShards));

    logger.info("--> trying to get the indexed document on the first index");
    GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
    assertThat(getResponse.isExists(), equalTo(true));

    logger.info("--> closing test index...");
    client().admin().indices().prepareClose("test").execute().actionGet();
    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(
        stateResponse.getState().metaData().index("test").state(),
        equalTo(IndexMetaData.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), nullValue());

    logger.info("--> restarting nodes...");
    internalCluster().fullRestart();
    logger.info("--> waiting for two nodes and green status");
    ensureGreen();

    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(
        stateResponse.getState().metaData().index("test").state(),
        equalTo(IndexMetaData.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), nullValue());

    logger.info("--> trying to index into a closed index ...");
    try {
      client()
          .prepareIndex("test", "type1", "1")
          .setSource("field1", "value1")
          .setTimeout("1s")
          .execute()
          .actionGet();
      fail();
    } catch (IndexClosedException e) {
      // all is well
    }

    logger.info("--> opening index...");
    client().admin().indices().prepareOpen("test").execute().actionGet();

    logger.info("--> waiting for green status");
    ensureGreen();

    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(
        stateResponse.getState().metaData().index("test").state(),
        equalTo(IndexMetaData.State.OPEN));
    assertThat(
        stateResponse.getState().routingTable().index("test").shards().size(),
        equalTo(test.numPrimaries));
    assertThat(
        stateResponse
            .getState()
            .routingTable()
            .index("test")
            .shardsWithState(ShardRoutingState.STARTED)
            .size(),
        equalTo(test.totalNumShards));

    logger.info(
        "--> trying to get the indexed document on the first round (before close and shutdown)");
    getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
    assertThat(getResponse.isExists(), equalTo(true));

    logger.info("--> indexing a simple document");
    client().prepareIndex("test", "type1", "2").setSource("field1", "value1").execute().actionGet();
  }