Code example #1
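A decider method (from Elasticsearch's FilterAllocationDecider) that evaluates the index-level require/include/exclude routing filters against a candidate node: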
 /**
  * Applies the index-level shard allocation filters to a node. Returns a NO
  * decision when any filter rejects the node, or null when no filter objects.
  */
 private Decision shouldIndexFilter(
     IndexMetaData indexMd, RoutingNode node, RoutingAllocation allocation) {
   // index.routing.allocation.require.*: the node must match all require filters
   if (indexMd.requireFilters() != null) {
     if (!indexMd.requireFilters().match(node.node())) {
       return allocation.decision(
           Decision.NO,
           NAME,
           "node does not match index setting [%s] filters [%s]",
           IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX,
           indexMd.requireFilters());
     }
   }
   // index.routing.allocation.include.*: the node must match the include filters
   if (indexMd.includeFilters() != null) {
     if (!indexMd.includeFilters().match(node.node())) {
       return allocation.decision(
           Decision.NO,
           NAME,
           "node does not match index setting [%s] filters [%s]",
           IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_PREFIX,
           indexMd.includeFilters());
     }
   }
   // index.routing.allocation.exclude.*: a node matching the exclude filters is rejected
   if (indexMd.excludeFilters() != null) {
     if (indexMd.excludeFilters().match(node.node())) {
       return allocation.decision(
           Decision.NO,
           NAME,
           "node matches index setting [%s] filters [%s]",
           IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey(),
           indexMd.excludeFilters());
     }
   }
   // null means "no veto": the caller combines this result with its other checks
   return null;
 }
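Returning null rather than YES lets the enclosing decider chain several checks together. A minimal sketch of how a caller might consume this (hypothetical; the enclosing FilterAllocationDecider code is not part of this excerpt):

 // Hypothetical caller sketch, not the verbatim Elasticsearch source.
 Decision decision = shouldIndexFilter(indexMd, node, allocation);
 if (decision != null) {
   return decision; // a require/include/exclude filter produced NO
 }
 // no index filter objected; report YES for this decider
 return allocation.decision(Decision.YES, NAME, "node passes index routing filters");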
Code example #2
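An integration test that sets INDEX_ROUTING_EXCLUDE_GROUP_SETTING with the "_name" attribute to block allocation to the only data node, then verifies after a full cluster restart that the primary is force-allocated there anyway: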
 /**
  * This test ensures that for an unassigned primary shard that has a valid shard copy on at least
  * one node, we will force allocate the primary shard to one of those nodes, even if the
  * allocation deciders all return a NO decision to allocate.
  */
 public void testForceAllocatePrimaryOnNoDecision() throws Exception {
   logger.info("--> starting 1 node");
   final String node = internalCluster().startNode();
   logger.info("--> creating index with 1 primary and 0 replicas");
   final String indexName = "test-idx";
   assertAcked(
       client()
           .admin()
           .indices()
           .prepareCreate(indexName)
           .setSettings(
               Settings.builder()
                   .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                   .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0))
           .get());
   logger.info("--> update the settings to prevent allocation to the data node");
   assertAcked(
       client()
           .admin()
           .indices()
           .prepareUpdateSettings(indexName)
           .setSettings(
               Settings.builder()
                   .put(
                       IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name",
                       node)));
   logger.info("--> full cluster restart");
   internalCluster().fullRestart();
   logger.info(
       "--> checking that the primary shard is force allocated to the data node despite being blocked by the exclude filter");
   ensureGreen(indexName);
   assertEquals(
       1,
       client()
           .admin()
           .cluster()
           .prepareState()
           .get()
           .getState()
           .routingTable()
           .index(indexName)
           .shardsWithState(ShardRoutingState.STARTED)
           .size());
 }
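Note that ensureGreen passes even though the exclude filter is still in place: with every decider returning NO, the allocator still force-allocates the unassigned primary to the one node that holds a valid shard copy.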
Code example #3
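An integration test that toggles the same exclude filter by node name, first excluding node4 and later resetting the filter to "NONE", to steer recovery between nodes while checking that a shard active elsewhere does not cause another shard's copy to be deleted: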
  public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
    InternalTestCluster.Async<String> masterFuture =
        internalCluster()
            .startNodeAsync(
                Settings.builder()
                    .put(
                        Node.NODE_MASTER_SETTING.getKey(),
                        true,
                        Node.NODE_DATA_SETTING.getKey(),
                        false)
                    .build());
    InternalTestCluster.Async<List<String>> nodesFutures =
        internalCluster()
            .startNodesAsync(
                4,
                Settings.builder()
                    .put(
                        Node.NODE_MASTER_SETTING.getKey(),
                        false,
                        Node.NODE_DATA_SETTING.getKey(),
                        true)
                    .build());

    final String masterNode = masterFuture.get();
    final String node1 = nodesFutures.get().get(0);
    final String node2 = nodesFutures.get().get(1);
    final String node3 = nodesFutures.get().get(2);
    // we will use node4 later on; starting it now ensures it gets a data folder
    // different from nodes 1, 2 and 3
    final String node4 = nodesFutures.get().get(3);

    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(indexSettings())
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                    .put(
                        IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name",
                        node4)));
    assertFalse(
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForNoRelocatingShards(true)
            .setWaitForGreenStatus()
            .setWaitForNodes("5")
            .get()
            .isTimedOut());

    // disable allocation to control the situation more easily
    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "none")));

    logger.debug("--> shutting down two random nodes");
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));

    logger.debug("--> verifying index is red");
    ClusterHealthResponse health =
        client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
    if (health.getStatus() != ClusterHealthStatus.RED) {
      logClusterState();
      fail("cluster didn't become red, despite of shutting 2 of 3 nodes");
    }

    logger.debug("--> allowing index to be assigned to node [{}]", node4);
    assertAcked(
        client()
            .admin()
            .indices()
            .prepareUpdateSettings("test")
            .setSettings(
                Settings.builder()
                    .put(
                        IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name",
                        "NONE")));

    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "all")));

    logger.debug("--> waiting for shards to recover on [{}]", node4);
    // We have to do this in two steps: shard fetching is now asynchronous and
    // happens before assigning, so the change to the allocation filtering may
    // not take effect immediately.
    // TODO: we should add an easier way to do this. It's too much of a song and dance..
    Index index = resolveIndex("test");
    assertBusy(
        () ->
            assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index)));

    // wait for 4 active shards - we should have lost one shard
    assertFalse(
        client().admin().cluster().prepareHealth().setWaitForActiveShards(4).get().isTimedOut());

    // disable allocation again to control concurrency a bit and allow shard active to kick in
    // before allocation
    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "none")));

    logger.debug("--> starting the two old nodes back");

    internalCluster()
        .startNodesAsync(
            2,
            Settings.builder()
                .put(
                    Node.NODE_MASTER_SETTING.getKey(), false, Node.NODE_DATA_SETTING.getKey(), true)
                .build());

    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());

    assertAcked(
        client()
            .admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .put(
                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
                        "all")));

    logger.debug("--> waiting for the lost shard to be recovered");

    ensureGreen("test");
  }
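The final ensureGreen only succeeds if the returning nodes still hold the data for the lost shard; that is, the "shard active elsewhere" cleanup must not have deleted another shard's copy, which is exactly what the test name asserts.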