/** Returns the changes comparing this nodes to the provided nodes. */
 public Delta delta(DiscoveryNodes other) {
   // Nodes present in "other" but absent here were removed.
   List<DiscoveryNode> removedNodes = newArrayList();
   for (DiscoveryNode candidate : other) {
     if (!nodeExists(candidate.id())) {
       removedNodes.add(candidate);
     }
   }
   // Nodes present here but absent in "other" were added.
   List<DiscoveryNode> addedNodes = newArrayList();
   for (DiscoveryNode candidate : this) {
     if (!other.nodeExists(candidate.id())) {
       addedNodes.add(candidate);
     }
   }
   // A master change is only reported when we currently have a master id and it
   // differs from (or is missing in) the other nodes.
   DiscoveryNode oldMaster = null;
   DiscoveryNode currentMaster = null;
   if (masterNodeId != null && !masterNodeId.equals(other.masterNodeId)) {
     oldMaster = other.masterNode();
     currentMaster = masterNode();
   }
   return new Delta(
       oldMaster,
       currentMaster,
       localNodeId,
       ImmutableList.copyOf(removedNodes),
       ImmutableList.copyOf(addedNodes));
 }
  /**
   * Builds the map of per-node store metadata for the given shard, using and refreshing the
   * per-shard cache in {@code cachedStores}.
   *
   * <p>On a cache miss all current data nodes are queried. On a cache hit, entries for nodes that
   * have left the cluster are evicted first, and only data nodes with no cached entry are queried.
   */
  private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData>
      buildShardStores(DiscoveryNodes nodes, MutableShardRouting shard) {
    Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores =
        cachedStores.get(shard.shardId());
    ObjectOpenHashSet<String> nodesIds;
    if (shardStores == null) {
      // Cache miss: create a fresh cache entry and query every current data node.
      shardStores = Maps.newHashMap();
      cachedStores.put(shard.shardId(), shardStores);
      nodesIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
    } else {
      nodesIds = ObjectOpenHashSet.newInstance();
      // clean nodes that have failed
      for (Iterator<DiscoveryNode> it = shardStores.keySet().iterator(); it.hasNext(); ) {
        DiscoveryNode node = it.next();
        if (!nodes.nodeExists(node.id())) {
          it.remove();
        }
      }

      // Only query data nodes we have no cached metadata for.
      for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
        DiscoveryNode node = cursor.value;
        if (!shardStores.containsKey(node)) {
          nodesIds.add(node.id());
        }
      }
    }

    if (!nodesIds.isEmpty()) {
      String[] nodesIdsArray = nodesIds.toArray(String.class);
      // Blocking fetch of store metadata from the selected nodes.
      TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData nodesStoreFilesMetaData =
          listShardStoreMetaData
              .list(shard.shardId(), false, nodesIdsArray, listTimeout)
              .actionGet();
      if (logger.isTraceEnabled()) {
        if (nodesStoreFilesMetaData.failures().length > 0) {
          StringBuilder sb =
              new StringBuilder(shard + ": failures when trying to list stores on nodes:");
          for (int i = 0; i < nodesStoreFilesMetaData.failures().length; i++) {
            Throwable cause = ExceptionsHelper.unwrapCause(nodesStoreFilesMetaData.failures()[i]);
            // Transport connection failures are expected churn; skip them in the trace output.
            if (cause instanceof ConnectTransportException) {
              continue;
            }
            sb.append("\n    -> ")
                .append(nodesStoreFilesMetaData.failures()[i].getDetailedMessage());
          }
          logger.trace(sb.toString());
        }
      }

      // Cache only responses that actually carry store metadata.
      for (TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData nodeStoreFilesMetaData :
          nodesStoreFilesMetaData) {
        if (nodeStoreFilesMetaData.storeFilesMetaData() != null) {
          shardStores.put(
              nodeStoreFilesMetaData.getNode(), nodeStoreFilesMetaData.storeFilesMetaData());
        }
      }
    }

    return shardStores;
  }
  /**
   * Creates and starts a {@code TimedClusterService} for tests, backed by the shared
   * {@code threadPool}, with no-op node connection management and a no-op state publisher.
   *
   * @param makeMaster when true, the local node ("node1") is also installed as master; otherwise
   *     the resulting state has a null master node id
   */
  TimedClusterService createTimedClusterService(boolean makeMaster) throws InterruptedException {
    TimedClusterService timedClusterService =
        new TimedClusterService(
            Settings.builder().put("cluster.name", "ClusterServiceTests").build(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            threadPool);
    timedClusterService.setLocalNode(
        new DiscoveryNode(
            "node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
    // No real transport in these tests: connect/disconnect are intentionally no-ops.
    timedClusterService.setNodeConnectionsService(
        new NodeConnectionsService(Settings.EMPTY, null, null) {
          @Override
          public void connectToNodes(List<DiscoveryNode> addedNodes) {
            // skip
          }

          @Override
          public void disconnectFromNodes(List<DiscoveryNode> removedNodes) {
            // skip
          }
        });
    // Publishing is a no-op too; tests apply state directly via setState below.
    timedClusterService.setClusterStatePublisher((event, ackListener) -> {});
    timedClusterService.start();
    ClusterState state = timedClusterService.state();
    final DiscoveryNodes nodes = state.nodes();
    final DiscoveryNodes.Builder nodesBuilder =
        DiscoveryNodes.builder(nodes).masterNodeId(makeMaster ? nodes.getLocalNodeId() : null);
    state =
        ClusterState.builder(state)
            .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
            .nodes(nodesBuilder)
            .build();
    setState(timedClusterService, state);
    return timedClusterService;
  }
 /**
  * Copies the master node id, local node id, and every node from the given {@code nodes} into
  * this builder, then returns this builder for chaining.
  */
 public Builder putAll(DiscoveryNodes nodes) {
   masterNodeId = nodes.masterNodeId();
   localNodeId = nodes.localNodeId();
   for (DiscoveryNode discoveryNode : nodes) {
     put(discoveryNode);
   }
   return this;
 }
  /**
   * Verifies the given repository on all capable master and data nodes. The local node verifies
   * in-line; remote nodes are asked via transport. Failures are collected into {@code errors} and
   * {@code finishVerification} is invoked exactly once, when the last node responds.
   */
  public void verify(
      String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
    final DiscoveryNodes discoNodes = clusterService.state().nodes();
    final DiscoveryNode localNode = discoNodes.localNode();

    final ObjectContainer<DiscoveryNode> masterAndDataNodes =
        discoNodes.masterAndDataNodes().values();
    final List<DiscoveryNode> nodes = newArrayList();
    for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
      DiscoveryNode node = cursor.value;
      Version version = node.getVersion();
      // Verification wasn't supported before v1.4.0 - no reason to send verification request to
      // these nodes
      if (version != null && version.onOrAfter(Version.V_1_4_0)) {
        nodes.add(node);
      }
    }
    // Thread-safe error collection: responses arrive on transport threads concurrently.
    final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
    // Counts down once per node; whoever hits zero finishes the verification.
    final AtomicInteger counter = new AtomicInteger(nodes.size());
    for (final DiscoveryNode node : nodes) {
      if (node.equals(localNode)) {
        // Local node: verify synchronously instead of going through the transport layer.
        try {
          doVerify(repository, verificationToken);
        } catch (Throwable t) {
          logger.warn("[{}] failed to verify repository", t, repository);
          errors.add(new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(t)));
        }
        if (counter.decrementAndGet() == 0) {
          finishVerification(listener, nodes, errors);
        }
      } else {
        transportService.sendRequest(
            node,
            ACTION_NAME,
            new VerifyNodeRepositoryRequest(repository, verificationToken),
            new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
              @Override
              public void handleResponse(TransportResponse.Empty response) {
                if (counter.decrementAndGet() == 0) {
                  finishVerification(listener, nodes, errors);
                }
              }

              @Override
              public void handleException(TransportException exp) {
                errors.add(
                    new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(exp)));
                if (counter.decrementAndGet() == 0) {
                  finishVerification(listener, nodes, errors);
                }
              }
            });
      }
    }
  }
  /**
   * With {@code PUBLISH_DIFF_ENABLE_SETTING} disabled, every published cluster state must be a
   * full state — receivers should never observe a state that was read from a diff.
   */
  public void testDisablingDiffPublishing() throws Exception {
    Settings noDiffPublishingSettings =
        Settings.builder()
            .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false)
            .build();

    MockNode nodeA =
        createMockNode(
            "nodeA",
            noDiffPublishingSettings,
            event -> fail("Shouldn't send cluster state to myself"));

    MockNode nodeB =
        createMockNode(
            "nodeB",
            noDiffPublishingSettings,
            event -> assertFalse(event.state().wasReadFromDiff()));

    // Initial cluster state: nodeA alone, acting as both local node and master.
    DiscoveryNodes discoveryNodes =
        DiscoveryNodes.builder()
            .add(nodeA.discoveryNode)
            .localNodeId(nodeA.discoveryNode.getId())
            .masterNodeId(nodeA.discoveryNode.getId())
            .build();
    ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();

    // First update: nodeB joins the cluster.
    discoveryNodes = DiscoveryNodes.builder(discoveryNodes).add(nodeB.discoveryNode).build();
    ClusterState previousClusterState = clusterState;
    clusterState =
        ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);

    // Second update: add a global block. Even this incremental change must ship as a full state.
    previousClusterState = clusterState;
    clusterState =
        ClusterState.builder(clusterState)
            .blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK))
            .incrementVersion()
            .build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
  }
 /**
  * Serializes the given nodes to the stream: a boolean-prefixed optional master node id, followed
  * by the node count and each node's own serialized form.
  *
  * @throws IOException if writing to the stream fails
  */
 public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException {
   // Read the master id once through the accessor; the original mixed the accessor (for the
   // null check) with a direct field read (for the write), which is inconsistent and reads twice.
   String masterNodeId = nodes.masterNodeId();
   if (masterNodeId == null) {
     out.writeBoolean(false);
   } else {
     out.writeBoolean(true);
     out.writeUTF(masterNodeId);
   }
   out.writeVInt(nodes.size());
   for (DiscoveryNode node : nodes) {
     node.writeTo(out);
   }
 }
 /**
  * Handles a master ping: confirms this node is the master the pinger believes it is pinging,
  * then replies with whether the pinging node exists in this node's view of the cluster.
  */
 @Override
 public void messageReceived(MasterPingRequest request, TransportChannel channel)
     throws Exception {
   DiscoveryNodes nodes = nodesProvider.nodes();
   // check if we are really the same master as the one we seemed to be think we are
   // this can happen if the master got "kill -9" and then another node started using the same
   // port
   if (!request.masterNodeId.equals(nodes.localNodeId())) {
     throw new ElasticSearchIllegalStateException(
         "Got ping as master with id [" + request.masterNodeId + "], but not master and no id");
   }
   // send a response, and note if we are connected to the master or not
   channel.sendResponse(new MasterPingResponseResponse(nodes.nodeExists(request.nodeId)));
 }
// Example #9
 /** Returns a set of nodes that have at least one shard of the given index. */
 public synchronized Set<String> nodesInclude(String index) {
   // Guard clause: unknown index means no nodes host it.
   if (!clusterService().state().routingTable().hasIndex(index)) {
     return Collections.emptySet();
   }
   DiscoveryNodes discoveryNodes = clusterService().state().getNodes();
   Set<String> nodeNames = new HashSet<String>();
   for (ShardRouting shardRouting : clusterService().state().routingTable().allShards(index)) {
     if (shardRouting.assignedToNode()) {
       // Resolve the hosting node's name for each assigned shard copy.
       nodeNames.add(discoveryNodes.get(shardRouting.currentNodeId()).getName());
     }
   }
   return nodeNames;
 }
    /**
     * Kicks off the scroll phase for every (node, scroll id) pair in the scroll context.
     *
     * <p>For each target whose node is still present, the phase is executed; targets whose node
     * has left the cluster are counted down as failed ops so {@code finishHim()} still fires once
     * all targets are accounted for. Fails fast when the context is empty.
     */
    public void start() {
      if (scrollId.getContext().length == 0) {
        listener.onFailure(
            new SearchPhaseExecutionException(
                "query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
        return;
      }

      // Note: a second loop that re-processed missing nodes was removed here — it decremented
      // successfulOps and counter a second time for every absent node, which could trigger
      // finishHim() before all outstanding phases completed and skewed the success count.
      ScrollIdForNode[] context = scrollId.getContext();
      for (int i = 0; i < context.length; i++) {
        ScrollIdForNode target = context[i];
        DiscoveryNode node = nodes.get(target.getNode());
        if (node != null) {
          executePhase(i, node, target.getScrollId());
        } else {
          if (logger.isDebugEnabled()) {
            logger.debug(
                "Node ["
                    + target.getNode()
                    + "] not available for scroll request ["
                    + scrollId.getSource()
                    + "]");
          }
          // The node left the cluster: record the failed op and account for this target.
          successfulOps.decrementAndGet();
          if (counter.decrementAndGet() == 0) {
            finishHim();
          }
        }
      }
    }
    /**
     * Records one node's ack (or failure) for the pending cluster state update. When the
     * count-down completes, cancels the ack timeout and notifies the listener with the last
     * failure observed (null if every ack succeeded).
     */
    @Override
    public void onNodeAck(DiscoveryNode node, @Nullable Exception e) {
      if (!ackedTaskListener.mustAck(node)) {
        // we always wait for the master ack anyway
        if (!node.equals(nodes.getMasterNode())) {
          return;
        }
      }
      if (e == null) {
        logger.trace(
            "ack received from node [{}], cluster_state update (version: {})",
            node,
            clusterStateVersion);
      } else {
        // Keep only the most recent failure; it is reported to the listener on completion.
        this.lastFailure = e;
        // Supplier defers message construction until debug logging is actually enabled.
        logger.debug(
            (Supplier<?>)
                () ->
                    new ParameterizedMessage(
                        "ack received from node [{}], cluster_state update (version: {})",
                        node,
                        clusterStateVersion),
            e);
      }

      if (countDown.countDown()) {
        logger.trace(
            "all expected nodes acknowledged cluster_state update (version: {})",
            clusterStateVersion);
        FutureUtils.cancel(ackTimeoutCallback);
        ackedTaskListener.onAllNodesAcked(lastFailure);
      }
    }
  /** Round-trips a rerouted routing table through the stream API and checks it is preserved. */
  @Test
  public void testRoutingTableSerialization() throws Exception {
    MetaData metaData =
        MetaData.builder()
            .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
            .build();
    RoutingTable initialTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();

    DiscoveryNodes threeNodes =
        DiscoveryNodes.builder()
            .put(newNode("node1"))
            .put(newNode("node2"))
            .put(newNode("node3"))
            .build();

    ClusterState clusterState =
        ClusterState.builder()
            .nodes(threeNodes)
            .metaData(metaData)
            .routingTable(initialTable)
            .build();

    // Reroute so the serialized table contains real shard assignments.
    AllocationService strategy = createAllocationService();
    RoutingTable source = strategy.reroute(clusterState).routingTable();

    // Serialize, deserialize, and compare the pretty-printed representations.
    BytesStreamOutput outStream = new BytesStreamOutput();
    RoutingTable.Builder.writeTo(source, outStream);
    BytesStreamInput inStream = new BytesStreamInput(outStream.bytes().toBytes(), false);
    RoutingTable target = RoutingTable.Builder.readFrom(inStream);

    assertThat(target.prettyPrint(), equalTo(source.prettyPrint()));
  }
  /**
   * A node that appears in the previous cluster state but has never received a state from us must
   * first get a full state; only subsequent updates may be sent as diffs.
   */
  public void testUnexpectedDiffPublishing() throws Exception {
    MockNode nodeA =
        createMockNode(
                "nodeA",
                Settings.EMPTY,
                event -> {
                  fail("Shouldn't send cluster state to myself");
                })
            .setAsMaster();

    MockNode nodeB = createMockNode("nodeB");

    // Initial cluster state with both states - the second node still shouldn't
    // get diff even though it's present in the previous cluster state
    DiscoveryNodes discoveryNodes =
        DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
    ClusterState previousClusterState =
        ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();
    ClusterState clusterState =
        ClusterState.builder(previousClusterState).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    // First publication to nodeB: must arrive as a full state.
    assertSameStateFromFull(nodeB.clusterState, clusterState);

    // cluster state update - add block
    previousClusterState = clusterState;
    clusterState =
        ClusterState.builder(clusterState)
            .blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK))
            .incrementVersion()
            .build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    // Second publication: nodeB already has a baseline, so a diff is expected.
    assertSameStateFromDiff(nodeB.clusterState, clusterState);
  }
  /**
   * Builds a two-node cluster state with a single one-shard, zero-replica index ("idx") and runs
   * an initial reroute, asserting the primary moves from UNASSIGNED to INITIALIZING.
   */
  private ClusterState createInitialClusterState(AllocationService service) {
    MetaData.Builder metaBuilder = MetaData.builder();
    metaBuilder.put(
        IndexMetaData.builder("idx")
            .settings(settings(Version.CURRENT))
            .numberOfShards(1)
            .numberOfReplicas(0));
    MetaData metaData = metaBuilder.build();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    routingTableBuilder.addAsNew(metaData.index("idx"));

    RoutingTable routingTable = routingTableBuilder.build();
    ClusterState clusterState =
        ClusterState.builder(
                org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(
                    Settings.EMPTY))
            .metaData(metaData)
            .routingTable(routingTable)
            .build();
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
            .build();
    // Keep the pre-reroute table so we can assert the before/after shard states below.
    RoutingTable prevRoutingTable = routingTable;
    routingTable = service.reroute(clusterState, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertEquals(prevRoutingTable.index("idx").shards().size(), 1);
    assertEquals(prevRoutingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);

    assertEquals(routingTable.index("idx").shards().size(), 1);
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
    return clusterState;
  }
  /**
   * When diff serialization throws, publishing must fail with
   * {@code FailedToCommitClusterStateException} whose cause mentions the serialization failure —
   * the broken diff must never be applied on the receiving node.
   */
  public void testSerializationFailureDuringDiffPublishing() throws Exception {
    MockNode nodeA =
        createMockNode(
                "nodeA",
                Settings.EMPTY,
                new ClusterStateListener() {
                  @Override
                  public void clusterChanged(ClusterChangedEvent event) {
                    fail("Shouldn't send cluster state to myself");
                  }
                })
            .setAsMaster();

    MockNode nodeB = createMockNode("nodeB");

    // Initial cluster state with both states - the second node still shouldn't get
    // diff even though it's present in the previous cluster state
    DiscoveryNodes discoveryNodes =
        DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
    ClusterState previousClusterState =
        ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build();
    ClusterState clusterState =
        ClusterState.builder(previousClusterState).incrementVersion().build();
    publishStateAndWait(nodeA.action, clusterState, previousClusterState);
    assertSameStateFromFull(nodeB.clusterState, clusterState);

    // cluster state update - add block
    previousClusterState = clusterState;
    clusterState =
        ClusterState.builder(clusterState)
            .blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK))
            .incrementVersion()
            .build();

    // Wrap the state so that producing a diff yields one that fails on writeTo; since nodeB has a
    // baseline, the publisher will try the diff path and hit the simulated IOException.
    ClusterState unserializableClusterState =
        new ClusterState(clusterState.version(), clusterState.stateUUID(), clusterState) {
          @Override
          public Diff<ClusterState> diff(ClusterState previousState) {
            return new Diff<ClusterState>() {
              @Override
              public ClusterState apply(ClusterState part) {
                fail("this diff shouldn't be applied");
                return part;
              }

              @Override
              public void writeTo(StreamOutput out) throws IOException {
                throw new IOException("Simulated failure of diff serialization");
              }
            };
          }
        };
    try {
      publishStateAndWait(nodeA.action, unserializableClusterState, previousClusterState);
      fail("cluster state published despite of diff errors");
    } catch (Discovery.FailedToCommitClusterStateException e) {
      assertThat(e.getCause(), notNullValue());
      assertThat(e.getCause().getMessage(), containsString("failed to serialize"));
    }
  }
 /**
  * Builds {@code count} cluster states, each owned by a randomly chosen master from
  * {@code masters}. The first state seen for a master gets a fresh single-node cluster; later
  * states for the same master just bump the version of that master's previous state.
  */
 List<ClusterState> randomStates(int count, String... masters) {
   ArrayList<ClusterState> result = new ArrayList<>(count);
   ClusterState[] latestPerMaster = new ClusterState[masters.length];
   while (count-- > 0) {
     int which = randomInt(masters.length - 1);
     ClusterState previous = latestPerMaster[which];
     ClusterState next;
     if (previous == null) {
       // First state for this master: a one-node cluster with it as master.
       DiscoveryNode masterNode =
           new DiscoveryNode(
               masters[which],
               buildNewFakeTransportAddress(),
               emptyMap(),
               emptySet(),
               Version.CURRENT);
       next =
           ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
               .nodes(
                   DiscoveryNodes.builder()
                       .add(masterNode)
                       .masterNodeId(masters[which])
                       .build())
               .build();
     } else {
       next = ClusterState.builder(previous).incrementVersion().build();
     }
     result.add(next);
     latestPerMaster[which] = next;
   }
   return result;
 }
 /** Wraps the given discovery nodes in a minimal cluster state for the "test" cluster. */
 private ClusterState clusterStateFromNodes(List<DiscoveryNode> nodes) {
   DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
   nodes.forEach(nodesBuilder::add);
   return ClusterState.builder(new ClusterName("test")).nodes(nodesBuilder).build();
 }
  /**
   * Publishing must fail with {@code FailedToCommitClusterStateException} when the required
   * minimum number of master nodes exceeds the number of master-eligible nodes in the cluster.
   */
  public void testFailToPublishWithLessThanMinMasterNodes() throws Exception {
    final int masterNodes = randomIntBetween(1, 10);

    MockNode master = createMockNode("master");
    DiscoveryNodes.Builder discoveryNodesBuilder =
        DiscoveryNodes.builder().add(master.discoveryNode);
    for (int i = 1; i < masterNodes; i++) {
      discoveryNodesBuilder.add(createMockNode("node" + i).discoveryNode);
    }
    // Add some data-only nodes; they must not count toward the master quorum.
    final int dataNodes = randomIntBetween(0, 5);
    final Settings dataSettings =
        Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build();
    for (int i = 0; i < dataNodes; i++) {
      discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings, null).discoveryNode);
    }
    discoveryNodesBuilder
        .localNodeId(master.discoveryNode.getId())
        .masterNodeId(master.discoveryNode.getId());
    DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
    MetaData metaData = MetaData.EMPTY_META_DATA;
    ClusterState clusterState =
        ClusterState.builder(CLUSTER_NAME).metaData(metaData).nodes(discoveryNodes).build();
    ClusterState previousState = master.clusterState;
    try {
      // Require more master nodes than exist — the publish can never reach quorum.
      publishState(
          master.action, clusterState, previousState, masterNodes + randomIntBetween(1, 5));
      fail("cluster state publishing didn't fail despite of not having enough nodes");
    } catch (Discovery.FailedToCommitClusterStateException expected) {
      logger.debug("failed to publish as expected", expected);
    }
  }
  /**
   * Round-trips a full cluster state (nodes, metadata, rerouted routing table) through the byte
   * serialization API and checks the routing table is preserved.
   */
  @Test
  public void testClusterStateSerialization() throws Exception {
    MetaData metaData =
        MetaData.builder()
            .put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
            .build();

    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();

    DiscoveryNodes nodes =
        DiscoveryNodes.builder()
            .put(newNode("node1"))
            .put(newNode("node2"))
            .put(newNode("node3"))
            .localNodeId("node1")
            .masterNodeId("node2")
            .build();

    ClusterState clusterState =
        ClusterState.builder().nodes(nodes).metaData(metaData).routingTable(routingTable).build();

    // Reroute so the serialized state carries real shard assignments.
    AllocationService strategy = createAllocationService();
    clusterState =
        ClusterState.builder(clusterState)
            .routingTable(strategy.reroute(clusterState).routingTable())
            .build();

    // Deserialize from the perspective of node1 (the local node).
    ClusterState serializedClusterState =
        ClusterState.Builder.fromBytes(
            ClusterState.Builder.toBytes(clusterState), newNode("node1"));

    assertThat(
        serializedClusterState.routingTable().prettyPrint(),
        equalTo(clusterState.routingTable().prettyPrint()));
  }
 /**
  * Reacts to a transport-level disconnect of a monitored node: stops its fault-detection entry
  * and either retries the connection (when configured) or reports the node as failed.
  */
 private void handleTransportDisconnect(DiscoveryNode node) {
   // Ignore disconnects for nodes no longer in the latest known cluster state.
   if (!latestNodes.nodeExists(node.id())) {
     return;
   }
   // Remove the fault-detection entry; if it was already gone, someone else handled this node.
   NodeFD nodeFD = nodesFD.remove(node);
   if (nodeFD == null) {
     return;
   }
   if (!running) {
     return;
   }
   nodeFD.running = false;
   if (connectOnNetworkDisconnect) {
     // Optimistic reconnect: re-register fault detection and schedule the next ping.
     try {
       transportService.connectToNode(node);
       nodesFD.put(node, new NodeFD());
       threadPool.schedule(pingInterval, ThreadPool.Names.SAME, new SendPingRequest(node));
     } catch (Exception e) {
       // Reconnect failed — treat the node as gone.
       logger.trace("[node  ] [{}] transport disconnected (with verified connect)", node);
       notifyNodeFailure(node, "transport disconnected (with verified connect)");
     }
   } else {
     logger.trace("[node  ] [{}] transport disconnected", node);
     notifyNodeFailure(node, "transport disconnected");
   }
 }
 /** Clears the master node id on this node's cluster state and returns {@code this} for chaining. */
 public MockNode resetMasterId() {
   DiscoveryNodes.Builder withoutMaster =
       DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(null);
   clusterState = ClusterState.builder(clusterState).nodes(withoutMaster).build();
   return this;
 }
  /**
   * Adds one more node ("node" + numberOfNodes) to the cluster, reroutes, and then repeatedly
   * promotes INITIALIZING shards to started until the routing table reaches a fixed point.
   */
  private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
    logger.info(
        "now, start 1 more node, check that rebalancing will happen because we set it to always");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(
                DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node" + numberOfNodes)))
            .build();

    RoutingTable routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    RoutingNodes routingNodes = clusterState.getRoutingNodes();

    // move initializing to started

    RoutingTable prev = routingTable;
    while (true) {
      routingTable =
          strategy
              .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
              .routingTable();
      clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
      routingNodes = clusterState.getRoutingNodes();
      // Fixed point reached: applyStartedShards returns the same table instance when nothing
      // changed, so reference equality is the termination test here.
      if (routingTable == prev) break;
      prev = routingTable;
    }

    return clusterState;
  }
  /**
   * For a single one-shard, zero-replica index: allocate the shard on a freshly added node, then
   * fail it and verify it returns to UNASSIGNED with no current node.
   */
  @Test
  public void testSingleIndexShardFailed() {
    AllocationService strategy =
        createAllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("Building initial routing table");

    MetaData metaData =
        MetaData.builder()
            .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
            .build();

    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();

    ClusterState clusterState =
        ClusterState.builder().metaData(metaData).routingTable(routingTable).build();

    // With no nodes yet, the single shard must be unassigned.
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());

    logger.info("Adding one node and rerouting");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder().put(newNode("node1")))
            .build();
    RoutingTable prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    // After reroute, the shard should be INITIALIZING on node1.
    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));

    logger.info("Marking the shard as failed");
    RoutingNodes routingNodes = clusterState.routingNodes();
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyFailedShard(
                clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    // A failed shard with no other candidate node goes back to UNASSIGNED.
    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
  }
 /** Installs this node as master in its own cluster state and returns {@code this} for chaining. */
 public MockNode setAsMaster() {
   DiscoveryNodes.Builder withMaster =
       DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.getId());
   clusterState = ClusterState.builder(clusterState).nodes(withMaster).build();
   return this;
 }
  /**
   * Allocation benchmark driver: builds a large cluster (many indices/shards/tagged nodes), then
   * repeatedly applies started shards and reroutes until no shards remain unassigned, reporting
   * per-run and average wall-clock times.
   */
  public static void main(String[] args) {
    final int numberOfRuns = 1;
    final int numIndices = 5 * 365; // five years
    final int numShards = 6;
    final int numReplicas = 2;
    final int numberOfNodes = 30;
    final int numberOfTags = 2;
    // Fixed seed so repeated benchmark runs allocate identically.
    AllocationService strategy =
        ElasticsearchAllocationTestCase.createAllocationService(
            ImmutableSettings.EMPTY, new Random(1));

    MetaData.Builder mb = MetaData.builder();
    for (int i = 1; i <= numIndices; i++) {
      mb.put(
          IndexMetaData.builder("test_" + i)
              .numberOfShards(numShards)
              .numberOfReplicas(numReplicas));
    }
    MetaData metaData = mb.build();
    RoutingTable.Builder rb = RoutingTable.builder();
    for (int i = 1; i <= numIndices; i++) {
      rb.addAsNew(metaData.index("test_" + i));
    }
    RoutingTable routingTable = rb.build();
    // Nodes are spread across tags round-robin (i % numberOfTags).
    DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
    for (int i = 1; i <= numberOfNodes; i++) {
      nb.put(newNode("node" + i, ImmutableMap.of("tag", "tag_" + (i % numberOfTags))));
    }
    ClusterState initialClusterState =
        ClusterState.builder().metaData(metaData).routingTable(routingTable).nodes(nb).build();

    long start = System.currentTimeMillis();
    for (int i = 0; i < numberOfRuns; i++) {
      logger.info("[{}] starting... ", i);
      long runStart = System.currentTimeMillis();
      ClusterState clusterState = initialClusterState;
      // Iterate start-shards + reroute until everything is assigned.
      while (clusterState.readOnlyRoutingNodes().hasUnassignedShards()) {
        logger.info(
            "[{}] remaining unassigned {}",
            i,
            clusterState.readOnlyRoutingNodes().unassigned().size());
        RoutingAllocation.Result result =
            strategy.applyStartedShards(
                clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING));
        clusterState = ClusterState.builder(clusterState).routingResult(result).build();
        result = strategy.reroute(clusterState);
        clusterState = ClusterState.builder(clusterState).routingResult(result).build();
      }
      logger.info(
          "[{}] took {}", i, TimeValue.timeValueMillis(System.currentTimeMillis() - runStart));
    }
    long took = System.currentTimeMillis() - start;
    logger.info(
        "total took {}, AVG {}",
        TimeValue.timeValueMillis(took),
        TimeValue.timeValueMillis(took / numberOfRuns));
  }
 /**
  * Returns the latest known discovery nodes, or — before any cluster state has been decided —
  * a freshly built set containing only the local node.
  */
 @Override
 public DiscoveryNodes nodes() {
   final DiscoveryNodes known = this.latestDiscoNodes;
   if (known == null) {
     // have not decided yet, just send the local node
     return DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build();
   }
   return known;
 }
// Example #27
 /** Filters the given ids down to those that belong to data nodes in the cluster. */
 @Override
 protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
   final List<String> dataNodeIds = new ArrayList<>();
   for (final String candidate : nodesIds) {
     if (nodes.getDataNodes().containsKey(candidate)) {
       dataNodeIds.add(candidate);
     }
   }
   return dataNodeIds.toArray(new String[dataNodeIds.size()]);
 }
 /**
  * Moves shards out of {@code from} and into the returned list when their hosting node shares an
  * attribute value with the local node, iterating the key's attributes in order. Shards whose
  * node is no longer present are dropped from {@code from} entirely.
  *
  * <p>Note: this mutates {@code from} in place via {@code Iterator.remove}.
  */
 private static ImmutableList<ShardRouting> collectAttributeShards(
     AttributesKey key, DiscoveryNodes nodes, ArrayList<ShardRouting> from) {
   final ArrayList<ShardRouting> to = new ArrayList<>();
   for (final String attribute : key.attributes) {
     final String localAttributeValue = nodes.localNode().attributes().get(attribute);
     // Attributes the local node doesn't define can't match anything; skip them.
     if (localAttributeValue != null) {
       for (Iterator<ShardRouting> iterator = from.iterator(); iterator.hasNext(); ) {
         ShardRouting fromShard = iterator.next();
         final DiscoveryNode discoveryNode = nodes.get(fromShard.currentNodeId());
         if (discoveryNode == null) {
           iterator.remove(); // node is not present anymore - ignore shard
         } else if (localAttributeValue.equals(discoveryNode.attributes().get(attribute))) {
           // Same attribute value as the local node: promote this shard.
           iterator.remove();
           to.add(fromShard);
         }
       }
     }
   }
   return ImmutableList.copyOf(to);
 }
 /**
  * Creates a cluster state with no index: empty routing table, generated cluster UUID, and
  * local/master node ids taken from the test node factory.
  */
 public static ClusterState stateWithNoShard() {
   // Removed an unused local ("numberOfNodes = 2") that was never read.
   DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
   // NOTE(review): the local/master ids are set but the corresponding nodes are never added to
   // the builder — confirm this is intentional for the no-shard fixture.
   discoBuilder.localNodeId(newNode(0).getId());
   discoBuilder.masterNodeId(newNode(1).getId());
   ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
   state.nodes(discoBuilder);
   state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
   state.routingTable(RoutingTable.builder().build());
   return state.build();
 }
 /**
  * Handles a master ping: verifies this node is the expected master, is still master, and knows
  * the pinging node, then replies with whether the pinging node exists in the cluster.
  */
 @Override
 public void messageReceived(MasterPingRequest request, TransportChannel channel)
     throws Exception {
   DiscoveryNodes nodes = nodesProvider.nodes();
   // check if we are really the same master as the one we seemed to be think we are
   // this can happen if the master got "kill -9" and then another node started using the same
   // port
   if (!request.masterNodeId.equals(nodes.localNodeId())) {
     throw new NotMasterException();
   }
   // if we are no longer master, fail...
   if (!nodes.localNodeMaster()) {
     throw new NoLongerMasterException();
   }
   // Reject pings from nodes we don't know about (e.g. removed from the cluster state).
   if (!nodes.nodeExists(request.nodeId)) {
     throw new NodeDoesNotExistOnMasterException();
   }
   // send a response, and note if we are connected to the master or not
   channel.sendResponse(new MasterPingResponseResponse(nodes.nodeExists(request.nodeId)));
 }