Code example #1
File: TestCluster.java Project: pwg17/elasticsearch
 /** Stops the current master node forcefully */
 public synchronized void stopCurrentMasterNode() {
   ensureOpen();
   assert size() > 0;
   String masterNodeName = getMasterName();
   assert nodes.containsKey(masterNodeName);
   logger.info("Closing master node [{}] ", masterNodeName);
   NodeAndClient remove = nodes.remove(masterNodeName);
   remove.close();
 }
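In a test, stopping the master is typically followed by a wait for the remaining nodes to elect a new one. A minimal usage sketch, assuming a TestCluster field named cluster and that the getMasterName(), client() and size() helpers shown elsewhere in this file are accessible from the test (the test name and flow are hypothetical, not taken from the project):

  // Hypothetical usage sketch; not part of TestCluster.java.
  public void testMasterFailover() throws Exception {
    String oldMaster = cluster.getMasterName();
    cluster.stopCurrentMasterNode();
    // Wait until the remaining nodes are visible and have elected a new master.
    cluster.client().admin().cluster().prepareHealth()
        .setWaitForNodes(Integer.toString(cluster.size()))
        .get();
    assert !oldMaster.equals(cluster.getMasterName());
  }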
Code example #2
File: TestCluster.java Project: pwg17/elasticsearch
 /** Stops a random node in the cluster. */
 public synchronized void stopRandomNode() {
   ensureOpen();
   NodeAndClient nodeAndClient = getRandomNodeAndClient();
   if (nodeAndClient != null) {
     logger.info("Closing random node [{}] ", nodeAndClient.name);
     nodes.remove(nodeAndClient.name);
     nodeAndClient.close();
   }
 }
Code example #3
File: TestCluster.java Project: pwg17/elasticsearch
 /** Stops any of the current nodes, but not the master node. */
 public void stopRandomNonMasterNode() {
   NodeAndClient nodeAndClient =
       getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
   if (nodeAndClient != null) {
     logger.info(
         "Closing random non master node [{}] current master [{}] ",
         nodeAndClient.name,
         getMasterName());
     nodes.remove(nodeAndClient.name);
     nodeAndClient.close();
   }
 }
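The MasterNodePredicate referenced here lives elsewhere in TestCluster.java and is not shown in these excerpts. A plausible sketch of its shape, inferred from how it is combined with Guava's Predicates.not and from the anonymous apply() override in code example #4 below (the field and constructor names are assumptions):

  // Sketch only; inferred from usage, not copied from the project.
  // Predicate here is com.google.common.base.Predicate.
  private static final class MasterNodePredicate implements Predicate<NodeAndClient> {
    private final String masterNodeName;

    MasterNodePredicate(String masterNodeName) {
      this.masterNodeName = masterNodeName;
    }

    @Override
    public boolean apply(NodeAndClient nodeAndClient) {
      // Matches exactly the node that is currently elected master.
      return masterNodeName.equals(nodeAndClient.name);
    }
  }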
Code example #4
File: TestCluster.java Project: pwg17/elasticsearch
 /**
   * Stops a random node in the cluster that matches the given filter, or no node if none of the
   * nodes matches the filter.
  */
 public synchronized void stopRandomNode(final Predicate<Settings> filter) {
   ensureOpen();
   NodeAndClient nodeAndClient =
       getRandomNodeAndClient(
           new Predicate<TestCluster.NodeAndClient>() {
             @Override
             public boolean apply(NodeAndClient nodeAndClient) {
               return filter.apply(nodeAndClient.node.settings());
             }
           });
   if (nodeAndClient != null) {
     logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
     nodes.remove(nodeAndClient.name);
     nodeAndClient.close();
   }
 }
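The caller supplies an arbitrary Settings predicate, so a test can target nodes by configuration. A hedged sketch of such a call, assuming the node.data setting key and Settings.getAsBoolean(String, Boolean) behave as in Elasticsearch versions of this era (illustrative only):

  // Hypothetical caller: stop a random node that is configured as a data node.
  cluster.stopRandomNode(new Predicate<Settings>() {
    @Override
    public boolean apply(Settings settings) {
      return settings.getAsBoolean("node.data", true);
    }
  });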
Code example #5
File: TestCluster.java Project: pwg17/elasticsearch
 /**
   * Ensures that at most <code>n</code> nodes are up and running. If fewer than <code>n</code> nodes
   * are running, this method will not start any additional nodes.
  */
 public synchronized void ensureAtMostNumNodes(int n) {
   if (nodes.size() <= n) {
     return;
   }
   // prevent killing the master if possible
   final Iterator<NodeAndClient> values =
       n == 0
           ? nodes.values().iterator()
           : Iterators.filter(
               nodes.values().iterator(),
               Predicates.not(new MasterNodePredicate(getMasterName())));
   final Iterator<NodeAndClient> limit = Iterators.limit(values, nodes.size() - n);
   logger.info("reducing cluster size from {} to {}", nodes.size() - n, n);
   Set<NodeAndClient> nodesToRemove = new HashSet<NodeAndClient>();
   while (limit.hasNext()) {
     NodeAndClient next = limit.next();
     nodesToRemove.add(next);
     next.close();
   }
   for (NodeAndClient toRemove : nodesToRemove) {
     nodes.remove(toRemove.name);
   }
 }
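The filter/limit combination above is plain Guava and can be seen in isolation: keep the protected element out of the candidate set, then visit at most size - n of the rest. A self-contained sketch with strings standing in for NodeAndClient (names are illustrative):

  List<String> names = Lists.newArrayList("node_0", "node_1", "node_2", "node_3");
  String master = "node_1";
  int n = 2; // keep at most two nodes running
  Iterator<String> nonMaster =
      Iterators.filter(names.iterator(), Predicates.not(Predicates.equalTo(master)));
  Iterator<String> toClose = Iterators.limit(nonMaster, names.size() - n);
  while (toClose.hasNext()) {
    System.out.println("would close " + toClose.next()); // closes node_0 and node_2
  }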
Code example #6
File: TestCluster.java Project: pwg17/elasticsearch
 private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception {
   ensureOpen();
   List<NodeAndClient> toRemove = new ArrayList<TestCluster.NodeAndClient>();
   try {
     for (NodeAndClient nodeAndClient : nodes.values()) {
       if (!callback.doRestart(nodeAndClient.name)) {
         logger.info("Closing node [{}] during restart", nodeAndClient.name);
         toRemove.add(nodeAndClient);
         nodeAndClient.close();
       }
     }
   } finally {
     for (NodeAndClient nodeAndClient : toRemove) {
       nodes.remove(nodeAndClient.name);
     }
   }
   logger.info("Restarting remaining nodes rollingRestart [{}]", rollingRestart);
   if (rollingRestart) {
     int numNodesRestarted = 0;
     for (NodeAndClient nodeAndClient : nodes.values()) {
       callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
       logger.info("Restarting node [{}] ", nodeAndClient.name);
       nodeAndClient.restart(callback);
     }
   } else {
     int numNodesRestarted = 0;
     for (NodeAndClient nodeAndClient : nodes.values()) {
       callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
       logger.info("Stopping node [{}] ", nodeAndClient.name);
       nodeAndClient.node.close();
     }
     for (NodeAndClient nodeAndClient : nodes.values()) {
       logger.info("Starting node [{}] ", nodeAndClient.name);
       nodeAndClient.restart(callback);
     }
   }
 }
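Everything in this method is driven by the callback: doRestart decides whether a node is restarted or simply closed, and doAfterNodes runs between restarts with a client for the node that just came back. A sketch of an overriding callback, with the method signatures inferred from the calls above (the real RestartCallback class may differ):

  // Signatures inferred from the calls in restartAllNodes; treat as an approximation.
  RestartCallback callback = new RestartCallback() {
    @Override
    public boolean doRestart(String nodeName) {
      // Restart every node; returning false would close the node instead of restarting it.
      return true;
    }

    @Override
    public void doAfterNodes(int numNodesRestarted, Client client) {
      // Illustrative hook: check cluster health between restarts.
      client.admin().cluster().prepareHealth().get();
    }
  };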
Code example #7
File: TestCluster.java Project: pwg17/elasticsearch
  private synchronized void reset(Random random, boolean wipeData, double transportClientRatio) {
    assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
    logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
    this.transportClientRatio = transportClientRatio;
    this.random = new Random(random.nextLong());
    resetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */
    if (wipeData) {
      wipeDataDirectories();
    }
    if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) {
      logger.debug(
          "Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
          nodes.keySet(),
          nextNodeId.get(),
          sharedNodesSeeds.length);
      return;
    }
    logger.debug(
        "Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
        nodes.keySet(),
        nextNodeId.get(),
        sharedNodesSeeds.length);

    Set<NodeAndClient> sharedNodes = new HashSet<NodeAndClient>();
    boolean changed = false;
    for (int i = 0; i < sharedNodesSeeds.length; i++) {
      String buildNodeName = buildNodeName(i);
      NodeAndClient nodeAndClient = nodes.get(buildNodeName);
      if (nodeAndClient == null) {
        changed = true;
        nodeAndClient = buildNode(i, sharedNodesSeeds[i], defaultSettings);
        nodeAndClient.node.start();
        logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
      }
      sharedNodes.add(nodeAndClient);
    }
    if (!changed && sharedNodes.size() == nodes.size()) {
      logger.debug(
          "Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
          nodes.keySet(),
          nextNodeId.get(),
          sharedNodesSeeds.length);
      if (size() > 0) {
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForNodes(Integer.toString(sharedNodesSeeds.length))
            .get();
      }
      return; // we are consistent - return
    }
    for (NodeAndClient nodeAndClient : sharedNodes) {
      nodes.remove(nodeAndClient.name);
    }

    // trash the remaining nodes
    final Collection<NodeAndClient> toShutDown = nodes.values();
    for (NodeAndClient nodeAndClient : toShutDown) {
      logger.debug("Close Node [{}] not shared", nodeAndClient.name);
      nodeAndClient.close();
    }
    nodes.clear();
    for (NodeAndClient nodeAndClient : sharedNodes) {
      publishNode(nodeAndClient);
    }
    nextNodeId.set(sharedNodesSeeds.length);
    assert size() == sharedNodesSeeds.length;
    if (size() > 0) {
      client()
          .admin()
          .cluster()
          .prepareHealth()
          .setWaitForNodes(Integer.toString(sharedNodesSeeds.length))
          .get();
    }
    logger.debug(
        "Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
        nodes.keySet(),
        nextNodeId.get(),
        sharedNodesSeeds.length);
  }
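The bookkeeping at the end of reset() is easier to follow in isolation: shared entries are detached from the registry first, everything left behind is closed, and the shared set is then re-published. A self-contained illustration using plain Java collections (names and values are illustrative; no Elasticsearch types involved):

  Map<String, String> nodes = new LinkedHashMap<String, String>();
  nodes.put("node_0", "shared");
  nodes.put("node_1", "shared");
  nodes.put("node_7", "started by an individual test");

  Set<String> sharedNames = new LinkedHashSet<String>(Arrays.asList("node_0", "node_1"));
  Map<String, String> sharedNodes = new LinkedHashMap<String, String>();
  for (String name : sharedNames) {
    sharedNodes.put(name, nodes.remove(name)); // detach shared nodes from the registry
  }
  for (String leftover : nodes.keySet()) {
    System.out.println("closing non-shared node " + leftover); // stands in for nodeAndClient.close()
  }
  nodes.clear();
  nodes.putAll(sharedNodes); // publish the shared nodes again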