/** Restarts a random node in the cluster and calls the callback during restart. */
public void restartRandomNode(RestartCallback callback) throws Exception {
    ensureOpen();
    NodeAndClient nodeAndClient = getRandomNodeAndClient();
    if (nodeAndClient != null) {
        logger.info("Restarting random node [{}] ", nodeAndClient.name);
        nodeAndClient.restart(callback);
    }
}
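/*
 * Hypothetical caller sketch. It assumes RestartCallback can be subclassed inline and that
 * doRestart(String), which this class invokes in restartAllNodes below, is an overridable hook;
 * any other member of the callback is not shown because it is not visible in this class.
 *
 *   cluster.restartRandomNode(new RestartCallback() {
 *       @Override
 *       public boolean doRestart(String nodeName) {
 *           return true; // let whichever node was picked be restarted
 *       }
 *   });
 */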
/** Returns a "smart" node client to a random node in the cluster */ public synchronized Client smartClient() { NodeAndClient randomNodeAndClient = getRandomNodeAndClient(); if (randomNodeAndClient != null) { return randomNodeAndClient.nodeClient(); } Assert.fail("No smart client found"); return null; // can't happen }
/** Stops a random node in the cluster. */
public synchronized void stopRandomNode() {
    ensureOpen();
    NodeAndClient nodeAndClient = getRandomNodeAndClient();
    if (nodeAndClient != null) {
        logger.info("Closing random node [{}] ", nodeAndClient.name);
        nodes.remove(nodeAndClient.name);
        nodeAndClient.close();
    }
}
/**
 * Ensures that at least <code>n</code> nodes are present in the cluster. If more than
 * <code>n</code> nodes are already present, this method will not stop any of the running nodes.
 */
public synchronized void ensureAtLeastNumNodes(int n) {
    int size = nodes.size();
    for (int i = size; i < n; i++) {
        logger.info("increasing cluster size from {} to {}", size, n);
        NodeAndClient buildNode = buildNode();
        buildNode.node().start();
        publishNode(buildNode);
    }
}
/** Returns a node client to a given node. */
public synchronized Client client(String nodeName) {
    ensureOpen();
    NodeAndClient nodeAndClient = nodes.get(nodeName);
    if (nodeAndClient != null) {
        return nodeAndClient.client(random);
    }
    Assert.fail("No node found with name: [" + nodeName + "]");
    return null; // can't happen
}
/** Returns a client to a node started with "node.client: true". */
public synchronized Client clientNodeClient() {
    ensureOpen();
    NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new ClientNodePredicate());
    if (randomNodeAndClient != null) {
        return randomNodeAndClient.client(random);
    }
    startNodeClient(ImmutableSettings.EMPTY);
    return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
}
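/*
 * For reference: a "client node" here is one whose settings carry "node.client: true". A minimal
 * sketch of such settings (assuming ImmutableSettings.settingsBuilder() is available next to the
 * ImmutableSettings.EMPTY constant used above):
 *
 *   Settings clientNodeSettings = ImmutableSettings.settingsBuilder()
 *       .put("node.client", true)
 *       .build();
 */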
/** Stops the current master node forcefully. */
public synchronized void stopCurrentMasterNode() {
    ensureOpen();
    assert size() > 0;
    String masterNodeName = getMasterName();
    assert nodes.containsKey(masterNodeName);
    logger.info("Closing master node [{}] ", masterNodeName);
    NodeAndClient remove = nodes.remove(masterNodeName);
    remove.close();
}
/**
 * Returns a node client to a random node, but not the master. This method will fail if no
 * non-master client is available.
 */
public synchronized Client nonMasterClient() {
    ensureOpen();
    NodeAndClient randomNodeAndClient =
        getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
    if (randomNodeAndClient != null) {
        return randomNodeAndClient.nodeClient(); // ensure a node client for a non-master node is returned
    }
    Assert.fail("No non-master client found");
    return null; // can't happen
}
/** Returns a random running node, building, starting and publishing a new one if the cluster is empty. */
private synchronized NodeAndClient getOrBuildRandomNode() {
    ensureOpen();
    NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
    if (randomNodeAndClient != null) {
        return randomNodeAndClient;
    }
    NodeAndClient buildNode = buildNode();
    buildNode.node().start();
    publishNode(buildNode);
    return buildNode;
}
/** Stops any of the current nodes but not the master node. */
public void stopRandomNonMasterNode() {
    NodeAndClient nodeAndClient =
        getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
    if (nodeAndClient != null) {
        logger.info(
            "Closing random non master node [{}] current master [{}] ",
            nodeAndClient.name,
            getMasterName());
        nodes.remove(nodeAndClient.name);
        nodeAndClient.close();
    }
}
/**
 * Returns a client to a random node that matches the given predicate. The predicate can filter
 * nodes based on the node's settings. If all nodes are filtered out, this method returns
 * <code>null</code>.
 */
public synchronized Client client(final Predicate<Settings> filterPredicate) {
    ensureOpen();
    final NodeAndClient randomNodeAndClient =
        getRandomNodeAndClient(
            new Predicate<NodeAndClient>() {
                @Override
                public boolean apply(NodeAndClient nodeAndClient) {
                    return filterPredicate.apply(nodeAndClient.node.settings());
                }
            });
    if (randomNodeAndClient != null) {
        return randomNodeAndClient.client(random);
    }
    return null;
}
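/*
 * Hypothetical usage sketch: obtain a client to any node configured as a data node. The
 * "node.data" key and the Settings#getAsBoolean accessor are assumptions about the node
 * settings API; any predicate over Settings works the same way.
 *
 *   Client dataNodeClient = cluster.client(new Predicate<Settings>() {
 *       @Override
 *       public boolean apply(Settings settings) {
 *           return settings.getAsBoolean("node.data", true);
 *       }
 *   });
 */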
/**
 * Stops a random node in the cluster that matches the given filter, or does nothing if none of
 * the nodes match the filter.
 */
public synchronized void stopRandomNode(final Predicate<Settings> filter) {
    ensureOpen();
    NodeAndClient nodeAndClient =
        getRandomNodeAndClient(
            new Predicate<TestCluster.NodeAndClient>() {
                @Override
                public boolean apply(NodeAndClient nodeAndClient) {
                    return filter.apply(nodeAndClient.node.settings());
                }
            });
    if (nodeAndClient != null) {
        logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
        nodes.remove(nodeAndClient.name);
        nodeAndClient.close();
    }
}
/** Registers a started node with the cluster and records its data directories for cleanup. */
private void publishNode(NodeAndClient nodeAndClient) {
    assert !nodeAndClient.node().isClosed();
    NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, nodeAndClient.node);
    if (nodeEnv.hasNodeFile()) {
        dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataLocations()));
    }
    nodes.put(nodeAndClient.name, nodeAndClient);
}
/**
 * Ensures that at most <code>n</code> nodes are up and running. If fewer than <code>n</code>
 * nodes are running, this method will not start any additional nodes.
 */
public synchronized void ensureAtMostNumNodes(int n) {
    if (nodes.size() <= n) {
        return;
    }
    // prevent killing the master if possible
    final Iterator<NodeAndClient> values =
        n == 0
            ? nodes.values().iterator()
            : Iterators.filter(
                nodes.values().iterator(),
                Predicates.not(new MasterNodePredicate(getMasterName())));
    final Iterator<NodeAndClient> limit = Iterators.limit(values, nodes.size() - n);
    logger.info("reducing cluster size from {} to {}", nodes.size(), n);
    Set<NodeAndClient> nodesToRemove = new HashSet<NodeAndClient>();
    while (limit.hasNext()) {
        NodeAndClient next = limit.next();
        nodesToRemove.add(next);
        next.close();
    }
    for (NodeAndClient toRemove : nodesToRemove) {
        nodes.remove(toRemove.name);
    }
}
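/*
 * Hypothetical test usage of the two ensure* methods above: bound the cluster size before an
 * assertion that depends on the number of running nodes.
 *
 *   cluster.ensureAtLeastNumNodes(2); // grow to at least two nodes
 *   cluster.ensureAtMostNumNodes(3);  // shrink to at most three, sparing the master if possible
 */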
/** Restarts all nodes, either one by one (rolling restart) or by stopping all nodes before restarting them. */
private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception {
    ensureOpen();
    List<NodeAndClient> toRemove = new ArrayList<TestCluster.NodeAndClient>();
    try {
        for (NodeAndClient nodeAndClient : nodes.values()) {
            if (!callback.doRestart(nodeAndClient.name)) {
                logger.info("Closing node [{}] during restart", nodeAndClient.name);
                toRemove.add(nodeAndClient);
                nodeAndClient.close();
            }
        }
    } finally {
        for (NodeAndClient nodeAndClient : toRemove) {
            nodes.remove(nodeAndClient.name);
        }
    }
    logger.info("Restarting remaining nodes rollingRestart [{}]", rollingRestart);
    if (rollingRestart) {
        int numNodesRestarted = 0;
        for (NodeAndClient nodeAndClient : nodes.values()) {
            callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
            logger.info("Restarting node [{}] ", nodeAndClient.name);
            nodeAndClient.restart(callback);
        }
    } else {
        int numNodesRestarted = 0;
        for (NodeAndClient nodeAndClient : nodes.values()) {
            callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
            logger.info("Stopping node [{}] ", nodeAndClient.name);
            nodeAndClient.node.close();
        }
        for (NodeAndClient nodeAndClient : nodes.values()) {
            logger.info("Starting node [{}] ", nodeAndClient.name);
            nodeAndClient.restart(callback);
        }
    }
}
/** Resets the client of every node so each test gets its own fresh clients. */
private void resetClients() {
    final Collection<NodeAndClient> nodesAndClients = nodes.values();
    for (NodeAndClient nodeAndClient : nodesAndClients) {
        nodeAndClient.resetClient();
    }
}
/** Starts a node with the given settings and returns its name. */
public String startNode(Settings settings) {
    NodeAndClient buildNode = buildNode(settings);
    buildNode.node().start();
    publishNode(buildNode);
    return buildNode.name;
}
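/*
 * Hypothetical usage sketch: start a dedicated non-master node and talk to it by name. The
 * "node.master" key and the settingsBuilder() call are assumptions; any Settings instance works.
 *
 *   String name = cluster.startNode(ImmutableSettings.settingsBuilder()
 *       .put("node.master", false)
 *       .build());
 *   Client client = cluster.client(name);
 */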
/**
 * Resets the cluster for the next test: re-seeds the random instance, resets all clients,
 * optionally wipes the data directories, and makes sure exactly the shared nodes are running.
 */
private synchronized void reset(Random random, boolean wipeData, double transportClientRatio) {
    assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
    logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
    this.transportClientRatio = transportClientRatio;
    this.random = new Random(random.nextLong());
    /* reset all clients - each test gets its own client based on the Random instance created above */
    resetClients();
    if (wipeData) {
        wipeDataDirectories();
    }
    if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) {
        logger.debug(
            "Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
            nodes.keySet(),
            nextNodeId.get(),
            sharedNodesSeeds.length);
        return;
    }
    logger.debug(
        "Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
        nodes.keySet(),
        nextNodeId.get(),
        sharedNodesSeeds.length);
    Set<NodeAndClient> sharedNodes = new HashSet<NodeAndClient>();
    boolean changed = false;
    for (int i = 0; i < sharedNodesSeeds.length; i++) {
        String buildNodeName = buildNodeName(i);
        NodeAndClient nodeAndClient = nodes.get(buildNodeName);
        if (nodeAndClient == null) {
            changed = true;
            nodeAndClient = buildNode(i, sharedNodesSeeds[i], defaultSettings);
            nodeAndClient.node.start();
            logger.info("Starting shared node [{}] that was not yet running", nodeAndClient.name);
        }
        sharedNodes.add(nodeAndClient);
    }
    if (!changed && sharedNodes.size() == nodes.size()) {
        logger.debug(
            "Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
            nodes.keySet(),
            nextNodeId.get(),
            sharedNodesSeeds.length);
        if (size() > 0) {
            client()
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForNodes(Integer.toString(sharedNodesSeeds.length))
                .get();
        }
        return; // we are consistent - return
    }
    for (NodeAndClient nodeAndClient : sharedNodes) {
        nodes.remove(nodeAndClient.name);
    }
    // trash the remaining nodes
    final Collection<NodeAndClient> toShutDown = nodes.values();
    for (NodeAndClient nodeAndClient : toShutDown) {
        logger.debug("Close Node [{}] not shared", nodeAndClient.name);
        nodeAndClient.close();
    }
    nodes.clear();
    for (NodeAndClient nodeAndClient : sharedNodes) {
        publishNode(nodeAndClient);
    }
    nextNodeId.set(sharedNodesSeeds.length);
    assert size() == sharedNodesSeeds.length;
    if (size() > 0) {
        client()
            .admin()
            .cluster()
            .prepareHealth()
            .setWaitForNodes(Integer.toString(sharedNodesSeeds.length))
            .get();
    }
    logger.debug(
        "Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]",
        nodes.keySet(),
        nextNodeId.get(),
        sharedNodesSeeds.length);
}