public void testReconnect() { List<DiscoveryNode> nodes = generateNodes(); NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService); ClusterState current = clusterStateFromNodes(Collections.emptyList()); ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); transport.randomConnectionExceptions = true; service.connectToNodes(event.nodesDelta().addedNodes()); for (int i = 0; i < 3; i++) { // simulate disconnects for (DiscoveryNode node : randomSubsetOf(nodes)) { transport.disconnectFromNode(node); } service.new ConnectionChecker().run(); } // disable exceptions so things can be restored transport.randomConnectionExceptions = false; service.new ConnectionChecker().run(); assertConnectedExactlyToNodes(event.state()); }
@Override
public void clusterChanged(ClusterChangedEvent event) {
    // Skip state updates that this component itself submitted, identified by
    // their task source string.
    if (event.source().equals(CLUSTER_UPDATE_TASK_SOURCE)) {
        // that's us, ignore this event
        return;
    }
    if (event.state().nodes().localNodeMaster()) {
        // we are master, schedule the routing table updater
        if (scheduledRoutingTableFuture == null) {
            // a new master (us), make sure we reroute shards
            routingTableDirty = true;
            scheduledRoutingTableFuture = threadPool.scheduleWithFixedDelay(new RoutingTableUpdater(), schedule);
        }
        if (event.nodesRemoved()) {
            // if nodes were removed, we don't want to wait for the scheduled task
            // since we want to get primary election as fast as possible
            routingTableDirty = true;
            reroute();
            // Commented out since we make sure to reroute whenever shards changes state or metadata
            // changes state
            // } else if (event.routingTableChanged()) {
            // routingTableDirty = true;
            // reroute();
        } else {
            if (event.nodesAdded()) {
                // A newly joined data node may receive shards; mark the routing
                // table dirty so the next scheduled run performs a reroute.
                for (DiscoveryNode node : event.nodesDelta().addedNodes()) {
                    if (node.dataNode()) {
                        routingTableDirty = true;
                        break;
                    }
                }
            }
        }
    } else {
        // No longer (or never were) the master: stop the periodic reroute task.
        // FutureUtils.cancel tolerates a null future, so this is safe even if
        // nothing was scheduled.  NOTE(review): null-tolerance assumed from
        // usage here — confirm against FutureUtils.
        FutureUtils.cancel(scheduledRoutingTableFuture);
        scheduledRoutingTableFuture = null;
    }
}
/**
 * Applies two successive random cluster-state changes and verifies that the
 * service connects to every added node and disconnects from every removed
 * node, ending each round connected to exactly the nodes of the new state.
 *
 * <p>The original body duplicated the whole connect/assert/disconnect/assert
 * sequence verbatim; the two rounds are now a loop. The second iteration
 * starts from the first iteration's resulting state, so node removals are
 * exercised as well as additions.
 */
public void testConnectAndDisconnect() {
    List<DiscoveryNode> nodes = generateNodes();
    NodeConnectionsService service =
            new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService);

    ClusterState current = clusterStateFromNodes(Collections.emptyList());
    for (int round = 0; round < 2; round++) {
        ClusterChangedEvent event =
                new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current);
        service.connectToNodes(event.nodesDelta().addedNodes());
        assertConnected(event.nodesDelta().addedNodes());
        service.disconnectFromNodes(event.nodesDelta().removedNodes());
        assertConnectedExactlyToNodes(event.state());
        // next round diffs against this round's state
        current = event.state();
    }
}
@Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().blocks().disableStatePersistence()) { // reset the current metadata, we need to start fresh... this.currentMetaData = null; return; } MetaData newMetaData = event.state().metaData(); // delete indices that were there before, but are deleted now // we need to do it so they won't be detected as dangling if (currentMetaData != null) { // only delete indices when we already received a state (currentMetaData != null) for (IndexMetaData current : currentMetaData) { if (!newMetaData.hasIndex(current.index())) { logger.debug( "[{}] deleting index that is no longer part of the metadata (indices: [{}])", current.index(), newMetaData.indices().keys()); if (nodeEnv.hasNodeFile()) { FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index()))); } try { nodeIndexDeletedAction.nodeIndexStoreDeleted( event.state(), current.index(), event.state().nodes().localNodeId()); } catch (Exception e) { logger.debug( "[{}] failed to notify master on local index store deletion", e, current.index()); } } } } currentMetaData = newMetaData; }