/**
 * Builds a cluster state containing a stale primary shard copy.
 *
 * <p>Sequence: start a dedicated master plus two data nodes, create a 1-primary/1-replica index,
 * partition the node holding the primary away from {master, replica}, index a document that only
 * the promoted replica acknowledges, stop that node, then heal the partition so only the node with
 * the OLD (stale) primary copy rejoins. Finally verifies the stale copy is NOT promoted: after
 * reroute, both shard copies remain unassigned.
 *
 * <p>NOTE(review): the step ordering here is load-bearing (disrupt -> index -> stop -> heal);
 * do not reorder.
 */
private void createStaleReplicaScenario() throws Exception {
  logger.info("--> starting 3 nodes, 1 master, 2 data");
  String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
  internalCluster().startDataOnlyNodes(2);
  // One primary + one replica, so each data node holds exactly one copy of the single shard.
  assertAcked(
      client()
          .admin()
          .indices()
          .prepareCreate("test")
          .setSettings(
              Settings.builder()
                  .put("index.number_of_shards", 1)
                  .put("index.number_of_replicas", 1))
          .get());
  ensureGreen();
  logger.info("--> indexing...");
  client()
      .prepareIndex("test", "type1")
      .setSource(jsonBuilder().startObject().field("field", "value1").endObject())
      .get();
  refresh();
  ClusterState state = client().admin().cluster().prepareState().all().get().getState();
  List<ShardRouting> shards = state.routingTable().allShards("test");
  // Exactly two copies expected: the primary and its replica.
  assertThat(shards.size(), equalTo(2));
  // Resolve which data node hosts which copy; allShards() order is not guaranteed.
  final String primaryNode;
  final String replicaNode;
  if (shards.get(0).primary()) {
    primaryNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().getName();
    replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().getName();
  } else {
    primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().getName();
    replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().getName();
  }
  // Isolate the primary's node; master + replica stay on the same side so the
  // replica can be promoted to primary.
  NetworkDisruption partition =
      new NetworkDisruption(
          new TwoPartitions(
              Sets.newHashSet(master, replicaNode), Collections.singleton(primaryNode)),
          new NetworkDisconnect());
  internalCluster().setDisruptionScheme(partition);
  logger.info("--> partitioning node with primary shard from rest of cluster");
  partition.startDisrupting();
  ensureStableCluster(2, master);
  logger.info("--> index a document into previous replica shard (that is now primary)");
  // This write is only acknowledged by the promoted replica; the isolated old
  // primary never sees it, which is what makes its copy stale.
  client(replicaNode)
      .prepareIndex("test", "type1")
      .setSource(jsonBuilder().startObject().field("field", "value1").endObject())
      .get();
  logger.info("--> shut down node that has new acknowledged document");
  internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
  ensureStableCluster(1, master);
  partition.stopDisrupting();
  logger.info("--> waiting for node with old primary shard to rejoin the cluster");
  ensureStableCluster(2, master);
  logger.info("--> check that old primary shard does not get promoted to primary again");
  // kick reroute and wait for all shard states to be fetched
  client(master).admin().cluster().prepareReroute().get();
  assertBusy(
      () ->
          assertThat(
              internalCluster()
                  .getInstance(GatewayAllocator.class, master)
                  .getNumberOfInFlightFetch(),
              equalTo(0)));
  // kick reroute a second time and check that all shards are unassigned
  assertThat(
      client(master)
          .admin()
          .cluster()
          .prepareReroute()
          .get()
          .getState()
          .getRoutingNodes()
          .unassigned()
          .size(),
      equalTo(2));
}
private void processAnalyzerFactory( String name, AnalyzerProvider<?> analyzerFactory, Map<String, NamedAnalyzer> analyzerAliases, Map<String, NamedAnalyzer> analyzers) { /* * Lucene defaults positionIncrementGap to 0 in all analyzers but * Elasticsearch defaults them to 0 only before version 2.0 * and 100 afterwards so we override the positionIncrementGap if it * doesn't match here. */ int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP; if (analyzerFactory instanceof CustomAnalyzerProvider) { ((CustomAnalyzerProvider) analyzerFactory).build(this); /* * Custom analyzers already default to the correct, version * dependent positionIncrementGap and the user is be able to * configure the positionIncrementGap directly on the analyzer so * we disable overriding the positionIncrementGap to preserve the * user's setting. */ overridePositionIncrementGap = Integer.MIN_VALUE; } Analyzer analyzerF = analyzerFactory.get(); if (analyzerF == null) { throw new IllegalArgumentException( "analyzer [" + analyzerFactory.name() + "] created null analyzer"); } NamedAnalyzer analyzer; if (analyzerF instanceof NamedAnalyzer) { // if we got a named analyzer back, use it... analyzer = (NamedAnalyzer) analyzerF; if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) { // unless the positionIncrementGap needs to be overridden analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap); } } else { analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap); } if (analyzers.containsKey(name)) { throw new IllegalStateException("already registered analyzer with name: " + name); } analyzers.put(name, analyzer); // TODO: remove alias support completely when we no longer support pre 5.0 indices final String analyzerAliasKey = "index.analysis.analyzer." 
+ analyzerFactory.name() + ".alias"; if (indexSettings.getSettings().get(analyzerAliasKey) != null) { if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha6)) { // do not allow alias creation if the index was created on or after v5.0 alpha6 throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported"); } // the setting is now removed but we only support it for loading indices created before v5.0 deprecationLogger.deprecated( "setting [{}] is only allowed on index [{}] because it was created before 5.x; " + "analyzer aliases can no longer be created on new indices.", analyzerAliasKey, index().getName()); Set<String> aliases = Sets.newHashSet(indexSettings.getSettings().getAsArray(analyzerAliasKey)); for (String alias : aliases) { if (analyzerAliases.putIfAbsent(alias, analyzer) != null) { throw new IllegalStateException( "alias [" + alias + "] is already used by [" + analyzerAliases.get(alias).name() + "]"); } } } }
public void testDisconnectFromNewlyAddedNodesIfClusterStatePublishingFails() throws InterruptedException { TimedClusterService timedClusterService = new TimedClusterService( Settings.builder().put("cluster.name", "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); timedClusterService.setLocalNode( new DiscoveryNode( "node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)); Set<DiscoveryNode> currentNodes = Collections.synchronizedSet(new HashSet<>()); currentNodes.add(timedClusterService.localNode()); timedClusterService.setNodeConnectionsService( new NodeConnectionsService(Settings.EMPTY, null, null) { @Override public void connectToNodes(List<DiscoveryNode> addedNodes) { currentNodes.addAll(addedNodes); } @Override public void disconnectFromNodes(List<DiscoveryNode> removedNodes) { currentNodes.removeAll(removedNodes); } }); AtomicBoolean failToCommit = new AtomicBoolean(); timedClusterService.setClusterStatePublisher( (event, ackListener) -> { if (failToCommit.get()) { throw new Discovery.FailedToCommitClusterStateException("just to test this"); } }); timedClusterService.start(); ClusterState state = timedClusterService.state(); final DiscoveryNodes nodes = state.nodes(); final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()); state = ClusterState.builder(state) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .nodes(nodesBuilder) .build(); setState(timedClusterService, state); assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes()))); final CountDownLatch latch = new CountDownLatch(1); // try to add node when cluster state publishing fails failToCommit.set(true); timedClusterService.submitStateUpdateTask( "test", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { DiscoveryNode newNode = new DiscoveryNode( "node2", 
buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); return ClusterState.builder(currentState) .nodes(DiscoveryNodes.builder(currentState.nodes()).add(newNode)) .build(); } @Override public void clusterStateProcessed( String source, ClusterState oldState, ClusterState newState) { latch.countDown(); } @Override public void onFailure(String source, Exception e) { latch.countDown(); } }); latch.await(); assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes()))); timedClusterService.close(); }