public ImmutableList<TransportAddress> transportAddresses() {
  ImmutableList.Builder<TransportAddress> lstBuilder = ImmutableList.builder();
  for (DiscoveryNode listedNode : listedNodes) {
    lstBuilder.add(listedNode.address());
  }
  return lstBuilder.build();
}
public DiscoveryNode findByAddress(TransportAddress address) {
  for (DiscoveryNode node : nodes.values()) {
    if (node.address().equals(address)) {
      return node;
    }
  }
  return null;
}
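A minimal usage sketch of the lookup above (assuming, as the surrounding fields suggest, that it lives on DiscoveryNodes); the helper name and address value are hypothetical:

// Hypothetical helper: resolve a publish address back to a node id, or null if no
// node in the current cluster state is bound to that address.
String nodeIdFor(DiscoveryNodes discoveryNodes, TransportAddress publishAddress) {
  DiscoveryNode found = discoveryNodes.findByAddress(publishAddress);
  return found == null ? null : found.id();
}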
private ClusterState handleJoinRequest(final DiscoveryNode node) {
  if (!master) {
    throw new ElasticsearchIllegalStateException(
        "Node [" + localNode + "] not master for join request from [" + node + "]");
  }
  ClusterState state = clusterService.state();
  if (!transportService.addressSupported(node.address().getClass())) {
    // TODO: what should we do now? Maybe inform that node that it's sending an
    // unsupported address type?
    logger.warn("received a wrong address type from [{}], ignoring...", node);
  } else {
    // try to connect to the node; if it fails, an exception is raised back to the client
    transportService.connectToNode(node);
    state = clusterService.state();
    // validate the join request; this throws a failure if validation fails, which
    // propagates back to the node that sent the join request
    membership.sendValidateJoinRequestBlocking(node, state, pingTimeout);
    clusterService.submitStateUpdateTask(
        "zen-disco-receive(join from node[" + node + "])",
        Priority.URGENT,
        new ClusterStateUpdateTask() {
          @Override
          public ClusterState execute(ClusterState currentState) {
            if (currentState.nodes().nodeExists(node.id())) {
              // the node already exists in the cluster
              logger.warn("received a join request for an existing node [{}]", node);
              // still send a new cluster state, so it will be re-published and possibly
              // update the other node
              return ClusterState.builder(currentState).build();
            }
            DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes());
            for (DiscoveryNode existingNode : currentState.nodes()) {
              if (node.address().equals(existingNode.address())) {
                builder.remove(existingNode.id());
                logger.warn(
                    "received join request from node [{}], but found existing node {} with same address, removing existing node",
                    node,
                    existingNode);
              }
            }
            latestDiscoNodes = builder.build();
            // add the new node now (will update latestDiscoNodes on publish)
            return ClusterState.builder(currentState)
                .nodes(latestDiscoNodes.newNode(node))
                .build();
          }

          @Override
          public void onFailure(String source, Throwable t) {
            logger.error("unexpected failure during [{}]", t, source);
          }
        });
  }
  return state;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
  if (getTaskFailures() != null && getTaskFailures().size() > 0) {
    builder.startArray("task_failures");
    for (TaskOperationFailure ex : getTaskFailures()) {
      builder.startObject();
      builder.value(ex);
      builder.endObject();
    }
    builder.endArray();
  }
  if (getNodeFailures() != null && getNodeFailures().size() > 0) {
    builder.startArray("node_failures");
    for (FailedNodeException ex : getNodeFailures()) {
      builder.startObject();
      ex.toXContent(builder, params);
      builder.endObject();
    }
    builder.endArray();
  }
  builder.startObject("nodes");
  for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
    DiscoveryNode node = entry.getKey();
    builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
    builder.field("name", node.name());
    builder.field("transport_address", node.address().toString());
    builder.field("host", node.getHostName());
    builder.field("ip", node.getAddress());
    if (!node.attributes().isEmpty()) {
      builder.startObject("attributes");
      for (ObjectObjectCursor<String, String> attr : node.attributes()) {
        builder.field(attr.key, attr.value, XContentBuilder.FieldCaseConversion.NONE);
      }
      builder.endObject();
    }
    builder.startArray("tasks");
    for (TaskInfo task : entry.getValue()) {
      task.toXContent(builder, params);
    }
    builder.endArray();
    builder.endObject();
  }
  builder.endObject();
  return builder;
}
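For orientation, a sketch of the JSON shape the method above emits; the values are hypothetical, and the failure arrays appear only when non-empty:

// {
//   "task_failures": [ { ... } ],                       // only if present and non-empty
//   "node_failures": [ { ... } ],                       // only if present and non-empty
//   "nodes": {
//     "<node-id>": {
//       "name": "...", "transport_address": "...", "host": "...", "ip": "...",
//       "attributes": { "<key>": "<value>" },           // omitted when empty
//       "tasks": [ { ... } ]
//     }
//   }
// }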
public TransportClientNodesService removeTransportAddress(TransportAddress transportAddress) {
  synchronized (mutex) {
    if (closed) {
      throw new ElasticsearchIllegalStateException(
          "transport client is closed, can't remove an address");
    }
    ImmutableList.Builder<DiscoveryNode> builder = ImmutableList.builder();
    for (DiscoveryNode otherNode : listedNodes) {
      if (!otherNode.address().equals(transportAddress)) {
        builder.add(otherNode);
      } else {
        logger.debug("removing address [{}]", otherNode);
      }
    }
    listedNodes = builder.build();
    nodesSampler.sample();
  }
  return this;
}
public TransportClientNodesService addTransportAddresses(TransportAddress... transportAddresses) {
  synchronized (mutex) {
    if (closed) {
      throw new ElasticsearchIllegalStateException(
          "transport client is closed, can't add an address");
    }
    List<TransportAddress> filtered =
        Lists.newArrayListWithExpectedSize(transportAddresses.length);
    for (TransportAddress transportAddress : transportAddresses) {
      boolean found = false;
      for (DiscoveryNode otherNode : listedNodes) {
        if (otherNode.address().equals(transportAddress)) {
          found = true;
          logger.debug(
              "address [{}] already exists with [{}], ignoring...", transportAddress, otherNode);
          break;
        }
      }
      if (!found) {
        filtered.add(transportAddress);
      }
    }
    if (filtered.isEmpty()) {
      return this;
    }
    ImmutableList.Builder<DiscoveryNode> builder = ImmutableList.builder();
    builder.addAll(listedNodes());
    for (TransportAddress transportAddress : filtered) {
      DiscoveryNode node =
          new DiscoveryNode(
              "#transport#-" + tempNodeIdGenerator.incrementAndGet(),
              transportAddress,
              minCompatibilityVersion);
      logger.debug("adding address [{}]", node);
      builder.add(node);
    }
    listedNodes = builder.build();
    nodesSampler.sample();
  }
  return this;
}
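A hedged usage sketch for the add/remove pair above. The nodesService reference and host/port are placeholders, and the InetSocketTransportAddress constructor form varies across versions (the host/port variant is assumed here):

// Hypothetical wiring: seed the client with one address, later drop it.
TransportAddress seed = new InetSocketTransportAddress("10.0.0.1", 9300); // constructor form assumed
nodesService.addTransportAddresses(seed);  // duplicate addresses are logged and ignored
// ... later, e.g. when the host is decommissioned:
nodesService.removeTransportAddress(seed); // rebuilds listedNodes and triggers a re-sample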
private Table buildTable(
    RestRequest req,
    ClusterStateResponse state,
    NodesInfoResponse nodesInfo,
    NodesStatsResponse nodesStats) {
  boolean fullId = req.paramAsBoolean("full_id", false);
  DiscoveryNodes nodes = state.getState().nodes();
  String masterId = nodes.masterNodeId();
  Table table = getTableWithHeader(req);
  for (DiscoveryNode node : nodes) {
    NodeInfo info = nodesInfo.getNodesMap().get(node.id());
    NodeStats stats = nodesStats.getNodesMap().get(node.id());
    JvmInfo jvmInfo = info == null ? null : info.getJvm();
    JvmStats jvmStats = stats == null ? null : stats.getJvm();
    FsInfo fsInfo = stats == null ? null : stats.getFs();
    OsStats osStats = stats == null ? null : stats.getOs();
    ProcessStats processStats = stats == null ? null : stats.getProcess();
    NodeIndicesStats indicesStats = stats == null ? null : stats.getIndices();

    table.startRow();
    table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
    table.addCell(info == null ? null : info.getProcess().getId());
    table.addCell(node.getHostName());
    table.addCell(node.getHostAddress());
    if (node.address() instanceof InetSocketTransportAddress) {
      table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
    } else {
      table.addCell("-");
    }
    table.addCell(node.getVersion().number());
    table.addCell(info == null ? null : info.getBuild().shortHash());
    table.addCell(jvmInfo == null ? null : jvmInfo.version());
    table.addCell(fsInfo == null ? null : fsInfo.getTotal().getAvailable());
    table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsed());
    table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsedPercent());
    table.addCell(jvmInfo == null ? null : jvmInfo.getMem().getHeapMax());
    table.addCell(
        osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsed());
    table.addCell(
        osStats == null
            ? null
            : osStats.getMem() == null ? null : osStats.getMem().getUsedPercent());
    table.addCell(
        osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getTotal());
    table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors());
    table.addCell(
        processStats == null
            ? null
            : calculatePercentage(
                processStats.getOpenFileDescriptors(), processStats.getMaxFileDescriptors()));
    table.addCell(processStats == null ? null : processStats.getMaxFileDescriptors());
    table.addCell(
        osStats == null ? null : String.format(Locale.ROOT, "%.2f", osStats.getLoadAverage()));
    table.addCell(jvmStats == null ? null : jvmStats.getUptime());
    table.addCell(node.clientNode() ? "c" : node.dataNode() ? "d" : "-");
    table.addCell(
        masterId == null ? "x" : masterId.equals(node.id()) ? "*" : node.masterNode() ? "m" : "-");
    table.addCell(node.name());

    CompletionStats completionStats =
        indicesStats == null ? null : stats.getIndices().getCompletion();
    table.addCell(completionStats == null ? null : completionStats.getSize());

    FieldDataStats fdStats = indicesStats == null ? null : stats.getIndices().getFieldData();
    table.addCell(fdStats == null ? null : fdStats.getMemorySize());
    table.addCell(fdStats == null ? null : fdStats.getEvictions());

    QueryCacheStats fcStats = indicesStats == null ? null : indicesStats.getQueryCache();
    table.addCell(fcStats == null ? null : fcStats.getMemorySize());
    table.addCell(fcStats == null ? null : fcStats.getEvictions());

    RequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getRequestCache();
    table.addCell(qcStats == null ? null : qcStats.getMemorySize());
    table.addCell(qcStats == null ? null : qcStats.getEvictions());
    table.addCell(qcStats == null ? null : qcStats.getHitCount());
    table.addCell(qcStats == null ? null : qcStats.getMissCount());

    FlushStats flushStats = indicesStats == null ? null : indicesStats.getFlush();
    table.addCell(flushStats == null ? null : flushStats.getTotal());
    table.addCell(flushStats == null ? null : flushStats.getTotalTime());

    GetStats getStats = indicesStats == null ? null : indicesStats.getGet();
    table.addCell(getStats == null ? null : getStats.current());
    table.addCell(getStats == null ? null : getStats.getTime());
    table.addCell(getStats == null ? null : getStats.getCount());
    table.addCell(getStats == null ? null : getStats.getExistsTime());
    table.addCell(getStats == null ? null : getStats.getExistsCount());
    table.addCell(getStats == null ? null : getStats.getMissingTime());
    table.addCell(getStats == null ? null : getStats.getMissingCount());

    IndexingStats indexingStats = indicesStats == null ? null : indicesStats.getIndexing();
    table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCurrent());
    table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteTime());
    table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCount());
    table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCurrent());
    table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexTime());
    table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCount());
    table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexFailedCount());

    MergeStats mergeStats = indicesStats == null ? null : indicesStats.getMerge();
    table.addCell(mergeStats == null ? null : mergeStats.getCurrent());
    table.addCell(mergeStats == null ? null : mergeStats.getCurrentNumDocs());
    table.addCell(mergeStats == null ? null : mergeStats.getCurrentSize());
    table.addCell(mergeStats == null ? null : mergeStats.getTotal());
    table.addCell(mergeStats == null ? null : mergeStats.getTotalNumDocs());
    table.addCell(mergeStats == null ? null : mergeStats.getTotalSize());
    table.addCell(mergeStats == null ? null : mergeStats.getTotalTime());

    PercolateStats percolateStats = indicesStats == null ? null : indicesStats.getPercolate();
    table.addCell(percolateStats == null ? null : percolateStats.getCurrent());
    table.addCell(percolateStats == null ? null : percolateStats.getMemorySize());
    table.addCell(percolateStats == null ? null : percolateStats.getNumQueries());
    table.addCell(percolateStats == null ? null : percolateStats.getTime());
    table.addCell(percolateStats == null ? null : percolateStats.getCount());

    RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh();
    table.addCell(refreshStats == null ? null : refreshStats.getTotal());
    table.addCell(refreshStats == null ? null : refreshStats.getTotalTime());

    ScriptStats scriptStats = stats == null ? null : stats.getScriptStats();
    table.addCell(scriptStats == null ? null : scriptStats.getCompilations());
    table.addCell(scriptStats == null ? null : scriptStats.getCacheEvictions());

    SearchStats searchStats = indicesStats == null ? null : indicesStats.getSearch();
    table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCurrent());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchTime());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCount());
    table.addCell(searchStats == null ? null : searchStats.getOpenContexts());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime());
    table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount());

    SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments();
    table.addCell(segmentsStats == null ? null : segmentsStats.getCount());
    table.addCell(segmentsStats == null ? null : segmentsStats.getMemory());
    table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory());
    table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMaxMemory());
    table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory());
    table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory());

    SuggestStats suggestStats = indicesStats == null ? null : indicesStats.getSuggest();
    table.addCell(suggestStats == null ? null : suggestStats.getCurrent());
    table.addCell(suggestStats == null ? null : suggestStats.getTime());
    table.addCell(suggestStats == null ? null : suggestStats.getCount());

    table.endRow();
  }
  return table;
}
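calculatePercentage is referenced in the file-descriptor cell above but not shown in this excerpt; a plausible sketch, offered only as an assumption about its behavior:

// Assumed helper: rounds used/max to a whole percent, guarding against max <= 0.
private short calculatePercentage(long used, long max) {
  return max <= 0 ? 0 : (short) ((100d * used) / max);
}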
private ClusterState applyUpdate(ClusterState currentState, ClusterChangedEvent task) {
  boolean clusterStateChanged = false;
  ClusterState tribeState = task.state();
  DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());

  // -- merge nodes
  // go over existing nodes, and see if they need to be removed
  for (DiscoveryNode discoNode : currentState.nodes()) {
    String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
    if (markedTribeName != null && markedTribeName.equals(tribeName)) {
      if (tribeState.nodes().get(discoNode.id()) == null) {
        clusterStateChanged = true;
        logger.info("[{}] removing node [{}]", tribeName, discoNode);
        nodes.remove(discoNode.id());
      }
    }
  }
  // go over tribe nodes, and see if they need to be added
  for (DiscoveryNode tribe : tribeState.nodes()) {
    if (currentState.nodes().get(tribe.id()) == null) {
      // a new node, add it, but also add the tribe name to the attributes
      Map<String, String> tribeAttr = new HashMap<>();
      for (ObjectObjectCursor<String, String> attr : tribe.attributes()) {
        tribeAttr.put(attr.key, attr.value);
      }
      tribeAttr.put(TRIBE_NAME, tribeName);
      DiscoveryNode discoNode =
          new DiscoveryNode(
              tribe.name(),
              tribe.id(),
              tribe.getHostName(),
              tribe.getHostAddress(),
              tribe.address(),
              unmodifiableMap(tribeAttr),
              tribe.version());
      clusterStateChanged = true;
      logger.info("[{}] adding node [{}]", tribeName, discoNode);
      nodes.put(discoNode);
    }
  }

  // -- merge metadata
  ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
  MetaData.Builder metaData = MetaData.builder(currentState.metaData());
  RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
  // go over existing indices, and see if they need to be removed
  for (IndexMetaData index : currentState.metaData()) {
    String markedTribeName = index.getSettings().get(TRIBE_NAME);
    if (markedTribeName != null && markedTribeName.equals(tribeName)) {
      IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex());
      clusterStateChanged = true;
      if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) {
        logger.info("[{}] removing index [{}]", tribeName, index.getIndex());
        removeIndex(blocks, metaData, routingTable, index);
      } else {
        // always make sure to update the metadata and routing table, in case there are
        // changes in them (new mapping, shards moving from initializing to started)
        routingTable.add(tribeState.routingTable().index(index.getIndex()));
        Settings tribeSettings =
            Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
        metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
      }
    }
  }
  // go over tribe indices, and see if they need to be added
  for (IndexMetaData tribeIndex : tribeState.metaData()) {
    // if there is no routing table yet, do nothing with it...
    IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex());
    if (table == null) {
      continue;
    }
    final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
    if (indexMetaData == null) {
      if (!droppedIndices.contains(tribeIndex.getIndex())) {
        // a new index, add it, and add the tribe name as a setting
        clusterStateChanged = true;
        logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex());
        addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
      }
    } else {
      String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME);
      if (!tribeName.equals(existingFromTribe)) {
        // we have a potential conflict on index names, decide what to do...
        if (ON_CONFLICT_ANY.equals(onConflict)) {
          // we chose any tribe, carry on
        } else if (ON_CONFLICT_DROP.equals(onConflict)) {
          // drop the indices, there is a conflict
          clusterStateChanged = true;
          logger.info(
              "[{}] dropping index [{}] due to conflict with [{}]",
              tribeName,
              tribeIndex.getIndex(),
              existingFromTribe);
          removeIndex(blocks, metaData, routingTable, tribeIndex);
          droppedIndices.add(tribeIndex.getIndex());
        } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
          // on conflict, prefer a tribe...
          String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length());
          if (tribeName.equals(preferredTribeName)) {
            // the new one is the preferred one, replace...
            clusterStateChanged = true;
            logger.info(
                "[{}] adding index [{}], preferred over [{}]",
                tribeName,
                tribeIndex.getIndex(),
                existingFromTribe);
            removeIndex(blocks, metaData, routingTable, tribeIndex);
            addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
          }
          // else: either the existing one is the preferred one, or we haven't seen one, carry on
        }
      }
    }
  }
  if (!clusterStateChanged) {
    return currentState;
  } else {
    return ClusterState.builder(currentState)
        .incrementVersion()
        .blocks(blocks)
        .nodes(nodes)
        .metaData(metaData)
        .routingTable(routingTable.build())
        .build();
  }
}
public String[] resolveNodesIds(String... nodesIds) {
  if (isAllNodes(nodesIds)) {
    int index = 0;
    nodesIds = new String[nodes.size()];
    for (DiscoveryNode node : this) {
      nodesIds[index++] = node.id();
    }
    return nodesIds;
  } else {
    Set<String> resolvedNodesIds = new HashSet<String>(nodesIds.length);
    for (String nodeId : nodesIds) {
      if (nodeId.equals("_local")) {
        String localNodeId = localNodeId();
        if (localNodeId != null) {
          resolvedNodesIds.add(localNodeId);
        }
      } else if (nodeId.equals("_master")) {
        String masterNodeId = masterNodeId();
        if (masterNodeId != null) {
          resolvedNodesIds.add(masterNodeId);
        }
      } else if (nodeExists(nodeId)) {
        resolvedNodesIds.add(nodeId);
      } else {
        // not a node id, try and search by name
        for (DiscoveryNode node : this) {
          if (Regex.simpleMatch(nodeId, node.name())) {
            resolvedNodesIds.add(node.id());
          }
        }
        for (DiscoveryNode node : this) {
          if (node.address().match(nodeId)) {
            resolvedNodesIds.add(node.id());
          }
        }
        int index = nodeId.indexOf(':');
        if (index != -1) {
          String matchAttrName = nodeId.substring(0, index);
          String matchAttrValue = nodeId.substring(index + 1);
          if ("data".equals(matchAttrName)) {
            if (Booleans.parseBoolean(matchAttrValue, true)) {
              resolvedNodesIds.addAll(dataNodes.keySet());
            } else {
              resolvedNodesIds.removeAll(dataNodes.keySet());
            }
          } else if ("master".equals(matchAttrName)) {
            if (Booleans.parseBoolean(matchAttrValue, true)) {
              resolvedNodesIds.addAll(masterNodes.keySet());
            } else {
              resolvedNodesIds.removeAll(masterNodes.keySet());
            }
          } else {
            for (DiscoveryNode node : this) {
              for (Map.Entry<String, String> entry : node.attributes().entrySet()) {
                String attrName = entry.getKey();
                String attrValue = entry.getValue();
                if (Regex.simpleMatch(matchAttrName, attrName)
                    && Regex.simpleMatch(matchAttrValue, attrValue)) {
                  resolvedNodesIds.add(node.id());
                }
              }
            }
          }
        }
      }
    }
    return resolvedNodesIds.toArray(new String[resolvedNodesIds.size()]);
  }
}
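A sketch of the selector forms the method above handles, with hypothetical values; the _all sentinel is an assumption about what isAllNodes matches:

String[] all    = nodes.resolveNodesIds("_all");        // assumed sentinel: every node id
String[] local  = nodes.resolveNodesIds("_local");      // this node, if known
String[] master = nodes.resolveNodesIds("_master");     // elected master, if any
String[] byName = nodes.resolveNodesIds("data-node-*"); // wildcard match on node name
String[] data   = nodes.resolveNodesIds("data:true");   // built-in data-node filter
String[] rack   = nodes.resolveNodesIds("rack:r1*");    // generic attribute wildcard match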
@Override
protected void doSample() {
  HashSet<DiscoveryNode> newNodes = new HashSet<>();
  HashSet<DiscoveryNode> newFilteredNodes = new HashSet<>();
  for (DiscoveryNode listedNode : listedNodes) {
    if (!transportService.nodeConnected(listedNode)) {
      try {
        // it's a listed node, light connect to it...
        logger.trace("connecting to listed node (light) [{}]", listedNode);
        transportService.connectToNodeLight(listedNode);
      } catch (Throwable e) {
        logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
        continue;
      }
    }
    try {
      LivenessResponse livenessResponse =
          transportService
              .submitRequest(
                  listedNode,
                  TransportLivenessAction.NAME,
                  headers.applyTo(new LivenessRequest()),
                  TransportRequestOptions.options()
                      .withType(TransportRequestOptions.Type.STATE)
                      .withTimeout(pingTimeout),
                  new FutureTransportResponseHandler<LivenessResponse>() {
                    @Override
                    public LivenessResponse newInstance() {
                      return new LivenessResponse();
                    }
                  })
              .txGet();
      if (!ignoreClusterName && !clusterName.equals(livenessResponse.getClusterName())) {
        logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName);
        newFilteredNodes.add(listedNode);
      } else if (livenessResponse.getDiscoveryNode() != null) {
        // use the discovered information, but keep the original transport address so
        // callers can control exactly which address is used
        DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
        newNodes.add(
            new DiscoveryNode(
                nodeWithInfo.name(),
                nodeWithInfo.id(),
                nodeWithInfo.getHostName(),
                nodeWithInfo.getHostAddress(),
                listedNode.address(),
                nodeWithInfo.attributes(),
                nodeWithInfo.version()));
      } else {
        // although we asked for one node, our target may not have completed
        // initialization yet and doesn't have cluster nodes
        logger.debug(
            "node {} didn't return any discovery info, temporarily using transport discovery node",
            listedNode);
        newNodes.add(listedNode);
      }
    } catch (Throwable e) {
      logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
      transportService.disconnectFromNode(listedNode);
    }
  }
  nodes = validateNewNodes(newNodes);
  filteredNodes = ImmutableList.copyOf(newFilteredNodes);
}
@Test
public void testHostOnMessages() throws InterruptedException {
  final CountDownLatch latch = new CountDownLatch(2);
  final AtomicReference<TransportAddress> addressA = new AtomicReference<>();
  final AtomicReference<TransportAddress> addressB = new AtomicReference<>();
  serviceB.registerHandler(
      "action1",
      new TransportRequestHandler<TestRequest>() {
        @Override
        public TestRequest newInstance() {
          return new TestRequest();
        }

        @Override
        public void messageReceived(TestRequest request, TransportChannel channel)
            throws Exception {
          addressA.set(request.remoteAddress());
          channel.sendResponse(new TestResponse());
          latch.countDown();
        }

        @Override
        public String executor() {
          return ThreadPool.Names.SAME;
        }

        @Override
        public boolean isForceExecution() {
          return false;
        }
      });
  serviceA.sendRequest(
      nodeB,
      "action1",
      new TestRequest(),
      new TransportResponseHandler<TestResponse>() {
        @Override
        public TestResponse newInstance() {
          return new TestResponse();
        }

        @Override
        public void handleResponse(TestResponse response) {
          addressB.set(response.remoteAddress());
          latch.countDown();
        }

        @Override
        public void handleException(TransportException exp) {
          latch.countDown();
        }

        @Override
        public String executor() {
          return ThreadPool.Names.SAME;
        }
      });
  if (!latch.await(10, TimeUnit.SECONDS)) {
    fail("message round trip did not complete within a sensible time frame");
  }
  assertTrue(nodeA.address().sameHost(addressA.get()));
  assertTrue(nodeB.address().sameHost(addressB.get()));
}