/** Returns the changes comparing these nodes to the provided nodes. */
public Delta delta(DiscoveryNodes other) {
    List<DiscoveryNode> removed = newArrayList();
    List<DiscoveryNode> added = newArrayList();
    for (DiscoveryNode node : other) {
        if (!this.nodeExists(node.id())) {
            removed.add(node);
        }
    }
    for (DiscoveryNode node : this) {
        if (!other.nodeExists(node.id())) {
            added.add(node);
        }
    }
    DiscoveryNode previousMasterNode = null;
    DiscoveryNode newMasterNode = null;
    if (masterNodeId != null) {
        if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
            previousMasterNode = other.masterNode();
            newMasterNode = masterNode();
        }
    }
    return new Delta(previousMasterNode, newMasterNode, localNodeId,
        ImmutableList.copyOf(removed), ImmutableList.copyOf(added));
}
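// Usage sketch (illustrative, not part of the original source): how a caller might
// consume the Delta above. The node ids, DummyTransportAddress.INSTANCE, and the
// three-argument DiscoveryNode constructor are assumptions for this example.
DiscoveryNode n1 = new DiscoveryNode("n1", DummyTransportAddress.INSTANCE, Version.CURRENT);
DiscoveryNode n2 = new DiscoveryNode("n2", DummyTransportAddress.INSTANCE, Version.CURRENT);
DiscoveryNodes oldNodes = DiscoveryNodes.builder().put(n1).put(n2).localNodeId("n1").build();
DiscoveryNodes newNodes = DiscoveryNodes.builder().put(n1).localNodeId("n1").build();
DiscoveryNodes.Delta delta = newNodes.delta(oldNodes);
// n2 appears in the old nodes but not in the new ones, so it is reported as removed
assert delta.removed();
assert "n2".equals(delta.removedNodes().get(0).id());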
private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> buildShardStores(
        DiscoveryNodes nodes, MutableShardRouting shard) {
    Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores =
        cachedStores.get(shard.shardId());
    ObjectOpenHashSet<String> nodesIds;
    if (shardStores == null) {
        shardStores = Maps.newHashMap();
        cachedStores.put(shard.shardId(), shardStores);
        nodesIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
    } else {
        nodesIds = ObjectOpenHashSet.newInstance();
        // clean nodes that have failed
        for (Iterator<DiscoveryNode> it = shardStores.keySet().iterator(); it.hasNext(); ) {
            DiscoveryNode node = it.next();
            if (!nodes.nodeExists(node.id())) {
                it.remove();
            }
        }
        for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
            DiscoveryNode node = cursor.value;
            if (!shardStores.containsKey(node)) {
                nodesIds.add(node.id());
            }
        }
    }
    if (!nodesIds.isEmpty()) {
        String[] nodesIdsArray = nodesIds.toArray(String.class);
        TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData nodesStoreFilesMetaData =
            listShardStoreMetaData.list(shard.shardId(), false, nodesIdsArray, listTimeout).actionGet();
        if (logger.isTraceEnabled()) {
            if (nodesStoreFilesMetaData.failures().length > 0) {
                StringBuilder sb = new StringBuilder(shard + ": failures when trying to list stores on nodes:");
                for (int i = 0; i < nodesStoreFilesMetaData.failures().length; i++) {
                    Throwable cause = ExceptionsHelper.unwrapCause(nodesStoreFilesMetaData.failures()[i]);
                    if (cause instanceof ConnectTransportException) {
                        continue;
                    }
                    sb.append("\n -> ").append(nodesStoreFilesMetaData.failures()[i].getDetailedMessage());
                }
                logger.trace(sb.toString());
            }
        }
        for (TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData nodeStoreFilesMetaData : nodesStoreFilesMetaData) {
            if (nodeStoreFilesMetaData.storeFilesMetaData() != null) {
                shardStores.put(nodeStoreFilesMetaData.getNode(), nodeStoreFilesMetaData.storeFilesMetaData());
            }
        }
    }
    return shardStores;
}
public ImmutableList<TransportAddress> transportAddresses() {
    ImmutableList.Builder<TransportAddress> lstBuilder = ImmutableList.builder();
    for (DiscoveryNode listedNode : listedNodes) {
        lstBuilder.add(listedNode.address());
    }
    return lstBuilder.build();
}
@Override
public void run() {
    // the master node will check against all nodes if it's alive with certain discovery
    // implementations, but we can't rely on that, so we check on it as well
    for (DiscoveryNode node : clusterState.nodes()) {
        if (lifecycle.stoppedOrClosed()) {
            return;
        }
        if (!nodeRequiresConnection(node)) {
            continue;
        }
        // we double check existence of node since connectToNode might take time...
        if (clusterState.nodes().nodeExists(node.id())) {
            if (!transportService.nodeConnected(node)) {
                try {
                    transportService.connectToNode(node);
                } catch (Exception e) {
                    if (lifecycle.stoppedOrClosed()) {
                        return;
                    }
                    // double check here as well, maybe it's gone?
                    if (clusterState.nodes().nodeExists(node.id())) {
                        logger.warn("failed to reconnect to node {}", e, node);
                    }
                }
            }
        }
    }
}
public DiscoveryNode findByAddress(TransportAddress address) {
    for (DiscoveryNode node : nodes.values()) {
        if (node.address().equals(address)) {
            return node;
        }
    }
    return null;
}
public DiscoveryNodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
    Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
    for (DiscoveryNode node : this) {
        if (newNodes.contains(node.id())) {
            builder.put(node);
        }
    }
    return builder.build();
}
public void verify(String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
    final DiscoveryNodes discoNodes = clusterService.state().nodes();
    final DiscoveryNode localNode = discoNodes.localNode();
    final ObjectContainer<DiscoveryNode> masterAndDataNodes = discoNodes.masterAndDataNodes().values();
    final List<DiscoveryNode> nodes = newArrayList();
    for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
        DiscoveryNode node = cursor.value;
        Version version = node.getVersion();
        // Verification wasn't supported before v1.4.0 - no reason to send verification request to these nodes
        if (version != null && version.onOrAfter(Version.V_1_4_0)) {
            nodes.add(node);
        }
    }
    final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
    final AtomicInteger counter = new AtomicInteger(nodes.size());
    for (final DiscoveryNode node : nodes) {
        if (node.equals(localNode)) {
            try {
                doVerify(repository, verificationToken);
            } catch (Throwable t) {
                logger.warn("[{}] failed to verify repository", t, repository);
                errors.add(new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(t)));
            }
            if (counter.decrementAndGet() == 0) {
                finishVerification(listener, nodes, errors);
            }
        } else {
            transportService.sendRequest(node, ACTION_NAME,
                new VerifyNodeRepositoryRequest(repository, verificationToken),
                new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                    @Override
                    public void handleResponse(TransportResponse.Empty response) {
                        if (counter.decrementAndGet() == 0) {
                            finishVerification(listener, nodes, errors);
                        }
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        errors.add(new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(exp)));
                        if (counter.decrementAndGet() == 0) {
                            finishVerification(listener, nodes, errors);
                        }
                    }
                });
        }
    }
}
public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException {
    if (nodes.masterNodeId() == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        out.writeUTF(nodes.masterNodeId);
    }
    out.writeVInt(nodes.size());
    for (DiscoveryNode node : nodes) {
        node.writeTo(out);
    }
}
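// Round-trip sketch (illustrative, not part of the original source): writeTo above
// serializes the master node id but deliberately not the local node id, since the
// local node is supplied by each receiver. The BytesStreamOutput/BytesStreamInput
// pairing and the Builder.readFrom(StreamInput, DiscoveryNode) counterpart are
// assumptions for this era of the codebase.
BytesStreamOutput out = new BytesStreamOutput();
DiscoveryNodes.Builder.writeTo(nodes, out);
StreamInput in = new BytesStreamInput(out.bytes());
DiscoveryNodes restored = DiscoveryNodes.Builder.readFrom(in, localNode);
assert restored.size() == nodes.size();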
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
    if (minimumMasterNodes < 1) {
        return true;
    }
    int count = 0;
    for (DiscoveryNode node : nodes) {
        if (node.masterNode()) {
            count++;
        }
    }
    return count >= minimumMasterNodes;
}
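// Worked example (illustrative): the check above is the split-brain guard. With
// three master-eligible nodes, the usual recommendation is a strict majority,
// minimum_master_nodes = (3 / 2) + 1 = 2, so a partition that sees only one
// master-eligible node fails hasEnoughMasterNodes and refuses to elect a master.
int masterEligible = 3;
int recommendedMinimumMasterNodes = (masterEligible / 2) + 1; // = 2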
private void updateMappingOnMaster(final String index, final String type) {
    try {
        MapperService mapperService = indicesService.indexServiceSafe(index).mapperService();
        final DocumentMapper documentMapper = mapperService.documentMapper(type);
        if (documentMapper == null) {
            // should not happen
            return;
        }
        IndexMetaData metaData = clusterService.state().metaData().index(index);
        if (metaData == null) {
            return;
        }
        long orderId = mappingUpdatedAction.generateNextMappingUpdateOrder();
        documentMapper.refreshSource();
        DiscoveryNode node = clusterService.localNode();
        final MappingUpdatedAction.MappingUpdatedRequest request =
            new MappingUpdatedAction.MappingUpdatedRequest(index, metaData.uuid(), type,
                documentMapper.mappingSource(), orderId, node != null ? node.id() : null);
        mappingUpdatedAction.execute(request, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {
            @Override
            public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {
                // all is well
            }

            @Override
            public void onFailure(Throwable e) {
                try {
                    logger.warn("failed to update master on updated mapping for index [{}], type [{}] and source [{}]",
                        e, index, type, documentMapper.mappingSource().string());
                } catch (IOException e1) {
                    // ignore
                }
            }
        });
    } catch (Exception e) {
        logger.warn("failed to update master on updated mapping for index [{}], type [{}]", e, index, type);
    }
}
public void addNodeInfo(NodeInfo nodeInfo) {
    total++;
    DiscoveryNode node = nodeInfo.getNode();
    if (node.masterNode()) {
        if (node.dataNode()) {
            masterData++;
        } else {
            masterOnly++;
        }
    } else if (node.dataNode()) {
        dataOnly++;
    } else if (node.clientNode()) {
        client++;
    }
}
private List<DiscoveryNode> sortedMasterNodes(Iterable<DiscoveryNode> nodes) {
    List<DiscoveryNode> possibleNodes = CollectionUtils.iterableAsArrayList(nodes);
    if (possibleNodes.isEmpty()) {
        return null;
    }
    // clean non-master nodes
    for (Iterator<DiscoveryNode> it = possibleNodes.iterator(); it.hasNext(); ) {
        DiscoveryNode node = it.next();
        if (!node.masterNode()) {
            it.remove();
        }
    }
    CollectionUtil.introSort(possibleNodes, nodeComparator);
    return possibleNodes;
}
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("routingNode ([");
    sb.append(node.getName());
    sb.append("][");
    sb.append(node.getId());
    sb.append("][");
    sb.append(node.getHostName());
    sb.append("][");
    sb.append(node.getHostAddress());
    sb.append("], [");
    sb.append(shards.size());
    sb.append(" assigned shards])");
    return sb.toString();
}
/** Returns a set of nodes that have at least one shard of the given index. */
public synchronized Set<String> nodesInclude(String index) {
    if (clusterService().state().routingTable().hasIndex(index)) {
        List<ShardRouting> allShards = clusterService().state().routingTable().allShards(index);
        DiscoveryNodes discoveryNodes = clusterService().state().getNodes();
        Set<String> nodes = new HashSet<String>();
        for (ShardRouting shardRouting : allShards) {
            if (shardRouting.assignedToNode()) {
                DiscoveryNode discoveryNode = discoveryNodes.get(shardRouting.currentNodeId());
                nodes.add(discoveryNode.getName());
            }
        }
        return nodes;
    }
    return Collections.emptySet();
}
public String shortSummary() {
    StringBuilder sb = new StringBuilder();
    if (!removed() && masterNodeChanged()) {
        if (newMasterNode.id().equals(localNodeId)) {
            // we are the master, no nodes were removed, we are actually the first master
            sb.append("new_master ").append(newMasterNode());
        } else {
            // we are not the master, so we just got this event. No nodes were removed, so it's not a *new* master
            sb.append("detected_master ").append(newMasterNode());
        }
    } else {
        if (masterNodeChanged()) {
            sb.append("master {new ").append(newMasterNode());
            if (previousMasterNode() != null) {
                sb.append(", previous ").append(previousMasterNode());
            }
            sb.append("}");
        }
        if (removed()) {
            if (masterNodeChanged()) {
                sb.append(", ");
            }
            sb.append("removed {");
            for (DiscoveryNode node : removedNodes()) {
                sb.append(node).append(',');
            }
            sb.append("}");
        }
    }
    if (added()) {
        // don't print if there is one added, and it is us
        if (!(addedNodes().size() == 1 && addedNodes().get(0).id().equals(localNodeId))) {
            if (removed() || masterNodeChanged()) {
                sb.append(", ");
            }
            sb.append("added {");
            for (DiscoveryNode node : addedNodes()) {
                if (!node.id().equals(localNodeId)) {
                    // don't print ourselves
                    sb.append(node).append(',');
                }
            }
            sb.append("}");
        }
    }
    return sb.toString();
}
@Override
public void onNodeAck(DiscoveryNode node, @Nullable Exception e) {
    if (!ackedTaskListener.mustAck(node)) {
        // we always wait for the master ack anyway
        if (!node.equals(nodes.getMasterNode())) {
            return;
        }
    }
    if (e == null) {
        logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
    } else {
        this.lastFailure = e;
        logger.debug(
            (Supplier<?>) () -> new ParameterizedMessage(
                "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
            e);
    }
    if (countDown.countDown()) {
        logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
        FutureUtils.cancel(ackTimeoutCallback);
        ackedTaskListener.onAllNodesAcked(lastFailure);
    }
}
private void handleTransportDisconnect(DiscoveryNode node) {
    if (!latestNodes.nodeExists(node.id())) {
        return;
    }
    NodeFD nodeFD = nodesFD.remove(node);
    if (nodeFD == null) {
        return;
    }
    if (!running) {
        return;
    }
    nodeFD.running = false;
    if (connectOnNetworkDisconnect) {
        try {
            transportService.connectToNode(node);
            nodesFD.put(node, new NodeFD());
            threadPool.schedule(pingInterval, ThreadPool.Names.SAME, new SendPingRequest(node));
        } catch (Exception e) {
            logger.trace("[node ] [{}] transport disconnected (with verified connect)", node);
            notifyNodeFailure(node, "transport disconnected (with verified connect)");
        }
    } else {
        logger.trace("[node ] [{}] transport disconnected", node);
        notifyNodeFailure(node, "transport disconnected");
    }
}
private ClusterState rejoin(ClusterState clusterState, String reason) {
    logger.warn(reason + ", current nodes: {}", clusterState.nodes());
    nodesFD.stop();
    masterFD.stop(reason);
    master = false;
    ClusterBlocks clusterBlocks = ClusterBlocks.builder()
        .blocks(clusterState.blocks())
        .addGlobalBlock(NO_MASTER_BLOCK)
        .addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)
        .build();
    // clear the routing table, we have no master, so we need to recreate the routing when we reform the cluster
    RoutingTable routingTable = RoutingTable.builder().build();
    // we also clean the metadata, since we are going to recover it if we become master
    MetaData metaData = MetaData.builder().build();
    // clean the nodes, we are now not connected to anybody, since we try and reform the cluster
    latestDiscoNodes = new DiscoveryNodes.Builder().put(localNode).localNodeId(localNode.id()).build();
    asyncJoinCluster();
    return ClusterState.builder(clusterState)
        .blocks(clusterBlocks)
        .nodes(latestDiscoNodes)
        .routingTable(routingTable)
        .metaData(metaData)
        .build();
}
private void sendNodeRequest(final DiscoveryNode node, List<ShardRouting> shards, final int nodeIndex) {
    try {
        NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
        transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest,
            new BaseTransportResponseHandler<NodeResponse>() {
                @Override
                public NodeResponse newInstance() {
                    return new NodeResponse();
                }

                @Override
                public void handleResponse(NodeResponse response) {
                    onNodeResponse(node, nodeIndex, response);
                }

                @Override
                public void handleException(TransportException exp) {
                    onNodeFailure(node, nodeIndex, exp);
                }

                @Override
                public String executor() {
                    return ThreadPool.Names.SAME;
                }
            });
    } catch (Throwable e) {
        onNodeFailure(node, nodeIndex, e);
    }
}
/**
 * Creates a cluster state where local node and master node can be specified.
 *
 * @param localNode  node in allNodes that is the local node
 * @param masterNode node in allNodes that is the master node. Can be null if no master exists
 * @param allNodes   all nodes in the cluster
 * @return cluster state
 */
public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) {
    DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
    for (DiscoveryNode node : allNodes) {
        discoBuilder.put(node);
    }
    if (masterNode != null) {
        discoBuilder.masterNodeId(masterNode.getId());
    }
    discoBuilder.localNodeId(localNode.getId());
    ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
    state.nodes(discoBuilder);
    state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
    return state.build();
}
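// Usage sketch (illustrative, not part of the original source): building a
// three-node test cluster state with an explicit master. The node ids,
// DummyTransportAddress.INSTANCE, and the three-argument DiscoveryNode
// constructor are assumptions for the example.
DiscoveryNode local = new DiscoveryNode("local", DummyTransportAddress.INSTANCE, Version.CURRENT);
DiscoveryNode master = new DiscoveryNode("master", DummyTransportAddress.INSTANCE, Version.CURRENT);
DiscoveryNode other = new DiscoveryNode("other", DummyTransportAddress.INSTANCE, Version.CURRENT);
ClusterState clusterState = state(local, master, local, master, other);
assert "master".equals(clusterState.nodes().getMasterNodeId());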
private IndexShard newShard(boolean primary, DiscoveryNode node, IndexMetaData indexMetaData, Path homePath)
        throws IOException {
    // add node name to settings for proper logging
    final Settings nodeSettings = Settings.builder().put("node.name", node.getName()).build();
    final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
    ShardRouting shardRouting =
        TestShardRouting.newShardRouting(shardId, node.getId(), primary, ShardRoutingState.INITIALIZING);
    final Path path = Files.createDirectories(homePath.resolve(node.getId()));
    final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path);
    ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
    Store store = createStore(indexSettings, shardPath);
    IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
    MapperService mapperService = MapperTestUtils.newMapperService(homePath, indexSettings.getSettings());
    for (Map.Entry<String, String> type : indexMapping.entrySet()) {
        mapperService.merge(type.getKey(), new CompressedXContent(type.getValue()),
            MapperService.MergeReason.MAPPING_RECOVERY, true);
    }
    SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
    final IndexEventListener indexEventListener = new IndexEventListener() {};
    final Engine.Warmer warmer = searcher -> {};
    return new IndexShard(shardRouting, indexSettings, shardPath, store, indexCache, mapperService,
        similarityService, null, null, indexEventListener, null, threadPool,
        BigArrays.NON_RECYCLING_INSTANCE, warmer, Collections.emptyList(), Collections.emptyList());
}
/**
 * Get info about current operation of this river. Used for REST management operations handling.
 *
 * @return String with JSON formatted info.
 * @throws Exception
 */
@Override
public String getRiverOperationInfo(DiscoveryNode esNode, Date currentDate) throws Exception {
    XContentBuilder builder = jsonBuilder().prettyPrint();
    builder.startObject();
    builder.field("river_name", riverName().getName());
    builder.field("info_date", currentDate);
    builder.startObject("indexing");
    builder.field("state", closed ? "stopped" : "running");
    if (!closed) {
        builder.field("last_restart", lastRestartDate);
    } else if (permanentStopDate != null) {
        builder.field("stopped_permanently", permanentStopDate);
    }
    builder.endObject();
    if (esNode != null) {
        builder.startObject("node");
        builder.field("id", esNode.getId());
        builder.field("name", esNode.getName());
        builder.endObject();
    }
    if (coordinatorInstance != null) {
        List<SpaceIndexingInfo> currProjectIndexingInfo = coordinatorInstance.getCurrentSpaceIndexingInfo();
        if (currProjectIndexingInfo != null) {
            builder.startArray("current_indexing");
            for (SpaceIndexingInfo pi : currProjectIndexingInfo) {
                pi.buildDocument(builder, null, true, false);
            }
            builder.endArray();
        }
    }
    List<String> pkeys = getAllIndexedSpaceKeys();
    if (pkeys != null) {
        builder.startArray("indexed_spaces");
        for (String spaceKey : pkeys) {
            builder.startObject();
            builder.field(SpaceIndexingInfo.DOCFIELD_SPACE_KEY, spaceKey);
            SpaceIndexingInfo lastIndexing = getLastSpaceIndexingInfo(spaceKey);
            if (lastIndexing != null) {
                builder.field("last_indexing");
                lastIndexing.buildDocument(builder, null, false, true);
            }
            builder.endObject();
        }
        builder.endArray();
    }
    builder.endObject();
    return builder.string();
}
@Inject
public LocalGatewayMetaState(Settings settings, ThreadPool threadPool, NodeEnvironment nodeEnv,
        TransportNodesListGatewayMetaState nodesListGatewayMetaState,
        LocalAllocateDangledIndices allocateDangledIndices,
        NodeIndexDeletedAction nodeIndexDeletedAction) throws Exception {
    super(settings);
    this.nodeEnv = nodeEnv;
    this.threadPool = threadPool;
    this.format = XContentType.fromRestContentType(settings.get("format", "smile"));
    this.allocateDangledIndices = allocateDangledIndices;
    this.nodeIndexDeletedAction = nodeIndexDeletedAction;
    nodesListGatewayMetaState.init(this);
    if (this.format == XContentType.SMILE) {
        Map<String, String> params = Maps.newHashMap();
        params.put("binary", "true");
        formatParams = new ToXContent.MapParams(params);
        Map<String, String> globalOnlyParams = Maps.newHashMap();
        globalOnlyParams.put("binary", "true");
        globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
        globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
        globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
    } else {
        formatParams = ToXContent.EMPTY_PARAMS;
        Map<String, String> globalOnlyParams = Maps.newHashMap();
        globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
        globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
        globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
    }
    this.autoImportDangled = AutoImportDangledState.fromString(
        settings.get("gateway.local.auto_import_dangled", AutoImportDangledState.YES.toString()));
    this.danglingTimeout = settings.getAsTime("gateway.local.dangling_timeout", TimeValue.timeValueHours(2));
    logger.debug("using gateway.local.auto_import_dangled [{}], with gateway.local.dangling_timeout [{}]",
        this.autoImportDangled, this.danglingTimeout);
    if (DiscoveryNode.masterNode(settings)) {
        try {
            pre019Upgrade();
            long start = System.currentTimeMillis();
            loadState();
            logger.debug("took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
        } catch (Exception e) {
            logger.error("failed to read local state, exiting...", e);
            throw e;
        }
    }
}
private boolean nodeRequiresConnection(DiscoveryNode node) {
    // a client node never needs a connection to another client node
    if (localNode().clientNode()) {
        if (node.clientNode()) {
            return false;
        }
    }
    return true;
}
public MockNode setAsMaster() {
    this.clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder(clusterState.nodes()).masterNodeId(discoveryNode.getId()))
        .build();
    return this;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    fromNode.writeTo(out);
    out.writeVInt(indices.length);
    for (IndexMetaData indexMetaData : indices) {
        indexMetaData.writeTo(out);
    }
}
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    fromNode = DiscoveryNode.readNode(in);
    indices = new IndexMetaData[in.readVInt()];
    for (int i = 0; i < indices.length; i++) {
        indices[i] = IndexMetaData.Builder.readFrom(in);
    }
}
/**
 * Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt> if no master
 * has been elected.
 */
public DiscoveryNode electMaster(Iterable<DiscoveryNode> nodes) {
    List<DiscoveryNode> sortedNodes = sortedMasterNodes(nodes);
    if (sortedNodes == null || sortedNodes.isEmpty()) {
        return null;
    }
    DiscoveryNode masterNode = sortedNodes.get(0);
    // Sanity check: maybe we don't end up here, because serialization may have failed.
    if (masterNode.getVersion().before(minMasterVersion)) {
        logger.warn("ignoring master [{}], because the version [{}] is lower than the minimum compatible version [{}]",
            masterNode, masterNode.getVersion(), minMasterVersion);
        return null;
    } else {
        return masterNode;
    }
}
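// Call-site sketch (illustrative, not part of the original source): electMaster
// returns null both when no master-eligible node is present and when the best
// candidate's version is below the minimum compatible version, so callers must
// treat "no master elected" as a normal outcome. "pingedNodes" is a hypothetical
// collection of nodes discovered via pinging.
DiscoveryNode electedMaster = electMaster(pingedNodes);
if (electedMaster == null) {
    // no (compatible) master-eligible node yet; keep pinging and retry later
} else {
    // join or form the cluster under electedMaster
}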
@Override
public DiscoveryNodes nodes() {
    DiscoveryNodes latestNodes = this.latestDiscoNodes;
    if (latestNodes != null) {
        return latestNodes;
    }
    // have not decided yet, just send the local node
    return DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build();
}
private ClusterState handleJoinRequest(final DiscoveryNode node) {
    if (!master) {
        throw new ElasticsearchIllegalStateException(
            "Node [" + localNode + "] not master for join request from [" + node + "]");
    }
    ClusterState state = clusterService.state();
    if (!transportService.addressSupported(node.address().getClass())) {
        // TODO, what should we do now? Maybe inform that node that it's crap?
        logger.warn("received a wrong address type from [{}], ignoring...", node);
    } else {
        // try and connect to the node, if it fails, we can raise an exception back to the client...
        transportService.connectToNode(node);
        state = clusterService.state();
        // validate the join request, will throw a failure if it fails, which will get back to the
        // node calling the join request
        membership.sendValidateJoinRequestBlocking(node, state, pingTimeout);
        clusterService.submitStateUpdateTask(
            "zen-disco-receive(join from node[" + node + "])", Priority.URGENT,
            new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    if (currentState.nodes().nodeExists(node.id())) {
                        // the node already exists in the cluster
                        logger.warn("received a join request for an existing node [{}]", node);
                        // still send a new cluster state, so it will be re-published and possibly update the other node
                        return ClusterState.builder(currentState).build();
                    }
                    DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes());
                    for (DiscoveryNode existingNode : currentState.nodes()) {
                        if (node.address().equals(existingNode.address())) {
                            builder.remove(existingNode.id());
                            logger.warn(
                                "received join request from node [{}], but found existing node {} with same address, removing existing node",
                                node, existingNode);
                        }
                    }
                    latestDiscoNodes = builder.build();
                    // add the new node now (will update latestDiscoNodes on publish)
                    return ClusterState.builder(currentState).nodes(latestDiscoNodes.newNode(node)).build();
                }

                @Override
                public void onFailure(String source, Throwable t) {
                    logger.error("unexpected failure during [{}]", t, source);
                }
            });
    }
    return state;
}