public void stop() throws Exception
{
   if (!started)
   {
      return;
   }

   stopping = true;

   if (HornetQServerLogger.LOGGER.isDebugEnabled())
   {
      HornetQServerLogger.LOGGER.debug(this + "::stopping ClusterConnection");
   }

   if (serverLocator != null)
   {
      serverLocator.removeClusterTopologyListener(this);
   }

   HornetQServerLogger.LOGGER.debug("Cluster connection being stopped for node " + nodeManager.getNodeId() +
                                    ", server = " + this.server +
                                    " serverLocator = " + serverLocator);

   synchronized (this)
   {
      for (MessageFlowRecord record : records.values())
      {
         try
         {
            record.close();
         }
         catch (Exception ignore)
         {
         }
      }
   }

   if (managementService != null)
   {
      TypedProperties props = new TypedProperties();
      props.putSimpleStringProperty(new SimpleString("name"), name);
      Notification notification = new Notification(nodeManager.getNodeId().toString(),
                                                   NotificationType.CLUSTER_CONNECTION_STOPPED,
                                                   props);
      managementService.sendNotification(notification);
   }

   executor.execute(new Runnable()
   {
      public void run()
      {
         synchronized (ClusterConnectionImpl.this)
         {
            closeLocator(serverLocator);
            serverLocator = null;
         }
      }
   });

   started = false;
}
public void nodeDown(final long eventUID, final String nodeID)
{
   if (stopping)
   {
      return;
   }

   if (HornetQServerLogger.LOGGER.isDebugEnabled())
   {
      HornetQServerLogger.LOGGER.debug(this + " receiving nodeDown for nodeID=" + nodeID, new Exception("trace"));
   }

   if (nodeID.equals(nodeManager.getNodeId().toString()))
   {
      return;
   }

   // Remove the flow record for that node
   MessageFlowRecord record = records.remove(nodeID);

   if (record != null)
   {
      try
      {
         if (isTrace)
         {
            HornetQServerLogger.LOGGER.trace("Closing clustering record " + record);
         }
         record.close();
      }
      catch (Exception e)
      {
         HornetQServerLogger.LOGGER.errorClosingFlowRecord(e);
      }
   }
}
@Override
public String toString()
{
   return "ClusterConnectionImpl@" + System.identityHashCode(this) +
          "[nodeUUID=" + nodeManager.getNodeId() +
          ", connector=" + connector +
          ", address=" + address +
          ", server=" + server + "]";
}
public synchronized void deployBridge(final BridgeConfiguration config) throws Exception
{
   if (config.getName() == null)
   {
      HornetQServerLogger.LOGGER.bridgeNotUnique();
      return;
   }

   if (config.getQueueName() == null)
   {
      HornetQServerLogger.LOGGER.bridgeNoQueue(config.getName());
      return;
   }

   if (config.getForwardingAddress() == null)
   {
      HornetQServerLogger.LOGGER.bridgeNoForwardAddress(config.getName());
   }

   if (bridges.containsKey(config.getName()))
   {
      HornetQServerLogger.LOGGER.bridgeAlreadyDeployed(config.getName());
      return;
   }

   Transformer transformer = instantiateTransformer(config.getTransformerClassName());

   Binding binding = postOffice.getBinding(new SimpleString(config.getQueueName()));

   if (binding == null)
   {
      HornetQServerLogger.LOGGER.bridgeQueueNotFound(config.getQueueName(), config.getName());
      return;
   }

   Queue queue = (Queue)binding.getBindable();

   ServerLocatorInternal serverLocator;

   if (config.getDiscoveryGroupName() != null)
   {
      DiscoveryGroupConfiguration discoveryGroupConfiguration =
         configuration.getDiscoveryGroupConfigurations().get(config.getDiscoveryGroupName());
      if (discoveryGroupConfiguration == null)
      {
         HornetQServerLogger.LOGGER.bridgeNoDiscoveryGroup(config.getDiscoveryGroupName());
         return;
      }

      if (config.isHA())
      {
         serverLocator = (ServerLocatorInternal)HornetQClient.createServerLocatorWithHA(discoveryGroupConfiguration);
      }
      else
      {
         serverLocator = (ServerLocatorInternal)HornetQClient.createServerLocatorWithoutHA(discoveryGroupConfiguration);
      }
   }
   else
   {
      TransportConfiguration[] tcConfigs = connectorNameListToArray(config.getStaticConnectors());

      if (tcConfigs == null)
      {
         HornetQServerLogger.LOGGER.bridgeCantFindConnectors(config.getName());
         return;
      }

      if (config.isHA())
      {
         serverLocator = (ServerLocatorInternal)HornetQClient.createServerLocatorWithHA(tcConfigs);
      }
      else
      {
         serverLocator = (ServerLocatorInternal)HornetQClient.createServerLocatorWithoutHA(tcConfigs);
      }
   }

   if (config.getForwardingAddress() != null)
   {
      AddressSettings addressConfig = configuration.getAddressesSettings().get(config.getForwardingAddress());

      // The address config could be null in certain test cases or in some embedded environments
      if (addressConfig == null)
      {
         // We will certainly get this warning in test cases, which is OK
         HornetQServerLogger.LOGGER.bridgeCantFindAddressConfig(config.getName(), config.getForwardingAddress());
      }
      else
      {
         final int windowSize = config.getConfirmationWindowSize();
         final long maxBytes = addressConfig.getMaxSizeBytes();

         if (maxBytes != -1 && maxBytes < windowSize)
         {
            HornetQServerLogger.LOGGER.bridgeConfirmationWindowTooSmall(config.getName(),
                                                                        config.getForwardingAddress(),
                                                                        windowSize,
                                                                        maxBytes);
         }
      }
   }

   serverLocator.setIdentity("Bridge " + config.getName());
   serverLocator.setConfirmationWindowSize(config.getConfirmationWindowSize());

   // We are going to manually retry on the bridge in case of failure
   serverLocator.setReconnectAttempts(0);
   serverLocator.setInitialConnectAttempts(0);
   serverLocator.setRetryInterval(config.getRetryInterval());
   serverLocator.setMaxRetryInterval(config.getMaxRetryInterval());
   serverLocator.setRetryIntervalMultiplier(config.getRetryIntervalMultiplier());
   serverLocator.setClientFailureCheckPeriod(config.getClientFailureCheckPeriod());
   serverLocator.setConnectionTTL(config.getConnectionTTL());
   serverLocator.setBlockOnDurableSend(!config.isUseDuplicateDetection());
   serverLocator.setBlockOnNonDurableSend(!config.isUseDuplicateDetection());
   serverLocator.setMinLargeMessageSize(config.getMinLargeMessageSize());

   // Disable producer flow control
   serverLocator.setProducerWindowSize(-1);

   // This will be set to 30s unless it's changed from embedded / testing.
   // There is no reason to make this timeout a special case in the config,
   // since the bridge is supposed to be non-blocking and fast.
   // We may expose this if we find a good use case.
   serverLocator.setCallTimeout(config.getCallTimeout());

   if (!config.isUseDuplicateDetection())
   {
      HornetQServerLogger.LOGGER.debug("Bridge " + config.getName() +
                                       " is configured to not use duplicate detection, it will send messages synchronously");
   }

   clusterLocators.add(serverLocator);

   Bridge bridge = new BridgeImpl(serverLocator,
                                  config.getReconnectAttempts(),
                                  config.getReconnectAttemptsOnSameNode(),
                                  config.getRetryInterval(),
                                  config.getRetryIntervalMultiplier(),
                                  config.getMaxRetryInterval(),
                                  nodeManager.getUUID(),
                                  new SimpleString(config.getName()),
                                  queue,
                                  executorFactory.getExecutor(),
                                  FilterImpl.createFilter(config.getFilterString()),
                                  SimpleString.toSimpleString(config.getForwardingAddress()),
                                  scheduledExecutor,
                                  transformer,
                                  config.isUseDuplicateDetection(),
                                  config.getUser(),
                                  config.getPassword(),
                                  !backup,
                                  server.getStorageManager());

   bridges.put(config.getName(), bridge);

   managementService.registerBridge(bridge, config);

   bridge.start();
}
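/*
 * A minimal usage sketch (not part of this class) of deploying a core bridge programmatically
 * through deployBridge(). It assumes HornetQServer.getClusterManager() and bean-style setters on
 * BridgeConfiguration (setName, setQueueName, setForwardingAddress, setStaticConnectors,
 * setUseDuplicateDetection); some HornetQ versions may only offer the full constructor. The bridge
 * name, queue, forwarding address and connector name are hypothetical, and java.util.Arrays must
 * be imported.
 */
private static void deployBridgeSketch(final HornetQServer server) throws Exception
{
   BridgeConfiguration bridgeConfig = new BridgeConfiguration();
   bridgeConfig.setName("my-bridge");                                    // hypothetical, must be unique
   bridgeConfig.setQueueName("jms.queue.sourceQueue");                   // local queue the bridge consumes from
   bridgeConfig.setForwardingAddress("jms.queue.targetQueue");           // address on the remote server
   bridgeConfig.setStaticConnectors(Arrays.asList("remote-connector"));  // connector declared in the configuration
   bridgeConfig.setUseDuplicateDetection(true);                          // keep sends asynchronous

   server.getClusterManager().deployBridge(bridgeConfig);
}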
public String getNodeId()
{
   return nodeManager.getNodeId().toString();
}
private void createNewRecord(final long eventUID,
                             final String targetNodeID,
                             final TransportConfiguration connector,
                             final SimpleString queueName,
                             final Queue queue,
                             final boolean start) throws Exception
{
   String nodeId;

   synchronized (this)
   {
      if (!started)
      {
         return;
      }

      if (serverLocator == null)
      {
         return;
      }

      nodeId = serverLocator.getNodeID();
   }

   final ServerLocatorInternal targetLocator = new ServerLocatorImpl(topology, true, connector);

   targetLocator.setReconnectAttempts(0);
   targetLocator.setInitialConnectAttempts(0);
   targetLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod);
   targetLocator.setConnectionTTL(connectionTTL);
   targetLocator.setConfirmationWindowSize(confirmationWindowSize);
   targetLocator.setBlockOnDurableSend(!useDuplicateDetection);
   targetLocator.setBlockOnNonDurableSend(!useDuplicateDetection);
   targetLocator.setRetryInterval(retryInterval);
   targetLocator.setMaxRetryInterval(maxRetryInterval);
   targetLocator.setRetryIntervalMultiplier(retryIntervalMultiplier);
   targetLocator.setMinLargeMessageSize(minLargeMessageSize);

   // No producer flow control on the bridges, as we don't want to lock the queues
   targetLocator.setProducerWindowSize(-1);

   targetLocator.setAfterConnectionInternalListener(this);

   targetLocator.setNodeID(nodeId);

   targetLocator.setClusterTransportConfiguration(serverLocator.getClusterTransportConfiguration());

   targetLocator.disableFinalizeCheck();

   targetLocator.addIncomingInterceptor(new IncomingInterceptorLookingForExceptionMessage(manager,
                                                                                          executorFactory.getExecutor()));

   MessageFlowRecordImpl record = new MessageFlowRecordImpl(targetLocator,
                                                            eventUID,
                                                            targetNodeID,
                                                            connector,
                                                            queueName,
                                                            queue);

   ClusterConnectionBridge bridge = new ClusterConnectionBridge(this,
                                                                manager,
                                                                targetLocator,
                                                                serverLocator,
                                                                reconnectAttempts,
                                                                retryInterval,
                                                                retryIntervalMultiplier,
                                                                maxRetryInterval,
                                                                nodeManager.getUUID(),
                                                                record.getEventUID(),
                                                                record.getTargetNodeID(),
                                                                record.getQueueName(),
                                                                record.getQueue(),
                                                                executorFactory.getExecutor(),
                                                                null,
                                                                null,
                                                                scheduledExecutor,
                                                                null,
                                                                useDuplicateDetection,
                                                                clusterUser,
                                                                clusterPassword,
                                                                server.getStorageManager(),
                                                                managementService.getManagementAddress(),
                                                                managementService.getManagementNotificationAddress(),
                                                                record,
                                                                record.getConnector());

   targetLocator.setIdentity("(Cluster-connection-bridge::" + bridge.toString() + "::" + this.toString() + ")");

   if (HornetQServerLogger.LOGGER.isDebugEnabled())
   {
      HornetQServerLogger.LOGGER.debug("creating record between " + this.connector + " and " + connector + bridge);
   }

   record.setBridge(bridge);

   records.put(targetNodeID, record);

   if (start)
   {
      bridge.start();
   }
}
@Override
public void nodeUP(final TopologyMember topologyMember, final boolean last)
{
   if (stopping)
   {
      return;
   }

   final String nodeID = topologyMember.getNodeId();

   if (HornetQServerLogger.LOGGER.isDebugEnabled())
   {
      HornetQServerLogger.LOGGER.debug(this + " receiving nodeUP for nodeID=" + nodeID +
                                       " connectionPair=" + topologyMember);
   }

   // discard notifications about ourselves unless it's from our backup
   if (nodeID.equals(nodeManager.getNodeId().toString()))
   {
      if (HornetQServerLogger.LOGGER.isTraceEnabled())
      {
         HornetQServerLogger.LOGGER.trace(this + "::informing about backup to itself, nodeUUID=" +
                                          nodeManager.getNodeId() + ", connectorPair=" + topologyMember +
                                          ", this = " + this);
      }
      return;
   }

   // if the node is more than 1 hop away, we do not create a bridge for direct cluster connection
   if (allowDirectConnectionsOnly && !allowableConnections.contains(topologyMember.getLive()))
   {
      return;
   }

   // FIXME required to prevent cluster connections without a discovery group
   // and with empty static connectors from creating bridges... ugly!
   if (serverLocator == null)
   {
      return;
   }

   // we don't create bridges to backups
   if (topologyMember.getLive() == null)
   {
      if (isTrace)
      {
         HornetQServerLogger.LOGGER.trace(this + " ignoring call with nodeID=" + nodeID +
                                          ", topologyMember=" + topologyMember + ", last=" + last);
      }
      return;
   }

   synchronized (recordsGuard)
   {
      try
      {
         MessageFlowRecord record = records.get(nodeID);

         if (record == null)
         {
            if (HornetQServerLogger.LOGGER.isDebugEnabled())
            {
               HornetQServerLogger.LOGGER.debug(this + "::Creating record for nodeID=" + nodeID +
                                                ", topologyMember=" + topologyMember);
            }

            // New node - create a new flow record
            final SimpleString queueName = new SimpleString("sf." + name + "." + nodeID);

            Binding queueBinding = postOffice.getBinding(queueName);

            Queue queue;

            if (queueBinding != null)
            {
               queue = (Queue)queueBinding.getBindable();
            }
            else
            {
               // Add binding in storage so the queue will get reloaded on startup and we can find it -
               // it's never actually routed to at that address though
               queue = server.createQueue(queueName, queueName, null, true, false);
            }

            createNewRecord(topologyMember.getUniqueEventID(), nodeID, topologyMember.getLive(), queueName, queue, true);
         }
         else
         {
            if (isTrace)
            {
               HornetQServerLogger.LOGGER.trace(this + " ignored nodeUp record for " + topologyMember +
                                                " on nodeID=" + nodeID + " as the record already existed");
            }
         }
      }
      catch (Exception e)
      {
         HornetQServerLogger.LOGGER.errorUpdatingTopology(e);
      }
   }
}
private synchronized void activate() throws Exception
{
   if (!started)
   {
      return;
   }

   if (HornetQServerLogger.LOGGER.isDebugEnabled())
   {
      HornetQServerLogger.LOGGER.debug("Activating cluster connection nodeID=" + nodeManager.getNodeId() +
                                       " for server=" + this.server);
   }

   liveNotifier = new LiveNotifier();
   liveNotifier.updateAsLive();
   liveNotifier.schedule();

   serverLocator = clusterConnector.createServerLocator();

   if (serverLocator != null)
   {
      if (!useDuplicateDetection)
      {
         HornetQServerLogger.LOGGER.debug("DuplicateDetection is disabled, sending clustered messages blocked");
      }

      final TopologyMember currentMember = topology.getMember(manager.getNodeId());

      if (currentMember == null)
      {
         // sanity check only
         throw new IllegalStateException("InternalError! The ClusterConnection doesn't know about its own node = " + this);
      }

      serverLocator.setNodeID(nodeManager.getNodeId().toString());
      serverLocator.setIdentity("(main-ClusterConnection::" + server.toString() + ")");
      serverLocator.setReconnectAttempts(0);
      serverLocator.setClusterConnection(true);
      serverLocator.setClusterTransportConfiguration(connector);
      serverLocator.setInitialConnectAttempts(-1);
      serverLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod);
      serverLocator.setConnectionTTL(connectionTTL);
      serverLocator.setConfirmationWindowSize(confirmationWindowSize);

      // if not using duplicate detection, sends will block
      serverLocator.setBlockOnDurableSend(!useDuplicateDetection);
      serverLocator.setBlockOnNonDurableSend(!useDuplicateDetection);
      serverLocator.setCallTimeout(callTimeout);
      serverLocator.setCallFailoverTimeout(callFailoverTimeout);

      // No producer flow control on the bridges, as we don't want to lock the queues
      serverLocator.setProducerWindowSize(-1);

      if (retryInterval > 0)
      {
         this.serverLocator.setRetryInterval(retryInterval);
      }

      addClusterTopologyListener(this);

      serverLocator.setAfterConnectionInternalListener(this);

      serverLocator.start(server.getExecutorFactory().getExecutor());
   }

   if (managementService != null)
   {
      TypedProperties props = new TypedProperties();
      props.putSimpleStringProperty(new SimpleString("name"), name);
      Notification notification = new Notification(nodeManager.getNodeId().toString(),
                                                   NotificationType.CLUSTER_CONNECTION_STARTED,
                                                   props);
      HornetQServerLogger.LOGGER.debug("sending notification: " + notification);
      managementService.sendNotification(notification);
   }
}
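/*
 * A minimal sketch (not part of this class) of observing the CLUSTER_CONNECTION_STARTED /
 * CLUSTER_CONNECTION_STOPPED notifications emitted above. It assumes the ManagementService
 * exposes addNotificationListener(NotificationListener); the listener below is hypothetical
 * and only logs what it receives.
 */
private static void registerClusterConnectionListenerSketch(final ManagementService managementService)
{
   managementService.addNotificationListener(new NotificationListener()
   {
      public void onNotification(final Notification notification)
      {
         if (notification.getType() == NotificationType.CLUSTER_CONNECTION_STARTED ||
             notification.getType() == NotificationType.CLUSTER_CONNECTION_STOPPED)
         {
            HornetQServerLogger.LOGGER.debug("cluster connection lifecycle notification: " + notification);
         }
      }
   });
}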
public void setLiveID(String liveID)
{
   targetServerID = liveID;
   nodeManager.setNodeID(liveID);
   // now that we are replicating, we can start waiting for disconnect notifications so we can fail over
   sessionFactory.addFailureListener(this);
}