/**
 * This is directly called when the connection to the node is gone, or when the node sends a
 * disconnection. Look for callers of this method!
 */
public void notifyNodeDown(final long eventTime, final String nodeID) {
  if (!ha) {
    // there's no topology here
    return;
  }
  if (HornetQLogger.LOGGER.isDebugEnabled()) {
    HornetQLogger.LOGGER.debug(
        "nodeDown " + this + " nodeID=" + nodeID + " as being down", new Exception("trace"));
  }
  topology.removeMember(eventTime, nodeID);
  if (clusterConnection) {
    // For a cluster connection only the connector arrays need refreshing.
    updateArraysAndPairs();
  } else {
    synchronized (this) {
      if (topology.isEmpty()) {
        // Resetting the topology to its original condition as it was brand new
        receivedTopology = false;
        topologyArray = null;
      } else {
        updateArraysAndPairs();
        // Only this locator's own node remains: treat the topology as never received.
        if (topology.nodes() == 1 && topology.getMember(this.nodeID) != null) {
          // Resetting the topology to its original condition as it was brand new
          receivedTopology = false;
        }
      }
    }
  }
}
/**
 * Prepares the XA transaction branch identified by {@code xid} (first phase of two-phase
 * commit).
 *
 * @param xid the branch to prepare; must be known to the resource manager
 * @throws HornetQXAException with {@code XAER_PROTO} if the session is still working in that
 *     transaction or the branch is suspended, {@code XAER_NOTA} if the xid is unknown
 */
public synchronized void xaPrepare(final Xid xid) throws Exception {
  if (tx != null && tx.getXid().equals(xid)) {
    // Fixed message: this is the prepare path, not commit (previous text said "Cannot commit").
    final String msg =
        "Cannot prepare, session is currently doing work in a transaction " + tx.getXid();
    throw new HornetQXAException(XAException.XAER_PROTO, msg);
  } else {
    Transaction theTx = resourceManager.getTransaction(xid);
    if (isTrace) {
      HornetQLogger.LOGGER.trace("xaprepare into " + ", xid=" + xid + ", tx= " + tx);
    }
    if (theTx == null) {
      final String msg = "Cannot find xid in resource manager: " + xid;
      throw new HornetQXAException(XAException.XAER_NOTA, msg);
    } else {
      if (theTx.getState() == Transaction.State.SUSPENDED) {
        throw new HornetQXAException(
            XAException.XAER_PROTO, "Cannot prepare transaction, it is suspended " + xid);
      } else if (theTx.getState() == Transaction.State.PREPARED) {
        // Prepare is idempotent here: a repeated prepare is logged and ignored.
        HornetQLogger.LOGGER.info("ignoring prepare on xid as already called :" + xid);
      } else {
        theTx.prepare();
      }
    }
  }
}
/**
 * Closes this server session: rolls back any local (non-XA) transaction, closes every consumer,
 * unregisters the session from the server, deletes any partially received large-message file,
 * removes the failure listener and finally notifies the callback.
 *
 * @param failed whether the session is closing because of a failure; forwarded to the rollback
 *               and to each consumer close
 */
private synchronized void doClose(final boolean failed) throws Exception {
  if (tx != null && tx.getXid() == null) {
    // We only rollback local txs on close, not XA tx branches
    try {
      rollback(failed, false);
    } catch (Exception e) {
      HornetQLogger.LOGGER.warn(e.getMessage(), e);
    }
  }
  // Close over a snapshot — presumably because closing a consumer can alter the live map;
  // TODO confirm against ServerConsumer.close().
  Set<ServerConsumer> consumersClone = new HashSet<ServerConsumer>(consumers.values());
  for (ServerConsumer consumer : consumersClone) {
    consumer.close(failed);
  }
  consumers.clear();
  server.removeSession(name);
  if (currentLargeMessage != null) {
    try {
      // Best effort: failure to delete the file must not abort the close.
      currentLargeMessage.deleteFile();
    } catch (Throwable error) {
      HornetQLogger.LOGGER.errorDeletingLargeMessageFile(error);
    }
  }
  remotingConnection.removeFailureListener(this);
  callback.closed();
}
public ClientSessionFactory createSessionFactory(String nodeID) throws Exception { TopologyMember topologyMember = topology.getMember(nodeID); if (HornetQLogger.LOGGER.isTraceEnabled()) { HornetQLogger.LOGGER.trace( "Creating connection factory towards " + nodeID + " = " + topologyMember + ", topology=" + topology.describe()); } if (topologyMember == null) { return null; } else if (topologyMember.getA() != null) { ClientSessionFactoryInternal factory = (ClientSessionFactoryInternal) createSessionFactory(topologyMember.getA()); if (topologyMember.getB() != null) { factory.setBackupConnector(topologyMember.getA(), topologyMember.getB()); } return factory; } else if (topologyMember.getA() == null && topologyMember.getB() != null) { // This shouldn't happen, however I wanted this to consider all possible cases ClientSessionFactoryInternal factory = (ClientSessionFactoryInternal) createSessionFactory(topologyMember.getB()); return factory; } else { // it shouldn't happen return null; } }
public synchronized void xaRollback(final Xid xid) throws Exception { if (tx != null && tx.getXid().equals(xid)) { final String msg = "Cannot roll back, session is currently doing work in a transaction " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { Transaction theTx = resourceManager.removeTransaction(xid); if (isTrace) { HornetQLogger.LOGGER.trace("xarollback into " + theTx); } if (theTx == null) { // checked heuristic committed transactions if (resourceManager.getHeuristicCommittedTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURCOM, "transaction has ben heuristically committed: " + xid); } // checked heuristic rolled back transactions else if (resourceManager.getHeuristicRolledbackTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURRB, "transaction has ben heuristically rolled back: " + xid); } else { if (isTrace) { HornetQLogger.LOGGER.trace( "xarollback into " + theTx + ", xid=" + xid + " forcing a rollback regular"); } try { // jbpapp-8845 // This could have happened because the TX timed out, // at this point we would be better on rolling back this session as a way to prevent // consumers from holding their messages this.rollback(false); } catch (Exception e) { HornetQLogger.LOGGER.warn(e.getMessage(), e); } throw new HornetQXAException( XAException.XAER_NOTA, "Cannot find xid in resource manager: " + xid); } } else { if (theTx.getState() == Transaction.State.SUSPENDED) { if (isTrace) { HornetQLogger.LOGGER.trace( "xarollback into " + theTx + " sending tx back as it was suspended"); } // Put it back resourceManager.putTransaction(xid, tx); throw new HornetQXAException( XAException.XAER_PROTO, "Cannot rollback transaction, it is suspended " + xid); } else { doRollback(false, false, theTx); } } } }
/**
 * FailureListener callback: the underlying connection died, so log the event and force-close
 * this session. Any error during the close is logged rather than propagated.
 */
public void connectionFailed(final HornetQException me, boolean failedOver) {
  try {
    HornetQLogger.LOGGER.clientConnectionFailed(name);
    close(true);
    HornetQLogger.LOGGER.clientConnectionFailedClearingSession(name);
  } catch (Throwable closeError) {
    HornetQLogger.LOGGER.errorClosingConnection(this);
  }
}
/**
 * Deletes the temporary queue: removes its binding (if still present) and drops all of its
 * message references. Failures are logged, never thrown.
 */
private void run() {
  try {
    if (HornetQLogger.LOGGER.isDebugEnabled()) {
      HornetQLogger.LOGGER.debug("deleting temporary queue " + bindingName);
    }
    final boolean bindingExists = postOffice.getBinding(bindingName) != null;
    if (bindingExists) {
      postOffice.removeBinding(bindingName);
    }
    queue.deleteAllReferences();
  } catch (Exception failure) {
    HornetQLogger.LOGGER.errorRemovingTempQueue(failure, bindingName);
  }
}
/**
 * Blocks (up to 10 seconds) until the storage manager's pending operations complete, logging a
 * warning on timeout or error. The previously installed operation context is always restored.
 */
public void waitContextCompletion() {
  final OperationContext previousContext = storageManager.getContext();
  try {
    try {
      final boolean completed = storageManager.waitOnOperations(10000);
      if (!completed) {
        HornetQLogger.LOGGER.errorCompletingContext(new Exception("warning"));
      }
    } catch (Exception e) {
      HornetQLogger.LOGGER.warn(e.getMessage(), e);
    }
  } finally {
    storageManager.setContext(previousContext);
  }
}
public void sendLarge(final MessageInternal message) throws Exception { // need to create the LargeMessage before continue long id = storageManager.generateUniqueID(); LargeServerMessage largeMsg = storageManager.createLargeMessage(id, message); if (HornetQLogger.LOGGER.isTraceEnabled()) { HornetQLogger.LOGGER.trace("sendLarge::" + largeMsg); } if (currentLargeMessage != null) { HornetQLogger.LOGGER.replacingIncompleteLargeMessage(currentLargeMessage.getMessageID()); } currentLargeMessage = largeMsg; }
public synchronized void xaCommit(final Xid xid, final boolean onePhase) throws Exception { if (tx != null && tx.getXid().equals(xid)) { final String msg = "Cannot commit, session is currently doing work in transaction " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { Transaction theTx = resourceManager.removeTransaction(xid); if (isTrace) { HornetQLogger.LOGGER.trace("XAcommit into " + theTx + ", xid=" + xid); } if (theTx == null) { // checked heuristic committed transactions if (resourceManager.getHeuristicCommittedTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURCOM, "transaction has been heuristically committed: " + xid); } // checked heuristic rolled back transactions else if (resourceManager.getHeuristicRolledbackTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURRB, "transaction has been heuristically rolled back: " + xid); } else { if (isTrace) { HornetQLogger.LOGGER.trace( "XAcommit into " + theTx + ", xid=" + xid + " cannot find it"); } throw new HornetQXAException( XAException.XAER_NOTA, "Cannot find xid in resource manager: " + xid); } } else { if (theTx.getState() == Transaction.State.SUSPENDED) { // Put it back resourceManager.putTransaction(xid, theTx); throw new HornetQXAException( XAException.XAER_PROTO, "Cannot commit transaction, it is suspended " + xid); } else { theTx.commit(onePhase); } } } }
/**
 * Records a node-up event in the topology, propagates the node's backup connector to all known
 * session factories, refreshes the connector arrays, and — when this is the last update of a
 * batch — marks the topology as received and wakes any thread waiting for it.
 */
public void notifyNodeUp(
    long uniqueEventID,
    final String nodeID,
    final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
    final boolean last) {
  if (!ha) {
    // there's no topology
    return;
  }
  if (HornetQLogger.LOGGER.isDebugEnabled()) {
    HornetQLogger.LOGGER.debug(
        "NodeUp " + this + "::nodeID=" + nodeID + ", connectorPair=" + connectorPair,
        new Exception("trace"));
  }
  TopologyMember member = new TopologyMember(connectorPair.getA(), connectorPair.getB());
  topology.updateMember(uniqueEventID, nodeID, member);
  // Re-read the member: updateMember may have merged with existing state.
  TopologyMember actMember = topology.getMember(nodeID);
  if (actMember != null
      && actMember.getConnector().getA() != null
      && actMember.getConnector().getB() != null) {
    // Iterate over a clone so factories isn't held locked while notifying each factory.
    HashSet<ClientSessionFactory> clonedFactories = new HashSet<ClientSessionFactory>();
    synchronized (factories) {
      clonedFactories.addAll(factories);
    }
    for (ClientSessionFactory factory : clonedFactories) {
      ((ClientSessionFactoryInternal) factory)
          .setBackupConnector(actMember.getConnector().getA(), actMember.getConnector().getB());
    }
  }
  updateArraysAndPairs();
  if (last) {
    synchronized (this) {
      receivedTopology = true;
      // Notify if waiting on getting topology
      notifyAll();
    }
  }
}
/**
 * Closes the consumer registered under {@code consumerID}; logs a warning when no such consumer
 * exists.
 */
public void closeConsumer(final long consumerID) throws Exception {
  final ServerConsumer target = consumers.get(consumerID);
  if (target == null) {
    HornetQLogger.LOGGER.cannotFindConsumer(consumerID);
    return;
  }
  target.close(false);
}
/**
 * Commits the session's current transaction. A fresh transaction is always installed
 * afterwards, even when the commit throws.
 */
public synchronized void commit() throws Exception {
  if (isTrace) {
    HornetQLogger.LOGGER.trace("Calling commit");
  }
  try {
    tx.commit();
  } finally {
    // Replace the transaction unconditionally so the session can keep doing work.
    tx = newTransaction();
  }
}
/**
 * Memory-sampling loop: every {@code measureInterval} ms reads the JVM heap figures, logs them
 * at debug level, and sets the {@code low} flag when the available percentage drops to or below
 * {@code memoryWarningThreshold}. Exits when the manager is stopped ({@code started == false})
 * and the thread is interrupted.
 */
public void run() {
  while (true) {
    try {
      if (thread.isInterrupted() && !started) {
        break;
      }
      Thread.sleep(measureInterval);
    } catch (InterruptedException ignore) {
      // Interrupted: only exit when the manager has actually been stopped.
      if (!started) {
        break;
      }
    }
    long maxMemory = runtime.maxMemory();
    long totalMemory = runtime.totalMemory();
    long freeMemory = runtime.freeMemory();
    // Heap the JVM could still use: free part of the committed heap plus uncommitted headroom.
    long availableMemory = freeMemory + maxMemory - totalMemory;
    double availableMemoryPercent = 100.0 * availableMemory / maxMemory;
    StringBuilder info = new StringBuilder();
    info.append(String.format("free memory: %s%n", SizeFormatterUtil.sizeof(freeMemory)));
    info.append(String.format("max memory: %s%n", SizeFormatterUtil.sizeof(maxMemory)));
    info.append(String.format("total memory: %s%n", SizeFormatterUtil.sizeof(totalMemory)));
    info.append(String.format("available memory: %.2f%%%n", availableMemoryPercent));
    if (HornetQLogger.LOGGER.isDebugEnabled()) {
      HornetQLogger.LOGGER.debug(info);
    }
    if (availableMemoryPercent <= memoryWarningThreshold) {
      HornetQLogger.LOGGER.memoryError(memoryWarningThreshold, info.toString());
      low = true;
    } else {
      low = false;
    }
  }
}
/**
 * Grants flow-control credits to the consumer registered under {@code consumerID}; logs at
 * debug level when the consumer is unknown.
 */
public void receiveConsumerCredits(final long consumerID, final int credits) throws Exception {
  final ServerConsumer target = consumers.get(consumerID);
  if (target == null) {
    HornetQLogger.LOGGER.debug("There is no consumer with id " + consumerID);
    return;
  }
  target.receiveCredits(credits);
}
/**
 * Attempts one connection through this connector's factory. Returns the factory on success (or
 * {@code null} when no factory is set), and {@code null} after logging if the connect fails.
 * The factory is tracked in the connecting set for the duration of the attempt.
 */
public ClientSessionFactory tryConnect() throws HornetQException {
  if (HornetQLogger.LOGGER.isDebugEnabled()) {
    HornetQLogger.LOGGER.debug(this + "::Trying to connect to " + factory);
  }
  final ClientSessionFactoryInternal candidate = factory;
  try {
    if (candidate != null) {
      addToConnecting(candidate);
      try {
        candidate.connect(1, false);
      } finally {
        // Always untrack, whether the connect succeeded or threw.
        removeFromConnecting(candidate);
      }
    }
    return candidate;
  } catch (HornetQException e) {
    HornetQLogger.LOGGER.debug(this + "::Exception on establish connector initial connection", e);
    return null;
  }
}
/**
 * Last-chance leak detection: if this locator is finalized without having been closed (and the
 * finalize check is enabled), log the leak, run the test hook if installed, and close it now.
 */
@Override
protected void finalize() throws Throwable {
  if (!isClosed() && finalizeCheck) {
    // NOTE(review): 'e' is not declared in this method — presumably a field of the enclosing
    // class capturing a creation-time stack trace; confirm it exists outside this view.
    HornetQLogger.LOGGER.serverLocatorNotClosed(e, System.identityHashCode(this));
    if (ServerLocatorImpl.finalizeCallback != null) {
      // Hook used by tests to detect unclosed locators.
      ServerLocatorImpl.finalizeCallback.run();
    }
    close();
  }
  super.finalize();
}
public synchronized void start() { HornetQLogger.LOGGER.debug( "Starting MemoryManager with MEASURE_INTERVAL: " + measureInterval + " FREE_MEMORY_PERCENT: " + memoryWarningThreshold); if (started) { // Already started return; } started = true; thread = new Thread(new MemoryRunnable(), "hornetq-memory-manager-thread"); thread.setDaemon(true); thread.start(); }
/**
 * Routes a message sent on this session: assigns a message ID, fills in a missing address from
 * the session's default, and dispatches either to the management service (for the management
 * address) or through the normal send path.
 *
 * @param message the message to route; its address may be null for anonymous producers
 * @param direct whether the message can be delivered directly to the consumer
 */
public void send(final ServerMessage message, final boolean direct) throws Exception {
  long id = storageManager.generateUniqueID();
  SimpleString address = message.getAddress();
  message.setMessageID(id);
  message.encodeMessageIDToBuffer();
  // Remember the first non-null address seen; later address-less messages inherit it.
  if (defaultAddress == null && address != null) {
    defaultAddress = address;
  }
  if (address == null) {
    if (message.isDurable()) {
      // We need to force a re-encode when the message gets persisted or when it gets reloaded
      // it will have no address
      message.setAddress(defaultAddress);
    } else {
      // We don't want to force a re-encode when the message gets sent to the consumer
      message.setAddressTransient(defaultAddress);
    }
  }
  if (isTrace) {
    HornetQLogger.LOGGER.trace("send(message=" + message + ", direct=" + direct + ") being called");
  }
  if (message.getAddress() == null) {
    // This could happen with some tests that are ignoring messages
    throw HornetQMessageBundle.BUNDLE.noAddress();
  }
  if (message.getAddress().equals(managementAddress)) {
    // It's a management message
    handleManagementMessage(message, direct);
  } else {
    doSend(message, direct);
  }
}
public Response propose(final Proposal proposal) throws Exception { // sanity check in case it is already selected Response response = responses.get(proposal.getGroupId()); if (response != null) { return response; } try { lock.lock(); TypedProperties props = new TypedProperties(); props.putSimpleStringProperty(ManagementHelper.HDR_PROPOSAL_GROUP_ID, proposal.getGroupId()); props.putSimpleStringProperty(ManagementHelper.HDR_PROPOSAL_VALUE, proposal.getClusterName()); props.putIntProperty(ManagementHelper.HDR_BINDING_TYPE, BindingType.LOCAL_QUEUE_INDEX); props.putSimpleStringProperty(ManagementHelper.HDR_ADDRESS, address); props.putIntProperty(ManagementHelper.HDR_DISTANCE, 0); Notification notification = new Notification(null, NotificationType.PROPOSAL, props); managementService.sendNotification(notification); if (!sendCondition.await(timeout, TimeUnit.MILLISECONDS)) HornetQLogger.LOGGER.groupHandlerSendTimeout(); response = responses.get(proposal.getGroupId()); } finally { lock.unlock(); } if (response == null) { throw new IllegalStateException( "no response received from group handler for " + proposal.getGroupId()); } return response; }
/**
 * Starts work on the XA transaction branch {@code xid}: creates a new transaction for it and
 * registers it with the resource manager.
 *
 * @throws HornetQXAException with {@code XAER_PROTO} if the session already has a transaction,
 *     {@code XAER_DUPID} if the xid is already registered
 */
public synchronized void xaStart(final Xid xid) throws Exception {
  if (tx != null) {
    final String msg =
        "Cannot start, session is already doing work in a transaction " + tx.getXid();
    throw new HornetQXAException(XAException.XAER_PROTO, msg);
  }
  tx = newTransaction(xid);
  if (isTrace) {
    HornetQLogger.LOGGER.trace("xastart into tx= " + tx);
  }
  if (!resourceManager.putTransaction(xid, tx)) {
    final String msg = "Cannot start, there is already a xid " + tx.getXid();
    throw new HornetQXAException(XAException.XAER_DUPID, msg);
  }
}
/**
 * Suspends the session's current XA transaction and detaches it from the session.
 *
 * @throws HornetQXAException with {@code XAER_PROTO} if there is no current transaction or it
 *     is already suspended
 */
public synchronized void xaSuspend() throws Exception {
  if (isTrace) {
    HornetQLogger.LOGGER.trace("xasuspend on " + this.tx);
  }
  if (tx == null) {
    throw new HornetQXAException(
        XAException.XAER_PROTO, "Cannot suspend, session is not doing work in a transaction ");
  }
  if (tx.getState() == Transaction.State.SUSPENDED) {
    throw new HornetQXAException(
        XAException.XAER_PROTO, "Cannot suspend, transaction is already suspended " + tx.getXid());
  }
  tx.suspend();
  // Detach: the session carries no transaction until xaResume/xaStart.
  tx = null;
}
/**
 * A CoreProtocolManager
 *
 * <p>ProtocolManager for the native core protocol: builds a ConnectionEntry per accepted
 * connection, tracks session packet handlers, and (via the inner LocalChannelHandler) services
 * channel-0 control packets: ping, topology subscription, node announce and backup
 * registration.
 *
 * @author Tim Fox
 */
class CoreProtocolManager implements ProtocolManager {
  private static final boolean isTrace = HornetQLogger.LOGGER.isTraceEnabled();

  private final HornetQServer server;

  private final List<Interceptor> interceptors;

  CoreProtocolManager(final HornetQServer server, final List<Interceptor> interceptors) {
    this.server = server;
    this.interceptors = interceptors;
  }

  /**
   * Wraps a newly accepted connection: creates the remoting connection, installs the session
   * packet handler on the session channel and the control handler on channel 0, and applies the
   * configured connection TTL.
   */
  public ConnectionEntry createConnectionEntry(
      final Acceptor acceptorUsed, final Connection connection) {
    final Configuration config = server.getConfiguration();
    Executor connectionExecutor = server.getExecutorFactory().getExecutor();
    final CoreRemotingConnection rc =
        new RemotingConnectionImpl(
            connection,
            interceptors,
            config.isAsyncConnectionExecutionEnabled() ? connectionExecutor : null,
            server.getNodeID());
    Channel channel1 = rc.getChannel(CHANNEL_ID.SESSION.id, -1);
    ChannelHandler handler = new HornetQPacketHandler(this, server, channel1, rc);
    channel1.setHandler(handler);
    long ttl = HornetQClient.DEFAULT_CONNECTION_TTL;
    if (config.getConnectionTTLOverride() != -1) {
      // Configured override wins over the client default.
      ttl = config.getConnectionTTLOverride();
    }
    final ConnectionEntry entry =
        new ConnectionEntry(rc, connectionExecutor, System.currentTimeMillis(), ttl);
    final Channel channel0 = rc.getChannel(0, -1);
    channel0.setHandler(new LocalChannelHandler(config, entry, channel0, acceptorUsed, rc));
    return entry;
  }

  // Session handlers keyed by session name; concurrent because sessions come and go from
  // multiple connection threads.
  private final Map<String, ServerSessionPacketHandler> sessionHandlers =
      new ConcurrentHashMap<String, ServerSessionPacketHandler>();

  ServerSessionPacketHandler getSessionHandler(final String sessionName) {
    return sessionHandlers.get(sessionName);
  }

  void addSessionHandler(final String name, final ServerSessionPacketHandler handler) {
    sessionHandlers.put(name, handler);
  }

  public void removeHandler(final String name) {
    sessionHandlers.remove(name);
  }

  // This is never called using the core protocol, since we override the HornetQFrameDecoder
  // with our core optimised version HornetQFrameDecoder2, which never calls this
  public void handleBuffer(RemotingConnection connection, HornetQBuffer buffer) {}

  public int isReadyToHandle(HornetQBuffer buffer) {
    return -1;
  }

  @Override
  public String toString() {
    return "CoreProtocolManager(server=" + server + ")";
  }

  /** Channel-0 handler for a single connection's control packets. */
  private class LocalChannelHandler implements ChannelHandler {
    private final Configuration config;
    private final ConnectionEntry entry;
    private final Channel channel0;
    private final Acceptor acceptorUsed;
    private final CoreRemotingConnection rc;

    public LocalChannelHandler(
        final Configuration config,
        final ConnectionEntry entry,
        final Channel channel0,
        final Acceptor acceptorUsed,
        final CoreRemotingConnection rc) {
      this.config = config;
      this.entry = entry;
      this.channel0 = channel0;
      this.acceptorUsed = acceptorUsed;
      this.rc = rc;
    }

    /**
     * Dispatches on packet type: PING (echo + optional TTL update), SUBSCRIBE_TOPOLOGY[_V2]
     * (register a topology proxy listener or send a one-shot node-id), NODE_ANNOUNCE (forward
     * to the cluster connection) and BACKUP_REGISTRATION (verify credentials, start
     * replication).
     */
    public void handlePacket(final Packet packet) {
      if (packet.getType() == PacketImpl.PING) {
        Ping ping = (Ping) packet;
        if (config.getConnectionTTLOverride() == -1) {
          // Allow clients to specify connection ttl
          entry.ttl = ping.getConnectionTTL();
        }
        // Just send a ping back
        channel0.send(packet);
      } else if (packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY
          || packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY_V2) {
        SubscribeClusterTopologyUpdatesMessage msg =
            (SubscribeClusterTopologyUpdatesMessage) packet;
        if (packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY_V2) {
          channel0
              .getConnection()
              .setClientVersion(
                  ((SubscribeClusterTopologyUpdatesMessageV2) msg).getClientVersion());
        }
        // Proxy that relays topology changes back to the subscribed client.
        final ClusterTopologyListener listener =
            new ClusterTopologyListener() {
              public void nodeUP(
                  final long uniqueEventID,
                  final String nodeID,
                  final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
                  final boolean last) {
                // Using an executor as most of the notifications on the Topology
                // may come from a channel itself
                // What could cause deadlocks
                entry.connectionExecutor.execute(
                    new Runnable() {
                      public void run() {
                        if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                          channel0.send(
                              new ClusterTopologyChangeMessage_V2(
                                  uniqueEventID, nodeID, connectorPair, last));
                        } else {
                          channel0.send(
                              new ClusterTopologyChangeMessage(nodeID, connectorPair, last));
                        }
                      }
                    });
              }

              public void nodeDown(final long uniqueEventID, final String nodeID) {
                // Using an executor as most of the notifications on the Topology
                // may come from a channel itself
                // What could cause deadlocks
                entry.connectionExecutor.execute(
                    new Runnable() {
                      public void run() {
                        if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                          channel0.send(
                              new ClusterTopologyChangeMessage_V2(uniqueEventID, nodeID));
                        } else {
                          channel0.send(new ClusterTopologyChangeMessage(nodeID));
                        }
                      }
                    });
              }

              @Override
              public String toString() {
                return "Remote Proxy on channel "
                    + Integer.toHexString(System.identityHashCode(this));
              }
            };
        if (acceptorUsed.getClusterConnection() != null) {
          acceptorUsed.getClusterConnection().addClusterTopologyListener(listener);
          // Deregister the proxy when the client connection goes away.
          rc.addCloseListener(
              new CloseListener() {
                public void connectionClosed() {
                  acceptorUsed.getClusterConnection().removeClusterTopologyListener(listener);
                }
              });
        } else {
          // if not clustered, we send a single notification to the client containing the node-id
          // where the server is connected to
          // This is done this way so Recovery discovery could also use the node-id for
          // non-clustered setups
          entry.connectionExecutor.execute(
              new Runnable() {
                public void run() {
                  String nodeId = server.getNodeID().toString();
                  Pair<TransportConfiguration, TransportConfiguration> emptyConfig =
                      new Pair<TransportConfiguration, TransportConfiguration>(null, null);
                  if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                    channel0.send(
                        new ClusterTopologyChangeMessage_V2(
                            System.currentTimeMillis(), nodeId, emptyConfig, true));
                  } else {
                    channel0.send(new ClusterTopologyChangeMessage(nodeId, emptyConfig, true));
                  }
                }
              });
        }
      } else if (packet.getType() == PacketImpl.NODE_ANNOUNCE) {
        NodeAnnounceMessage msg = (NodeAnnounceMessage) packet;
        Pair<TransportConfiguration, TransportConfiguration> pair;
        if (msg.isBackup()) {
          // Backups announce only their own connector, in the backup slot.
          pair =
              new Pair<TransportConfiguration, TransportConfiguration>(null, msg.getConnector());
        } else {
          pair =
              new Pair<TransportConfiguration, TransportConfiguration>(
                  msg.getConnector(), msg.getBackupConnector());
        }
        if (isTrace) {
          HornetQLogger.LOGGER.trace(
              "Server "
                  + server
                  + " receiving nodeUp from NodeID="
                  + msg.getNodeID()
                  + ", pair="
                  + pair);
        }
        if (acceptorUsed != null) {
          ClusterConnection clusterConn = acceptorUsed.getClusterConnection();
          if (clusterConn != null) {
            clusterConn.nodeAnnounced(
                msg.getCurrentEventID(), msg.getNodeID(), pair, msg.isBackup());
          } else {
            HornetQLogger.LOGGER.debug("Cluster connection is null on acceptor = " + acceptorUsed);
          }
        } else {
          HornetQLogger.LOGGER.debug(
              "there is no acceptor used configured at the CoreProtocolManager " + this);
        }
      } else if (packet.getType() == PacketImpl.BACKUP_REGISTRATION) {
        BackupRegistrationMessage msg = (BackupRegistrationMessage) packet;
        ClusterConnection clusterConnection = acceptorUsed.getClusterConnection();
        // Only start replication for backups presenting valid cluster credentials.
        if (clusterConnection.verify(msg.getClusterUser(), msg.getClusterPassword())) {
          try {
            server.startReplication(
                rc, clusterConnection, getPair(msg.getConnector(), true), msg.isFailBackRequest());
          } catch (HornetQException e) {
            channel0.send(new BackupRegistrationFailedMessage(e));
          }
        } else {
          channel0.send(new BackupRegistrationFailedMessage(null));
        }
      }
    }

    /** Wraps a single connector as a (live, backup) pair depending on the backup flag. */
    private Pair<TransportConfiguration, TransportConfiguration> getPair(
        TransportConfiguration conn, boolean isBackup) {
      if (isBackup) {
        return new Pair<TransportConfiguration, TransportConfiguration>(null, conn);
      }
      return new Pair<TransportConfiguration, TransportConfiguration>(conn, null);
    }
  }
}
/**
 * Channel-0 control dispatch for one connection. Handles: PING (echo back, optionally adopting
 * the client's TTL), SUBSCRIBE_TOPOLOGY[_V2] (register a topology proxy listener, or send a
 * one-shot node-id message when not clustered), NODE_ANNOUNCE (forward the announced connector
 * pair to the cluster connection) and BACKUP_REGISTRATION (verify cluster credentials and start
 * replication). Unknown packet types are ignored.
 */
public void handlePacket(final Packet packet) {
  if (packet.getType() == PacketImpl.PING) {
    Ping ping = (Ping) packet;
    if (config.getConnectionTTLOverride() == -1) {
      // Allow clients to specify connection ttl
      entry.ttl = ping.getConnectionTTL();
    }
    // Just send a ping back
    channel0.send(packet);
  } else if (packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY
      || packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY_V2) {
    SubscribeClusterTopologyUpdatesMessage msg = (SubscribeClusterTopologyUpdatesMessage) packet;
    if (packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY_V2) {
      channel0
          .getConnection()
          .setClientVersion(((SubscribeClusterTopologyUpdatesMessageV2) msg).getClientVersion());
    }
    // Proxy that relays topology changes back to the subscribed client.
    final ClusterTopologyListener listener =
        new ClusterTopologyListener() {
          public void nodeUP(
              final long uniqueEventID,
              final String nodeID,
              final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
              final boolean last) {
            // Using an executor as most of the notifications on the Topology
            // may come from a channel itself
            // What could cause deadlocks
            entry.connectionExecutor.execute(
                new Runnable() {
                  public void run() {
                    if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                      channel0.send(
                          new ClusterTopologyChangeMessage_V2(
                              uniqueEventID, nodeID, connectorPair, last));
                    } else {
                      channel0.send(
                          new ClusterTopologyChangeMessage(nodeID, connectorPair, last));
                    }
                  }
                });
          }

          public void nodeDown(final long uniqueEventID, final String nodeID) {
            // Using an executor as most of the notifications on the Topology
            // may come from a channel itself
            // What could cause deadlocks
            entry.connectionExecutor.execute(
                new Runnable() {
                  public void run() {
                    if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                      channel0.send(new ClusterTopologyChangeMessage_V2(uniqueEventID, nodeID));
                    } else {
                      channel0.send(new ClusterTopologyChangeMessage(nodeID));
                    }
                  }
                });
          }

          @Override
          public String toString() {
            return "Remote Proxy on channel " + Integer.toHexString(System.identityHashCode(this));
          }
        };
    if (acceptorUsed.getClusterConnection() != null) {
      acceptorUsed.getClusterConnection().addClusterTopologyListener(listener);
      // Deregister the proxy when the client connection goes away.
      rc.addCloseListener(
          new CloseListener() {
            public void connectionClosed() {
              acceptorUsed.getClusterConnection().removeClusterTopologyListener(listener);
            }
          });
    } else {
      // if not clustered, we send a single notification to the client containing the node-id
      // where the server is connected to
      // This is done this way so Recovery discovery could also use the node-id for
      // non-clustered setups
      entry.connectionExecutor.execute(
          new Runnable() {
            public void run() {
              String nodeId = server.getNodeID().toString();
              Pair<TransportConfiguration, TransportConfiguration> emptyConfig =
                  new Pair<TransportConfiguration, TransportConfiguration>(null, null);
              if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                channel0.send(
                    new ClusterTopologyChangeMessage_V2(
                        System.currentTimeMillis(), nodeId, emptyConfig, true));
              } else {
                channel0.send(new ClusterTopologyChangeMessage(nodeId, emptyConfig, true));
              }
            }
          });
    }
  } else if (packet.getType() == PacketImpl.NODE_ANNOUNCE) {
    NodeAnnounceMessage msg = (NodeAnnounceMessage) packet;
    Pair<TransportConfiguration, TransportConfiguration> pair;
    if (msg.isBackup()) {
      // Backups announce only their own connector, in the backup slot.
      pair = new Pair<TransportConfiguration, TransportConfiguration>(null, msg.getConnector());
    } else {
      pair =
          new Pair<TransportConfiguration, TransportConfiguration>(
              msg.getConnector(), msg.getBackupConnector());
    }
    if (isTrace) {
      HornetQLogger.LOGGER.trace(
          "Server "
              + server
              + " receiving nodeUp from NodeID="
              + msg.getNodeID()
              + ", pair="
              + pair);
    }
    if (acceptorUsed != null) {
      ClusterConnection clusterConn = acceptorUsed.getClusterConnection();
      if (clusterConn != null) {
        clusterConn.nodeAnnounced(msg.getCurrentEventID(), msg.getNodeID(), pair, msg.isBackup());
      } else {
        HornetQLogger.LOGGER.debug("Cluster connection is null on acceptor = " + acceptorUsed);
      }
    } else {
      HornetQLogger.LOGGER.debug(
          "there is no acceptor used configured at the CoreProtocolManager " + this);
    }
  } else if (packet.getType() == PacketImpl.BACKUP_REGISTRATION) {
    BackupRegistrationMessage msg = (BackupRegistrationMessage) packet;
    ClusterConnection clusterConnection = acceptorUsed.getClusterConnection();
    // Only start replication for backups presenting valid cluster credentials.
    if (clusterConnection.verify(msg.getClusterUser(), msg.getClusterPassword())) {
      try {
        server.startReplication(
            rc, clusterConnection, getPair(msg.getConnector(), true), msg.isFailBackRequest());
      } catch (HornetQException e) {
        channel0.send(new BackupRegistrationFailedMessage(e));
      }
    } else {
      channel0.send(new BackupRegistrationFailedMessage(null));
    }
  }
}
/**
 * Shuts the locator down: stops discovery (or disconnects the static connector), closes
 * in-flight connecting factories, then closes or cleans up the established factories, and
 * finally shuts down the locator-owned thread pools. Idempotent: a second call while already
 * CLOSED only logs at debug level.
 *
 * @param sendClose whether established factories should do a full close (true) or just a local
 *                  cleanup (false)
 */
private void doClose(final boolean sendClose) {
  if (state == STATE.CLOSED) {
    if (HornetQLogger.LOGGER.isDebugEnabled()) {
      HornetQLogger.LOGGER.debug(this + " is already closed when calling closed");
    }
    return;
  }
  state = STATE.CLOSING;
  if (discoveryGroup != null) {
    synchronized (this) {
      try {
        discoveryGroup.stop();
      } catch (Exception e) {
        HornetQLogger.LOGGER.failedToStopDiscovery(e);
      }
    }
  } else {
    staticConnector.disconnect();
  }
  synchronized (connectingFactories) {
    for (ClientSessionFactoryInternal csf : connectingFactories) {
      csf.close();
    }
    connectingFactories.clear();
  }
  // Snapshot-and-clear under the lock, close outside it.
  Set<ClientSessionFactoryInternal> clonedFactory;
  synchronized (factories) {
    clonedFactory = new HashSet<ClientSessionFactoryInternal>(factories);
    factories.clear();
  }
  for (ClientSessionFactory factory : clonedFactory) {
    if (sendClose) {
      factory.close();
    } else {
      factory.cleanup();
    }
  }
  // Only shut down the pools when this locator created them (shutdownPool).
  if (shutdownPool) {
    if (threadPool != null) {
      threadPool.shutdown();
      try {
        if (!threadPool.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
          HornetQLogger.LOGGER.timedOutWaitingForTermination();
        }
      } catch (InterruptedException ignore) {
      }
    }
    if (scheduledThreadPool != null) {
      scheduledThreadPool.shutdown();
      try {
        if (!scheduledThreadPool.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
          HornetQLogger.LOGGER.timedOutWaitingForScheduledPoolTermination();
        }
      } catch (InterruptedException ignore) {
      }
    }
  }
  readOnly = false;
  state = STATE.CLOSED;
}
public ClientSessionFactory connect() throws HornetQException { assertOpen(); initialise(); ClientSessionFactory csf = null; createConnectors(); try { int retryNumber = 0; while (csf == null && !isClosed()) { retryNumber++; for (Connector conn : connectors) { if (HornetQLogger.LOGGER.isDebugEnabled()) { HornetQLogger.LOGGER.debug(this + "::Submitting connect towards " + conn); } csf = conn.tryConnect(); if (csf != null) { csf.getConnection() .addFailureListener( new FailureListener() { // Case the node where the cluster connection was connected is gone, we need // to restart the // connection public void connectionFailed( HornetQException exception, boolean failedOver) { if (clusterConnection && exception.getType() == HornetQExceptionType.DISCONNECTED) { try { ServerLocatorImpl.this.start(startExecutor); } catch (Exception e) { // There isn't much to be done if this happens here HornetQLogger.LOGGER.errorStartingLocator(e); } } } @Override public String toString() { return "FailureListener('restarts cluster connections')"; } }); if (HornetQLogger.LOGGER.isDebugEnabled()) { HornetQLogger.LOGGER.debug( "Returning " + csf + " after " + retryNumber + " retries on StaticConnector " + ServerLocatorImpl.this); } return csf; } } if (initialConnectAttempts >= 0 && retryNumber > initialConnectAttempts) { break; } if (!isClosed()) { Thread.sleep(retryInterval); } } } catch (RejectedExecutionException e) { HornetQLogger.LOGGER.debug("Rejected execution", e); throw e; } catch (Exception e) { HornetQLogger.LOGGER.errorConnectingToNodes(e); throw HornetQMessageBundle.BUNDLE.cannotConnectToStaticConnectors(e); } if (!isClosed()) { HornetQLogger.LOGGER.errorConnectingToNodes(e); throw HornetQMessageBundle.BUNDLE.cannotConnectToStaticConnectors2(); } return null; }
/*
 * Session implementation
 *
 * @author <a href="mailto:[email protected]">Tim Fox</a>
 * @author <a href="mailto:[email protected]">Clebert Suconic</a>
 * @author <a href="mailto:[email protected]">Jeff Mesnil</a>
 * @author <a href="mailto:[email protected]">Andy Taylor</a>
 */
public class ServerSessionImpl implements ServerSession, FailureListener {
  // Constants -----------------------------------------------------------------------------

  private static final boolean isTrace = HornetQLogger.LOGGER.isTraceEnabled();

  // Static -------------------------------------------------------------------------------

  // Attributes ----------------------------------------------------------------------------

  private final String username;

  private final String password;

  private final int minLargeMessageSize;

  private final boolean autoCommitSends;

  private final boolean autoCommitAcks;

  private final boolean preAcknowledge;

  private final boolean strictUpdateDeliveryCount;

  private final RemotingConnection remotingConnection;

  // Consumers keyed by their server-side consumer ID.
  private final Map<Long, ServerConsumer> consumers =
      new ConcurrentHashMap<Long, ServerConsumer>();

  // Current transaction: a fresh local tx for non-XA sessions, or the joined XA branch
  // between xaStart/xaEnd; null for an XA session with no active branch.
  private Transaction tx;

  private final boolean xa;

  private final StorageManager storageManager;

  private final ResourceManager resourceManager;

  public final PostOffice postOffice;

  private final SecurityStore securityStore;

  private final ManagementService managementService;

  private volatile boolean started = false;

  private final Map<SimpleString, TempQueueCleanerUpper> tempQueueCleannerUppers =
      new HashMap<SimpleString, TempQueueCleanerUpper>();

  private final String name;

  private final HornetQServer server;

  private final SimpleString managementAddress;

  // The current currentLargeMessage being processed
  private volatile LargeServerMessage currentLargeMessage;

  private final RoutingContext routingContext = new RoutingContextImpl(null);

  private final SessionCallback callback;

  private volatile SimpleString defaultAddress;

  private volatile int timeoutSeconds;

  private Map<String, String> metaData;

  private OperationContext sessionContext;

  // Session's usage should be by definition single threaded, hence it's not needed to use a
  // concurrentHashMap here
  private final Map<SimpleString, Pair<UUID, AtomicLong>> targetAddressInfos =
      new HashMap<SimpleString, Pair<UUID, AtomicLong>>();

  private final long creationTime = System.currentTimeMillis();

  // Constructors ---------------------------------------------------------------------------------

  /**
   * Creates a server-side session bound to the given remoting connection. Registers itself as a
   * failure listener on the connection; for non-XA sessions a fresh local transaction is started
   * immediately.
   *
   * @throws Exception if initialisation fails
   */
  public ServerSessionImpl(
      final String name,
      final String username,
      final String password,
      final int minLargeMessageSize,
      final boolean autoCommitSends,
      final boolean autoCommitAcks,
      final boolean preAcknowledge,
      final boolean strictUpdateDeliveryCount,
      final boolean xa,
      final RemotingConnection remotingConnection,
      final StorageManager storageManager,
      final PostOffice postOffice,
      final ResourceManager resourceManager,
      final SecurityStore securityStore,
      final ManagementService managementService,
      final HornetQServer server,
      final SimpleString managementAddress,
      final SimpleString defaultAddress,
      final SessionCallback callback)
      throws Exception {
    this.username = username;
    this.password = password;
    this.minLargeMessageSize = minLargeMessageSize;
    this.autoCommitSends = autoCommitSends;
    this.autoCommitAcks = autoCommitAcks;
    this.preAcknowledge = preAcknowledge;
    this.remotingConnection = remotingConnection;
    this.storageManager = storageManager;
    this.postOffice = postOffice;
    this.resourceManager = resourceManager;
    this.securityStore = securityStore;
    // Default tx timeout comes from the resource manager; can be overridden via xaSetTimeout.
    timeoutSeconds = resourceManager.getTimeoutSeconds();
    this.xa = xa;
    this.strictUpdateDeliveryCount = strictUpdateDeliveryCount;
    this.managementService = managementService;
    this.name = name;
    this.server = server;
    this.managementAddress = managementAddress;
    this.callback = callback;
    this.defaultAddress = defaultAddress;
    remotingConnection.addFailureListener(this);
    if (!xa) {
      tx = newTransaction();
    }
  }

  // ServerSession implementation //
---------------------------------------------------------------------------- /** @return the sessionContext */ public OperationContext getSessionContext() { return sessionContext; } /** @param sessionContext the sessionContext to set */ public void setSessionContext(OperationContext sessionContext) { this.sessionContext = sessionContext; } public String getUsername() { return username; } public String getPassword() { return password; } public int getMinLargeMessageSize() { return minLargeMessageSize; } public String getName() { return name; } public Object getConnectionID() { return remotingConnection.getID(); } public Set<ServerConsumer> getServerConsumers() { Set<ServerConsumer> consumersClone = new HashSet<ServerConsumer>(consumers.values()); return Collections.unmodifiableSet(consumersClone); } public void removeConsumer(final long consumerID) throws Exception { if (consumers.remove(consumerID) == null) { throw new IllegalStateException("Cannot find consumer with id " + consumerID + " to remove"); } } private synchronized void doClose(final boolean failed) throws Exception { if (tx != null && tx.getXid() == null) { // We only rollback local txs on close, not XA tx branches try { rollback(failed, false); } catch (Exception e) { HornetQLogger.LOGGER.warn(e.getMessage(), e); } } Set<ServerConsumer> consumersClone = new HashSet<ServerConsumer>(consumers.values()); for (ServerConsumer consumer : consumersClone) { consumer.close(failed); } consumers.clear(); server.removeSession(name); if (currentLargeMessage != null) { try { currentLargeMessage.deleteFile(); } catch (Throwable error) { HornetQLogger.LOGGER.errorDeletingLargeMessageFile(error); } } remotingConnection.removeFailureListener(this); callback.closed(); } public void createConsumer( final long consumerID, final SimpleString queueName, final SimpleString filterString, final boolean browseOnly) throws Exception { Binding binding = postOffice.getBinding(queueName); if (binding == null || binding.getType() != 
BindingType.LOCAL_QUEUE) { throw HornetQMessageBundle.BUNDLE.noSuchQueue(queueName); } securityStore.check(binding.getAddress(), CheckType.CONSUME, this); Filter filter = FilterImpl.createFilter(filterString); ServerConsumer consumer = new ServerConsumerImpl( consumerID, this, (QueueBinding) binding, filter, started, browseOnly, storageManager, callback, preAcknowledge, strictUpdateDeliveryCount, managementService); consumers.put(consumer.getID(), consumer); if (!browseOnly) { TypedProperties props = new TypedProperties(); props.putSimpleStringProperty(ManagementHelper.HDR_ADDRESS, binding.getAddress()); props.putSimpleStringProperty(ManagementHelper.HDR_CLUSTER_NAME, binding.getClusterName()); props.putSimpleStringProperty(ManagementHelper.HDR_ROUTING_NAME, binding.getRoutingName()); props.putIntProperty(ManagementHelper.HDR_DISTANCE, binding.getDistance()); Queue theQueue = (Queue) binding.getBindable(); props.putIntProperty(ManagementHelper.HDR_CONSUMER_COUNT, theQueue.getConsumerCount()); if (filterString != null) { props.putSimpleStringProperty(ManagementHelper.HDR_FILTERSTRING, filterString); } Notification notification = new Notification(null, CONSUMER_CREATED, props); managementService.sendNotification(notification); } } public void createQueue( final SimpleString address, final SimpleString name, final SimpleString filterString, final boolean temporary, final boolean durable) throws Exception { if (durable) { // make sure the user has privileges to create this queue securityStore.check(address, CheckType.CREATE_DURABLE_QUEUE, this); } else { securityStore.check(address, CheckType.CREATE_NON_DURABLE_QUEUE, this); } Queue queue = server.createQueue(address, name, filterString, durable, temporary); if (temporary) { // Temporary queue in core simply means the queue will be deleted if // the remoting connection // dies. It does not mean it will get deleted automatically when the // session is closed. 
// It is up to the user to delete the queue when finished with it TempQueueCleanerUpper cleaner = new TempQueueCleanerUpper(postOffice, name, queue); remotingConnection.addCloseListener(cleaner); remotingConnection.addFailureListener(cleaner); tempQueueCleannerUppers.put(name, cleaner); } } /** * For test cases only * * @return RemotingConnection */ public RemotingConnection getRemotingConnection() { return remotingConnection; } private static class TempQueueCleanerUpper implements CloseListener, FailureListener { private final PostOffice postOffice; private final SimpleString bindingName; private final Queue queue; TempQueueCleanerUpper( final PostOffice postOffice, final SimpleString bindingName, final Queue queue) { this.postOffice = postOffice; this.bindingName = bindingName; this.queue = queue; } private void run() { try { if (HornetQLogger.LOGGER.isDebugEnabled()) { HornetQLogger.LOGGER.debug("deleting temporary queue " + bindingName); } if (postOffice.getBinding(bindingName) != null) { postOffice.removeBinding(bindingName); } queue.deleteAllReferences(); } catch (Exception e) { HornetQLogger.LOGGER.errorRemovingTempQueue(e, bindingName); } } public void connectionFailed(HornetQException exception, boolean failedOver) { run(); } public void connectionClosed() { run(); } @Override public String toString() { return "Temporary Cleaner for queue " + bindingName; } } public void deleteQueue(final SimpleString name) throws Exception { Binding binding = postOffice.getBinding(name); if (binding == null || binding.getType() != BindingType.LOCAL_QUEUE) { throw new NonExistentQueueException(); } server.destroyQueue(name, this); TempQueueCleanerUpper cleaner = this.tempQueueCleannerUppers.remove(name); if (cleaner != null) { remotingConnection.removeCloseListener(cleaner); remotingConnection.removeFailureListener(cleaner); } } public QueueQueryResult executeQueueQuery(final SimpleString name) throws Exception { if (name == null) { throw 
HornetQMessageBundle.BUNDLE.queueNameIsNull(); } QueueQueryResult response; Binding binding = postOffice.getBinding(name); if (binding != null && binding.getType() == BindingType.LOCAL_QUEUE) { Queue queue = (Queue) binding.getBindable(); Filter filter = queue.getFilter(); SimpleString filterString = filter == null ? null : filter.getFilterString(); response = new QueueQueryResult( name, binding.getAddress(), queue.isDurable(), queue.isTemporary(), filterString, queue.getConsumerCount(), queue.getMessageCount()); } // make an exception for the management address (see HORNETQ-29) else if (name.equals(managementAddress)) { response = new QueueQueryResult(name, managementAddress, true, false, null, -1, -1); } else { response = new QueueQueryResult(); } return response; } public BindingQueryResult executeBindingQuery(final SimpleString address) throws Exception { if (address == null) { throw HornetQMessageBundle.BUNDLE.addressIsNull(); } List<SimpleString> names = new ArrayList<SimpleString>(); // make an exception for the management address (see HORNETQ-29) if (address.equals(managementAddress)) { return new BindingQueryResult(true, names); } Bindings bindings = postOffice.getMatchingBindings(address); for (Binding binding : bindings.getBindings()) { if (binding.getType() == BindingType.LOCAL_QUEUE || binding.getType() == BindingType.REMOTE_QUEUE) { names.add(binding.getUniqueName()); } } return new BindingQueryResult(!names.isEmpty(), names); } public void forceConsumerDelivery(final long consumerID, final long sequence) throws Exception { ServerConsumer consumer = consumers.get(consumerID); // this would be possible if the server consumer was closed by pings/pongs.. 
etc if (consumer != null) { consumer.forceDelivery(sequence); } } public void acknowledge(final long consumerID, final long messageID) throws Exception { ServerConsumer consumer = consumers.get(consumerID); if (consumer == null) { throw HornetQMessageBundle.BUNDLE.consumerDoesntExist(consumerID); } if (tx != null && tx.getState() == State.ROLLEDBACK) { // JBPAPP-8845 - if we let stuff to be acked on a rolled back TX, we will just // have these messages to be stuck on the limbo until the server is restarted // The tx has already timed out, so we need to ack and rollback immediately Transaction newTX = newTransaction(); consumer.acknowledge(autoCommitAcks, newTX, messageID); newTX.rollback(); } else { consumer.acknowledge(autoCommitAcks, tx, messageID); } } public void individualAcknowledge(final long consumerID, final long messageID) throws Exception { ServerConsumer consumer = consumers.get(consumerID); if (this.xa && tx == null) { throw new HornetQXAException(XAException.XAER_PROTO, "Invalid transaction state"); } if (tx != null && tx.getState() == State.ROLLEDBACK) { // JBPAPP-8845 - if we let stuff to be acked on a rolled back TX, we will just // have these messages to be stuck on the limbo until the server is restarted // The tx has already timed out, so we need to ack and rollback immediately Transaction newTX = newTransaction(); consumer.individualAcknowledge(autoCommitAcks, tx, messageID); newTX.rollback(); } else { consumer.individualAcknowledge(autoCommitAcks, tx, messageID); } } public void expire(final long consumerID, final long messageID) throws Exception { MessageReference ref = consumers.get(consumerID).removeReferenceByID(messageID); if (ref != null) { ref.getQueue().expire(ref); } } public synchronized void commit() throws Exception { if (isTrace) { HornetQLogger.LOGGER.trace("Calling commit"); } try { tx.commit(); } finally { tx = newTransaction(); } } public void rollback(final boolean considerLastMessageAsDelivered) throws Exception { 
rollback(false, considerLastMessageAsDelivered); } /** * @param clientFailed If the client has failed, we can't decrease the delivery-counts, and the * close may issue a rollback * @param considerLastMessageAsDelivered * @throws Exception */ private synchronized void rollback( final boolean clientFailed, final boolean considerLastMessageAsDelivered) throws Exception { if (tx == null) { // Might be null if XA tx = newTransaction(); } doRollback(clientFailed, considerLastMessageAsDelivered, tx); if (xa) { tx = null; } else { tx = newTransaction(); } } /** @return */ private TransactionImpl newTransaction() { TransactionImpl tx = new TransactionImpl(storageManager, timeoutSeconds); return tx; } /** * @param xid * @return */ private TransactionImpl newTransaction(final Xid xid) { TransactionImpl tx = new TransactionImpl(xid, storageManager, timeoutSeconds); return tx; } public synchronized void xaCommit(final Xid xid, final boolean onePhase) throws Exception { if (tx != null && tx.getXid().equals(xid)) { final String msg = "Cannot commit, session is currently doing work in transaction " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { Transaction theTx = resourceManager.removeTransaction(xid); if (isTrace) { HornetQLogger.LOGGER.trace("XAcommit into " + theTx + ", xid=" + xid); } if (theTx == null) { // checked heuristic committed transactions if (resourceManager.getHeuristicCommittedTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURCOM, "transaction has been heuristically committed: " + xid); } // checked heuristic rolled back transactions else if (resourceManager.getHeuristicRolledbackTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURRB, "transaction has been heuristically rolled back: " + xid); } else { if (isTrace) { HornetQLogger.LOGGER.trace( "XAcommit into " + theTx + ", xid=" + xid + " cannot find it"); } throw new HornetQXAException( XAException.XAER_NOTA, "Cannot 
find xid in resource manager: " + xid); } } else { if (theTx.getState() == Transaction.State.SUSPENDED) { // Put it back resourceManager.putTransaction(xid, theTx); throw new HornetQXAException( XAException.XAER_PROTO, "Cannot commit transaction, it is suspended " + xid); } else { theTx.commit(onePhase); } } } } public synchronized void xaEnd(final Xid xid) throws Exception { if (tx != null && tx.getXid().equals(xid)) { if (tx.getState() == Transaction.State.SUSPENDED) { final String msg = "Cannot end, transaction is suspended"; throw new HornetQXAException(XAException.XAER_PROTO, msg); } else if (tx.getState() == Transaction.State.ROLLEDBACK) { final String msg = "Cannot end, transaction is rolled back"; tx = null; throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { tx = null; } } else { // It's also legal for the TM to call end for a Xid in the suspended // state // See JTA 1.1 spec 3.4.4 - state diagram // Although in practice TMs rarely do this. Transaction theTx = resourceManager.getTransaction(xid); if (theTx == null) { final String msg = "Cannot find suspended transaction to end " + xid; throw new HornetQXAException(XAException.XAER_NOTA, msg); } else { if (theTx.getState() != Transaction.State.SUSPENDED) { final String msg = "Transaction is not suspended " + xid; throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { theTx.resume(); } } } } public synchronized void xaForget(final Xid xid) throws Exception { long id = resourceManager.removeHeuristicCompletion(xid); if (id != -1) { try { storageManager.deleteHeuristicCompletion(id); } catch (Exception e) { e.printStackTrace(); throw new HornetQXAException(XAException.XAER_RMERR); } } else { throw new HornetQXAException(XAException.XAER_NOTA); } } public synchronized void xaJoin(final Xid xid) throws Exception { Transaction theTx = resourceManager.getTransaction(xid); if (theTx == null) { final String msg = "Cannot find xid in resource manager: " + xid; throw new 
HornetQXAException(XAException.XAER_NOTA, msg); } else { if (theTx.getState() == Transaction.State.SUSPENDED) { throw new HornetQXAException( XAException.XAER_PROTO, "Cannot join tx, it is suspended " + xid); } else { tx = theTx; } } } public synchronized void xaResume(final Xid xid) throws Exception { if (tx != null) { final String msg = "Cannot resume, session is currently doing work in a transaction " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { Transaction theTx = resourceManager.getTransaction(xid); if (theTx == null) { final String msg = "Cannot find xid in resource manager: " + xid; throw new HornetQXAException(XAException.XAER_NOTA, msg); } else { if (theTx.getState() != Transaction.State.SUSPENDED) { throw new HornetQXAException( XAException.XAER_PROTO, "Cannot resume transaction, it is not suspended " + xid); } else { tx = theTx; tx.resume(); } } } } public synchronized void xaRollback(final Xid xid) throws Exception { if (tx != null && tx.getXid().equals(xid)) { final String msg = "Cannot roll back, session is currently doing work in a transaction " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { Transaction theTx = resourceManager.removeTransaction(xid); if (isTrace) { HornetQLogger.LOGGER.trace("xarollback into " + theTx); } if (theTx == null) { // checked heuristic committed transactions if (resourceManager.getHeuristicCommittedTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURCOM, "transaction has ben heuristically committed: " + xid); } // checked heuristic rolled back transactions else if (resourceManager.getHeuristicRolledbackTransactions().contains(xid)) { throw new HornetQXAException( XAException.XA_HEURRB, "transaction has ben heuristically rolled back: " + xid); } else { if (isTrace) { HornetQLogger.LOGGER.trace( "xarollback into " + theTx + ", xid=" + xid + " forcing a rollback regular"); } try { // jbpapp-8845 // This could have happened 
because the TX timed out, // at this point we would be better on rolling back this session as a way to prevent // consumers from holding their messages this.rollback(false); } catch (Exception e) { HornetQLogger.LOGGER.warn(e.getMessage(), e); } throw new HornetQXAException( XAException.XAER_NOTA, "Cannot find xid in resource manager: " + xid); } } else { if (theTx.getState() == Transaction.State.SUSPENDED) { if (isTrace) { HornetQLogger.LOGGER.trace( "xarollback into " + theTx + " sending tx back as it was suspended"); } // Put it back resourceManager.putTransaction(xid, tx); throw new HornetQXAException( XAException.XAER_PROTO, "Cannot rollback transaction, it is suspended " + xid); } else { doRollback(false, false, theTx); } } } } public synchronized void xaStart(final Xid xid) throws Exception { if (tx != null) { final String msg = "Cannot start, session is already doing work in a transaction " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { tx = newTransaction(xid); if (isTrace) { HornetQLogger.LOGGER.trace("xastart into tx= " + tx); } boolean added = resourceManager.putTransaction(xid, tx); if (!added) { final String msg = "Cannot start, there is already a xid " + tx.getXid(); throw new HornetQXAException(XAException.XAER_DUPID, msg); } } } public synchronized void xaSuspend() throws Exception { if (isTrace) { HornetQLogger.LOGGER.trace("xasuspend on " + this.tx); } if (tx == null) { final String msg = "Cannot suspend, session is not doing work in a transaction "; throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { if (tx.getState() == Transaction.State.SUSPENDED) { final String msg = "Cannot suspend, transaction is already suspended " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { tx.suspend(); tx = null; } } } public synchronized void xaPrepare(final Xid xid) throws Exception { if (tx != null && tx.getXid().equals(xid)) { final String msg = "Cannot commit, session is 
currently doing work in a transaction " + tx.getXid(); throw new HornetQXAException(XAException.XAER_PROTO, msg); } else { Transaction theTx = resourceManager.getTransaction(xid); if (isTrace) { HornetQLogger.LOGGER.trace("xaprepare into " + ", xid=" + xid + ", tx= " + tx); } if (theTx == null) { final String msg = "Cannot find xid in resource manager: " + xid; throw new HornetQXAException(XAException.XAER_NOTA, msg); } else { if (theTx.getState() == Transaction.State.SUSPENDED) { throw new HornetQXAException( XAException.XAER_PROTO, "Cannot prepare transaction, it is suspended " + xid); } else if (theTx.getState() == Transaction.State.PREPARED) { HornetQLogger.LOGGER.info("ignoring prepare on xid as already called :" + xid); } else { theTx.prepare(); } } } } public List<Xid> xaGetInDoubtXids() { List<Xid> xids = new ArrayList<Xid>(); xids.addAll(resourceManager.getPreparedTransactions()); xids.addAll(resourceManager.getHeuristicCommittedTransactions()); xids.addAll(resourceManager.getHeuristicRolledbackTransactions()); return xids; } public int xaGetTimeout() { return resourceManager.getTimeoutSeconds(); } public void xaSetTimeout(final int timeout) { timeoutSeconds = timeout; if (tx != null) { tx.setTimeout(timeout); } } public void start() { setStarted(true); } public void stop() { setStarted(false); } public void waitContextCompletion() { OperationContext formerCtx = storageManager.getContext(); try { try { if (!storageManager.waitOnOperations(10000)) { HornetQLogger.LOGGER.errorCompletingContext(new Exception("warning")); } } catch (Exception e) { HornetQLogger.LOGGER.warn(e.getMessage(), e); } } finally { storageManager.setContext(formerCtx); } } public void close(final boolean failed) { OperationContext formerCtx = storageManager.getContext(); try { storageManager.setContext(sessionContext); storageManager.afterCompleteOperations( new IOAsyncTask() { public void onError(int errorCode, String errorMessage) {} public void done() { try { doClose(failed); } 
catch (Exception e) { HornetQLogger.LOGGER.errorClosingSession(e); } } }); } finally { storageManager.setContext(formerCtx); } } public void closeConsumer(final long consumerID) throws Exception { final ServerConsumer consumer = consumers.get(consumerID); if (consumer != null) { consumer.close(false); } else { HornetQLogger.LOGGER.cannotFindConsumer(consumerID); } } public void receiveConsumerCredits(final long consumerID, final int credits) throws Exception { ServerConsumer consumer = consumers.get(consumerID); if (consumer == null) { HornetQLogger.LOGGER.debug("There is no consumer with id " + consumerID); return; } consumer.receiveCredits(credits); } public void sendLarge(final MessageInternal message) throws Exception { // need to create the LargeMessage before continue long id = storageManager.generateUniqueID(); LargeServerMessage largeMsg = storageManager.createLargeMessage(id, message); if (HornetQLogger.LOGGER.isTraceEnabled()) { HornetQLogger.LOGGER.trace("sendLarge::" + largeMsg); } if (currentLargeMessage != null) { HornetQLogger.LOGGER.replacingIncompleteLargeMessage(currentLargeMessage.getMessageID()); } currentLargeMessage = largeMsg; } public void send(final ServerMessage message, final boolean direct) throws Exception { long id = storageManager.generateUniqueID(); SimpleString address = message.getAddress(); message.setMessageID(id); message.encodeMessageIDToBuffer(); if (defaultAddress == null && address != null) { defaultAddress = address; } if (address == null) { if (message.isDurable()) { // We need to force a re-encode when the message gets persisted or when it gets reloaded // it will have no address message.setAddress(defaultAddress); } else { // We don't want to force a re-encode when the message gets sent to the consumer message.setAddressTransient(defaultAddress); } } if (isTrace) { HornetQLogger.LOGGER.trace( "send(message=" + message + ", direct=" + direct + ") being called"); } if (message.getAddress() == null) { // This could happen 
with some tests that are ignoring messages throw HornetQMessageBundle.BUNDLE.noAddress(); } if (message.getAddress().equals(managementAddress)) { // It's a management message handleManagementMessage(message, direct); } else { doSend(message, direct); } } public void sendContinuations( final int packetSize, final long messageBodySize, final byte[] body, final boolean continues) throws Exception { if (currentLargeMessage == null) { throw HornetQMessageBundle.BUNDLE.largeMessageNotInitialised(); } // Immediately release the credits for the continuations- these don't contribute to the // in-memory size // of the message currentLargeMessage.addBytes(body); if (!continues) { currentLargeMessage.releaseResources(); if (messageBodySize >= 0) { currentLargeMessage.putLongProperty(Message.HDR_LARGE_BODY_SIZE, messageBodySize); } doSend(currentLargeMessage, false); currentLargeMessage = null; } } public void requestProducerCredits(final SimpleString address, final int credits) throws Exception { PagingStore store = postOffice.getPagingManager().getPageStore(address); store.executeRunnableWhenMemoryAvailable( new Runnable() { public void run() { callback.sendProducerCreditsMessage(credits, address); } }); } public void setTransferring(final boolean transferring) { Set<ServerConsumer> consumersClone = new HashSet<ServerConsumer>(consumers.values()); for (ServerConsumer consumer : consumersClone) { consumer.setTransferring(transferring); } } public void addMetaData(String key, String data) { if (metaData == null) { metaData = new HashMap<String, String>(); } metaData.put(key, data); } public boolean addUniqueMetaData(String key, String data) { if (server.lookupSession(key, data)) { // There is a duplication of this property return false; } else { addMetaData(key, data); return true; } } public String getMetaData(String key) { String data = null; if (metaData != null) { data = metaData.get(key); } return data; } public String[] getTargetAddresses() { Map<SimpleString, Pair<UUID, 
AtomicLong>> copy = cloneTargetAddresses(); Iterator<SimpleString> iter = copy.keySet().iterator(); int num = copy.keySet().size(); String[] addresses = new String[num]; int i = 0; while (iter.hasNext()) { addresses[i] = iter.next().toString(); i++; } return addresses; } public String getLastSentMessageID(String address) { Pair<UUID, AtomicLong> value = targetAddressInfos.get(SimpleString.toSimpleString(address)); if (value != null) { return value.getA().toString(); } else { return null; } } public long getCreationTime() { return this.creationTime; } /* (non-Javadoc) * @see org.hornetq.core.server.ServerSession#getProducersInfoJSON() */ public void describeProducersInfo(JSONArray array) throws Exception { Map<SimpleString, Pair<UUID, AtomicLong>> targetCopy = cloneTargetAddresses(); for (Map.Entry<SimpleString, Pair<UUID, AtomicLong>> entry : targetCopy.entrySet()) { JSONObject producerInfo = new JSONObject(); producerInfo.put("connectionID", this.getConnectionID().toString()); producerInfo.put("sessionID", this.getName()); producerInfo.put("destination", entry.getKey().toString()); producerInfo.put("lastUUIDSent", entry.getValue().getA()); producerInfo.put("msgSent", entry.getValue().getB().longValue()); array.put(producerInfo); } } // FailureListener implementation // -------------------------------------------------------------------- public void connectionFailed(final HornetQException me, boolean failedOver) { try { HornetQLogger.LOGGER.clientConnectionFailed(name); close(true); HornetQLogger.LOGGER.clientConnectionFailedClearingSession(name); } catch (Throwable t) { HornetQLogger.LOGGER.errorClosingConnection(this); } } // Public // ---------------------------------------------------------------------------- public void clearLargeMessage() { currentLargeMessage = null; } // Private // ---------------------------------------------------------------------------- private Map<SimpleString, Pair<UUID, AtomicLong>> cloneTargetAddresses() { return new 
HashMap<SimpleString, Pair<UUID, AtomicLong>>(targetAddressInfos); } private void setStarted(final boolean s) { Set<ServerConsumer> consumersClone = new HashSet<ServerConsumer>(consumers.values()); for (ServerConsumer consumer : consumersClone) { consumer.setStarted(s); } started = s; } private void handleManagementMessage(final ServerMessage message, final boolean direct) throws Exception { try { securityStore.check(message.getAddress(), CheckType.MANAGE, this); } catch (HornetQException e) { if (!autoCommitSends) { tx.markAsRollbackOnly(e); } throw e; } ServerMessage reply = managementService.handleMessage(message); SimpleString replyTo = message.getSimpleStringProperty(ClientMessageImpl.REPLYTO_HEADER_NAME); if (replyTo != null) { reply.setAddress(replyTo); doSend(reply, direct); } } private void doRollback( final boolean clientFailed, final boolean lastMessageAsDelived, final Transaction theTx) throws Exception { boolean wasStarted = started; List<MessageReference> toCancel = new ArrayList<MessageReference>(); for (ServerConsumer consumer : consumers.values()) { if (wasStarted) { consumer.setStarted(false); } toCancel.addAll(consumer.cancelRefs(clientFailed, lastMessageAsDelived, theTx)); } for (MessageReference ref : toCancel) { ref.getQueue().cancel(theTx, ref); } if (wasStarted) { theTx.addOperation( new TransactionOperationAbstract() { public void afterRollback(Transaction tx) { for (ServerConsumer consumer : consumers.values()) { consumer.setStarted(true); } } }); } theTx.rollback(); } private void doSend(final ServerMessage msg, final boolean direct) throws Exception { // check the user has write access to this address. 
try { securityStore.check(msg.getAddress(), CheckType.SEND, this); } catch (HornetQException e) { if (!autoCommitSends) { tx.markAsRollbackOnly(e); } throw e; } if (tx == null || autoCommitSends) { } else { routingContext.setTransaction(tx); } postOffice.route(msg, routingContext, direct); Pair<UUID, AtomicLong> value = targetAddressInfos.get(msg.getAddress()); if (value == null) { targetAddressInfos.put( msg.getAddress(), new Pair<UUID, AtomicLong>(msg.getUserID(), new AtomicLong(1))); } else { value.setA(msg.getUserID()); value.getB().incrementAndGet(); } routingContext.clear(); } }