/** * A HornetQRAService ensures that HornetQ Resource Adapter will be stopped *before* the HornetQ * server. https://jira.jboss.org/browse/HORNETQ-339 * * @author <a href="mailto:[email protected]">Jeff Mesnil</a> */ public class HornetQRAService { // Constants ----------------------------------------------------- private static final Logger log = Logger.getLogger(HornetQRAService.class); // Attributes ---------------------------------------------------- private final MBeanServer mBeanServer; private final String resourceAdapterObjectName; // Static -------------------------------------------------------- // Constructors -------------------------------------------------- public HornetQRAService(final MBeanServer mBeanServer, final String resourceAdapterObjectName) { this.mBeanServer = mBeanServer; this.resourceAdapterObjectName = resourceAdapterObjectName; } // Public -------------------------------------------------------- public void stop() { try { ObjectName objectName = new ObjectName(resourceAdapterObjectName); Set<ObjectInstance> mbeanSet = mBeanServer.queryMBeans(objectName, null); for (ObjectInstance mbean : mbeanSet) { String stateString = (String) mBeanServer.getAttribute(mbean.getObjectName(), "StateString"); if ("Started".equalsIgnoreCase(stateString) || "Starting".equalsIgnoreCase(stateString)) { mBeanServer.invoke(mbean.getObjectName(), "stop", new Object[0], new String[0]); } } } catch (Exception e) { log.error("Unable to stop HornetQ resource adapter.", e); } } // Package protected --------------------------------------------- // Protected ----------------------------------------------------- // Private ------------------------------------------------------- // Inner classes ------------------------------------------------- }
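/*
 * Usage sketch (editorial addition, not part of the project): shows how HornetQRAService could be
 * wired up standalone against the platform MBeanServer. The ObjectName pattern passed to the
 * constructor is assumed for illustration only; the real resource-adapter MBean name depends on the
 * deployment.
 */
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;

public class HornetQRAServiceUsageSketch {
  public static void main(String[] args) {
    MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
    // Hypothetical ObjectName pattern; the actual name is deployment-specific.
    HornetQRAService raService =
        new HornetQRAService(mBeanServer, "jboss.jca:name='hornetq-ra.rar',*");
    // Invoked during server shutdown so the resource adapter stops before the HornetQ server.
    raService.stop();
  }
}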
/** * A ClusterManagerImpl * * @author <a href="mailto:[email protected]">Tim Fox</a> * @author Clebert Suconic * <p>Created 18 Nov 2008 09:23:49 */ public class ClusterManagerImpl implements ClusterManagerInternal { private static final Logger log = Logger.getLogger(ClusterManagerImpl.class); private final Map<String, BroadcastGroup> broadcastGroups = new HashMap<String, BroadcastGroup>(); private final Map<String, Bridge> bridges = new HashMap<String, Bridge>(); private final ExecutorFactory executorFactory; private final HornetQServer server; private final PostOffice postOffice; private final ScheduledExecutorService scheduledExecutor; private ClusterConnection defaultClusterConnection; private final ManagementService managementService; private final Configuration configuration; private final UUID nodeUUID; private volatile boolean started; private volatile boolean backup; private final boolean clustered; // the cluster connections which links this node to other cluster nodes private final Map<String, ClusterConnection> clusterConnections = new HashMap<String, ClusterConnection>(); private final Set<ServerLocatorInternal> clusterLocators = new ConcurrentHashSet<ServerLocatorInternal>(); private final Executor executor; public ClusterManagerImpl( final ExecutorFactory executorFactory, final HornetQServer server, final PostOffice postOffice, final ScheduledExecutorService scheduledExecutor, final ManagementService managementService, final Configuration configuration, final UUID nodeUUID, final boolean backup, final boolean clustered) { if (nodeUUID == null) { throw new IllegalArgumentException("Node uuid is null"); } this.executorFactory = executorFactory; executor = executorFactory.getExecutor(); ; this.server = server; this.postOffice = postOffice; this.scheduledExecutor = scheduledExecutor; this.managementService = managementService; this.configuration = configuration; this.nodeUUID = nodeUUID; this.backup = backup; this.clustered = clustered; } public String describe() { StringWriter str = new StringWriter(); PrintWriter out = new PrintWriter(str); out.println("Information on " + this); out.println("*******************************************************"); for (ClusterConnection conn : this.clusterConnections.values()) { out.println(conn.describe()); } out.println("*******************************************************"); return str.toString(); } public ClusterConnection getDefaultConnection() { return defaultClusterConnection; } @Override public String toString() { return "ClusterManagerImpl[server=" + server + "]@" + System.identityHashCode(this); } public String getNodeId() { return nodeUUID.toString(); } public synchronized void deploy() throws Exception { if (clustered) { for (BroadcastGroupConfiguration config : configuration.getBroadcastGroupConfigurations()) { deployBroadcastGroup(config); } for (ClusterConnectionConfiguration config : configuration.getClusterConfigurations()) { deployClusterConnection(config); } } } public synchronized void start() throws Exception { if (started) { return; } for (BroadcastGroup group : broadcastGroups.values()) { if (!backup) { group.start(); } } for (ClusterConnection conn : clusterConnections.values()) { conn.start(); if (backup && configuration.isSharedStore()) { conn.informTopology(); conn.announceBackup(); } } for (BridgeConfiguration config : configuration.getBridgeConfigurations()) { deployBridge(config, !backup); } started = true; } public void stop() throws Exception { synchronized (this) { if (!started) { return; } if (clustered) 
{ for (BroadcastGroup group : broadcastGroups.values()) { group.stop(); managementService.unregisterBroadcastGroup(group.getName()); } broadcastGroups.clear(); for (ClusterConnection clusterConnection : clusterConnections.values()) { clusterConnection.stop(); managementService.unregisterCluster(clusterConnection.getName().toString()); } } for (Bridge bridge : bridges.values()) { bridge.stop(); managementService.unregisterBridge(bridge.getName().toString()); } bridges.clear(); } for (ServerLocatorInternal clusterLocator : clusterLocators) { try { clusterLocator.close(); } catch (Exception e) { log.warn( "Error closing serverLocator=" + clusterLocator + ", message=" + e.getMessage(), e); } } clusterLocators.clear(); started = false; clearClusterConnections(); } public void flushExecutor() { FutureLatch future = new FutureLatch(); executor.execute(future); if (!future.await(10000)) { server.threadDump( "Couldn't flush ClusterManager executor (" + this + ") in 10 seconds, verify your thread pool size"); } } public boolean isStarted() { return started; } public Map<String, Bridge> getBridges() { return new HashMap<String, Bridge>(bridges); } public Set<ClusterConnection> getClusterConnections() { return new HashSet<ClusterConnection>(clusterConnections.values()); } public Set<BroadcastGroup> getBroadcastGroups() { return new HashSet<BroadcastGroup>(broadcastGroups.values()); } public ClusterConnection getClusterConnection(final String name) { return clusterConnections.get(name); } // backup node becomes live public synchronized void activate() { if (backup) { backup = false; for (BroadcastGroup broadcastGroup : broadcastGroups.values()) { try { broadcastGroup.start(); broadcastGroup.activate(); } catch (Exception e) { log.warn("unable to start broadcast group " + broadcastGroup.getName(), e); } } for (ClusterConnection clusterConnection : clusterConnections.values()) { try { clusterConnection.activate(); } catch (Exception e) { log.warn("unable to start cluster connection " + clusterConnection.getName(), e); } } for (Bridge bridge : bridges.values()) { try { bridge.start(); } catch (Exception e) { log.warn("unable to start bridge " + bridge.getName(), e); } } } } @Override public void announceBackup() { for (ClusterConnection conn : this.clusterConnections.values()) { conn.announceBackup(); } } // XXX HORNETQ-720 + cluster fixes: needs review @Override public void announceReplicatingBackup(Channel liveChannel) { List<ClusterConnectionConfiguration> configs = this.configuration.getClusterConfigurations(); if (!configs.isEmpty()) { ClusterConnectionConfiguration config = configs.get(0); TransportConfiguration connector = configuration.getConnectorConfigurations().get(config.getConnectorName()); if (connector == null) { log.warn( "No connector with name '" + config.getConnectorName() + "'. 
backup cannot be announced."); return; } liveChannel.send( new BackupRegistrationMessage( nodeUUID.toString(), connector, configuration.getClusterUser(), configuration.getClusterPassword())); } else { log.warn("no cluster connections defined, unable to announce backup"); } } public void addClusterLocator(final ServerLocatorInternal serverLocator) { this.clusterLocators.add(serverLocator); } public void removeClusterLocator(final ServerLocatorInternal serverLocator) { this.clusterLocators.remove(serverLocator); } public synchronized void deployBridge(final BridgeConfiguration config, final boolean start) throws Exception { if (config.getName() == null) { ClusterManagerImpl.log.warn( "Must specify a unique name for each bridge. This one will not be deployed."); return; } if (config.getQueueName() == null) { ClusterManagerImpl.log.warn( "Must specify a queue name for each bridge. This one will not be deployed."); return; } if (config.getForwardingAddress() == null) { ClusterManagerImpl.log.debug( "Forward address is not specified. Will use original message address instead"); } if (bridges.containsKey(config.getName())) { ClusterManagerImpl.log.warn( "There is already a bridge with name " + config.getName() + " deployed. This one will not be deployed."); return; } Transformer transformer = instantiateTransformer(config.getTransformerClassName()); Binding binding = postOffice.getBinding(new SimpleString(config.getQueueName())); if (binding == null) { ClusterManagerImpl.log.warn( "No queue found with name " + config.getQueueName() + " bridge will not be deployed."); return; } Queue queue = (Queue) binding.getBindable(); ServerLocatorInternal serverLocator; if (config.getDiscoveryGroupName() != null) { DiscoveryGroupConfiguration discoveryGroupConfiguration = configuration.getDiscoveryGroupConfigurations().get(config.getDiscoveryGroupName()); if (discoveryGroupConfiguration == null) { ClusterManagerImpl.log.warn( "No discovery group configured with name '" + config.getDiscoveryGroupName() + "'. 
The bridge will not be deployed."); return; } if (config.isHA()) { serverLocator = (ServerLocatorInternal) HornetQClient.createServerLocatorWithHA(discoveryGroupConfiguration); } else { serverLocator = (ServerLocatorInternal) HornetQClient.createServerLocatorWithoutHA(discoveryGroupConfiguration); } } else { TransportConfiguration[] tcConfigs = connectorNameListToArray(config.getStaticConnectors()); if (tcConfigs == null) { return; } if (config.isHA()) { serverLocator = (ServerLocatorInternal) HornetQClient.createServerLocatorWithHA(tcConfigs); } else { serverLocator = (ServerLocatorInternal) HornetQClient.createServerLocatorWithoutHA(tcConfigs); } } serverLocator.setConfirmationWindowSize(config.getConfirmationWindowSize()); // We are going to manually retry on the bridge in case of failure serverLocator.setReconnectAttempts(0); serverLocator.setInitialConnectAttempts(-1); serverLocator.setRetryInterval(config.getRetryInterval()); serverLocator.setMaxRetryInterval(config.getMaxRetryInterval()); serverLocator.setRetryIntervalMultiplier(config.getRetryIntervalMultiplier()); serverLocator.setClientFailureCheckPeriod(config.getClientFailureCheckPeriod()); serverLocator.setBlockOnDurableSend(!config.isUseDuplicateDetection()); serverLocator.setBlockOnNonDurableSend(!config.isUseDuplicateDetection()); serverLocator.setMinLargeMessageSize(config.getMinLargeMessageSize()); // disable flow control serverLocator.setProducerWindowSize(-1); // This will be set to 30s unless it's changed from embedded / testing // there is no reason to exception the config for this timeout // since the Bridge is supposed to be non-blocking and fast // We may expose this if we find a good use case serverLocator.setCallTimeout(config.getCallTimeout()); if (!config.isUseDuplicateDetection()) { log.debug( "Bridge " + config.getName() + " is configured to not use duplicate detecion, it will send messages synchronously"); } clusterLocators.add(serverLocator); Bridge bridge = new BridgeImpl( serverLocator, config.getReconnectAttempts(), config.getRetryInterval(), config.getRetryIntervalMultiplier(), config.getMaxRetryInterval(), nodeUUID, new SimpleString(config.getName()), queue, executorFactory.getExecutor(), SimpleString.toSimpleString(config.getFilterString()), SimpleString.toSimpleString(config.getForwardingAddress()), scheduledExecutor, transformer, config.isUseDuplicateDetection(), config.getUser(), config.getPassword(), !backup, server.getStorageManager()); bridges.put(config.getName(), bridge); managementService.registerBridge(bridge, config); if (start) { bridge.start(); } } public void destroyBridge(final String name) throws Exception { Bridge bridge; synchronized (this) { bridge = bridges.remove(name); if (bridge != null) { bridge.stop(); managementService.unregisterBridge(name); } } if (bridge != null) { bridge.flushExecutor(); } } // for testing public void clear() { for (Bridge bridge : bridges.values()) { try { bridge.stop(); } catch (Exception e) { log.warn(e.getMessage(), e); } } bridges.clear(); for (ClusterConnection clusterConnection : clusterConnections.values()) { try { clusterConnection.stop(); } catch (Exception e) { e.printStackTrace(); } } clearClusterConnections(); } // Private methods // ---------------------------------------------------------------------------------------------------- private void clearClusterConnections() { clusterConnections.clear(); this.defaultClusterConnection = null; } private void deployClusterConnection(final ClusterConnectionConfiguration config) throws Exception { if 
(config.getName() == null) { ClusterManagerImpl.log.warn( "Must specify a unique name for each cluster connection. This one will not be deployed."); return; } if (config.getAddress() == null) { ClusterManagerImpl.log.warn( "Must specify an address for each cluster connection. This one will not be deployed."); return; } TransportConfiguration connector = configuration.getConnectorConfigurations().get(config.getConnectorName()); if (connector == null) { log.warn( "No connector with name '" + config.getConnectorName() + "'. The cluster connection will not be deployed."); return; } if (clusterConnections.containsKey(config.getName())) { log.warn( "Cluster Configuration '" + config.getConnectorName() + "' already exists. The cluster connection will not be deployed.", new Exception("trace")); return; } ClusterConnectionImpl clusterConnection; if (config.getDiscoveryGroupName() != null) { DiscoveryGroupConfiguration dg = configuration.getDiscoveryGroupConfigurations().get(config.getDiscoveryGroupName()); if (dg == null) { ClusterManagerImpl.log.warn( "No discovery group with name '" + config.getDiscoveryGroupName() + "'. The cluster connection will not be deployed."); return; } if (log.isDebugEnabled()) { log.debug( this + " Starting a Discovery Group Cluster Connection, name=" + config.getDiscoveryGroupName() + ", dg=" + dg); } clusterConnection = new ClusterConnectionImpl( this, dg, connector, new SimpleString(config.getName()), new SimpleString(config.getAddress()), config.getMinLargeMessageSize(), config.getClientFailureCheckPeriod(), config.getConnectionTTL(), config.getRetryInterval(), config.getRetryIntervalMultiplier(), config.getMaxRetryInterval(), config.getReconnectAttempts(), config.getCallTimeout(), config.getCallFailoverTimeout(), config.isDuplicateDetection(), config.isForwardWhenNoConsumers(), config.getConfirmationWindowSize(), executorFactory, server, postOffice, managementService, scheduledExecutor, config.getMaxHops(), nodeUUID, backup, server.getConfiguration().getClusterUser(), server.getConfiguration().getClusterPassword(), config.isAllowDirectConnectionsOnly()); } else { TransportConfiguration[] tcConfigs = config.getStaticConnectors() != null ? 
connectorNameListToArray(config.getStaticConnectors()) : null; if (log.isDebugEnabled()) { log.debug(this + " defining cluster connection towards " + Arrays.toString(tcConfigs)); } clusterConnection = new ClusterConnectionImpl( this, tcConfigs, connector, new SimpleString(config.getName()), new SimpleString(config.getAddress()), config.getMinLargeMessageSize(), config.getClientFailureCheckPeriod(), config.getConnectionTTL(), config.getRetryInterval(), config.getRetryIntervalMultiplier(), config.getMaxRetryInterval(), config.getReconnectAttempts(), config.getCallTimeout(), config.getCallFailoverTimeout(), config.isDuplicateDetection(), config.isForwardWhenNoConsumers(), config.getConfirmationWindowSize(), executorFactory, server, postOffice, managementService, scheduledExecutor, config.getMaxHops(), nodeUUID, backup, server.getConfiguration().getClusterUser(), server.getConfiguration().getClusterPassword(), config.isAllowDirectConnectionsOnly()); } if (defaultClusterConnection == null) { defaultClusterConnection = clusterConnection; } managementService.registerCluster(clusterConnection, config); clusterConnections.put(config.getName(), clusterConnection); if (log.isDebugEnabled()) { log.debug("ClusterConnection.start at " + clusterConnection, new Exception("trace")); } } private Transformer instantiateTransformer(final String transformerClassName) { Transformer transformer = null; if (transformerClassName != null) { ClassLoader loader = Thread.currentThread().getContextClassLoader(); try { Class<?> clz = loader.loadClass(transformerClassName); transformer = (Transformer) clz.newInstance(); } catch (Exception e) { throw new IllegalArgumentException( "Error instantiating transformer class \"" + transformerClassName + "\"", e); } } return transformer; } private synchronized void deployBroadcastGroup(final BroadcastGroupConfiguration config) throws Exception { if (broadcastGroups.containsKey(config.getName())) { ClusterManagerImpl.log.warn( "There is already a broadcast-group with name " + config.getName() + " deployed. This one will not be deployed."); return; } InetAddress localAddress = null; if (config.getLocalBindAddress() != null) { localAddress = InetAddress.getByName(config.getLocalBindAddress()); } InetAddress groupAddress = InetAddress.getByName(config.getGroupAddress()); BroadcastGroupImpl group = new BroadcastGroupImpl( nodeUUID.toString(), config.getName(), localAddress, config.getLocalBindPort(), groupAddress, config.getGroupPort(), !backup); for (String connectorInfo : config.getConnectorInfos()) { TransportConfiguration connector = configuration.getConnectorConfigurations().get(connectorInfo); if (connector == null) { logWarnNoConnector(config.getName(), connectorInfo); return; } group.addConnector(connector); } ScheduledFuture<?> future = scheduledExecutor.scheduleWithFixedDelay( group, 0L, config.getBroadcastPeriod(), MILLISECONDS); group.setScheduledFuture(future); broadcastGroups.put(config.getName(), group); managementService.registerBroadcastGroup(group, config); } private void logWarnNoConnector(final String connectorName, final String bgName) { ClusterManagerImpl.log.warn( "There is no connector deployed with name '" + connectorName + "'. 
The broadcast group with name '" + bgName + "' will not be deployed."); } private TransportConfiguration[] connectorNameListToArray(final List<String> connectorNames) { TransportConfiguration[] tcConfigs = (TransportConfiguration[]) Array.newInstance(TransportConfiguration.class, connectorNames.size()); int count = 0; for (String connectorName : connectorNames) { TransportConfiguration connector = configuration.getConnectorConfigurations().get(connectorName); if (connector == null) { ClusterManagerImpl.log.warn( "No connector defined with name '" + connectorName + "'. The bridge will not be deployed."); return null; } tcConfigs[count++] = connector; } return tcConfigs; } }
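/*
 * Lifecycle sketch (editorial addition, assumed wiring, not taken from HornetQServerImpl):
 * deploy() registers the configured broadcast groups and cluster connections, start() brings them
 * up together with the bridges, and stop() tears everything down and closes the cluster locators.
 * 'clusterManager' stands for an already constructed ClusterManagerImpl instance.
 */
void runClusteringLifecycle(ClusterManagerImpl clusterManager) throws Exception {
  clusterManager.deploy(); // register broadcast groups and cluster connections from the Configuration
  clusterManager.start();  // start groups/connections and deploy bridges (backup nodes skip parts of this)
  // ... server serves traffic ...
  clusterManager.stop();   // stop bridges, groups and connections, then close the cluster ServerLocators
}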
/** * A FailureDeadlockTest * * @author <a href="mailto:[email protected]">Tim Fox</a> */ public class FailureDeadlockTest extends ServiceTestBase { private static final Logger log = Logger.getLogger(FailureDeadlockTest.class); private HornetQServer server; private JMSServerManagerImpl jmsServer; private HornetQConnectionFactory cf1; private HornetQConnectionFactory cf2; @Override protected void setUp() throws Exception { super.setUp(); Configuration conf = createDefaultConfig(); conf.setSecurityEnabled(false); conf.getAcceptorConfigurations().add(new TransportConfiguration(INVM_ACCEPTOR_FACTORY)); server = createServer(false, conf); jmsServer = new JMSServerManagerImpl(server); jmsServer.setContext(new NullInitialContext()); jmsServer.start(); cf1 = HornetQJMSClient.createConnectionFactoryWithoutHA( JMSFactoryType.CF, new TransportConfiguration(INVM_CONNECTOR_FACTORY)); cf2 = HornetQJMSClient.createConnectionFactoryWithoutHA( JMSFactoryType.CF, new TransportConfiguration(INVM_CONNECTOR_FACTORY)); } @Override protected void tearDown() throws Exception { cf1.close(); cf2.close(); if (jmsServer != null && jmsServer.isStarted()) { jmsServer.stop(); } super.tearDown(); } // https://jira.jboss.org/jira/browse/JBMESSAGING-1702 // Test that two failures concurrently executing and calling the same exception listener // don't deadlock public void testDeadlock() throws Exception { for (int i = 0; i < 100; i++) { final Connection conn1 = cf1.createConnection(); Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE); RemotingConnection rc1 = ((ClientSessionInternal) ((HornetQSession) sess1).getCoreSession()).getConnection(); final Connection conn2 = cf2.createConnection(); Session sess2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE); RemotingConnection rc2 = ((ClientSessionInternal) ((HornetQSession) sess2).getCoreSession()).getConnection(); ExceptionListener listener1 = new ExceptionListener() { public void onException(final JMSException exception) { try { conn2.close(); } catch (Exception e) { FailureDeadlockTest.log.error("Failed to close connection2", e); } } }; conn1.setExceptionListener(listener1); conn2.setExceptionListener(listener1); Failer f1 = new Failer(rc1); Failer f2 = new Failer(rc2); f1.start(); f2.start(); f1.join(); f2.join(); conn1.close(); conn2.close(); } } private class Failer extends Thread { RemotingConnection conn; Failer(final RemotingConnection conn) { this.conn = conn; } @Override public void run() { conn.fail(new HornetQException(HornetQException.NOT_CONNECTED, "blah")); } } // https://jira.jboss.org/jira/browse/JBMESSAGING-1703 // Make sure that failing a connection removes it from the connection manager and can't be // returned in a subsequent // call public void testUsingDeadConnection() throws Exception { for (int i = 0; i < 100; i++) { final Connection conn1 = cf1.createConnection(); Session sess1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE); RemotingConnection rc1 = ((ClientSessionInternal) ((HornetQSession) sess1).getCoreSession()).getConnection(); rc1.fail(new HornetQException(HornetQException.NOT_CONNECTED, "blah")); try { conn1.createSession(false, Session.AUTO_ACKNOWLEDGE); fail("should throw exception"); } catch (JMSException e) { // pass } conn1.close(); } } }
/** * A PageProviderIMpl * * <p>TODO: this may be moved entirely into PagingStore as there's an one-to-one relationship here * However I want to keep this isolated as much as possible during development * * @author <a href="mailto:[email protected]">Clebert Suconic</a> */ public class PageCursorProviderImpl implements PageCursorProvider { // Constants ----------------------------------------------------- private static final Logger log = Logger.getLogger(PageCursorProviderImpl.class); boolean isTrace = log.isTraceEnabled(); // Attributes ---------------------------------------------------- private final PagingStore pagingStore; private final StorageManager storageManager; // This is the same executor used at the PageStoreImpl. One Executor per pageStore private final Executor executor; private final SoftValueHashMap<Long, PageCache> softCache; private final ConcurrentMap<Long, PageSubscription> activeCursors = new ConcurrentHashMap<Long, PageSubscription>(); // Static -------------------------------------------------------- // Constructors -------------------------------------------------- public PageCursorProviderImpl( final PagingStore pagingStore, final StorageManager storageManager, final Executor executor, final int maxCacheSize) { this.pagingStore = pagingStore; this.storageManager = storageManager; this.executor = executor; this.softCache = new SoftValueHashMap<Long, PageCache>(maxCacheSize); } // Public -------------------------------------------------------- public PagingStore getAssociatedStore() { return pagingStore; } public synchronized PageSubscription createSubscription( long cursorID, Filter filter, boolean persistent) { if (log.isDebugEnabled()) { log.debug( this.pagingStore.getAddress() + " creating subscription " + cursorID + " with filter " + filter, new Exception("trace")); } PageSubscription activeCursor = activeCursors.get(cursorID); if (activeCursor != null) { throw new IllegalStateException("Cursor " + cursorID + " had already been created"); } activeCursor = new PageSubscriptionImpl( this, pagingStore, storageManager, executor, filter, cursorID, persistent); activeCursors.put(cursorID, activeCursor); return activeCursor; } /* (non-Javadoc) * @see org.hornetq.core.paging.cursor.PageCursorProvider#createCursor() */ public synchronized PageSubscription getSubscription(long cursorID) { return activeCursors.get(cursorID); } public PagedMessage getMessage(final PagePosition pos) throws Exception { PageCache cache = getPageCache(pos); if (pos.getMessageNr() >= cache.getNumberOfMessages()) { // sanity check, this should never happen unless there's a bug throw new IllegalStateException("Invalid messageNumber passed = " + pos + " on " + cache); } return cache.getMessage(pos.getMessageNr()); } public PagedReference newReference( final PagePosition pos, final PagedMessage msg, final PageSubscription subscription) { return new PagedReferenceImpl(pos, msg, subscription); } /** * No need to synchronize this method since the private getPageCache will have a synchronized call */ public PageCache getPageCache(PagePosition pos) { return getPageCache(pos.getPageNr()); } public PageCache getPageCache(final long pageId) { try { boolean needToRead = false; PageCache cache = null; synchronized (softCache) { if (pageId > pagingStore.getCurrentWritingPage()) { return null; } cache = softCache.get(pageId); if (cache == null) { if (!pagingStore.checkPageFileExists((int) pageId)) { return null; } cache = createPageCache(pageId); needToRead = true; // anyone reading from this cache will have 
to wait reading to finish first // we also want only one thread reading this cache cache.lock(); if (isTrace) { log.trace("adding " + pageId + " into cursor = " + this.pagingStore.getAddress()); } softCache.put(pageId, cache); } } // Reading is done outside of the synchronized block, however // the page stays locked until the entire reading is finished if (needToRead) { Page page = null; try { page = pagingStore.createPage((int) pageId); storageManager.beforePageRead(); page.open(); List<PagedMessage> pgdMessages = page.read(storageManager); cache.setMessages(pgdMessages.toArray(new PagedMessage[pgdMessages.size()])); } finally { try { if (page != null) { page.close(); } } catch (Throwable ignored) { } storageManager.afterPageRead(); cache.unlock(); } } return cache; } catch (Exception e) { throw new RuntimeException( "Couldn't complete paging due to an IO Exception on Paging - " + e.getMessage(), e); } } public void addPageCache(PageCache cache) { synchronized (softCache) { softCache.put(cache.getPageId(), cache); } } public int getCacheMaxSize() { return softCache.getMaxEelements(); } public void setCacheMaxSize(final int size) { softCache.setMaxElements(size); } public int getCacheSize() { synchronized (softCache) { return softCache.size(); } } public void processReload() throws Exception { for (PageSubscription cursor : this.activeCursors.values()) { cursor.processReload(); } cleanup(); } public void stop() { for (PageSubscription cursor : activeCursors.values()) { cursor.stop(); } Future future = new Future(); executor.execute(future); while (!future.await(10000)) { log.warn("Waiting cursor provider " + this + " to finish executors" + executor); } } public void flushExecutors() { for (PageSubscription cursor : activeCursors.values()) { cursor.flushExecutors(); } Future future = new Future(); executor.execute(future); while (!future.await(10000)) { log.warn("Waiting cursor provider " + this + " to finish executors " + executor); } } public void close(PageSubscription cursor) { activeCursors.remove(cursor.getId()); scheduleCleanup(); } /* (non-Javadoc) * @see org.hornetq.core.paging.cursor.PageCursorProvider#scheduleCleanup() */ public void scheduleCleanup() { executor.execute( new Runnable() { public void run() { storageManager.setContext(storageManager.newSingleThreadContext()); try { cleanup(); } finally { storageManager.clearContext(); } } @Override public String toString() { return "PageCursorProvider:scheduleCleanup()"; } }); } public void cleanup() { ArrayList<Page> depagedPages = new ArrayList<Page>(); while (true) { if (pagingStore.lock(100)) { break; } if (!pagingStore.isStarted()) return; } synchronized (this) { try { if (!pagingStore.isStarted()) { return; } if (pagingStore.getNumberOfPages() == 0) { return; } if (log.isDebugEnabled()) { log.debug("Asserting cleanup for address " + this.pagingStore.getAddress()); } ArrayList<PageSubscription> cursorList = new ArrayList<PageSubscription>(); cursorList.addAll(activeCursors.values()); long minPage = checkMinPage(cursorList); if (minPage == pagingStore.getCurrentWritingPage() && pagingStore.getCurrentPage().getNumberOfMessages() > 0) { boolean complete = true; for (PageSubscription cursor : cursorList) { if (!cursor.isComplete(minPage)) { if (log.isDebugEnabled()) { log.debug("Cursor " + cursor + " was considered incomplete at page " + minPage); } complete = false; break; } else { if (log.isDebugEnabled()) { log.debug("Cursor " + cursor + "was considered **complete** at page " + minPage); } } } if (!pagingStore.isStarted()) { 
return; } if (complete) { if (log.isDebugEnabled()) { log.debug( "Address " + pagingStore.getAddress() + " is leaving page mode as all messages are consumed and acknowledged from the page store"); } pagingStore.forceAnotherPage(); Page currentPage = pagingStore.getCurrentPage(); storePositions(cursorList, currentPage); pagingStore.stopPaging(); // This has to be called after we stopped paging for (PageSubscription cursor : cursorList) { cursor.scheduleCleanupCheck(); } } } for (long i = pagingStore.getFirstPage(); i < minPage; i++) { Page page = pagingStore.depage(); if (page == null) { break; } depagedPages.add(page); } if (pagingStore.getNumberOfPages() == 0 || pagingStore.getNumberOfPages() == 1 && pagingStore.getCurrentPage().getNumberOfMessages() == 0) { pagingStore.stopPaging(); } else { if (log.isTraceEnabled()) { log.trace( "Couldn't cleanup page on address " + this.pagingStore.getAddress() + " as numberOfPages == " + pagingStore.getNumberOfPages() + " and currentPage.numberOfMessages = " + pagingStore.getCurrentPage().getNumberOfMessages()); } } } catch (Exception ex) { log.warn("Couldn't complete cleanup on paging", ex); return; } finally { pagingStore.unlock(); } } try { for (Page depagedPage : depagedPages) { PageCache cache; PagedMessage[] pgdMessages; synchronized (softCache) { cache = softCache.get((long) depagedPage.getPageId()); } if (isTrace) { log.trace("Removing page " + depagedPage.getPageId() + " from page-cache"); } if (cache == null) { // The page is not on cache any more // We need to read the page-file before deleting it // to make sure we remove any large-messages pending storageManager.beforePageRead(); List<PagedMessage> pgdMessagesList = null; try { depagedPage.open(); pgdMessagesList = depagedPage.read(storageManager); } finally { try { depagedPage.close(); } catch (Exception e) { } storageManager.afterPageRead(); } depagedPage.close(); pgdMessages = pgdMessagesList.toArray(new PagedMessage[pgdMessagesList.size()]); } else { pgdMessages = cache.getMessages(); } depagedPage.delete(pgdMessages); synchronized (softCache) { softCache.remove((long) depagedPage.getPageId()); } } } catch (Exception ex) { log.warn("Couldn't complete cleanup on paging", ex); return; } } /** * @param cursorList * @param currentPage * @throws Exception */ private void storePositions(ArrayList<PageSubscription> cursorList, Page currentPage) throws Exception { try { // First step: Move every cursor to the next bookmarked page (that was just created) for (PageSubscription cursor : cursorList) { cursor.confirmPosition(new PagePositionImpl(currentPage.getPageId(), -1)); } while (!storageManager.waitOnOperations(5000)) { log.warn("Couldn't complete operations on IO context " + storageManager.getContext()); } } finally { for (PageSubscription cursor : cursorList) { cursor.enableAutoCleanup(); } } } public void printDebug() { System.out.println("Debug information for PageCursorProviderImpl:"); for (PageCache cache : softCache.values()) { System.out.println("Cache " + cache); } } // Package protected --------------------------------------------- // Protected ----------------------------------------------------- /* Protected as we may let test cases to instrument the test */ protected PageCacheImpl createPageCache(final long pageId) throws Exception { return new PageCacheImpl(pagingStore.createPage((int) pageId)); } // Private ------------------------------------------------------- /** This method is synchronized because we want it to be atomic with the cursors being used */ private long 
checkMinPage(List<PageSubscription> cursorList) { long minPage = Long.MAX_VALUE; for (PageSubscription cursor : cursorList) { long firstPage = cursor.getFirstPage(); if (log.isDebugEnabled()) { log.debug( this.pagingStore.getAddress() + " has a cursor " + cursor + " with first page=" + firstPage); } if (firstPage < minPage) { minPage = firstPage; } } if (log.isDebugEnabled()) { log.debug(this.pagingStore.getAddress() + " has minPage=" + minPage); } return minPage; } // Inner classes ------------------------------------------------- }
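/*
 * Pattern sketch (editorial addition): getPageCache() above creates and locks the cache entry inside
 * the synchronized block, performs the expensive page read outside of it, and releases the per-entry
 * lock in a finally block, so concurrent readers only wait for the load rather than for each other.
 * The names below (Entry, cacheMap, readPageFromDisk) are hypothetical, not HornetQ API.
 */
Entry getOrLoad(long pageId) throws Exception {
  Entry entry;
  boolean needToRead = false;
  synchronized (cacheMap) {              // short critical section: map access only
    entry = cacheMap.get(pageId);
    if (entry == null) {
      entry = new Entry(pageId);
      entry.lock();                      // block other readers until the load finishes
      cacheMap.put(pageId, entry);
      needToRead = true;
    }
  }
  if (needToRead) {
    try {
      entry.setMessages(readPageFromDisk(pageId)); // slow I/O happens outside the synchronized block
    } finally {
      entry.unlock();                    // always release, even if the read fails
    }
  }
  return entry;
}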
/** * The connection factory * * @author <a href="mailto:[email protected]">Adrian Brock</a> * @author <a href="mailto:[email protected]">Jesper Pedersen</a> * @version $Revision: $ */ public class HornetQRAConnectionFactoryImpl implements HornetQRAConnectionFactory { /** Serial version UID */ static final long serialVersionUID = 7981708919479859360L; /** The logger */ private static final Logger log = Logger.getLogger(HornetQRAConnectionFactoryImpl.class); /** Trace enabled */ private static boolean trace = HornetQRAConnectionFactoryImpl.log.isTraceEnabled(); /** The managed connection factory */ private final HornetQRAManagedConnectionFactory mcf; /** The connection manager */ private ConnectionManager cm; /** Naming reference */ private Reference reference; /** * Constructor * * @param mcf The managed connection factory * @param cm The connection manager */ public HornetQRAConnectionFactoryImpl( final HornetQRAManagedConnectionFactory mcf, final ConnectionManager cm) { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("constructor(" + mcf + ", " + cm + ")"); } this.mcf = mcf; if (cm == null) { // This is standalone usage, no appserver this.cm = new HornetQRAConnectionManager(); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created new ConnectionManager=" + this.cm); } } else { this.cm = cm; } if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace( "Using ManagedConnectionFactory=" + mcf + ", ConnectionManager=" + cm); } } /** * Set the reference * * @param reference The reference */ public void setReference(final Reference reference) { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("setReference(" + reference + ")"); } this.reference = reference; } /** * Get the reference * * @return The reference */ public Reference getReference() { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("getReference()"); } if (reference == null) { try { reference = new Reference( this.getClass().getCanonicalName(), new SerializableObjectRefAddr("HornetQ-CF", this), ConnectionFactoryObjectFactory.class.getCanonicalName(), null); } catch (NamingException e) { HornetQRAConnectionFactoryImpl.log.error("Error while giving object Reference.", e); } } return reference; } /** * Create a queue connection * * @return The connection * @exception JMSException Thrown if the operation fails */ public QueueConnection createQueueConnection() throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createQueueConnection()"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.QUEUE_CONNECTION); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created queue connection: " + s); } return s; } /** * Create a queue connection * * @param userName The user name * @param password The password * @return The connection * @exception JMSException Thrown if the operation fails */ public QueueConnection createQueueConnection(final String userName, final String password) throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createQueueConnection(" + userName + ", ****)"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.QUEUE_CONNECTION); s.setUserName(userName); s.setPassword(password); if 
(HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created queue connection: " + s); } return s; } /** * Create a topic connection * * @return The connection * @exception JMSException Thrown if the operation fails */ public TopicConnection createTopicConnection() throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createTopicConnection()"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.TOPIC_CONNECTION); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created topic connection: " + s); } return s; } /** * Create a topic connection * * @param userName The user name * @param password The password * @return The connection * @exception JMSException Thrown if the operation fails */ public TopicConnection createTopicConnection(final String userName, final String password) throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createTopicConnection(" + userName + ", ****)"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.TOPIC_CONNECTION); s.setUserName(userName); s.setPassword(password); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created topic connection: " + s); } return s; } /** * Create a connection * * @return The connection * @exception JMSException Thrown if the operation fails */ public Connection createConnection() throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createConnection()"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.CONNECTION); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created connection: " + s); } return s; } /** * Create a connection * * @param userName The user name * @param password The password * @return The connection * @exception JMSException Thrown if the operation fails */ public Connection createConnection(final String userName, final String password) throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createConnection(" + userName + ", ****)"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.CONNECTION); s.setUserName(userName); s.setPassword(password); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created connection: " + s); } return s; } /** * Create a XA queue connection * * @return The connection * @exception JMSException Thrown if the operation fails */ public XAQueueConnection createXAQueueConnection() throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createXAQueueConnection()"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.XA_QUEUE_CONNECTION); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created queue connection: " + s); } return s; } /** * Create a XA queue connection * * @param userName The user name * @param password The password * @return The connection * @exception JMSException Thrown if the operation fails */ public XAQueueConnection createXAQueueConnection(final String userName, final String password) throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { 
HornetQRAConnectionFactoryImpl.log.trace("createXAQueueConnection(" + userName + ", ****)"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.XA_QUEUE_CONNECTION); s.setUserName(userName); s.setPassword(password); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created queue connection: " + s); } return s; } /** * Create a XA topic connection * * @return The connection * @exception JMSException Thrown if the operation fails */ public XATopicConnection createXATopicConnection() throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createXATopicConnection()"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.XA_TOPIC_CONNECTION); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created topic connection: " + s); } return s; } /** * Create a XA topic connection * * @param userName The user name * @param password The password * @return The connection * @exception JMSException Thrown if the operation fails */ public XATopicConnection createXATopicConnection(final String userName, final String password) throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createXATopicConnection(" + userName + ", ****)"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.XA_TOPIC_CONNECTION); s.setUserName(userName); s.setPassword(password); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created topic connection: " + s); } return s; } /** * Create a XA connection * * @return The connection * @exception JMSException Thrown if the operation fails */ public XAConnection createXAConnection() throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createXAConnection()"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.XA_CONNECTION); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created connection: " + s); } return s; } /** * Create a XA connection * * @param userName The user name * @param password The password * @return The connection * @exception JMSException Thrown if the operation fails */ public XAConnection createXAConnection(final String userName, final String password) throws JMSException { if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("createXAConnection(" + userName + ", ****)"); } HornetQRASessionFactoryImpl s = new HornetQRASessionFactoryImpl(mcf, cm, HornetQRAConnectionFactory.XA_CONNECTION); s.setUserName(userName); s.setPassword(password); if (HornetQRAConnectionFactoryImpl.trace) { HornetQRAConnectionFactoryImpl.log.trace("Created connection: " + s); } return s; } }
/** * This is the method in which the HornetQ server can be deployed externall outside of jBoss. * Alternatively a user can embed by using the same code as in main * * @author <a href="*****@*****.**">Andy Taylor</a> */ public class HornetQBootstrapServer extends BasicBootstrap { private static Logger log = Logger.getLogger(HornetQBootstrapServer.class); /** The deployer */ protected BeanXMLDeployer deployer; /** The deployments */ protected List<KernelDeployment> deployments = new CopyOnWriteArrayList<KernelDeployment>(); /** The arguments */ protected String[] args; private Properties properties; /** * Bootstrap the kernel from the command line * * @param args the command line arguments * @throws Exception for any error */ public static void main(final String[] args) throws Exception { maybeStartHtmlJmxAdaptor(); HornetQBootstrapServer.log.info("Starting HornetQ Server"); final HornetQBootstrapServer bootstrap = new HornetQBootstrapServer(args); bootstrap.run(); bootstrap.addShutdownHook(); } private static void maybeStartHtmlJmxAdaptor() { MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer(); String htmlAdaptorAvailable = System.getProperty("hornetq.htmladaptor.available"); if ("TRUE".equalsIgnoreCase(htmlAdaptorAvailable)) { try { String strPort = System.getProperty("hornetq.htmladaptor.port"); int htmlAdaptorPort = Integer.parseInt(strPort); String userName = System.getProperty("hornetq.htmladaptor.userName"); String password = System.getProperty("hornetq.htmladaptor.password"); final HtmlAdaptorServer adapter = new HtmlAdaptorServer(); ObjectName adapterName = new ObjectName("SimpleAgent:name=htmladapter,port=" + htmlAdaptorPort); adapter.setPort(htmlAdaptorPort); if (userName != null && password != null) { adapter.addUserAuthenticationInfo(new AuthInfo(userName, password)); } beanServer.registerMBean(adapter, adapterName); adapter.start(); Runtime.getRuntime() .addShutdownHook( new Thread() { public void run() { adapter.stop(); } }); } catch (Exception e) { log.error("Error starting up the HtmlAdaptorServer: " + e.getMessage(), e); log.error( "If you don't want this, remove the hornetq.htmladaptor.available property " + "or set it to false"); System.exit(-1); } } } /** Add a simple shutdown hook to stop the server. */ public void addShutdownHook() { String dirName = System.getProperty("hornetq.config.dir", "."); final File file = new File(dirName + "/STOP_ME"); if (file.exists()) { file.delete(); } final Timer timer = new Timer("HornetQ Server Shutdown Timer", true); timer.scheduleAtFixedRate( new TimerTask() { @Override public void run() { if (file.exists()) { try { shutDown(); timer.cancel(); } finally { Runtime.getRuntime().exit(0); } } } }, 500, 500); } @Override public void run() { try { super.run(); } catch (RuntimeException e) { HornetQBootstrapServer.log.error("Failed to start server", e); throw e; } } /** * JBoss 1.0.0 final Standalone Create a new bootstrap * * @param args the arguments * @throws Exception for any error */ public HornetQBootstrapServer(final String... args) throws Exception { super(); this.args = args; } public HornetQBootstrapServer(final KernelConfig kernelConfig, final String... 
args) throws Exception { super(kernelConfig); this.args = args; } @Override public void bootstrap() throws Throwable { super.bootstrap(); deployer = new BeanXMLDeployer(getKernel()); Runtime.getRuntime().addShutdownHook(new Shutdown()); for (String arg : args) { deploy(arg); } deployer.validate(); } /** * Undeploy a deployment * * @param deployment the deployment */ public void undeploy(final KernelDeployment deployment) throws Throwable { HornetQBootstrapServer.log.debug("Undeploying " + deployment.getName()); deployments.remove(deployment); try { deployer.undeploy(deployment); HornetQBootstrapServer.log.debug("Undeployed " + deployment.getName()); } catch (Throwable t) { HornetQBootstrapServer.log.warn("Error during undeployment: " + deployment.getName(), t); } } public KernelDeployment deploy(final String arg) throws Throwable { ClassLoader cl = Thread.currentThread().getContextClassLoader(); URL url = cl.getResource(arg); if (url == null) { url = cl.getResource("META-INF/" + arg); } // try the system classpath if (url == null) { url = getClass().getClassLoader().getResource(arg); } if (url == null) { File file = new File(arg); if (file.exists()) { url = file.toURI().toURL(); } } if (url == null) { throw new RuntimeException("Unable to find resource:" + arg); } return deploy(url); } /** Deploys a XML on the container */ public KernelDeployment deploy(final String name, final String xml) throws Throwable { ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); PrintStream printOut = new PrintStream(byteOut); printOut.print(xml); printOut.flush(); ByteArrayInputStream is = new ByteArrayInputStream(byteOut.toByteArray()); KernelDeployment deployment = deployer.deploy(name, is); deployments.add(deployment); return deployment; } /** * Deploy a url * * @param url the deployment url * @throws Throwable for any error */ protected KernelDeployment deploy(final URL url) throws Throwable { HornetQBootstrapServer.log.debug("Deploying " + url); KernelDeployment deployment = deployer.deploy(url); deployments.add(deployment); HornetQBootstrapServer.log.debug("Deployed " + url); return deployment; } public void shutDown() { log.info("Stopping HornetQ Server..."); ListIterator<KernelDeployment> iterator = deployments.listIterator(deployments.size()); while (iterator.hasPrevious()) { KernelDeployment deployment = iterator.previous(); try { undeploy(deployment); } catch (Throwable t) { HornetQBootstrapServer.log.warn("Unable to undeploy: " + deployment.getName(), t); } } } @Override protected Properties getConfigProperties() { return properties; } public void setProperties(final Properties props) { properties = props; } protected class Shutdown extends Thread { public Shutdown() { super("hornetq-shutdown-thread"); } @Override public void run() { shutDown(); } } }
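/*
 * Embedding sketch (editorial addition): mirrors what main() does when the server is started
 * programmatically. The deployment descriptor name below is the conventional one and is assumed here.
 */
void embedServer() throws Exception {
  HornetQBootstrapServer bootstrap = new HornetQBootstrapServer("hornetq-beans.xml"); // descriptor name assumed
  bootstrap.run();             // bootstraps the kernel and deploys the descriptors passed as args
  bootstrap.addShutdownHook(); // polls for a STOP_ME file and exits when it appears
  // ... later ...
  bootstrap.shutDown();        // undeploys everything in reverse order
}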
/** * A FileDeploymentManagerTest * * @author <a href="mailto:[email protected]">Tim Fox</a> */ public class FileDeploymentManagerTest extends UnitTestCase { private static final Logger log = Logger.getLogger(FileDeploymentManagerTest.class); public void testStartStop1() throws Exception { testStartStop1("fdm_test_file.xml"); } public void testStartStop2() throws Exception { testStartStop2("fdm_test_file.xml"); } public void testStartStop1WithWhitespace() throws Exception { testStartStop1("fdm test file.xml"); if (!isWindows()) { testStartStop1("fdm\ttest\tfile.xml"); } } public void testStartStop2WithWhitespace() throws Exception { testStartStop2("fdm test file.xml"); if (!isWindows()) { testStartStop2("fdm\ttest\tfile.xml"); } } private void testStartStop1(final String filename) throws Exception { FileDeploymentManager fdm = new FileDeploymentManager(Long.MAX_VALUE); FileDeploymentManagerTest.log.debug("Filename is " + filename); File file = new File("target/test-classes/"); file.mkdirs(); file = new File("target/test-classes/" + filename); FileDeploymentManagerTest.log.debug(file.getAbsoluteFile()); file.createNewFile(); FakeDeployer deployer = new FakeDeployer(filename); fdm.registerDeployer(deployer); fdm.unregisterDeployer(deployer); fdm.registerDeployer(deployer); fdm.start(); try { URI expected = file.toURI(); URI deployedUrl = deployer.deployedUri; Assert.assertTrue(expected.toString().equalsIgnoreCase(deployedUrl.toString())); deployer.deployedUri = null; fdm.start(); Assert.assertNull(deployer.deployedUri); fdm.stop(); } finally { file.delete(); fdm.stop(); } } private void testStartStop2(final String filename) throws Exception { FileDeploymentManager fdm = new FileDeploymentManager(Long.MAX_VALUE); FileDeploymentManagerTest.log.debug("Filename is " + filename); File file = new File("target/test-classes/"); file.mkdirs(); file = new File("target/test-classes/" + filename); FileDeploymentManagerTest.log.debug(file.getAbsoluteFile()); file.createNewFile(); FakeDeployer deployer = new FakeDeployer(filename); fdm.start(); try { fdm.registerDeployer(deployer); URI expected = file.toURI(); URI deployedUrl = deployer.deployedUri; Assert.assertTrue(expected.toString().equalsIgnoreCase(deployedUrl.toString())); deployer.deployedUri = null; fdm.start(); Assert.assertNull(deployer.deployedUri); fdm.stop(); } finally { file.delete(); fdm.stop(); } } public void testRegisterUnregister() throws Exception { FileDeploymentManager fdm = new FileDeploymentManager(Long.MAX_VALUE); fdm.start(); String filename1 = "fdm_test_file.xml1"; String filename2 = "fdm_test_file.xml2"; String filename3 = "fdm_test_file.xml3"; File file1 = new File("target/test-classes/"); File file2 = new File("target/test-classes/"); File file3 = new File("target/test-classes/"); file1.mkdirs(); file2.mkdirs(); file3.mkdirs(); file1 = new File("target/test-classes/" + filename1); file2 = new File("target/test-classes/" + filename2); file3 = new File("target/test-classes/" + filename3); file1.createNewFile(); file2.createNewFile(); file3.createNewFile(); FakeDeployer deployer1 = new FakeDeployer(filename1); FakeDeployer deployer2 = new FakeDeployer(filename2); FakeDeployer deployer3 = new FakeDeployer(filename3); FakeDeployer deployer4 = new FakeDeployer(filename3); // Can have multiple deployers on the same file try { URI url1 = file1.toURI(); deployer1.deploy(url1); URI url2 = file2.toURI(); deployer2.deploy(url2); URI url3 = file3.toURI(); deployer3.deploy(url3); deployer4.deploy(url3); fdm.registerDeployer(deployer1); 
fdm.registerDeployer(deployer2); fdm.registerDeployer(deployer3); fdm.registerDeployer(deployer4); Assert.assertEquals(4, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer1)); Assert.assertTrue(fdm.getDeployers().contains(deployer2)); Assert.assertTrue(fdm.getDeployers().contains(deployer3)); Assert.assertTrue(fdm.getDeployers().contains(deployer4)); Assert.assertEquals(4, fdm.getDeployed().size()); Assert.assertEquals(file1.toURI(), deployer1.deployedUri); Assert.assertEquals(file2.toURI(), deployer2.deployedUri); Assert.assertEquals(file3.toURI(), deployer3.deployedUri); Assert.assertEquals(file3.toURI(), deployer4.deployedUri); // Registering same again should do nothing fdm.registerDeployer(deployer1); Assert.assertEquals(4, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer1)); Assert.assertTrue(fdm.getDeployers().contains(deployer2)); Assert.assertTrue(fdm.getDeployers().contains(deployer3)); Assert.assertTrue(fdm.getDeployers().contains(deployer4)); Assert.assertEquals(4, fdm.getDeployed().size()); fdm.unregisterDeployer(deployer1); Assert.assertEquals(3, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer2)); Assert.assertTrue(fdm.getDeployers().contains(deployer3)); Assert.assertTrue(fdm.getDeployers().contains(deployer4)); Assert.assertEquals(3, fdm.getDeployed().size()); fdm.unregisterDeployer(deployer2); fdm.unregisterDeployer(deployer3); Assert.assertEquals(1, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer4)); Assert.assertEquals(1, fdm.getDeployed().size()); fdm.unregisterDeployer(deployer4); Assert.assertEquals(0, fdm.getDeployers().size()); Assert.assertEquals(0, fdm.getDeployed().size()); // Now unregister again - should do nothing fdm.unregisterDeployer(deployer1); Assert.assertEquals(0, fdm.getDeployers().size()); Assert.assertEquals(0, fdm.getDeployed().size()); } finally { file1.delete(); file2.delete(); file3.delete(); fdm.stop(); } } public void testRedeploy() throws Exception { FileDeploymentManager fdm = new FileDeploymentManager(Long.MAX_VALUE); fdm.start(); String filename = "fdm_test_file.xml1"; File file = new File("target/test-classes/"); file.mkdirs(); file = new File("target/test-classes/" + filename); file.createNewFile(); long oldLastModified = file.lastModified(); FakeDeployer deployer = new FakeDeployer(filename); try { URI url = file.toURI(); deployer.deploy(url); fdm.registerDeployer(deployer); Assert.assertEquals(file.toURI(), deployer.deployedUri); // Touch the file file.setLastModified(oldLastModified + 1000); deployer.redeploy(url); fdm.run(); Assert.assertEquals(1, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer)); Assert.assertEquals(1, fdm.getDeployed().size()); URI expected = file.toURI(); URI deployedUrl = deployer.deployedUri; Assert.assertTrue(expected.toString().equalsIgnoreCase(deployedUrl.toString())); Pair<URI, Deployer> pair = new Pair<URI, Deployer>(url, deployer); Assert.assertEquals(oldLastModified + 1000, fdm.getDeployed().get(pair).lastModified); deployer.reDeployedUri = null; // Scanning again should not redeploy fdm.run(); Assert.assertEquals(oldLastModified + 1000, fdm.getDeployed().get(pair).lastModified); Assert.assertNull(deployer.reDeployedUri); } finally { file.delete(); fdm.stop(); } } public void testUndeployAndDeployAgain() throws Exception { FileDeploymentManager fdm = new FileDeploymentManager(Long.MAX_VALUE); fdm.start(); String filename = 
"fdm_test_file.xml1"; File file = new File("target/test-classes/"); file.mkdirs(); file = new File("target/test-classes/" + filename); file.createNewFile(); FakeDeployer deployer = new FakeDeployer(filename); try { URI uri = file.toURI(); deployer.deploy(uri); fdm.registerDeployer(deployer); Assert.assertEquals(1, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer)); Assert.assertEquals(1, fdm.getDeployed().size()); Assert.assertEquals(file.toURI(), deployer.deployedUri); deployer.deployedUri = null; file.delete(); // This should cause undeployment deployer.undeploy(uri); Assert.assertEquals(file.toURI(), deployer.unDeployedUri); fdm.run(); Assert.assertEquals(1, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer)); Assert.assertEquals(0, fdm.getDeployed().size()); // Recreate file and it should be redeployed file.createNewFile(); deployer.deploy(uri); fdm.run(); Assert.assertEquals(1, fdm.getDeployers().size()); Assert.assertTrue(fdm.getDeployers().contains(deployer)); Assert.assertEquals(1, fdm.getDeployed().size()); Assert.assertEquals(file.toURI(), deployer.deployedUri); } finally { file.delete(); fdm.stop(); } } class FakeDeployer implements Deployer { URI deployedUri; URI unDeployedUri; URI reDeployedUri; boolean started; private final String file; public FakeDeployer(final String file) { this.file = file; } public String[] getConfigFileNames() { return new String[] {file}; } @Override public void deploy(final URI url) throws Exception { deployedUri = url; } @Override public void redeploy(final URI url) throws Exception { reDeployedUri = url; } @Override public void undeploy(final URI url) throws Exception { unDeployedUri = url; } @Override public void start() throws Exception { started = true; } @Override public void stop() throws Exception { started = false; } @Override public boolean isStarted() { return started; } } }
/** * Extends the SimpleAddressManager to allow wildcard addresses to be used. * * @author <a href="mailto:[email protected]">Andy Taylor</a> */ public class WildcardAddressManager extends SimpleAddressManager { private static final Logger log = Logger.getLogger(WildcardAddressManager.class); static final char SINGLE_WORD = '*'; static final char ANY_WORDS = '#'; static final char DELIM = '.'; static final SimpleString SINGLE_WORD_SIMPLESTRING = new SimpleString("*"); static final SimpleString ANY_WORDS_SIMPLESTRING = new SimpleString("#"); /** * These are all the addresses; we use this so we can link back from the actual address to its * linked wildcard addresses or vice versa */ private final Map<SimpleString, Address> addresses = new HashMap<SimpleString, Address>(); private final Map<SimpleString, Address> wildCardAddresses = new HashMap<SimpleString, Address>(); public WildcardAddressManager(final BindingsFactory bindingsFactory) { super(bindingsFactory); } @Override public Bindings getBindingsForRoutingAddress(final SimpleString address) throws Exception { Bindings bindings = super.getBindingsForRoutingAddress(address); // this should only happen if we're routing to an address that has no mappings when we're // running checkAllowable if (bindings == null && !wildCardAddresses.isEmpty()) { Address add = addAndUpdateAddressMap(address); if (!add.containsWildCard()) { for (Address destAdd : add.getLinkedAddresses()) { Bindings b = super.getBindingsForRoutingAddress(destAdd.getAddress()); if (b != null) { Collection<Binding> theBindings = b.getBindings(); for (Binding theBinding : theBindings) { super.addMappingInternal(address, theBinding); } } } } bindings = super.getBindingsForRoutingAddress(address); } return bindings; } /** * If the address to add the binding to contains a wildcard then a copy of the binding (with the * same underlying queue) will be added to the actual mappings. Otherwise the binding is added as * normal. * * @param binding the binding to add * @return true if the address was a new mapping */ @Override public boolean addBinding(final Binding binding) throws Exception { boolean exists = super.addBinding(binding); if (!exists) { Address add = addAndUpdateAddressMap(binding.getAddress()); if (add.containsWildCard()) { for (Address destAdd : add.getLinkedAddresses()) { super.addMappingInternal(destAdd.getAddress(), binding); } } else { for (Address destAdd : add.getLinkedAddresses()) { Bindings bindings = super.getBindingsForRoutingAddress(destAdd.getAddress()); for (Binding b : bindings.getBindings()) { super.addMappingInternal(binding.getAddress(), b); } } } } return exists; } /** * If the address is a wild card then the binding will be removed from the actual mappings for any * linked address. Otherwise it will be removed as normal.
* * @param uniqueName the name of the binding to remove * @return true if this was the last mapping for a specific address */ @Override public Binding removeBinding(final SimpleString uniqueName) throws Exception { Binding binding = super.removeBinding(uniqueName); if (binding != null) { Address add = getAddress(binding.getAddress()); if (!add.containsWildCard()) { for (Address theAddress : add.getLinkedAddresses()) { Bindings bindings = super.getBindingsForRoutingAddress(theAddress.getAddress()); if (bindings != null) { for (Binding b : bindings.getBindings()) { super.removeBindingInternal(binding.getAddress(), b.getUniqueName()); } } } } else { for (Address theAddress : add.getLinkedAddresses()) { super.removeBindingInternal(theAddress.getAddress(), uniqueName); } } removeAndUpdateAddressMap(add); } return binding; } @Override public void clear() { super.clear(); addresses.clear(); wildCardAddresses.clear(); } private Address getAddress(final SimpleString address) { Address add = new AddressImpl(address); Address actualAddress; if (add.containsWildCard()) { actualAddress = wildCardAddresses.get(address); } else { actualAddress = addresses.get(address); } return actualAddress != null ? actualAddress : add; } private synchronized Address addAndUpdateAddressMap(final SimpleString address) { Address add = new AddressImpl(address); Address actualAddress; if (add.containsWildCard()) { actualAddress = wildCardAddresses.get(address); } else { actualAddress = addresses.get(address); } if (actualAddress == null) { actualAddress = add; addAddress(address, actualAddress); } if (actualAddress.containsWildCard()) { for (Address destAdd : addresses.values()) { if (destAdd.matches(actualAddress)) { destAdd.addLinkedAddress(actualAddress); actualAddress.addLinkedAddress(destAdd); } } } else { for (Address destAdd : wildCardAddresses.values()) { if (actualAddress.matches(destAdd)) { destAdd.addLinkedAddress(actualAddress); actualAddress.addLinkedAddress(destAdd); } } } return actualAddress; } private void addAddress(final SimpleString address, final Address actualAddress) { if (actualAddress.containsWildCard()) { wildCardAddresses.put(address, actualAddress); } else { addresses.put(address, actualAddress); } } private synchronized void removeAndUpdateAddressMap(final Address address) throws Exception { // we only remove if there are no bindings left Bindings bindings = super.getBindingsForRoutingAddress(address.getAddress()); if (bindings == null || bindings.getBindings().size() == 0) { List<Address> addresses = address.getLinkedAddresses(); for (Address address1 : addresses) { address1.removeLinkedAddress(address); Bindings linkedBindings = super.getBindingsForRoutingAddress(address1.getAddress()); if (linkedBindings == null || linkedBindings.getBindings().size() == 0) { removeAddress(address1); } } removeAddress(address); } } private void removeAddress(final Address add) { if (add.containsWildCard()) { wildCardAddresses.remove(add.getAddress()); } else { addresses.remove(add.getAddress()); } } }
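A small sketch of the matching semantics the code above relies on: '*' (SINGLE_WORD) matches exactly one word, '#' (ANY_WORDS) matches any number of words, and a concrete address is tested against a wildcard via concrete.matches(wildcard), the same direction used in addAndUpdateAddressMap. The address values are hypothetical:

static void wildcardMatchingSketch() {
  Address concrete = new AddressImpl(new SimpleString("news.europe.sport"));
  Address oneWord = new AddressImpl(new SimpleString("news.*.sport")); // '*' = exactly one word
  Address anyWords = new AddressImpl(new SimpleString("news.#"));      // '#' = any number of words
  boolean a = concrete.matches(oneWord);   // should be true
  boolean b = concrete.matches(anyWords);  // should be true
  boolean c = oneWord.containsWildCard();  // true, so this address goes into wildCardAddresses
}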
/** * A FileConfiguration. This class allows the Configuration to be loaded from an XML config file. * * @author <a href="*****@*****.**">Andy Taylor</a> * @author <a href="*****@*****.**">Tim Fox</a> */ public class FileConfiguration extends ConfigurationImpl { private static final long serialVersionUID = -4766689627675039596L; private static final Logger log = Logger.getLogger(FileConfiguration.class); // Constants ------------------------------------------------------------------------ private static final String DEFAULT_CONFIGURATION_URL = "hornetq-configuration.xml"; // For a bridge, confirmations must be activated or send acknowledgements won't return public static final int DEFAULT_CONFIRMATION_WINDOW_SIZE = 1024 * 1024; // Static -------------------------------------------------------------------------- // Attributes ---------------------------------------------------------------------- private String configurationUrl = DEFAULT_CONFIGURATION_URL; private boolean started; // Public ------------------------------------------------------------------------- public synchronized void start() throws Exception { if (started) { return; } URL url = getClass().getClassLoader().getResource(configurationUrl); if (url == null) { // The URL is outside of the classloader. Trying it as a plain URL now url = new URL(configurationUrl); } FileConfiguration.log.debug("Loading server configuration from " + url); Reader reader = new InputStreamReader(url.openStream()); String xml = org.hornetq.utils.XMLUtil.readerToString(reader); xml = XMLUtil.replaceSystemProps(xml); Element e = org.hornetq.utils.XMLUtil.stringToElement(xml); FileConfigurationParser parser = new FileConfigurationParser(); // https://jira.jboss.org/browse/HORNETQ-478 - We only want to validate AIO when // starting the server // and we don't want to do it when deploying hornetq-queues.xml which uses the same parser // and XML format parser.setValidateAIO(true); parser.parseMainConfig(e, this); started = true; } public synchronized void stop() throws Exception { started = false; } public String getConfigurationUrl() { return configurationUrl; } public void setConfigurationUrl(final String configurationUrl) { this.configurationUrl = configurationUrl; } // Private ------------------------------------------------------------------------- }
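A minimal sketch of how this class is typically used to boot a server from hornetq-configuration.xml on the classpath; newHornetQServer(config, false) is the same call the tests later in this file use, and persistence is disabled purely for the example:

static HornetQServer startFromClasspathConfig() throws Exception {
  FileConfiguration config = new FileConfiguration();
  config.setConfigurationUrl("hornetq-configuration.xml"); // the default, shown for clarity
  config.start();                                          // parses the XML into this Configuration
  HornetQServer server = HornetQServers.newHornetQServer(config, false);
  server.start();
  return server;
}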
/** * Helper base class for our unit tests * * @author <a href="mailto:[email protected]">Tim Fox</a> * @author <a href="mailto:[email protected]">Clebert</a> */ public class UnitTestCase extends TestCase { // Constants ----------------------------------------------------- private static final Logger log = Logger.getLogger(UnitTestCase.class); public static final String INVM_ACCEPTOR_FACTORY = "org.hornetq.core.remoting.impl.invm.InVMAcceptorFactory"; public static final String INVM_CONNECTOR_FACTORY = "org.hornetq.core.remoting.impl.invm.InVMConnectorFactory"; public static final String NETTY_ACCEPTOR_FACTORY = "org.hornetq.core.remoting.impl.netty.NettyAcceptorFactory"; public static final String NETTY_CONNECTOR_FACTORY = "org.hornetq.core.remoting.impl.netty.NettyConnectorFactory"; // Attributes ---------------------------------------------------- private static final String testDir = System.getProperty("java.io.tmpdir", "/tmp") + "/hornetq-unit-test"; // There is a verification about thread leakages. We only fail a single thread when this happens private static Set<Thread> alreadyFailedThread = new HashSet<Thread>(); // Static -------------------------------------------------------- protected Configuration createDefaultConfig() { return createDefaultConfig(false); } protected Configuration createDefaultConfig(final boolean netty) { if (netty) { return createDefaultConfig( new HashMap<String, Object>(), INVM_ACCEPTOR_FACTORY, NETTY_ACCEPTOR_FACTORY); } else { return createDefaultConfig(new HashMap<String, Object>(), INVM_ACCEPTOR_FACTORY); } } protected static Configuration createClusteredDefaultConfig( final int index, final Map<String, Object> params, final String... acceptors) { Configuration config = createDefaultConfig(index, params, acceptors); config.setClustered(true); return config; } protected static Configuration createDefaultConfig( final int index, final Map<String, Object> params, final String... acceptors) { Configuration configuration = createBasicConfig(index); configuration.getAcceptorConfigurations().clear(); for (String acceptor : acceptors) { TransportConfiguration transportConfig = new TransportConfiguration(acceptor, params); configuration.getAcceptorConfigurations().add(transportConfig); } return configuration; } protected static ConfigurationImpl createBasicConfig() { return createBasicConfig(0); } /** * @param serverID * @return */ protected static ConfigurationImpl createBasicConfig(final int serverID) { ConfigurationImpl configuration = new ConfigurationImpl(); configuration.setSecurityEnabled(false); configuration.setBindingsDirectory(getBindingsDir(serverID, false)); configuration.setJournalMinFiles(2); configuration.setJournalDirectory(getJournalDir(serverID, false)); configuration.setJournalFileSize(100 * 1024); configuration.setJournalType(getDefaultJournalType()); configuration.setPagingDirectory(getPageDir(serverID, false)); configuration.setLargeMessagesDirectory(getLargeMessagesDir(serverID, false)); configuration.setJournalCompactMinFiles(0); configuration.setJournalCompactPercentage(0); return configuration; } protected static Configuration createDefaultConfig( final Map<String, Object> params, final String... 
acceptors) { Configuration configuration = new ConfigurationImpl(); configuration.setSecurityEnabled(false); configuration.setJMXManagementEnabled(false); configuration.setBindingsDirectory(getBindingsDir()); configuration.setJournalMinFiles(2); configuration.setJournalDirectory(getJournalDir()); configuration.setJournalFileSize(100 * 1024); configuration.setPagingDirectory(getPageDir()); configuration.setLargeMessagesDirectory(getLargeMessagesDir()); configuration.setJournalCompactMinFiles(0); configuration.setJournalCompactPercentage(0); configuration.setFileDeploymentEnabled(false); configuration.setJournalType(getDefaultJournalType()); configuration.getAcceptorConfigurations().clear(); for (String acceptor : acceptors) { TransportConfiguration transportConfig = new TransportConfiguration(acceptor, params); configuration.getAcceptorConfigurations().add(transportConfig); } return configuration; } protected static String getUDPDiscoveryAddress() { return System.getProperty("TEST-UDP-ADDRESS", "230.1.2.3"); } protected static String getUDPDiscoveryAddress(int variant) { String value = getUDPDiscoveryAddress(); int posPoint = value.lastIndexOf('.'); int last = Integer.valueOf(value.substring(posPoint + 1)); return value.substring(0, posPoint + 1) + (last + variant); } public static int getUDPDiscoveryPort() { return Integer.parseInt(System.getProperty("TEST-UDP-PORT", "6750")); } public static int getUDPDiscoveryPort(final int variant) { return getUDPDiscoveryPort() + variant; } protected static JournalType getDefaultJournalType() { if (AsynchronousFileImpl.isLoaded()) { return JournalType.ASYNCIO; } else { return JournalType.NIO; } } /** @param name */ public UnitTestCase(final String name) { super(name); } public UnitTestCase() { super(); } public static void forceGC() { WeakReference<Object> dumbReference = new WeakReference<Object>(new Object()); // A loop that waits for the GC to reclaim the reference, sleeping as little as possible while (dumbReference.get() != null) { System.gc(); try { Thread.sleep(500); } catch (InterruptedException e) { } } } public static void forceGC(Reference<?> ref, long timeout) { long waitUntil = System.currentTimeMillis() + timeout; // A loop that waits for the GC to reclaim the reference, sleeping as little as possible while (ref.get() != null && System.currentTimeMillis() < waitUntil) { ArrayList<String> list = new ArrayList<String>(); for (int i = 0; i < 1000; i++) { list.add("Some string with garbage with concatenation " + i); } list.clear(); list = null; System.gc(); try { Thread.sleep(500); } catch (InterruptedException e) { } } } // Verify that these weak references are released after a few GCs public static void checkWeakReferences(final WeakReference<?>...
references) { int i = 0; boolean hasValue = false; do { hasValue = false; if (i > 0) { UnitTestCase.forceGC(); } for (WeakReference<?> ref : references) { if (ref.get() != null) { hasValue = true; } } } while (i++ <= 30 && hasValue); for (WeakReference<?> ref : references) { Assert.assertNull(ref.get()); } } public static String threadDump(final String msg) { StringWriter str = new StringWriter(); PrintWriter out = new PrintWriter(str); Map<Thread, StackTraceElement[]> stackTrace = Thread.getAllStackTraces(); out.println("*******************************************************************************"); out.println("Complete Thread dump " + msg); for (Map.Entry<Thread, StackTraceElement[]> el : stackTrace.entrySet()) { out.println( "==============================================================================="); out.println( "Thread " + el.getKey() + " name = " + el.getKey().getName() + " id = " + el.getKey().getId() + " group = " + el.getKey().getThreadGroup()); out.println(); for (StackTraceElement traceEl : el.getValue()) { out.println(traceEl); } } out.println("==============================================================================="); out.println("End Thread dump " + msg); out.println("*******************************************************************************"); return str.toString(); } protected static TestSuite createAIOTestSuite(final Class<?> clazz) { TestSuite suite = new TestSuite(clazz.getName() + " testsuite"); if (AIOSequentialFileFactory.isSupported()) { suite.addTestSuite(clazz); } else { // System.out goes towards JUnit report System.out.println("Test " + clazz.getName() + " ignored as AIO is not available"); } return suite; } public static String dumpBytes(final byte[] bytes) { StringBuffer buff = new StringBuffer(); buff.append(System.identityHashCode(bytes) + ", size: " + bytes.length + " ["); for (int i = 0; i < bytes.length; i++) { buff.append(bytes[i]); if (i != bytes.length - 1) { buff.append(", "); } } buff.append("]"); return buff.toString(); } public static String dumbBytesHex(final byte[] buffer, final int bytesPerLine) { StringBuffer buff = new StringBuffer(); buff.append("["); for (int i = 0; i < buffer.length; i++) { buff.append(String.format("%1$2X", buffer[i])); if (i + 1 < buffer.length) { buff.append(", "); } if ((i + 1) % bytesPerLine == 0) { buff.append("\n "); } } buff.append("]"); return buff.toString(); } public static void assertEqualsByteArrays(final byte[] expected, final byte[] actual) { // assertEquals(expected.length, actual.length); for (int i = 0; i < expected.length; i++) { Assert.assertEquals("byte at index " + i, expected[i], actual[i]); } } public static void assertEqualsTransportConfigurations( final TransportConfiguration[] expected, final TransportConfiguration[] actual) { assertEquals(expected.length, actual.length); for (int i = 0; i < expected.length; i++) { Assert.assertEquals("TransportConfiguration at index " + i, expected[i], actual[i]); } } public static void assertEqualsBuffers( final int size, final HornetQBuffer expected, final HornetQBuffer actual) { // assertEquals(expected.length, actual.length); expected.readerIndex(0); actual.readerIndex(0); for (int i = 0; i < size; i++) { byte b1 = expected.readByte(); byte b2 = actual.readByte(); Assert.assertEquals("byte at index " + i, b1, b2); } expected.resetReaderIndex(); actual.resetReaderIndex(); } public static void assertEqualsByteArrays( final int length, final byte[] expected, final byte[] actual) { // we check only for the given length (the arrays might be 
// larger) Assert.assertTrue(expected.length >= length); Assert.assertTrue(actual.length >= length); for (int i = 0; i < length; i++) { Assert.assertEquals("byte at index " + i, expected[i], actual[i]); } } public static void assertSameXids(final List<Xid> expected, final List<Xid> actual) { Assert.assertNotNull(expected); Assert.assertNotNull(actual); Assert.assertEquals(expected.size(), actual.size()); for (int i = 0; i < expected.size(); i++) { Xid expectedXid = expected.get(i); Xid actualXid = actual.get(i); UnitTestCase.assertEqualsByteArrays( expectedXid.getBranchQualifier(), actualXid.getBranchQualifier()); Assert.assertEquals(expectedXid.getFormatId(), actualXid.getFormatId()); UnitTestCase.assertEqualsByteArrays( expectedXid.getGlobalTransactionId(), actualXid.getGlobalTransactionId()); } } protected static void checkNoBinding(final Context context, final String binding) { try { context.lookup(binding); Assert.fail("there must be no resource to look up for " + binding); } catch (Exception e) { } } protected static Object checkBinding(final Context context, final String binding) throws Exception { Object o = context.lookup(binding); Assert.assertNotNull(o); return o; } /** * @param connectorConfigs * @return */ protected ArrayList<String> registerConnectors( final HornetQServer server, final List<TransportConfiguration> connectorConfigs) { // The connectors need to be pre-configured at main config object but this method is taking // TransportConfigurations directly // So this will first register them at the config and then generate a list of objects ArrayList<String> connectors = new ArrayList<String>(); for (TransportConfiguration tnsp : connectorConfigs) { String name = RandomUtil.randomString(); server.getConfiguration().getConnectorConfigurations().put(name, tnsp); connectors.add(name); } return connectors; } protected static void checkFreePort(final int... ports) { for (int port : ports) { ServerSocket ssocket = null; try { ssocket = new ServerSocket(port); } catch (Exception e) { throw new IllegalStateException("port " + port + " is already bound"); } finally { if (ssocket != null) { try { ssocket.close(); } catch (IOException e) { } } } } } // Constructors -------------------------------------------------- // Protected ----------------------------------------------------- /** @return the testDir */ protected String getTestDir() { return testDir; } protected void clearData() { clearData(getTestDir()); } protected void clearData(final String testDir) { // Need to delete the root File file = new File(testDir); deleteDirectory(file); file.mkdirs(); recreateDirectory(getJournalDir(testDir)); recreateDirectory(getBindingsDir(testDir)); recreateDirectory(getPageDir(testDir)); recreateDirectory(getLargeMessagesDir(testDir)); recreateDirectory(getClientLargeMessagesDir(testDir)); recreateDirectory(getTemporaryDir(testDir)); System.out.println("deleted " + testDir); } /** @return the journalDir */ public static String getJournalDir() { return getJournalDir(testDir); } protected static String getJournalDir(final String testDir) { return testDir + "/journal"; } protected static String getJournalDir(final int index, final boolean backup) { String dir = getJournalDir(testDir) + index + "-" + (backup ? 
"B" : "L"); return dir; } /** @return the bindingsDir */ protected static String getBindingsDir() { return getBindingsDir(testDir); } /** @return the bindingsDir */ protected static String getBindingsDir(final String testDir) { return testDir + "/bindings"; } /** @return the bindingsDir */ protected static String getBindingsDir(final int index, final boolean backup) { return getBindingsDir(testDir) + index + "-" + (backup ? "B" : "L"); } /** @return the pageDir */ protected static String getPageDir() { return getPageDir(testDir); } /** @return the pageDir */ protected static String getPageDir(final String testDir) { return testDir + "/page"; } protected static String getPageDir(final int index, final boolean backup) { return getPageDir(testDir) + index + "-" + (backup ? "B" : "L"); } /** @return the largeMessagesDir */ protected static String getLargeMessagesDir() { return getLargeMessagesDir(testDir); } /** @return the largeMessagesDir */ protected static String getLargeMessagesDir(final String testDir) { return testDir + "/large-msg"; } protected static String getLargeMessagesDir(final int index, final boolean backup) { return getLargeMessagesDir(testDir) + index + "-" + (backup ? "B" : "L"); } /** @return the clientLargeMessagesDir */ protected String getClientLargeMessagesDir() { return getClientLargeMessagesDir(testDir); } /** @return the clientLargeMessagesDir */ protected String getClientLargeMessagesDir(final String testDir) { return testDir + "/client-large-msg"; } /** @return the temporaryDir */ protected String getTemporaryDir() { return getTemporaryDir(testDir); } /** @return the temporaryDir */ protected String getTemporaryDir(final String testDir) { return testDir + "/temp"; } protected static void expectHornetQException( final String message, final int errorCode, final HornetQAction action) { try { action.run(); Assert.fail(message); } catch (Exception e) { Assert.assertTrue(e instanceof HornetQException); Assert.assertEquals(errorCode, ((HornetQException) e).getCode()); } } protected static void expectHornetQException(final int errorCode, final HornetQAction action) { UnitTestCase.expectHornetQException( "must throw a HornetQException with the expected errorCode: " + errorCode, errorCode, action); } protected static void expectXAException(final int errorCode, final HornetQAction action) { try { action.run(); Assert.fail("must throw a XAException with the expected errorCode: " + errorCode); } catch (Exception e) { Assert.assertTrue(e instanceof XAException); Assert.assertEquals(errorCode, ((XAException) e).errorCode); } } public static byte getSamplebyte(final long position) { return (byte) ('a' + position % ('z' - 'a' + 1)); } // Creates a Fake LargeStream without using a real file public static InputStream createFakeLargeStream(final long size) throws Exception { return new InputStream() { private long count; private boolean closed = false; @Override public void close() throws IOException { super.close(); closed = true; } @Override public int read() throws IOException { if (closed) { throw new IOException("Stream was closed"); } if (count++ < size) { return UnitTestCase.getSamplebyte(count - 1); } else { return -1; } } }; } /** * It validates a Bean (POJO) using simple setters and getters with random values. You can pass a * list of properties to be ignored, as some properties will have a pre-defined domain (not being * possible to use random-values on them) */ protected void validateGettersAndSetters(final Object pojo, final String... 
ignoredProperties) throws Exception { HashSet<String> ignoreSet = new HashSet<String>(); for (String ignore : ignoredProperties) { ignoreSet.add(ignore); } BeanInfo info = Introspector.getBeanInfo(pojo.getClass()); PropertyDescriptor properties[] = info.getPropertyDescriptors(); for (PropertyDescriptor prop : properties) { Object value; if (prop.getPropertyType() == String.class) { value = RandomUtil.randomString(); } else if (prop.getPropertyType() == Integer.class || prop.getPropertyType() == Integer.TYPE) { value = RandomUtil.randomInt(); } else if (prop.getPropertyType() == Long.class || prop.getPropertyType() == Long.TYPE) { value = RandomUtil.randomLong(); } else if (prop.getPropertyType() == Boolean.class || prop.getPropertyType() == Boolean.TYPE) { value = RandomUtil.randomBoolean(); } else if (prop.getPropertyType() == Double.class || prop.getPropertyType() == Double.TYPE) { value = RandomUtil.randomDouble(); } else { System.out.println( "Can't validate property of type " + prop.getPropertyType() + " on " + prop.getName()); value = null; } if (value != null && prop.getWriteMethod() != null && prop.getReadMethod() == null) { System.out.println("WriteOnly property " + prop.getName() + " on " + pojo.getClass()); } else if (value != null & prop.getWriteMethod() != null && prop.getReadMethod() != null && !ignoreSet.contains(prop.getName())) { System.out.println("Validating " + prop.getName() + " type = " + prop.getPropertyType()); prop.getWriteMethod().invoke(pojo, value); Assert.assertEquals("Property " + prop.getName(), value, prop.getReadMethod().invoke(pojo)); } } } // Package protected --------------------------------------------- // Protected ----------------------------------------------------- Map<Thread, StackTraceElement[]> previousThreads; @Override protected void setUp() throws Exception { super.setUp(); OperationContextImpl.clearContext(); deleteDirectory(new File(getTestDir())); InVMRegistry.instance.clear(); // checkFreePort(TransportConstants.DEFAULT_PORT); previousThreads = Thread.getAllStackTraces(); UnitTestCase.log.info("###### starting test " + this.getClass().getName() + "." + getName()); } @Override protected void tearDown() throws Exception { cleanupPools(); Map<Thread, StackTraceElement[]> threadMap = Thread.getAllStackTraces(); for (Thread thread : threadMap.keySet()) { StackTraceElement[] stack = threadMap.get(thread); for (StackTraceElement stackTraceElement : stack) { if (stackTraceElement.getMethodName().contains("getConnectionWithRetry") && !alreadyFailedThread.contains(thread)) { alreadyFailedThread.add(thread); System.out.println( threadDump( this.getName() + " has left threads running. Look at thread " + thread.getName() + " id = " + thread.getId() + " has running locators on test " + this.getName() + " on this following dump")); fail("test left serverlocator running, this could effect other tests"); // System.exit(0); } else if (stackTraceElement.getMethodName().contains("BroadcastGroupImpl.run") && !alreadyFailedThread.contains(thread)) { alreadyFailedThread.add(thread); System.out.println( threadDump( this.getName() + " has left threads running. 
Look at thread " + thread.getName() + " id = " + thread.getId() + " is still broadcasting " + this.getName() + " on this following dump")); fail("test left broadcastgroupimpl running, this could effect other tests"); // System.exit(0); } } } Map<Thread, StackTraceElement[]> postThreads = Thread.getAllStackTraces(); boolean failedThread = false; if (postThreads.size() > previousThreads.size()) { StringBuffer buffer = new StringBuffer(); buffer.append( "*********************************************************************************\n"); buffer.append("LEAKING THREADS\n"); for (Thread aliveThread : postThreads.keySet()) { if (!aliveThread.getName().contains("SunPKCS11") && !previousThreads.containsKey(aliveThread)) { failedThread = true; buffer.append( "=============================================================================\n"); buffer.append( "Thread " + aliveThread + " is still alive with the following stackTrace:\n"); StackTraceElement[] elements = postThreads.get(aliveThread); for (StackTraceElement el : elements) { buffer.append(el + "\n"); } } } buffer.append( "*********************************************************************************\n"); System.out.println(buffer.toString()); } // assertFalse("Thread Failed", failedThread); super.tearDown(); } /** */ protected void cleanupPools() { OperationContextImpl.clearContext(); int invmSize = InVMRegistry.instance.size(); if (invmSize > 0) { InVMRegistry.instance.clear(); fail("invm registry still had acceptors registered"); } if (AsynchronousFileImpl.getTotalMaxIO() != 0) { AsynchronousFileImpl.resetMaxAIO(); Assert.fail("test did not close all its files " + AsynchronousFileImpl.getTotalMaxIO()); } // We shutdown the global pools to give a better isolation between tests ServerLocatorImpl.clearThreadPools(); } protected byte[] autoEncode(final Object... args) { int size = 0; for (Object arg : args) { if (arg instanceof Byte) { size++; } else if (arg instanceof Boolean) { size++; } else if (arg instanceof Integer) { size += 4; } else if (arg instanceof Long) { size += 8; } else if (arg instanceof Float) { size += 4; } else if (arg instanceof Double) { size += 8; } else { throw new IllegalArgumentException( "method autoEncode doesn't know how to convert " + arg.getClass() + " yet"); } } ByteBuffer buffer = ByteBuffer.allocate(size); for (Object arg : args) { if (arg instanceof Byte) { buffer.put(((Byte) arg).byteValue()); } else if (arg instanceof Boolean) { Boolean b = (Boolean) arg; buffer.put((byte) (b.booleanValue() ? 
1 : 0)); } else if (arg instanceof Integer) { buffer.putInt(((Integer) arg).intValue()); } else if (arg instanceof Long) { buffer.putLong(((Long) arg).longValue()); } else if (arg instanceof Float) { buffer.putFloat(((Float) arg).floatValue()); } else if (arg instanceof Double) { buffer.putDouble(((Double) arg).doubleValue()); } else { throw new IllegalArgumentException( "method autoEncode doesn't know how to convert " + arg.getClass() + " yet"); } } return buffer.array(); } protected void recreateDirectory(final String directory) { File file = new File(directory); deleteDirectory(file); file.mkdirs(); } protected boolean deleteDirectory(final File directory) { if (directory.isDirectory()) { String[] files = directory.list(); for (int j = 0; j < files.length; j++) { if (!deleteDirectory(new File(directory, files[j]))) { return false; } } } return directory.delete(); } protected void copyRecursive(final File from, final File to) throws Exception { if (from.isDirectory()) { if (!to.exists()) { to.mkdir(); } String[] subs = from.list(); for (String sub : subs) { copyRecursive(new File(from, sub), new File(to, sub)); } } else { InputStream in = null; OutputStream out = null; try { in = new BufferedInputStream(new FileInputStream(from)); out = new BufferedOutputStream(new FileOutputStream(to)); int b; while ((b = in.read()) != -1) { out.write(b); } } finally { if (in != null) { in.close(); } if (out != null) { out.close(); } } } } protected void assertRefListsIdenticalRefs( final List<MessageReference> l1, final List<MessageReference> l2) { if (l1.size() != l2.size()) { Assert.fail("Lists different sizes: " + l1.size() + ", " + l2.size()); } Iterator<MessageReference> iter1 = l1.iterator(); Iterator<MessageReference> iter2 = l2.iterator(); while (iter1.hasNext()) { MessageReference o1 = iter1.next(); MessageReference o2 = iter2.next(); Assert.assertTrue("expected " + o1 + " but was " + o2, o1 == o2); } } protected ServerMessage generateMessage(final long id) { ServerMessage message = new ServerMessageImpl(id, 1000); message.setMessageID(id); message.getBodyBuffer().writeString(UUID.randomUUID().toString()); message.setAddress(new SimpleString("foo")); return message; } protected MessageReference generateReference(final Queue queue, final long id) { ServerMessage message = generateMessage(id); return message.createReference(queue); } protected int calculateRecordSize(final int size, final int alignment) { return (size / alignment + (size % alignment != 0 ? 
1 : 0)) * alignment; } protected ClientMessage createTextMessage(final String s, final ClientSession clientSession) { return createTextMessage(s, true, clientSession); } protected ClientMessage createTextMessage( final String s, final boolean durable, final ClientSession clientSession) { ClientMessage message = clientSession.createMessage( HornetQTextMessage.TYPE, durable, 0, System.currentTimeMillis(), (byte) 4); message.getBodyBuffer().writeString(s); return message; } protected XidImpl newXID() { return new XidImpl( "xa1".getBytes(), 1, UUIDGenerator.getInstance().generateStringUUID().getBytes()); } protected int getMessageCount(final HornetQServer service, final String address) throws Exception { return getMessageCount(service.getPostOffice(), address); } /** * @param address * @param postOffice * @return * @throws Exception */ protected int getMessageCount(final PostOffice postOffice, final String address) throws Exception { int messageCount = 0; List<QueueBinding> bindings = getLocalQueueBindings(postOffice, address); for (QueueBinding qBinding : bindings) { messageCount += qBinding.getQueue().getMessageCount(); } return messageCount; } protected List<QueueBinding> getLocalQueueBindings( final PostOffice postOffice, final String address) throws Exception { ArrayList<QueueBinding> bindingsFound = new ArrayList<QueueBinding>(); Bindings bindings = postOffice.getBindingsForAddress(new SimpleString(address)); for (Binding binding : bindings.getBindings()) { if (binding instanceof LocalQueueBinding) { bindingsFound.add((QueueBinding) binding); } } return bindingsFound; } // Private ------------------------------------------------------- // Inner classes ------------------------------------------------- protected static interface HornetQAction { void run() throws Exception; } }
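The GC helpers above (forceGC and checkWeakReferences) are typically used as in the following minimal sketch of a hypothetical test method, asserting that an object becomes unreachable once the test drops its last strong reference:

public void testResourceIsReleased() throws Exception {
  Object resource = new Object();               // stand-in for e.g. a closed session or factory
  WeakReference<Object> ref = new WeakReference<Object>(resource);
  resource = null;                              // drop the last strong reference
  UnitTestCase.checkWeakReferences(ref);        // forces GC cycles and asserts ref.get() == null
}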
/** * A stress test written to investigate http://jira.jboss.org/jira/browse/JBMESSAGING-362 * * @author <a href="mailto:[email protected]">Ovidiu Feodorov</a> * @version <tt>$Revision$</tt> $Id$ */ public class CorruptMessageStressTest extends HornetQServerTestCase { // Constants ----------------------------------------------------- private static Logger log = Logger.getLogger(CorruptMessageStressTest.class); public static int PRODUCER_COUNT = 30; public static int MESSAGE_COUNT = 10000; // Static -------------------------------------------------------- // Attributes ---------------------------------------------------- private InitialContext ic; // Constructors -------------------------------------------------- // Public -------------------------------------------------------- public void testMultipleSenders() throws Exception { ConnectionFactory cf = (ConnectionFactory) ic.lookup("/ConnectionFactory"); Queue queue = (Queue) ic.lookup("/queue/StressTestQueue"); drainDestination(cf, queue); Connection conn = cf.createConnection(); Session[] sessions = new Session[CorruptMessageStressTest.PRODUCER_COUNT]; MessageProducer[] producers = new MessageProducer[CorruptMessageStressTest.PRODUCER_COUNT]; for (int i = 0; i < CorruptMessageStressTest.PRODUCER_COUNT; i++) { sessions[i] = conn.createSession(false, Session.AUTO_ACKNOWLEDGE); producers[i] = sessions[i].createProducer(queue); producers[i].setDeliveryMode(DeliveryMode.NON_PERSISTENT); } Thread[] threads = new Thread[CorruptMessageStressTest.PRODUCER_COUNT]; for (int i = 0; i < CorruptMessageStressTest.PRODUCER_COUNT; i++) { threads[i] = new Thread(new Sender(sessions[i], producers[i]), "Sender Thread #" + i); threads[i].start(); } // wait for the threads to finish for (int i = 0; i < CorruptMessageStressTest.PRODUCER_COUNT; i++) { threads[i].join(); } conn.close(); } // Package protected --------------------------------------------- // Protected ----------------------------------------------------- @Override protected void setUp() throws Exception { super.setUp(); // ServerManagement.start("all"); ic = getInitialContext(); createQueue("StressTestQueue"); CorruptMessageStressTest.log.debug("setup done"); } @Override protected void tearDown() throws Exception { destroyQueue("StressTestQueue"); ic.close(); super.tearDown(); } // Private ------------------------------------------------------- // Inner classes ------------------------------------------------- private class Sender implements Runnable { private final Session session; private final MessageProducer producer; private int count = 0; public Sender(final Session session, final MessageProducer producer) { this.session = session; this.producer = producer; } public void run() { while (true) { if (count == CorruptMessageStressTest.MESSAGE_COUNT) { break; } try { Message m = session.createMessage(); m.setStringProperty("XXX", "XXX-VALUE"); m.setStringProperty("YYY", "YYY-VALUE"); producer.send(m); count++; } catch (Exception e) { CorruptMessageStressTest.log.error("Sender thread failed", e); break; } } } } }
/** * A JMSClusteredTestBase * * @author <a href="mailto:[email protected]">Clebert Suconic</a> */ public class JMSClusteredTestBase extends ServiceTestBase { private static final Logger log = Logger.getLogger(JMSClusteredTestBase.class); protected HornetQServer server1; protected JMSServerManagerImpl jmsServer1; protected HornetQServer server2; protected JMSServerManagerImpl jmsServer2; protected ConnectionFactory cf1; protected ConnectionFactory cf2; protected InVMContext context1; protected InVMContext context2; private static final int MAX_HOPS = 1; // Static -------------------------------------------------------- // Attributes ---------------------------------------------------- // Constructors -------------------------------------------------- // TestCase overrides ------------------------------------------- // Public -------------------------------------------------------- // Package protected --------------------------------------------- // Protected ----------------------------------------------------- /** * @throws Exception * @throws NamingException */ protected Queue createQueue(final String name) throws Exception, NamingException { jmsServer2.createQueue(false, name, null, true, "/queue/" + name); jmsServer1.createQueue(false, name, null, true, "/queue/" + name); return (Queue) context1.lookup("/queue/" + name); } protected Topic createTopic(final String name) throws Exception, NamingException { jmsServer2.createTopic(false, name, "/topic/" + name); jmsServer1.createTopic(false, name, "/topic/" + name); return (Topic) context1.lookup("/topic/" + name); } @Override protected void setUp() throws Exception { super.setUp(); setupServer2(); setupServer1(); jmsServer1.start(); jmsServer1.activated(); jmsServer2.start(); jmsServer2.activated(); cf1 = (ConnectionFactory) HornetQJMSClient.createConnectionFactoryWithoutHA( JMSFactoryType.CF, new TransportConfiguration( InVMConnectorFactory.class.getName(), generateInVMParams(0))); cf2 = (ConnectionFactory) HornetQJMSClient.createConnectionFactoryWithoutHA( JMSFactoryType.CF, new TransportConfiguration( InVMConnectorFactory.class.getName(), generateInVMParams(1))); } /** @throws Exception */ private void setupServer2() throws Exception { List<String> toOtherServerPair = new ArrayList<String>(); toOtherServerPair.add("toServer1"); Configuration conf2 = createDefaultConfig(1, generateInVMParams(1), InVMAcceptorFactory.class.getCanonicalName()); conf2.setSecurityEnabled(false); conf2.setJMXManagementEnabled(true); conf2.setPersistenceEnabled(false); conf2 .getConnectorConfigurations() .put( "toServer1", new TransportConfiguration( InVMConnectorFactory.class.getName(), generateInVMParams(0))); conf2 .getConnectorConfigurations() .put( "server2", new TransportConfiguration( InVMConnectorFactory.class.getName(), generateInVMParams(1))); conf2.setClustered(true); conf2 .getClusterConfigurations() .add( new ClusterConnectionConfiguration( "to-server1", "jms", "server2", 1000, true, true, MAX_HOPS, 1024, toOtherServerPair, false)); JMSConfigurationImpl jmsconfig = new JMSConfigurationImpl(); // jmsconfig.getTopicConfigurations().add(new TopicConfigurationImpl("t1", "topic/t1")); server2 = HornetQServers.newHornetQServer(conf2, false); jmsServer2 = new JMSServerManagerImpl(server2, jmsconfig); context2 = new InVMContext(); jmsServer2.setContext(context2); } /** @throws Exception */ private void setupServer1() throws Exception { List<String> toOtherServerPair = new ArrayList<String>();
toOtherServerPair.add("toServer2"); Configuration conf1 = createDefaultConfig(0, generateInVMParams(0), InVMAcceptorFactory.class.getCanonicalName()); conf1.setSecurityEnabled(false); conf1.setJMXManagementEnabled(true); conf1.setPersistenceEnabled(false); conf1 .getConnectorConfigurations() .put( "toServer2", new TransportConfiguration( InVMConnectorFactory.class.getName(), generateInVMParams(1))); conf1 .getConnectorConfigurations() .put( "server1", new TransportConfiguration( InVMConnectorFactory.class.getName(), generateInVMParams(0))); conf1.setClustered(true); conf1 .getClusterConfigurations() .add( new ClusterConnectionConfiguration( "to-server2", "jms", "server1", 1000, true, true, MAX_HOPS, 1024, toOtherServerPair, false)); JMSConfigurationImpl jmsconfig = new JMSConfigurationImpl(); // jmsconfig.getTopicConfigurations().add(new TopicConfigurationImpl("t1", "topic/t1")); server1 = HornetQServers.newHornetQServer(conf1, false); jmsServer1 = new JMSServerManagerImpl(server1, jmsconfig); context1 = new InVMContext(); jmsServer1.setContext(context1); } @Override protected void tearDown() throws Exception { try { jmsServer2.stop(); server2.stop(); context2.close(); } catch (Throwable e) { log.warn("Can't stop server2", e); } ((HornetQConnectionFactory) cf1).close(); ((HornetQConnectionFactory) cf2).close(); server2 = null; jmsServer2 = null; context2 = null; cf1 = null; cf2 = null; try { jmsServer1.stop(); server1.stop(); context1.close(); } catch (Throwable e) { log.warn("Can't stop server1", e); } server1 = null; jmsServer1 = null; context1 = null; super.tearDown(); } // Private ------------------------------------------------------- // Inner classes ------------------------------------------------- protected Map<String, Object> generateInVMParams(final int node) { Map<String, Object> params = new HashMap<String, Object>(); params.put(org.hornetq.core.remoting.impl.invm.TransportConstants.SERVER_ID_PROP_NAME, node); return params; } }
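A sketch of how a concrete test might build on this base class: create a clustered queue with the createQueue helper, produce on server1 via cf1, and consume on server2 via cf2. The test class, method, and queue names are hypothetical; only standard JMS calls and the helpers defined above are used:

public class SimpleClusteredQueueTest extends JMSClusteredTestBase {
  public void testSendServer1ReceiveServer2() throws Exception {
    Queue queue = createQueue("example");
    Connection conn1 = cf1.createConnection();
    Connection conn2 = cf2.createConnection();
    try {
      conn2.start();
      Session session1 = conn1.createSession(false, Session.AUTO_ACKNOWLEDGE);
      Session session2 = conn2.createSession(false, Session.AUTO_ACKNOWLEDGE);
      // Consumer on server2 is created first so the cluster connection can redistribute to it
      MessageConsumer consumer = session2.createConsumer(queue);
      session1.createProducer(queue).send(session1.createTextMessage("hello"));
      TextMessage received = (TextMessage) consumer.receive(5000);
      assertNotNull(received);
      assertEquals("hello", received.getText());
    } finally {
      conn1.close();
      conn2.close();
    }
  }
}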
/** * @author <a href="mailto:[email protected]">Clebert Suconic</a> * @author <a href="mailto:[email protected]">Tim Fox</a> * @author <a href="mailto:[email protected]>Andy Taylor</a> */ public class PagingManagerImpl implements PagingManager { // Constants ----------------------------------------------------- // Attributes ---------------------------------------------------- private volatile boolean started = false; private final AtomicLong totalMemoryBytes = new AtomicLong(0); private final ConcurrentMap<SimpleString, PagingStore> stores = new ConcurrentHashMap<SimpleString, PagingStore>(); private final HierarchicalRepository<AddressSettings> addressSettingsRepository; private final PagingStoreFactory pagingStoreFactory; private final StorageManager storageManager; private final ConcurrentMap</*TransactionID*/ Long, PageTransactionInfo> transactions = new ConcurrentHashMap<Long, PageTransactionInfo>(); // Static // -------------------------------------------------------------------------------------------------------------------------- private static final Logger log = Logger.getLogger(PagingManagerImpl.class); // Constructors // -------------------------------------------------------------------------------------------------------------------- public PagingManagerImpl( final PagingStoreFactory pagingSPI, final StorageManager storageManager, final HierarchicalRepository<AddressSettings> addressSettingsRepository) { pagingStoreFactory = pagingSPI; this.addressSettingsRepository = addressSettingsRepository; this.storageManager = storageManager; } // Public // --------------------------------------------------------------------------------------------------------------------------- // PagingManager implementation // ----------------------------------------------------------------------------------------------------- public SimpleString[] getStoreNames() { Set<SimpleString> names = stores.keySet(); return names.toArray(new SimpleString[names.size()]); } public synchronized void reloadStores() throws Exception { List<PagingStore> reloadedStores = pagingStoreFactory.reloadStores(addressSettingsRepository); for (PagingStore store : reloadedStores) { store.start(); stores.put(store.getStoreName(), store); } } private synchronized PagingStore createPageStore(final SimpleString storeName) throws Exception { PagingStore store = stores.get(storeName); if (store == null) { store = newStore(storeName); store.start(); stores.put(storeName, store); } return store; } public void deletePageStore(final SimpleString storeName) throws Exception { PagingStore store = stores.remove(storeName); if (store != null) { store.stop(); } } /** stores is a ConcurrentHashMap, so we don't need to synchronize this method */ public PagingStore getPageStore(final SimpleString storeName) throws Exception { PagingStore store = stores.get(storeName); if (store == null) { store = createPageStore(storeName); } return store; } /** * this will be set by the postOffice itself. There is no way to set this on the constructor as * the PagingManager is constructed before the postOffice. 
(There is a one-to-one relationship * here) */ public void setPostOffice(final PostOffice postOffice) { pagingStoreFactory.setPostOffice(postOffice); } public void addTransaction(final PageTransactionInfo pageTransaction) { transactions.put(pageTransaction.getTransactionID(), pageTransaction); } public void removeTransaction(final long id) { transactions.remove(id); } public PageTransactionInfo getTransaction(final long id) { return transactions.get(id); } // HornetQComponent implementation // ------------------------------------------------------------------------------------------------ public boolean isStarted() { return started; } public synchronized void start() throws Exception { if (started) { return; } pagingStoreFactory.setPagingManager(this); pagingStoreFactory.setStorageManager(storageManager); reloadStores(); started = true; } public synchronized void stop() throws Exception { if (!started) { return; } started = false; for (PagingStore store : stores.values()) { store.stop(); } pagingStoreFactory.stop(); totalMemoryBytes.set(0); } public void resumeDepages() { if (!started) { // If stop the server while depaging, the server may call a rollback, // the rollback may addSizes back and that would fire a globalDepage. // Because of that we must ignore any startGlobalDepage calls, // and this check needs to be done outside of the lock return; } synchronized (this) { for (PagingStore store : stores.values()) { if (store.isPaging()) { store.startDepaging(); } } } } /* (non-Javadoc) * @see org.hornetq.core.paging.PagingManager#getGlobalSize() */ public long getTotalMemory() { return totalMemoryBytes.get(); } /* (non-Javadoc) * @see org.hornetq.core.paging.PagingManager#addGlobalSize(long) */ public long addSize(final long size) { return totalMemoryBytes.addAndGet(size); } // Package protected --------------------------------------------- // Protected ----------------------------------------------------- // Private ------------------------------------------------------- private PagingStore newStore(final SimpleString address) throws Exception { return pagingStoreFactory.newStore( address, addressSettingsRepository.getMatch(address.toString())); } // Inner classes ------------------------------------------------- }
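A rough sketch of the PagingManager contract as exercised above: getPageStore() lazily creates and starts a store for an address, while addSize()/getTotalMemory() maintain the global byte count. The helper method and the use of getEncodeSize() as the byte delta are assumptions for illustration only:

static void recordPagedMessage(final PagingManager pagingManager, final ServerMessage message)
    throws Exception {
  PagingStore store = pagingManager.getPageStore(message.getAddress()); // created on first use
  long total = pagingManager.addSize(message.getEncodeSize());          // assumed size metric
  System.out.println("store " + store.getStoreName() + ", total paged bytes = " + total);
}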
/** * @author <a href="mailto:[email protected]">Tim Fox</a> * @version <tt>$Revision$</tt> */ public class SessionSendMessage extends MessagePacket { // Constants ----------------------------------------------------- private static final Logger log = Logger.getLogger(SessionSendMessage.class); // Attributes ---------------------------------------------------- private boolean requiresResponse; // Static -------------------------------------------------------- // Constructors -------------------------------------------------- public SessionSendMessage(final MessageInternal message, final boolean requiresResponse) { super(PacketImpl.SESS_SEND, message); this.requiresResponse = requiresResponse; } public SessionSendMessage() { super(PacketImpl.SESS_SEND, new ServerMessageImpl()); } // Public -------------------------------------------------------- public boolean isRequiresResponse() { return requiresResponse; } // Package protected --------------------------------------------- // Protected ----------------------------------------------------- @Override public HornetQBuffer encode(final RemotingConnection connection) { HornetQBuffer buffer = message.getEncodedBuffer(); // Sanity check if (buffer.writerIndex() != message.getEndOfMessagePosition()) { throw new IllegalStateException("Wrong encode position"); } buffer.writeBoolean(requiresResponse); size = buffer.writerIndex(); // Write standard headers int len = size - DataConstants.SIZE_INT; buffer.setInt(0, len); buffer.setByte(DataConstants.SIZE_INT, getType()); buffer.setLong(DataConstants.SIZE_INT + DataConstants.SIZE_BYTE, channelID); // Position reader for reading by Netty buffer.readerIndex(0); message.resetCopied(); return buffer; } @Override public void decodeRest(final HornetQBuffer buffer) { // Buffer comes in after having read standard headers and positioned at Beginning of body part message.decodeFromBuffer(buffer); int ri = buffer.readerIndex(); requiresResponse = buffer.readBoolean(); buffer.readerIndex(ri); } // Private ------------------------------------------------------- // Inner classes ------------------------------------------------- }
/** * A FloodServerTest * * @author <a href="mailto:[email protected]">Tim Fox</a> */ public class FloodServerTest extends UnitTestCase { // Constants ----------------------------------------------------- private static final Logger log = Logger.getLogger(FloodServerTest.class); private HornetQServer server; private JMSServerManagerImpl serverManager; private InVMContext initialContext; private final String topicName = "my-topic"; // Static -------------------------------------------------------- // Attributes ---------------------------------------------------- // Constructors -------------------------------------------------- // TestCase overrides ------------------------------------------- // Public -------------------------------------------------------- // Package protected --------------------------------------------- // Protected ----------------------------------------------------- @Override protected void setUp() throws Exception { super.setUp(); Configuration conf = new ConfigurationImpl(); conf.setSecurityEnabled(false); conf.setJMXManagementEnabled(true); conf.getAcceptorConfigurations() .add(new TransportConfiguration(NettyAcceptorFactory.class.getName())); server = HornetQServers.newHornetQServer(conf, false); server.start(); serverManager = new JMSServerManagerImpl(server); initialContext = new InVMContext(); serverManager.setContext(initialContext); serverManager.start(); serverManager.activated(); serverManager.createTopic(topicName, topicName); registerConnectionFactory(); } @Override protected void tearDown() throws Exception { serverManager.stop(); server.stop(); server = null; serverManager = null; super.tearDown(); } // Private ------------------------------------------------------- // Inner classes ------------------------------------------------- private void registerConnectionFactory() throws Exception { int retryInterval = 1000; double retryIntervalMultiplier = 1.0; int reconnectAttempts = -1; boolean failoverOnServerShutdown = true; long callTimeout = 30000; List<Pair<TransportConfiguration, TransportConfiguration>> connectorConfigs = new ArrayList<Pair<TransportConfiguration, TransportConfiguration>>(); connectorConfigs.add( new Pair<TransportConfiguration, TransportConfiguration>( new TransportConfiguration(NettyConnectorFactory.class.getName()), null)); List<String> jndiBindings = new ArrayList<String>(); jndiBindings.add("/cf"); serverManager.createConnectionFactory( "ManualReconnectionToSingleServerTest", connectorConfigs, null, 1000, HornetQClient.DEFAULT_CONNECTION_TTL, callTimeout, HornetQClient.DEFAULT_CACHE_LARGE_MESSAGE_CLIENT, HornetQClient.DEFAULT_MIN_LARGE_MESSAGE_SIZE, HornetQClient.DEFAULT_CONSUMER_WINDOW_SIZE, HornetQClient.DEFAULT_CONSUMER_MAX_RATE, HornetQClient.DEFAULT_CONFIRMATION_WINDOW_SIZE, HornetQClient.DEFAULT_PRODUCER_WINDOW_SIZE, HornetQClient.DEFAULT_PRODUCER_MAX_RATE, false, false, false, HornetQClient.DEFAULT_AUTO_GROUP, false, HornetQClient.DEFAULT_CONNECTION_LOAD_BALANCING_POLICY_CLASS_NAME, HornetQClient.DEFAULT_ACK_BATCH_SIZE, HornetQClient.DEFAULT_ACK_BATCH_SIZE, HornetQClient.DEFAULT_USE_GLOBAL_POOLS, HornetQClient.DEFAULT_SCHEDULED_THREAD_POOL_MAX_SIZE, HornetQClient.DEFAULT_THREAD_POOL_MAX_SIZE, retryInterval, retryIntervalMultiplier, 1000, reconnectAttempts, failoverOnServerShutdown, null, jndiBindings); } public void testFoo() {} public void _testFlood() throws Exception { ConnectionFactory cf = (ConnectionFactory) initialContext.lookup("/cf"); final int numProducers = 20; final int numConsumers = 20; final int numMessages 
= 10000; ProducerThread[] producers = new ProducerThread[numProducers]; for (int i = 0; i < numProducers; i++) { producers[i] = new ProducerThread(cf, numMessages); } ConsumerThread[] consumers = new ConsumerThread[numConsumers]; for (int i = 0; i < numConsumers; i++) { consumers[i] = new ConsumerThread(cf, numMessages); } for (int i = 0; i < numConsumers; i++) { consumers[i].start(); } for (int i = 0; i < numProducers; i++) { producers[i].start(); } for (int i = 0; i < numConsumers; i++) { consumers[i].join(); } for (int i = 0; i < numProducers; i++) { producers[i].join(); } } class ProducerThread extends Thread { private final Connection connection; private final Session session; private final MessageProducer producer; private final int numMessages; ProducerThread(final ConnectionFactory cf, final int numMessages) throws Exception { connection = cf.createConnection(); session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); producer = session.createProducer(HornetQJMSClient.createTopic("my-topic")); producer.setDeliveryMode(DeliveryMode.NON_PERSISTENT); this.numMessages = numMessages; } @Override public void run() { try { byte[] bytes = new byte[1000]; BytesMessage message = session.createBytesMessage(); message.writeBytes(bytes); for (int i = 0; i < numMessages; i++) { producer.send(message); // if (i % 1000 == 0) // { // log.info("Producer " + this + " sent " + i); // } } connection.close(); } catch (Exception e) { e.printStackTrace(); } } } class ConsumerThread extends Thread { private final Connection connection; private final Session session; private final MessageConsumer consumer; private final int numMessages; ConsumerThread(final ConnectionFactory cf, final int numMessages) throws Exception { connection = cf.createConnection(); connection.start(); session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); consumer = session.createConsumer(HornetQJMSClient.createTopic("my-topic")); this.numMessages = numMessages; } @Override public void run() { try { for (int i = 0; i < numMessages; i++) { Message msg = consumer.receive(); if (msg == null) { FloodServerTest.log.error("message is null"); break; } // if (i % 1000 == 0) // { // log.info("Consumer " + this + " received " + i); // } } connection.close(); } catch (Exception e) { e.printStackTrace(); } } } }