/**
 * Forces a duplicated queue binding into the bindings journal, reproducing the scenario that
 * originally generated duplicates; the server must still be able to start despite it.
 *
 * @throws Exception on any failure
 */
@Test
public void testForceDuplicationOnBindings() throws Exception {
  queue = server.createQueue(QUEUE, QUEUE, null, true, false);
  ClientSessionFactory factory = locator.createSessionFactory();
  ClientSession session = factory.createSession(false, false, false);
  ClientProducer producer = session.createProducer(QUEUE);
  // Persist one durable message so the queue has journal content before the duplication.
  producer.send(session.createMessage(true));
  session.commit();
  long queueID = server.getStorageManager().generateUniqueID();
  long txID = server.getStorageManager().generateUniqueID();
  // Forcing a situation where the server would unexpectedly create a duplicated queue. The server
  // should still start normally
  LocalQueueBinding newBinding =
      new LocalQueueBinding(
          QUEUE,
          new QueueImpl(queueID, QUEUE, QUEUE, null, true, false, null, null, null, null, null),
          server.getNodeID());
  // Write the extra binding straight through the storage manager, bypassing the post office.
  server.getStorageManager().addQueueBinding(txID, newBinding);
  server.getStorageManager().commitBindings(txID);
  server.stop();
  // a duplicate binding would impede the server from starting
  server.start();
  waitForServer(server);
  server.stop();
}
/**
 * Verifies {@code AddressControl.getNumberOfBytesPerPage()}: with no explicit settings it falls
 * back to the default journal file size, and after a restart with a 1K page size it reports 1024.
 */
@Test
public void testGetNumberOfBytesPerPage() throws Exception {
  SimpleString address = RandomUtil.randomSimpleString();
  session.createQueue(address, address, true);
  AddressControl addressControl = createManagementControl(address);
  // Default page size equals the default journal file size.
  Assert.assertEquals(
      HornetQDefaultConfiguration.getDefaultJournalFileSize(),
      addressControl.getNumberOfBytesPerPage());
  session.close();
  server.stop();
  // Restart with an explicit 1K page size for this address.
  AddressSettings addressSettings = new AddressSettings();
  addressSettings.setPageSizeBytes(1024);
  server.getAddressSettingsRepository().addMatch(address.toString(), addressSettings);
  server.start();
  ServerLocator locator2 =
      HornetQClient.createServerLocatorWithoutHA(
          new TransportConfiguration(UnitTestCase.INVM_CONNECTOR_FACTORY));
  addServerLocator(locator2);
  ClientSessionFactory sf2 = createSessionFactory(locator2);
  session = sf2.createSession(false, true, false);
  session.createQueue(address, address, true);
  Assert.assertEquals(1024, addressControl.getNumberOfBytesPerPage());
}
@Override @After public void tearDown() throws Exception { if (clientSession != null) { try { clientSession.close(); } catch (HornetQException e1) { // } } if (clientSessionXa != null) { try { clientSessionXa.close(); } catch (HornetQException e1) { // } } if (locator != null) { locator.close(); } if (server != null && server.isStarted()) { try { server.stop(); } catch (Exception e1) { // } } server = null; clientSession = null; super.tearDown(); }
/**
 * Two HornetQ servers registered with a single MBeanServer under different JMX domains: each
 * server's MBean must appear exactly while that server is started, independently of the other.
 */
public void test2HornetQServersManagedFrom1MBeanServer() throws Exception {
  // Server 0 uses the default JMX domain.
  Configuration config_0 = createDefaultConfig();
  config_0.setJMXManagementEnabled(true);

  // Server 1 gets its own JMX domain and its own in-VM server id.
  String jmxDomain_1 = HornetQDefaultConfiguration.DEFAULT_JMX_DOMAIN + ".1";
  Configuration config_1 = createBasicConfig();
  Map<String, Object> acceptorParams = new HashMap<String, Object>();
  acceptorParams.put(TransportConstants.SERVER_ID_PROP_NAME, 1);
  config_1
      .getAcceptorConfigurations()
      .add(new TransportConfiguration(InVMAcceptorFactory.class.getName(), acceptorParams));
  config_1.setJMXDomain(jmxDomain_1);
  config_1.setJMXManagementEnabled(true);

  HornetQServer server_0 = HornetQServers.newHornetQServer(config_0, mbeanServer, false);
  HornetQServer server_1 = HornetQServers.newHornetQServer(config_1, mbeanServer, false);

  ObjectNameBuilder builder_0 = ObjectNameBuilder.DEFAULT;
  ObjectNameBuilder builder_1 = ObjectNameBuilder.create(jmxDomain_1);

  // Nothing registered before either server starts.
  assertServerMBeanRegistered(builder_0, false);
  assertServerMBeanRegistered(builder_1, false);

  server_0.start();
  assertServerMBeanRegistered(builder_0, true);
  assertServerMBeanRegistered(builder_1, false);

  server_1.start();
  assertServerMBeanRegistered(builder_0, true);
  assertServerMBeanRegistered(builder_1, true);

  server_0.stop();
  assertServerMBeanRegistered(builder_0, false);
  assertServerMBeanRegistered(builder_1, true);

  server_1.stop();
  assertServerMBeanRegistered(builder_0, false);
  assertServerMBeanRegistered(builder_1, false);
}

/** Asserts whether the server MBean named by the given domain builder is (not) registered. */
private void assertServerMBeanRegistered(ObjectNameBuilder builder, boolean registered)
    throws Exception {
  if (registered) {
    checkResource(builder.getHornetQServerObjectName());
  } else {
    checkNoResource(builder.getHornetQServerObjectName());
  }
}
/**
 * Stops the JMS server manager and the core server it wraps, then clears both fields so a
 * stale instance cannot be reused.
 *
 * @throws Exception if either component fails to stop
 */
protected void stopServer() throws Exception {
  // The manager wraps the core server, so it is stopped first.
  serverManager.stop();
  server.stop();
  server = null;
  serverManager = null;
}
/**
 * Closes client-side resources and stops the server before the superclass teardown.
 *
 * <p>Fix: the original ran {@code sf.close(); locator.close(); server.stop();} sequentially, so a
 * failure in an earlier call leaked the later resources. Each step is now chained in its own
 * {@code finally} so all of them run.
 */
@Override
public void tearDown() throws Exception {
  try {
    try {
      sf.close();
    } finally {
      try {
        locator.close();
      } finally {
        server.stop();
      }
    }
  } finally {
    super.tearDown();
  }
}
/**
 * Stops both server/JMS-server pairs and their JNDI contexts, logging (not rethrowing) failures
 * so the remainder of the teardown always runs, then clears all references.
 *
 * <p>Fix: the second catch block logged "Can't stop server2" although it guards the shutdown of
 * server1 (copy-paste bug); also {@code cf2} is now cleared like {@code cf1}.
 */
@Override
protected void tearDown() throws Exception {
  try {
    jmsServer2.stop();
    server2.stop();
    context2.close();
  } catch (Throwable e) {
    log.warn("Can't stop server2", e);
  }
  ((HornetQConnectionFactory) cf1).close();
  ((HornetQConnectionFactory) cf2).close();
  server2 = null;
  jmsServer2 = null;
  context2 = null;
  cf1 = null;
  cf2 = null;
  try {
    jmsServer1.stop();
    server1.stop();
    context1.close();
  } catch (Throwable e) {
    log.warn("Can't stop server1", e);
  }
  server1 = null;
  jmsServer1 = null;
  context1 = null;
  super.tearDown();
}
/** Stops the server (if one was created) and clears test state before the superclass teardown. */
@Override
protected void tearDown() throws Exception {
  if (server != null) {
    server.stop();
  }
  connectorConfig = null;
  server = null;
  super.tearDown();
}
/** Stops the JMS server manager and the core server, clearing both fields afterwards. */
@Override
protected void tearDown() throws Exception {
  // Manager first: it wraps the core server.
  serverManager.stop();
  server.stop();
  serverManager = null;
  server = null;
  super.tearDown();
}
/**
 * Closes every producer and consumer tester, then the shared session factory and locator, and
 * finally stops the messaging service.
 */
@Override
public void tearDown() throws Exception {
  for (Tester producerTester : producers) {
    producerTester.close();
  }
  for (Tester consumerTester : consumers) {
    consumerTester.close();
  }
  sharedSf.close();
  sharedLocator.close();
  messagingService.stop();
  super.tearDown();
}
/**
 * Starts a server whose acceptor binds to {@code acceptorHost} and attempts a client connection
 * against {@code connectorHost}; the attempt must succeed exactly when {@code mustConnect} is
 * {@code true} (connecting to an interface the acceptor is not bound to must fail).
 */
public void testConnection(
    final String acceptorHost, final String connectorHost, final boolean mustConnect)
    throws Exception {
  System.out.println(
      "acceptor=" + acceptorHost + ", connector=" + connectorHost + ", mustConnect=" + mustConnect);

  // Server side: one acceptor bound to the requested host.
  Map<String, Object> acceptorParams = new HashMap<String, Object>();
  acceptorParams.put(getHostPropertyKey(), acceptorHost);
  TransportConfiguration acceptorConfig =
      new TransportConfiguration(getAcceptorFactoryClassName(), acceptorParams);
  Set<TransportConfiguration> transportConfigs = new HashSet<TransportConfiguration>();
  transportConfigs.add(acceptorConfig);
  Configuration config = createDefaultConfig(true);
  config.setAcceptorConfigurations(transportConfigs);
  HornetQServer messagingService = createServer(false, config);
  messagingService.start();

  // Client side: a connector aimed at (possibly) another host.
  Map<String, Object> connectorParams = new HashMap<String, Object>();
  connectorParams.put(getHostPropertyKey(), connectorHost);
  TransportConfiguration connectorConfig =
      new TransportConfiguration(getConnectorFactoryClassName(), connectorParams);
  try {
    ClientSessionFactory sf = HornetQClient.createClientSessionFactory(connectorConfig);
    if (mustConnect) {
      ClientSession session = sf.createSession(false, true, true);
      session.close();
      System.out.println("connection OK");
    } else {
      try {
        sf.createSession(false, true, true);
        Assert.fail(
            "session creation must fail because connector must not be able to connect to the server bound to another network interface");
      } catch (Exception e) {
        // expected: the connector targets an interface the acceptor is not listening on
      }
    }
  } finally {
    if (messagingService != null) {
      messagingService.stop();
    }
  }
}
/**
 * Best-effort shutdown: closes the locator, then the session factory and the server, swallowing
 * close/stop failures so teardown always completes; factory and server fields are cleared.
 */
protected void stopServer() throws Exception {
  locator.close();
  try {
    factory.close();
  } catch (Throwable ignored) {
    // best-effort close
  }
  factory = null;
  try {
    server.stop();
  } catch (Throwable ignored) {
    // best-effort stop
  }
  server = null;
}
/**
 * A consumer created with a valid filter expression on an existing queue must be returned
 * non-null.
 *
 * <p>Fix: removed a duplicated {@code cf.setBlockOnNonDurableSend(true)} call (copy-paste).
 */
public void testCreateConsumerWithFilter() throws Exception {
  HornetQServer service = createServer(false);
  try {
    service.start();
    ClientSessionFactory cf = createInVMFactory();
    cf.setProducerMaxRate(99);
    cf.setBlockOnNonDurableSend(true);
    ClientSessionInternal clientSession =
        (ClientSessionInternal) cf.createSession(false, true, true);
    clientSession.createQueue(queueName, queueName, false);
    ClientConsumer consumer = clientSession.createConsumer(queueName, "foo=bar");
    Assert.assertNotNull(consumer);
    clientSession.close();
  } finally {
    service.stop();
  }
}
/**
 * Creating a consumer on a queue that does not exist must throw
 * {@code HornetQException.QUEUE_DOES_NOT_EXIST}.
 *
 * <p>Fixes: removed a duplicated {@code cf.setBlockOnNonDurableSend(true)} call and corrected
 * the {@code assertEquals} argument order (expected value first).
 */
public void testCreateConsumerNoQ() throws Exception {
  HornetQServer service = createServer(false);
  try {
    service.start();
    ClientSessionFactory cf = createInVMFactory();
    cf.setProducerMaxRate(99);
    cf.setBlockOnNonDurableSend(true);
    ClientSessionInternal clientSession =
        (ClientSessionInternal) cf.createSession(false, true, true);
    try {
      clientSession.createConsumer(queueName);
      Assert.fail("should throw exception");
    } catch (HornetQException e) {
      Assert.assertEquals(HornetQException.QUEUE_DOES_NOT_EXIST, e.getCode());
    }
    clientSession.close();
  } finally {
    service.stop();
  }
}
/**
 * Creating a consumer with a syntactically invalid filter must throw
 * {@code HornetQException.INVALID_FILTER_EXPRESSION}.
 *
 * <p>Fixes: removed a duplicated {@code cf.setBlockOnNonDurableSend(true)} call and corrected
 * the {@code assertEquals} argument order (expected value first).
 */
public void testCreateConsumerWithInvalidFilter() throws Exception {
  HornetQServer service = createServer(false);
  try {
    service.start();
    ClientSessionFactory cf = createInVMFactory();
    cf.setProducerMaxRate(99);
    cf.setBlockOnNonDurableSend(true);
    ClientSessionInternal clientSession =
        (ClientSessionInternal) cf.createSession(false, true, true);
    clientSession.createQueue(queueName, queueName, false);
    try {
      clientSession.createConsumer(queueName, "foobar");
      Assert.fail("should throw exception");
    } catch (HornetQException e) {
      Assert.assertEquals(HornetQException.INVALID_FILTER_EXPRESSION, e.getCode());
    }
    clientSession.close();
  } finally {
    service.stop();
  }
}
/**
 * This will simulate what would happen with topic creation where a single record is supposed to
 * be created on the journal
 *
 * @throws Exception
 */
@Test
public void testDuplicateDestinationsOnTopic() throws Exception {
  // Repeated create/stop cycles must never leave more than one binding record on the journal.
  for (int i = 0; i < 5; i++) {
    if (server.locateQueue(SimpleString.toSimpleString("jms.topic.tt")) == null) {
      server.createQueue(
          SimpleString.toSimpleString("jms.topic.tt"),
          SimpleString.toSimpleString("jms.topic.tt"),
          SimpleString.toSimpleString(HornetQServerImpl.GENERIC_IGNORED_FILTER),
          true,
          false);
    }
    server.stop();
    // With the server stopped, read the bindings journal directly off disk.
    SequentialFileFactory messagesFF = new NIOSequentialFileFactory(getBindingsDir(), null);
    JournalImpl messagesJournal =
        new JournalImpl(1024 * 1024, 2, 0, 0, messagesFF, "hornetq-bindings", "bindings", 1);
    messagesJournal.start();
    LinkedList<RecordInfo> infos = new LinkedList<RecordInfo>();
    messagesJournal.load(infos, null, null);
    // Count only queue-binding records; exactly one must exist after each cycle.
    int bindings = 0;
    for (RecordInfo info : infos) {
      if (info.getUserRecordType() == JournalRecordIds.QUEUE_BINDING_RECORD) {
        bindings++;
      }
    }
    assertEquals(1, bindings);
    System.out.println("Bindings: " + bindings);
    messagesJournal.stop();
    // Restart for the next iteration; the final iteration leaves the server stopped.
    if (i < 4) server.start();
  }
}
private void restartServer() throws Exception { server.stop(); server = null; server = createServer(true, configuration); server.getAddressSettingsRepository().addMatch(address.toString(), qs); // start the server server.start(); AddressSettings qs1 = new AddressSettings(); qs1.setLastValueQueue(true); server.getAddressSettingsRepository().addMatch(address.toString(), qs1); // then we create a client as normal locator.close(); locator = HornetQClient.createServerLocatorWithoutHA( new TransportConfiguration(UnitTestCase.INVM_CONNECTOR_FACTORY)); locator.setBlockOnAcknowledge(true); locator.setAckBatchSize(0); ClientSessionFactory sessionFactory = createSessionFactory(locator); clientSession = sessionFactory.createSession(false, true, true); clientSessionXa = sessionFactory.createSession(true, false, false); }
/**
 * This would recreate the scenario where a queue was duplicated: a queue delete is blocked
 * mid-flight (via a semaphore inside {@code deleteMatchingReferences}) while the same queue is
 * re-created, and the server must still restart afterwards.
 *
 * @throws Exception
 */
@Test
public void testHangDuplicateQueues() throws Exception {
  // Held by the test to park the deleting thread inside deleteMatchingReferences.
  final Semaphore blocked = new Semaphore(1);
  // Counted down once the delete has actually reached the blocking point.
  final CountDownLatch latchDelete = new CountDownLatch(1);

  /** Queue whose reference deletion signals the test and then waits on {@code blocked}. */
  class MyQueueWithBlocking extends QueueImpl {
    /** Pass-through constructor; all parameters are forwarded to {@link QueueImpl}. */
    public MyQueueWithBlocking(
        final long id,
        final SimpleString address,
        final SimpleString name,
        final Filter filter,
        final PageSubscription pageSubscription,
        final boolean durable,
        final boolean temporary,
        final ScheduledExecutorService scheduledExecutor,
        final PostOffice postOffice,
        final StorageManager storageManager,
        final HierarchicalRepository<AddressSettings> addressSettingsRepository,
        final Executor executor) {
      super(
          id,
          address,
          name,
          filter,
          pageSubscription,
          durable,
          temporary,
          scheduledExecutor,
          postOffice,
          storageManager,
          addressSettingsRepository,
          executor);
    }

    @Override
    public synchronized int deleteMatchingReferences(final int flushLimit, final Filter filter)
        throws Exception {
      // Tell the test we are here, then wait until it releases us.
      latchDelete.countDown();
      blocked.acquire();
      blocked.release();
      return super.deleteMatchingReferences(flushLimit, filter);
    }
  }

  /** Factory that hands out the blocking queue above instead of plain QueueImpl. */
  class LocalFactory extends QueueFactoryImpl {
    public LocalFactory(
        final ExecutorFactory executorFactory,
        final ScheduledExecutorService scheduledExecutor,
        final HierarchicalRepository<AddressSettings> addressSettingsRepository,
        final StorageManager storageManager) {
      super(executorFactory, scheduledExecutor, addressSettingsRepository, storageManager);
    }

    @Override
    public Queue createQueue(
        final long persistenceID,
        final SimpleString address,
        final SimpleString name,
        final Filter filter,
        final PageSubscription pageSubscription,
        final boolean durable,
        final boolean temporary) {
      queue =
          new MyQueueWithBlocking(
              persistenceID,
              address,
              name,
              filter,
              pageSubscription,
              durable,
              temporary,
              scheduledExecutor,
              postOffice,
              storageManager,
              addressSettingsRepository,
              executorFactory.getExecutor());
      return queue;
    }
  }

  LocalFactory queueFactory =
      new LocalFactory(
          server.getExecutorFactory(),
          server.getScheduledPool(),
          server.getAddressSettingsRepository(),
          server.getStorageManager());
  queueFactory.setPostOffice(server.getPostOffice());
  ((HornetQServerImpl) server).replaceQueueFactory(queueFactory);
  queue = server.createQueue(QUEUE, QUEUE, null, true, false);
  // Take the permit now so the delete thread will block inside deleteMatchingReferences.
  blocked.acquire();
  ClientSessionFactory factory = locator.createSessionFactory();
  ClientSession session = factory.createSession(false, false, false);
  ClientProducer producer = session.createProducer(QUEUE);
  producer.send(session.createMessage(true));
  session.commit();
  Thread tDelete =
      new Thread() {
        @Override
        public void run() {
          try {
            server.destroyQueue(QUEUE);
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      };
  tDelete.start();
  // Wait until the delete is parked inside the queue, then try to re-create the same queue.
  Assert.assertTrue(latchDelete.await(10, TimeUnit.SECONDS));
  try {
    server.createQueue(QUEUE, QUEUE, null, true, false);
  } catch (Exception expected) {
    // duplicate creation while the delete is in-flight is allowed to fail
  }
  blocked.release();
  server.stop();
  tDelete.join();
  session.close();
  // a duplicate binding would impede the server from starting
  server.start();
  waitForServer(server);
  server.stop();
}
/** Stops the server and closes the locator before delegating to the superclass teardown. */
@After
public void tearDown() throws Exception {
  server.stop();
  locator.close();
  super.tearDown();
}
/**
 * Two colocated servers, each requesting one FULL backup on the other: verifies cluster topology,
 * backup synchronization, cross-assignment of node IDs, the ports of the generated backup
 * acceptors/connectors, and that data directories are shared or distinct depending on the HA
 * policy under test.
 */
@Test
public void testSimpleDistributionBackupStrategyFull() throws Exception {
  HornetQServer server0 = createServer(0, 1, BackupStrategy.FULL);
  HornetQServer server1 = createServer(1, 0, BackupStrategy.FULL);
  TransportConfiguration liveConnector0 =
      getConnectorTransportConfiguration("liveConnector" + 0, 0);
  TransportConfiguration liveConnector1 =
      getConnectorTransportConfiguration("liveConnector" + 1, 1);
  try (ServerLocator serverLocator = HornetQClient.createServerLocatorWithoutHA(liveConnector0)) {
    server0.start();
    server1.start();
    // Connect to both lives and wait until each has a remote backup in place.
    ClientSessionFactory sessionFactory0 = serverLocator.createSessionFactory(liveConnector0);
    waitForRemoteBackup(sessionFactory0, 10);
    ClientSessionFactory sessionFactory1 = serverLocator.createSessionFactory(liveConnector1);
    waitForRemoteBackup(sessionFactory1, 10);
    Topology topology = serverLocator.getTopology();
    Collection<TopologyMemberImpl> members = topology.getMembers();
    assertEquals(members.size(), 2);
    // Each live server must host exactly one colocated backup for the other.
    Map<String, HornetQServer> backupServers0 =
        server0.getClusterManager().getHAManager().getBackupServers();
    assertEquals(backupServers0.size(), 1);
    Map<String, HornetQServer> backupServers1 =
        server1.getClusterManager().getHAManager().getBackupServers();
    assertEquals(backupServers1.size(), 1);
    HornetQServer backupServer0 = backupServers0.values().iterator().next();
    HornetQServer backupServer1 = backupServers1.values().iterator().next();
    waitForRemoteBackupSynchronization(backupServer0);
    waitForRemoteBackupSynchronization(backupServer1);
    // The backup hosted on one server backs up the *other* server's node id.
    assertEquals(server0.getNodeID(), backupServer1.getNodeID());
    assertEquals(server1.getNodeID(), backupServer0.getNodeID());
    // Generated backup acceptors use the expected offset ports.
    Set<TransportConfiguration> backupAcceptors0 =
        backupServer0.getConfiguration().getAcceptorConfigurations();
    assertEquals(1, backupAcceptors0.size());
    assertEquals("5545", backupAcceptors0.iterator().next().getParams().get("port"));
    Set<TransportConfiguration> backupAcceptors1 =
        backupServer1.getConfiguration().getAcceptorConfigurations();
    assertEquals(1, backupAcceptors1.size());
    assertEquals("5546", backupAcceptors1.iterator().next().getParams().get("port"));
    // Each backup carries its own live connector plus a remote connector to the peer.
    Map<String, TransportConfiguration> connectorConfigurations0 =
        backupServer0.getConfiguration().getConnectorConfigurations();
    assertEquals(2, connectorConfigurations0.size());
    assertEquals("5545", connectorConfigurations0.get("liveConnector0").getParams().get("port"));
    assertEquals("5446", connectorConfigurations0.get("remoteConnector0").getParams().get("port"));
    Map<String, TransportConfiguration> connectorConfigurations1 =
        backupServer1.getConfiguration().getConnectorConfigurations();
    assertEquals(2, connectorConfigurations1.size());
    assertEquals("5546", connectorConfigurations1.get("liveConnector1").getParams().get("port"));
    assertEquals("5445", connectorConfigurations1.get("remoteConnector1").getParams().get("port"));
    if (policyType == HAPolicy.POLICY_TYPE.COLOCATED_SHARED_STORE) {
      // Shared store: backup and the server it protects must point at the SAME directories.
      assertEquals(
          server0.getConfiguration().getJournalDirectory(),
          backupServer1.getConfiguration().getJournalDirectory());
      assertEquals(
          server0.getConfiguration().getBindingsDirectory(),
          backupServer1.getConfiguration().getBindingsDirectory());
      assertEquals(
          server0.getConfiguration().getLargeMessagesDirectory(),
          backupServer1.getConfiguration().getLargeMessagesDirectory());
      assertEquals(
          server0.getConfiguration().getPagingDirectory(),
          backupServer1.getConfiguration().getPagingDirectory());
      assertEquals(
          server1.getConfiguration().getJournalDirectory(),
          backupServer0.getConfiguration().getJournalDirectory());
      assertEquals(
          server1.getConfiguration().getBindingsDirectory(),
          backupServer0.getConfiguration().getBindingsDirectory());
      assertEquals(
          server1.getConfiguration().getLargeMessagesDirectory(),
          backupServer0.getConfiguration().getLargeMessagesDirectory());
      assertEquals(
          server1.getConfiguration().getPagingDirectory(),
          backupServer0.getConfiguration().getPagingDirectory());
    } else {
      // Replication: backup directories must be DISTINCT from the protected server's.
      assertNotEquals(
          server0.getConfiguration().getJournalDirectory(),
          backupServer1.getConfiguration().getJournalDirectory());
      assertNotEquals(
          server0.getConfiguration().getBindingsDirectory(),
          backupServer1.getConfiguration().getBindingsDirectory());
      assertNotEquals(
          server0.getConfiguration().getLargeMessagesDirectory(),
          backupServer1.getConfiguration().getLargeMessagesDirectory());
      assertNotEquals(
          server0.getConfiguration().getPagingDirectory(),
          backupServer1.getConfiguration().getPagingDirectory());
      assertNotEquals(
          server1.getConfiguration().getJournalDirectory(),
          backupServer0.getConfiguration().getJournalDirectory());
      assertNotEquals(
          server1.getConfiguration().getBindingsDirectory(),
          backupServer0.getConfiguration().getBindingsDirectory());
      assertNotEquals(
          server1.getConfiguration().getLargeMessagesDirectory(),
          backupServer0.getConfiguration().getLargeMessagesDirectory());
      assertNotEquals(
          server1.getConfiguration().getPagingDirectory(),
          backupServer0.getConfiguration().getPagingDirectory());
    }
  } finally {
    server0.stop();
    server1.stop();
  }
}
/**
 * Stress test: sends 100k non-durable 1K messages in 2000-message transactions, then consumes
 * and acknowledges them all with the same batch size.
 *
 * <p>Fixes: removed a redundant second {@code server.start()} inside the try block (the server is
 * already started above), and the finally block now closes the session before its factory and
 * closes the factory even when session creation failed (the original skipped {@code sf.close()}
 * in that case).
 *
 * @param netty whether to use the Netty transport (otherwise in-VM)
 * @throws Exception on any failure
 */
public void doTestStressSend(final boolean netty) throws Exception {
  HornetQServer server = createServer(false, netty);
  server.start();
  ServerLocator locator = createNonHALocator(netty);
  ClientSessionFactory sf = createSessionFactory(locator);
  ClientSession session = null;
  final int batchSize = 2000;
  final int numberOfMessages = 100000;
  try {
    session = sf.createSession(false, false);
    session.createQueue("address", "queue");
    ClientProducer producer = session.createProducer("address");
    ClientMessage message = session.createMessage(false);
    message.getBodyBuffer().writeBytes(new byte[1024]);
    // Produce in committed batches of batchSize.
    for (int i = 0; i < numberOfMessages; i++) {
      producer.send(message);
      if (i % batchSize == 0) {
        System.out.println("Sent " + i);
        session.commit();
      }
    }
    session.commit();
    session.close();
    // Consume everything back with the same batching.
    session = sf.createSession(false, false);
    ClientConsumer consumer = session.createConsumer("queue");
    session.start();
    for (int i = 0; i < numberOfMessages; i++) {
      ClientMessage msg = consumer.receive(5000);
      Assert.assertNotNull(msg);
      msg.acknowledge();
      if (i % batchSize == 0) {
        System.out.println("Consumed " + i);
        session.commit();
      }
    }
    session.commit();
  } finally {
    try {
      if (session != null) {
        session.close();
      }
      sf.close();
    } catch (Exception e) {
      e.printStackTrace();
    }
    locator.close();
    server.stop();
  }
}
public void doTestStressSend(final boolean netty) throws Exception { // first set up the server Map<String, Object> params = new HashMap<String, Object>(); params.put(TransportConstants.PORT_PROP_NAME, 5445); params.put(TransportConstants.HOST_PROP_NAME, "localhost"); params.put(TransportConstants.USE_NIO_PROP_NAME, true); // minimize threads to maximize possibility for deadlock params.put(TransportConstants.NIO_REMOTING_THREADS_PROPNAME, 1); params.put(TransportConstants.BATCH_DELAY, 50); Configuration config = createDefaultConfig(params, ServiceTestBase.NETTY_ACCEPTOR_FACTORY); HornetQServer server = createServer(true, config); server.getConfiguration().setThreadPoolMaxSize(2); server.start(); // now the client side Map<String, Object> connectionParams = new HashMap<String, Object>(); connectionParams.put(TransportConstants.PORT_PROP_NAME, 5445); connectionParams.put(TransportConstants.HOST_PROP_NAME, "localhost"); connectionParams.put(TransportConstants.USE_NIO_PROP_NAME, true); connectionParams.put(TransportConstants.BATCH_DELAY, 50); connectionParams.put(TransportConstants.NIO_REMOTING_THREADS_PROPNAME, 6); final TransportConfiguration transpConf = new TransportConfiguration(NettyConnectorFactory.class.getName(), connectionParams); final ServerLocator locator = createNonHALocator(netty); // each thread will do this number of transactions final int numberOfMessages = 100; // these must all be the same final int numProducers = 30; final int numConsumerProducers = 30; final int numConsumers = 30; // each produce, consume+produce and consume increments this counter final AtomicInteger totalCount = new AtomicInteger(0); // the total we expect if all producers, consumer-producers and // consumers complete normally int totalExpectedCount = (numProducers + numConsumerProducers + numConsumerProducers) * numberOfMessages; // each group gets a separate connection final Connection connectionProducer; final Connection connectionConsumerProducer; final Connection 
connectionConsumer; // create the 2 queues used in the test ClientSessionFactory sf = locator.createSessionFactory(transpConf); ClientSession session = sf.createTransactedSession(); session.createQueue("jms.queue.queue", "jms.queue.queue"); session.createQueue("jms.queue.queue2", "jms.queue.queue2"); session.commit(); sf.close(); session.close(); locator.close(); // create and start JMS connections HornetQConnectionFactory cf = HornetQJMSClient.createConnectionFactoryWithoutHA(JMSFactoryType.CF, transpConf); connectionProducer = cf.createConnection(); connectionProducer.start(); connectionConsumerProducer = cf.createConnection(); connectionConsumerProducer.start(); connectionConsumer = cf.createConnection(); connectionConsumer.start(); // these threads produce messages on the the first queue for (int i = 0; i < numProducers; i++) { new Thread() { @Override public void run() { Session session = null; try { session = connectionProducer.createSession(true, Session.SESSION_TRANSACTED); MessageProducer messageProducer = session.createProducer(HornetQDestination.createQueue("queue")); messageProducer.setDeliveryMode(DeliveryMode.PERSISTENT); for (int i = 0; i < numberOfMessages; i++) { BytesMessage message = session.createBytesMessage(); message.writeBytes(new byte[3000]); message.setStringProperty("Service", "LoadShedService"); message.setStringProperty("Action", "testAction"); messageProducer.send(message); session.commit(); totalCount.incrementAndGet(); } } catch (Exception e) { throw new RuntimeException(e); } finally { if (session != null) { try { session.close(); } catch (Exception e) { e.printStackTrace(); } } } } }.start(); } // these threads just consume from the one and produce on a second queue for (int i = 0; i < numConsumerProducers; i++) { new Thread() { @Override public void run() { Session session = null; try { session = connectionConsumerProducer.createSession(true, Session.SESSION_TRANSACTED); MessageConsumer consumer = 
session.createConsumer(HornetQDestination.createQueue("queue")); MessageProducer messageProducer = session.createProducer(HornetQDestination.createQueue("queue2")); messageProducer.setDeliveryMode(DeliveryMode.PERSISTENT); for (int i = 0; i < numberOfMessages; i++) { BytesMessage message = (BytesMessage) consumer.receive(5000); if (message == null) { return; } message = session.createBytesMessage(); message.writeBytes(new byte[3000]); message.setStringProperty("Service", "LoadShedService"); message.setStringProperty("Action", "testAction"); messageProducer.send(message); session.commit(); totalCount.incrementAndGet(); } } catch (Exception e) { throw new RuntimeException(e); } finally { if (session != null) { try { session.close(); } catch (Exception e) { e.printStackTrace(); } } } } }.start(); } // these threads consume from the second queue for (int i = 0; i < numConsumers; i++) { new Thread() { @Override public void run() { Session session = null; try { session = connectionConsumer.createSession(true, Session.SESSION_TRANSACTED); MessageConsumer consumer = session.createConsumer(HornetQDestination.createQueue("queue2")); for (int i = 0; i < numberOfMessages; i++) { BytesMessage message = (BytesMessage) consumer.receive(5000); if (message == null) { return; } session.commit(); totalCount.incrementAndGet(); } } catch (Exception e) { throw new RuntimeException(e); } finally { if (session != null) { try { session.close(); } catch (Exception e) { e.printStackTrace(); } } } } }.start(); } // check that the overall transaction count reaches the expected number, // which would indicate that the system didn't stall int timeoutCounter = 0; int maxSecondsToWait = 60; while (timeoutCounter < maxSecondsToWait && totalCount.get() < totalExpectedCount) { timeoutCounter++; Thread.sleep(1000); System.out.println( "Not done yet.. " + (maxSecondsToWait - timeoutCounter) + "; " + totalCount.get()); } System.out.println("Done.." 
+ totalCount.get() + ", expected " + totalExpectedCount); Assert.assertEquals("Possible deadlock", totalExpectedCount, totalCount.get()); System.out.println("After assert"); // attempt cleaning up (this is not in a finally, still needs some work) connectionProducer.close(); connectionConsumerProducer.close(); connectionConsumer.close(); server.stop(); }
private void internalTestCleanup(final JournalType journalType) throws Throwable { setupServer(journalType); ClientSession session = sf.createSession(false, true, true); ClientProducer prod = session.createProducer(CompactingStressTest.AD1); for (int i = 0; i < 500; i++) { prod.send(session.createMessage(true)); } session.commit(); prod.close(); ClientConsumer cons = session.createConsumer(CompactingStressTest.Q2); prod = session.createProducer(CompactingStressTest.AD2); session.start(); for (int i = 0; i < 200; i++) { System.out.println("Iteration " + i); // Sending non transactionally, so it would test non transactional stuff on the journal for (int j = 0; j < 1000; j++) { Message msg = session.createMessage(true); msg.getBodyBuffer().writeBytes(new byte[1024]); prod.send(msg); } // I need to guarantee a roundtrip to the server, to make sure everything is persisted session.commit(); for (int j = 0; j < 1000; j++) { ClientMessage msg = cons.receive(2000); Assert.assertNotNull(msg); msg.acknowledge(); } // I need to guarantee a roundtrip to the server, to make sure everything is persisted session.commit(); } Assert.assertNull(cons.receiveImmediate()); session.close(); server.stop(); setupServer(journalType); server.start(); session = sf.createSession(false, true, true); cons = session.createConsumer(CompactingStressTest.Q1); session.start(); for (int i = 0; i < 500; i++) { ClientMessage msg = cons.receive(1000); Assert.assertNotNull(msg); msg.acknowledge(); } Assert.assertNull(cons.receiveImmediate()); prod = session.createProducer(CompactingStressTest.AD2); session.close(); }
/**
 * Seeds AD3 with TOT_AD3 durable messages, then runs a fast producer and a fast consumer
 * concurrently (with interleaved slow transactional sends to AD1) against a restarted server,
 * and after another restart verifies Q1, Q2 and Q3 contain exactly the expected messages.
 *
 * @param journalType the journal implementation under test
 * @throws Throwable any failure from the main flow or from either worker thread
 */
public void internalTestMultiProducer(final JournalType journalType) throws Throwable {
  setupServer(journalType);
  ClientSession session = sf.createSession(false, false);
  try {
    // Seed AD3 transactionally, committing every 100 messages.
    ClientProducer producer = session.createProducer(CompactingStressTest.AD3);
    ClientMessage msg = session.createMessage(true);
    for (int i = 0; i < CompactingStressTest.TOT_AD3; i++) {
      producer.send(msg);
      if (i % 100 == 0) {
        session.commit();
      }
    }
    session.commit();
  } finally {
    session.close();
  }
  server.stop();
  setupServer(journalType);
  // Number of slow (transactional) messages actually sent to AD1; read back at the end.
  final AtomicInteger numberOfMessages = new AtomicInteger(0);
  final int NUMBER_OF_FAST_MESSAGES = 100000;
  final int SLOW_INTERVAL = 100;
  // Both workers signal readiness, then start together on latchStart.
  final CountDownLatch latchReady = new CountDownLatch(2);
  final CountDownLatch latchStart = new CountDownLatch(1);

  /** Sends 100k auto-commit messages to AD2, with a slow transactional send to AD1 every 100. */
  class FastProducer extends Thread {
    // First failure observed by this thread; rethrown by the test after join().
    Throwable e;

    FastProducer() {
      super("Fast-Thread");
    }

    @Override
    public void run() {
      ClientSession session = null;
      ClientSession sessionSlow = null;
      latchReady.countDown();
      try {
        UnitTestCase.waitForLatch(latchStart);
        session = sf.createSession(true, true);
        sessionSlow = sf.createSession(false, false);
        ClientProducer prod = session.createProducer(CompactingStressTest.AD2);
        ClientProducer slowProd = sessionSlow.createProducer(CompactingStressTest.AD1);
        for (int i = 0; i < NUMBER_OF_FAST_MESSAGES; i++) {
          if (i % SLOW_INTERVAL == 0) {
            // Commit the slow session only every 5th slow message, leaving some in-flight.
            if (numberOfMessages.incrementAndGet() % 5 == 0) {
              sessionSlow.commit();
            }
            slowProd.send(session.createMessage(true));
          }
          ClientMessage msg = session.createMessage(true);
          prod.send(msg);
        }
        sessionSlow.commit();
      } catch (Throwable e) {
        this.e = e;
      } finally {
        try {
          session.close();
        } catch (Throwable e) {
          this.e = e;
        }
        try {
          sessionSlow.close();
        } catch (Throwable e) {
          this.e = e;
        }
      }
    }
  }

  /** Drains the 100k fast messages from Q2 and checks nothing extra is left. */
  class FastConsumer extends Thread {
    // First failure observed by this thread; rethrown by the test after join().
    Throwable e;

    FastConsumer() {
      super("Fast-Consumer");
    }

    @Override
    public void run() {
      ClientSession session = null;
      latchReady.countDown();
      try {
        UnitTestCase.waitForLatch(latchStart);
        session = sf.createSession(true, true);
        session.start();
        ClientConsumer cons = session.createConsumer(CompactingStressTest.Q2);
        for (int i = 0; i < NUMBER_OF_FAST_MESSAGES; i++) {
          ClientMessage msg = cons.receive(60 * 1000);
          msg.acknowledge();
        }
        Assert.assertNull(cons.receiveImmediate());
      } catch (Throwable e) {
        this.e = e;
      } finally {
        try {
          session.close();
        } catch (Throwable e) {
          this.e = e;
        }
      }
    }
  }

  FastConsumer f1 = new FastConsumer();
  f1.start();
  FastProducer p1 = new FastProducer();
  p1.start();
  UnitTestCase.waitForLatch(latchReady);
  latchStart.countDown();
  p1.join();
  if (p1.e != null) {
    throw p1.e;
  }
  f1.join();
  if (f1.e != null) {
    throw f1.e;
  }
  sf.close();
  server.stop();
  // Final restart: verify exactly the slow messages on Q1, Q2 empty, and the AD3 seed on Q3.
  setupServer(journalType);
  ClientSession sess = null;
  try {
    sess = sf.createSession(true, true);
    ClientConsumer cons = sess.createConsumer(CompactingStressTest.Q1);
    sess.start();
    for (int i = 0; i < numberOfMessages.intValue(); i++) {
      ClientMessage msg = cons.receive(60000);
      Assert.assertNotNull(msg);
      msg.acknowledge();
    }
    Assert.assertNull(cons.receiveImmediate());
    cons.close();
    cons = sess.createConsumer(CompactingStressTest.Q2);
    Assert.assertNull(cons.receiveImmediate());
    cons.close();
    cons = sess.createConsumer(CompactingStressTest.Q3);
    for (int i = 0; i < CompactingStressTest.TOT_AD3; i++) {
      ClientMessage msg = cons.receive(60000);
      Assert.assertNotNull(msg);
      msg.acknowledge();
    }
    Assert.assertNull(cons.receiveImmediate());
  } finally {
    try {
      sess.close();
    } catch (Throwable ignored) {
    }
  }
}
/**
 * Checks {@code AddressControl.getNumberOfPages()} and {@code getAddressSize()} while an
 * address is pushed over the paging threshold one message at a time.
 */
@Test
public void testGetNumberOfPages() throws Exception {
  // Restart with persistence enabled and a tiny page size so paging starts quickly.
  session.close();
  server.stop();
  server.getConfiguration().setPersistenceEnabled(true);

  SimpleString address = RandomUtil.randomSimpleString();

  AddressSettings settings = new AddressSettings();
  settings.setPageSizeBytes(1024);
  settings.setMaxSizeBytes(10 * 1024);
  final int NUMBER_MESSAGES_BEFORE_PAGING = 5;
  server.getAddressSettingsRepository().addMatch(address.toString(), settings);
  server.start();

  ServerLocator inVmLocator =
      HornetQClient.createServerLocatorWithoutHA(
          new TransportConfiguration(UnitTestCase.INVM_CONNECTOR_FACTORY));
  addServerLocator(inVmLocator);
  ClientSessionFactory sessionFactory = createSessionFactory(inVmLocator);
  session = sessionFactory.createSession(false, true, false);
  session.start();
  session.createQueue(address, address, true);

  QueueImpl queueOnServer = (QueueImpl) server.locateQueue(address);
  ClientProducer producer = session.createProducer(address);

  // Fill the address up to (but not past) the point where paging begins.
  for (int i = 0; i < NUMBER_MESSAGES_BEFORE_PAGING; i++) {
    sendMessageWithBody(producer, 512);
  }
  session.commit();

  AddressControl addressControl = createManagementControl(address);
  Assert.assertEquals(0, addressControl.getNumberOfPages());

  // Each additional message grows the page store; assert the page count after each one.
  sendMessageWithBody(producer, 512);
  session.commit();
  Assert.assertEquals(1, addressControl.getNumberOfPages());

  sendMessageWithBody(producer, 512);
  session.commit();
  Assert.assertEquals(1, addressControl.getNumberOfPages());

  sendMessageWithBody(producer, 512);
  session.commit();
  Assert.assertEquals("# of pages is 2", 2, addressControl.getNumberOfPages());

  System.out.println("Address size=" + addressControl.getAddressSize());
  // The management view must agree with the server-side page store.
  Assert.assertEquals(
      queueOnServer.getPageSubscription().getPagingStore().getAddressSize(),
      addressControl.getAddressSize());
}

/** Sends one durable message whose body is {@code bodySize} zero bytes. */
private void sendMessageWithBody(ClientProducer producer, int bodySize) throws Exception {
  ClientMessage message = session.createMessage(true);
  message.getBodyBuffer().writeBytes(new byte[bodySize]);
  producer.send(message);
}
@Test public void testSimpleDistributionOfBackupsMaxBackupsExceeded() throws Exception { HornetQServer server0 = createServer(0, 1, BackupStrategy.FULL); HornetQServer server1 = createServer(1, 0, BackupStrategy.FULL); HornetQServer server2 = createServer(2, 0, BackupStrategy.FULL); HornetQServer server3 = createServer(3, 0, BackupStrategy.FULL); TransportConfiguration liveConnector0 = getConnectorTransportConfiguration("liveConnector" + 0, 0); TransportConfiguration liveConnector1 = getConnectorTransportConfiguration("liveConnector" + 1, 1); TransportConfiguration liveConnector2 = getConnectorTransportConfiguration("liveConnector" + 2, 2); TransportConfiguration liveConnector3 = getConnectorTransportConfiguration("liveConnector" + 3, 3); try (ServerLocator serverLocator = HornetQClient.createServerLocatorWithoutHA(liveConnector0)) { server0.start(); server1.start(); ClientSessionFactory sessionFactory0 = serverLocator.createSessionFactory(liveConnector0); waitForRemoteBackup(sessionFactory0, 10); ClientSessionFactory sessionFactory1 = serverLocator.createSessionFactory(liveConnector1); waitForRemoteBackup(sessionFactory1, 10); Topology topology = serverLocator.getTopology(); Collection<TopologyMemberImpl> members = topology.getMembers(); assertEquals(members.size(), 2); Map<String, HornetQServer> backupServers0 = server0.getClusterManager().getHAManager().getBackupServers(); assertEquals(backupServers0.size(), 1); Map<String, HornetQServer> backupServers1 = server1.getClusterManager().getHAManager().getBackupServers(); assertEquals(backupServers1.size(), 1); HornetQServer backupServer0 = backupServers0.values().iterator().next(); HornetQServer backupServer1 = backupServers1.values().iterator().next(); waitForRemoteBackupSynchronization(backupServer0); waitForRemoteBackupSynchronization(backupServer1); assertEquals(server0.getNodeID(), backupServer1.getNodeID()); assertEquals(server1.getNodeID(), backupServer0.getNodeID()); server2.start(); // just give server2 
time to try both server 0 and 1 ClientSessionFactory sessionFactory2 = serverLocator.createSessionFactory(liveConnector2); server3.start(); ClientSessionFactory sessionFactory3 = serverLocator.createSessionFactory(liveConnector3); waitForRemoteBackup(sessionFactory2, 10); waitForRemoteBackup(sessionFactory3, 10); assertEquals(members.size(), 2); Map<String, HornetQServer> backupServers2 = server2.getClusterManager().getHAManager().getBackupServers(); assertEquals(backupServers2.size(), 1); Map<String, HornetQServer> backupServers3 = server3.getClusterManager().getHAManager().getBackupServers(); assertEquals(backupServers3.size(), 1); HornetQServer backupServer2 = backupServers2.values().iterator().next(); HornetQServer backupServer3 = backupServers3.values().iterator().next(); waitForRemoteBackupSynchronization(backupServer2); waitForRemoteBackupSynchronization(backupServer3); assertEquals(server0.getNodeID(), backupServer1.getNodeID()); assertEquals(server1.getNodeID(), backupServer0.getNodeID()); assertEquals(server2.getNodeID(), backupServer3.getNodeID()); assertEquals(server3.getNodeID(), backupServer2.getNodeID()); } finally { server0.stop(); server1.stop(); server2.stop(); server3.stop(); } }
@Test public void testLargeMessageCompressionRestartAndCheckSize() throws Exception { final int messageSize = 1024 * 1024; HornetQServer server = createServer(true, isNetty()); server.start(); ClientSessionFactory sf = createSessionFactory(locator); ClientSession session = addClientSession(sf.createSession(false, false, false)); session.createQueue(ADDRESS, ADDRESS, true); ClientProducer producer = session.createProducer(ADDRESS); byte[] msgs = new byte[1024 * 1024]; for (int i = 0; i < msgs.length; i++) { msgs[i] = RandomUtil.randomByte(); } Message clientFile = createLargeClientMessage(session, msgs, true); producer.send(clientFile); session.commit(); session.close(); sf.close(); locator.close(); server.stop(); server = createServer(true, isNetty()); server.start(); locator = createFactory(isNetty()); sf = createSessionFactory(locator); session = sf.createSession(); session.start(); ClientConsumer consumer = session.createConsumer(ADDRESS); ClientMessage msg1 = consumer.receive(1000); Assert.assertNotNull(msg1); assertEquals(messageSize, msg1.getBodySize()); String testDir = getTestDir(); File testFile = new File(testDir, "async_large_message"); FileOutputStream output = new FileOutputStream(testFile); msg1.saveToOutputStream(output); msg1.acknowledge(); session.commit(); consumer.close(); session.close(); // verify FileInputStream input = new FileInputStream(testFile); for (int i = 0; i < messageSize; i++) { byte b = (byte) input.read(); assertEquals("position = " + i, msgs[i], b); } input.close(); testFile.delete(); validateNoFilesOnLargeDir(); }
/**
 * High-volume round trip through the XML export/import tools: sends 20k messages of 10KiB,
 * exports the journal to XML, wipes all data directories, re-imports the XML into a fresh
 * broker, and verifies every message body.
 *
 * <p>Fix: {@code managementSession} was never closed (resource leak); also braces the
 * single-statement commit {@code if} per house style.
 */
@Test
public void testHighVolume() throws Exception {
  final String FILE_NAME = getTestDir() + "/export.out";
  final String QUEUE_NAME = "A1";
  HornetQServer server = createServer(true);
  server.start();
  ServerLocator locator = createInVMNonHALocator();
  ClientSessionFactory factory = locator.createSessionFactory();
  ClientSession session = factory.createSession(false, false, false);
  session.createQueue(QUEUE_NAME, QUEUE_NAME, true);
  ClientProducer producer = session.createProducer(QUEUE_NAME);
  ClientMessage msg = session.createMessage(true);
  final int SIZE = 10240;
  final int COUNT = 20000;
  // Deterministic body pattern so the import can be verified byte-for-byte.
  byte[] bodyTst = new byte[SIZE];
  for (int i = 0; i < SIZE; i++) {
    bodyTst[i] = (byte) (i + 1);
  }
  msg.getBodyBuffer().writeBytes(bodyTst);
  assertEquals(bodyTst.length, msg.getBodySize());
  for (int i = 0; i < COUNT; i++) {
    producer.send(msg);
    if (i % 500 == 0) { // commit in batches to keep the transaction small
      session.commit();
    }
    System.out.println("Sent " + i);
  }
  session.commit();
  session.close();
  locator.close();
  server.stop();

  System.out.println("Writing XML...");
  FileOutputStream xmlOutputStream = new FileOutputStream(FILE_NAME);
  BufferedOutputStream bufferOut = new BufferedOutputStream(xmlOutputStream);
  XmlDataExporter xmlDataExporter =
      new XmlDataExporter(
          bufferOut, getBindingsDir(), getJournalDir(), getPageDir(), getLargeMessagesDir());
  xmlDataExporter.writeXMLData();
  bufferOut.close();
  System.out.println("Done writing XML.");

  // Wipe all persisted data so the import starts from a clean slate.
  deleteDirectory(new File(getJournalDir()));
  deleteDirectory(new File(getBindingsDir()));
  deleteDirectory(new File(getPageDir()));
  deleteDirectory(new File(getLargeMessagesDir()));

  server.start();
  locator = createInVMNonHALocator();
  factory = locator.createSessionFactory();
  session = factory.createSession(false, false, true);
  ClientSession managementSession = factory.createSession(false, true, true);

  System.out.println("Reading XML...");
  FileInputStream xmlInputStream = new FileInputStream(FILE_NAME);
  XmlDataImporter xmlDataImporter = new XmlDataImporter(xmlInputStream, session, managementSession);
  xmlDataImporter.processXml();
  xmlInputStream.close();
  System.out.println("Done reading XML.");

  ClientConsumer consumer = session.createConsumer(QUEUE_NAME);
  session.start();
  for (int i = 0; i < COUNT; i++) {
    msg = consumer.receive(CONSUMER_TIMEOUT);
    System.out.println("Received " + i);
    Assert.assertNotNull(msg);
    assertEquals(msg.getBodySize(), bodyTst.length);
    byte[] bodyRead = new byte[bodyTst.length];
    msg.getBodyBuffer().readBytes(bodyRead);
    assertEqualsByteArrays(bodyTst, bodyRead);
  }
  session.close();
  managementSession.close(); // fixed: this session was previously never closed
  locator.close();
  server.stop();
  File temp = new File(FILE_NAME);
  temp.delete();
}
@Test public void testDelayedRedeliveryWithStart() throws Exception { ClientSessionFactory sessionFactory = createSessionFactory(locator); ClientSession session = sessionFactory.createSession(false, false, false); session.createQueue(qName, qName, null, true); session.close(); ClientSession session1 = sessionFactory.createSession(false, true, true); ClientProducer producer = session1.createProducer(qName); final int NUM_MESSAGES = 1; for (int i = 0; i < NUM_MESSAGES; i++) { ClientMessage tm = createDurableMessage(session1, "message" + i); producer.send(tm); } session1.close(); ClientSession session2 = sessionFactory.createSession(false, false, false); ClientConsumer consumer2 = session2.createConsumer(qName); session2.start(); for (int i = 0; i < NUM_MESSAGES; i++) { ClientMessage tm = consumer2.receive(500); Assert.assertNotNull(tm); Assert.assertEquals("message" + i, tm.getBodyBuffer().readString()); } // Now rollback long now = System.currentTimeMillis(); session2.rollback(); session2.close(); sessionFactory.close(); locator.close(); server.stop(); initServer(); sessionFactory = createSessionFactory(locator); session2 = sessionFactory.createSession(false, false, false); consumer2 = session2.createConsumer(qName); Thread.sleep(3000); session2.start(); // This should redeliver with a delayed redelivery for (int i = 0; i < NUM_MESSAGES; i++) { ClientMessage tm = consumer2.receive(DelayedMessageTest.DELAY + 1000); Assert.assertNotNull(tm); long time = System.currentTimeMillis(); Assert.assertTrue(time - now >= DelayedMessageTest.DELAY); // Hudson can introduce a large degree of indeterminism } session2.commit(); session2.close(); }
/** Stops the shared server once all tests in the class have run. */
@AfterClass
public static void cleanup() throws Exception {
  server.stop();
  // Null out the static field so the stopped server can be garbage-collected.
  server = null;
}