public void createQueue( final SimpleString address, final SimpleString name, final SimpleString filterString, final boolean temporary, final boolean durable) throws Exception { if (durable) { // make sure the user has privileges to create this queue securityStore.check(address, CheckType.CREATE_DURABLE_QUEUE, this); } else { securityStore.check(address, CheckType.CREATE_NON_DURABLE_QUEUE, this); } Queue queue = server.createQueue(address, name, filterString, durable, temporary); if (temporary) { // Temporary queue in core simply means the queue will be deleted if // the remoting connection // dies. It does not mean it will get deleted automatically when the // session is closed. // It is up to the user to delete the queue when finished with it TempQueueCleanerUpper cleaner = new TempQueueCleanerUpper(postOffice, name, queue); remotingConnection.addCloseListener(cleaner); remotingConnection.addFailureListener(cleaner); tempQueueCleannerUppers.put(name, cleaner); } }
/** * This would force a journal duplication on bindings even with the scenario that generated fixed, * the server shouldn't hold of from starting * * @throws Exception */ @Test public void testForceDuplicationOnBindings() throws Exception { queue = server.createQueue(QUEUE, QUEUE, null, true, false); ClientSessionFactory factory = locator.createSessionFactory(); ClientSession session = factory.createSession(false, false, false); ClientProducer producer = session.createProducer(QUEUE); producer.send(session.createMessage(true)); session.commit(); long queueID = server.getStorageManager().generateUniqueID(); long txID = server.getStorageManager().generateUniqueID(); // Forcing a situation where the server would unexpectedly create a duplicated queue. The server // should still start normally LocalQueueBinding newBinding = new LocalQueueBinding( QUEUE, new QueueImpl(queueID, QUEUE, QUEUE, null, true, false, null, null, null, null, null), server.getNodeID()); server.getStorageManager().addQueueBinding(txID, newBinding); server.getStorageManager().commitBindings(txID); server.stop(); // a duplicate binding would impede the server from starting server.start(); waitForServer(server); server.stop(); }
@Override
public void setUp() throws Exception {
  super.setUp();

  // Start a persistent server; 10024/200024 are the size thresholds handed to createServer
  // (presumably page-size/max-size, small enough to force paging — confirm against the base
  // class), then create the durable queue under test.
  Configuration serverConfig = createDefaultConfig();
  HashMap<String, AddressSettings> addressSettings = new HashMap<String, AddressSettings>();
  messagingService = createServer(true, serverConfig, 10024, 200024, addressSettings);
  messagingService.start();
  pagedServerQueue =
      (QueueImpl) messagingService.createQueue(ADDRESS, ADDRESS, null, true, false);
}
public void createQueue( final SimpleString address, final SimpleString name, final SimpleString filterString, final boolean temporary, final boolean durable) throws Exception { if (durable) { // make sure the user has privileges to create this queue securityStore.check(address, CheckType.CREATE_DURABLE_QUEUE, this); } else { securityStore.check(address, CheckType.CREATE_NON_DURABLE_QUEUE, this); } Queue queue = server.createQueue(address, name, filterString, durable, temporary); if (temporary) { // Temporary queue in core simply means the queue will be deleted if // the remoting connection // dies. It does not mean it will get deleted automatically when the // session is closed. // It is up to the user to delete the queue when finished with it TempQueueCleanerUpper cleaner = new TempQueueCleanerUpper(server, name); remotingConnection.addCloseListener(cleaner); remotingConnection.addFailureListener(cleaner); tempQueueCleannerUppers.put(name, cleaner); } if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( "Queue " + name + " created on address " + name + " with filter=" + filterString + " temporary = " + temporary + " durable=" + durable + " on session user="******", connection=" + this.remotingConnection); } }
// An exception during delivery shouldn't make the message disappear @Test public void testExceptionWhileDelivering() throws Exception { queue = server.createQueue(QUEUE, QUEUE, null, true, false); HangInterceptor hangInt = new HangInterceptor(); try { locator.addInterceptor(hangInt); ClientSessionFactory factory = locator.createSessionFactory(); ClientSession session = factory.createSession(false, false, false); ClientProducer producer = session.createProducer(QUEUE); ClientConsumer consumer = session.createConsumer(QUEUE); producer.send(session.createMessage(true)); session.commit(); hangInt.close(); session.start(); Assert.assertTrue(hangInt.reusableLatch.await(10, TimeUnit.SECONDS)); hangInt.pendingException = new HornetQException(); hangInt.open(); session.close(); session = factory.createSession(false, false); session.start(); consumer = session.createConsumer(QUEUE); ClientMessage msg = consumer.receive(5000); Assert.assertNotNull(msg); msg.acknowledge(); session.commit(); } finally { hangInt.open(); } }
/**
 * Simulates topic creation, where a single queue-binding record is supposed to be created in
 * the bindings journal; verifies across five stop/start cycles that no duplicate binding
 * records accumulate.
 *
 * @throws Exception
 */
@Test
public void testDuplicateDestinationsOnTopic() throws Exception {
  for (int iteration = 0; iteration < 5; iteration++) {
    // Only create the topic queue when it does not already exist.
    if (server.locateQueue(SimpleString.toSimpleString("jms.topic.tt")) == null) {
      server.createQueue(
          SimpleString.toSimpleString("jms.topic.tt"),
          SimpleString.toSimpleString("jms.topic.tt"),
          SimpleString.toSimpleString(HornetQServerImpl.GENERIC_IGNORED_FILTER),
          true,
          false);
    }

    server.stop();

    // Open the bindings journal directly and count QUEUE_BINDING_RECORD entries.
    SequentialFileFactory bindingsFileFactory = new NIOSequentialFileFactory(getBindingsDir(), null);
    JournalImpl bindingsJournal =
        new JournalImpl(1024 * 1024, 2, 0, 0, bindingsFileFactory, "hornetq-bindings", "bindings", 1);
    bindingsJournal.start();

    LinkedList<RecordInfo> loadedRecords = new LinkedList<RecordInfo>();
    bindingsJournal.load(loadedRecords, null, null);

    int bindingRecordCount = 0;
    for (RecordInfo record : loadedRecords) {
      if (record.getUserRecordType() == JournalRecordIds.QUEUE_BINDING_RECORD) {
        bindingRecordCount++;
      }
    }

    // Exactly one binding record must exist, no matter how many restarts happened.
    assertEquals(1, bindingRecordCount);

    System.out.println("Bindings: " + bindingRecordCount);

    bindingsJournal.stop();

    // Restart for the next iteration, except after the final check.
    if (iteration < 4) {
      server.start();
    }
  }
}
/**
 * Recreates the scenario where a queue was duplicated: a delete of a queue is blocked
 * mid-flight while a concurrent create of the same queue is attempted. After the overlap the
 * server must still restart cleanly (a duplicate binding would impede the start).
 *
 * @throws Exception
 */
@Test
public void testHangDuplicateQueues() throws Exception {
  // Held by the test while the delete is in progress, so the concurrent create overlaps it.
  final Semaphore blocked = new Semaphore(1);
  // Counted down by the queue subclass when the delete thread reaches deleteMatchingReferences.
  final CountDownLatch latchDelete = new CountDownLatch(1);

  // QueueImpl whose reference deletion signals the test and then parks on 'blocked'.
  class MyQueueWithBlocking extends QueueImpl {
    /**
     * Pass-through constructor; all parameters are forwarded verbatim to {@link QueueImpl}.
     *
     * @param id
     * @param address
     * @param name
     * @param filter
     * @param pageSubscription
     * @param durable
     * @param temporary
     * @param scheduledExecutor
     * @param postOffice
     * @param storageManager
     * @param addressSettingsRepository
     * @param executor
     */
    public MyQueueWithBlocking(
        final long id,
        final SimpleString address,
        final SimpleString name,
        final Filter filter,
        final PageSubscription pageSubscription,
        final boolean durable,
        final boolean temporary,
        final ScheduledExecutorService scheduledExecutor,
        final PostOffice postOffice,
        final StorageManager storageManager,
        final HierarchicalRepository<AddressSettings> addressSettingsRepository,
        final Executor executor) {
      super(
          id,
          address,
          name,
          filter,
          pageSubscription,
          durable,
          temporary,
          scheduledExecutor,
          postOffice,
          storageManager,
          addressSettingsRepository,
          executor);
    }

    @Override
    public synchronized int deleteMatchingReferences(final int flushLimit, final Filter filter)
        throws Exception {
      // Tell the test we are inside the delete, then wait for it to release us.
      latchDelete.countDown();
      blocked.acquire();
      blocked.release();
      return super.deleteMatchingReferences(flushLimit, filter);
    }
  }

  // Factory that makes the server hand out the blocking queue implementation above.
  class LocalFactory extends QueueFactoryImpl {
    public LocalFactory(
        final ExecutorFactory executorFactory,
        final ScheduledExecutorService scheduledExecutor,
        final HierarchicalRepository<AddressSettings> addressSettingsRepository,
        final StorageManager storageManager) {
      super(executorFactory, scheduledExecutor, addressSettingsRepository, storageManager);
    }

    @Override
    public Queue createQueue(
        final long persistenceID,
        final SimpleString address,
        final SimpleString name,
        final Filter filter,
        final PageSubscription pageSubscription,
        final boolean durable,
        final boolean temporary) {
      // Stash the created queue in the test field so the test can observe it.
      queue =
          new MyQueueWithBlocking(
              persistenceID,
              address,
              name,
              filter,
              pageSubscription,
              durable,
              temporary,
              scheduledExecutor,
              postOffice,
              storageManager,
              addressSettingsRepository,
              executorFactory.getExecutor());
      return queue;
    }
  }

  LocalFactory queueFactory =
      new LocalFactory(
          server.getExecutorFactory(),
          server.getScheduledPool(),
          server.getAddressSettingsRepository(),
          server.getStorageManager());
  queueFactory.setPostOffice(server.getPostOffice());
  ((HornetQServerImpl) server).replaceQueueFactory(queueFactory);

  queue = server.createQueue(QUEUE, QUEUE, null, true, false);

  // Take the permit so the delete thread will park inside deleteMatchingReferences.
  blocked.acquire();

  ClientSessionFactory factory = locator.createSessionFactory();
  ClientSession session = factory.createSession(false, false, false);
  ClientProducer producer = session.createProducer(QUEUE);
  producer.send(session.createMessage(true));
  session.commit();

  // Delete the queue on a separate thread; it will block on the semaphore.
  Thread tDelete =
      new Thread() {
        @Override
        public void run() {
          try {
            server.destroyQueue(QUEUE);
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      };
  tDelete.start();

  Assert.assertTrue(latchDelete.await(10, TimeUnit.SECONDS));

  // Attempt the duplicate create while the delete is still in progress; failure is acceptable.
  try {
    server.createQueue(QUEUE, QUEUE, null, true, false);
  } catch (Exception expected) {
  }

  // Let the delete finish, then bounce the server.
  blocked.release();

  server.stop();

  tDelete.join();

  session.close();

  // a duplicate binding would impede the server from starting
  server.start();
  waitForServer(server);

  server.stop();
}
/**
 * Verifies that a producer and the queue's counter operations do not hang while a consumer's
 * delivery is deliberately blocked, and that no message is lost across the block/release.
 * Relies on blockConsumers()/awaitBlocking()/releaseConsumers() helpers defined elsewhere in
 * this test class — presumably they gate delivery to consumers; confirm against the class.
 */
@Test
public void testHangOnDelivery() throws Exception {
  queue = server.createQueue(QUEUE, QUEUE, null, true, false);
  try {
    // Producer and consumer live on separate connections so blocking the consumer side
    // cannot stall the producer's connection.
    ClientSessionFactory factory = locator.createSessionFactory();
    ClientSession sessionProducer = factory.createSession(false, false, false);

    ServerLocator consumerLocator = createInVMNonHALocator();
    ClientSessionFactory factoryConsumer = consumerLocator.createSessionFactory();
    ClientSession sessionConsumer = factoryConsumer.createSession();

    ClientProducer producer = sessionProducer.createProducer(QUEUE);
    ClientConsumer consumer = sessionConsumer.createConsumer(QUEUE);

    producer.send(sessionProducer.createMessage(true));

    // Block deliveries before the consumer starts, then wait until it is actually blocked.
    blockConsumers();

    sessionProducer.commit();

    sessionConsumer.start();

    awaitBlocking();

    // this shouldn't lock
    producer.send(sessionProducer.createMessage(true));
    sessionProducer.commit();

    // These two operations should finish without the test hanging
    queue.getMessagesAdded(1);
    queue.getMessageCount(1);

    releaseConsumers();

    // a rollback to make sure everything will be reset on the deliveries
    // and that both consumers will receive each a message
    // this is to guarantee the server will have both consumers registered
    sessionConsumer.rollback();

    // a flush to guarantee any pending task is finished on flushing out delivery and pending msgs
    queue.flushExecutor();

    Assert.assertEquals(2, queue.getMessageCount());
    Assert.assertEquals(2, queue.getMessagesAdded());

    // Both messages must still be deliverable after the rollback.
    ClientMessage msg = consumer.receive(5000);
    Assert.assertNotNull(msg);
    msg.acknowledge();

    msg = consumer.receive(5000);
    Assert.assertNotNull(msg);
    msg.acknowledge();

    sessionProducer.commit();
    sessionConsumer.commit();

    sessionProducer.close();
    sessionConsumer.close();
  } finally {
    // Always unblock, even on failure, so other tests are not affected.
    releaseConsumers();
  }
}
/**
 * Topology callback invoked when a cluster node comes up. Creates (at most once per nodeID) a
 * message-flow record and its store-and-forward queue ("sf.&lt;name&gt;.&lt;nodeID&gt;") used
 * to bridge messages to that node. Early-returns filter out: our own node, nodes we are not
 * allowed to connect to directly, the no-serverLocator case, and backups (no live connector).
 *
 * @param topologyMember the member that came up
 * @param last whether this is the last notification of the current topology batch
 */
@Override
public void nodeUP(final TopologyMember topologyMember, final boolean last) {
  if (stopping) {
    return;
  }
  final String nodeID = topologyMember.getNodeId();

  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    // NOTE(review): oddly named local — it is just the debug-message prefix, not a test class.
    String ClusterTestBase = "receiving nodeUP for nodeID=";
    HornetQServerLogger.LOGGER.debug(
        this + ClusterTestBase + nodeID + " connectionPair=" + topologyMember);
  }

  // discard notifications about ourselves unless its from our backup
  if (nodeID.equals(nodeManager.getNodeId().toString())) {
    if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
      HornetQServerLogger.LOGGER.trace(
          this
              + "::informing about backup to itself, nodeUUID="
              + nodeManager.getNodeId()
              + ", connectorPair="
              + topologyMember
              + ", this = "
              + this);
    }
    return;
  }

  // if the node is more than 1 hop away, we do not create a bridge for direct cluster connection
  if (allowDirectConnectionsOnly && !allowableConnections.contains(topologyMember.getLive())) {
    return;
  }

  // FIXME required to prevent cluster connections w/o discovery group
  // and empty static connectors to create bridges... ulgy!
  if (serverLocator == null) {
    return;
  }

  /*we don't create bridges to backups*/
  if (topologyMember.getLive() == null) {
    if (isTrace) {
      HornetQServerLogger.LOGGER.trace(
          this
              + " ignoring call with nodeID="
              + nodeID
              + ", topologyMember="
              + topologyMember
              + ", last="
              + last);
    }
    return;
  }

  // Guard record creation so two concurrent nodeUP calls cannot create duplicate records.
  synchronized (recordsGuard) {
    try {
      MessageFlowRecord record = records.get(nodeID);

      if (record == null) {
        if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
          HornetQServerLogger.LOGGER.debug(
              this + "::Creating record for nodeID=" + nodeID + ", topologyMember=" + topologyMember);
        }

        // New node - create a new flow record
        final SimpleString queueName = new SimpleString("sf." + name + "." + nodeID);

        Binding queueBinding = postOffice.getBinding(queueName);

        Queue queue;

        if (queueBinding != null) {
          queue = (Queue) queueBinding.getBindable();
        } else {
          // Add binding in storage so the queue will get reloaded on startup and we can find it -
          // it's never
          // actually routed to at that address though
          queue = server.createQueue(queueName, queueName, null, true, false);
        }

        createNewRecord(
            topologyMember.getUniqueEventID(),
            nodeID,
            topologyMember.getLive(),
            queueName,
            queue,
            true);
      } else {
        if (isTrace) {
          HornetQServerLogger.LOGGER.trace(
              this
                  + " ignored nodeUp record for "
                  + topologyMember
                  + " on nodeID="
                  + nodeID
                  + " as the record already existed");
        }
      }
    } catch (Exception e) {
      HornetQServerLogger.LOGGER.errorUpdatingTopology(e);
    }
  }
}
/**
 * Verifies message ordering by "id" property survives partially-acknowledged consumption:
 * 500 messages are sent, the first half is received (only the first 100 acknowledged), and a
 * fresh session must then see messages from id 100 onward, in order.
 */
@Test
public void testOrder1() throws Throwable {
  boolean persistentMessages = true;

  Configuration config = createDefaultConfig();

  config.setJournalSyncNonTransactional(false);

  HornetQServer server =
      createServer(true, config, PAGE_SIZE, PAGE_MAX, new HashMap<String, AddressSettings>());

  server.start();

  final int messageSize = 1024;

  final int numberOfMessages = 500;

  ServerLocator locator = createInVMNonHALocator();

  locator.setClientFailureCheckPeriod(1000);
  locator.setConnectionTTL(2000);
  locator.setReconnectAttempts(0);

  locator.setBlockOnNonDurableSend(true);
  locator.setBlockOnDurableSend(true);
  locator.setBlockOnAcknowledge(true);
  locator.setConsumerWindowSize(1024 * 1024);

  ClientSessionFactory sf = createSessionFactory(locator);

  ClientSession session = sf.createSession(false, false, false);

  server.createQueue(ADDRESS, ADDRESS, null, true, false);

  ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

  // Fill a fixed body so every message has the same size.
  byte[] body = new byte[messageSize];

  ByteBuffer bb = ByteBuffer.wrap(body);

  for (int j = 1; j <= messageSize; j++) {
    bb.put(getSamplebyte(j));
  }

  // Send all messages, tagging each with its ordinal "id".
  // (i % 1000 only matches i == 0 here since numberOfMessages is 500.)
  for (int i = 0; i < numberOfMessages; i++) {
    ClientMessage message = session.createMessage(persistentMessages);

    HornetQBuffer bodyLocal = message.getBodyBuffer();

    bodyLocal.writeBytes(body);

    message.putIntProperty(new SimpleString("id"), i);

    producer.send(message);
    if (i % 1000 == 0) {
      session.commit();
    }
  }

  session.commit();

  session.close();

  // Consume the first half in an auto-commit session; ack only the first 100.
  session = sf.createSession(true, true, 0);

  session.start();

  ClientConsumer consumer = session.createConsumer(ADDRESS);

  for (int i = 0; i < numberOfMessages / 2; i++) {
    ClientMessage message = consumer.receive(5000);
    assertNotNull(message);
    assertEquals(i, message.getIntProperty("id").intValue());

    if (i < 100) {
      // Do not consume the last one so we could restart
      message.acknowledge();
    }
  }

  session.close();

  session = null;

  sf.close();
  sf = createSessionFactory(locator);

  // NOTE(review): the locator is recreated AFTER sf was built from the old one; the new
  // locator's settings are never applied to sf — looks unintentional but is harmless here.
  locator = createInVMNonHALocator();

  session = sf.createSession(true, true, 0);

  session.start();

  consumer = session.createConsumer(ADDRESS);

  // Everything from id 100 up must still be available, in order.
  for (int i = 100; i < numberOfMessages; i++) {
    ClientMessage message = consumer.receive(5000);
    assertNotNull(message);
    assertEquals(i, message.getIntProperty("id").intValue());
    message.acknowledge();
  }

  session.close();
}
/**
 * Verifies redelivery order after rollback while paging: non-paged references are consumed
 * first, then a paged message is received and rolled back twice — the same id must come back
 * each time — and after a full server restart consumption resumes from that rolled-back id.
 */
@Test
public void testOrderOverRollback2() throws Throwable {
  boolean persistentMessages = true;

  Configuration config = createDefaultConfig();

  config.setJournalSyncNonTransactional(false);

  HornetQServer server =
      createServer(true, config, PAGE_SIZE, PAGE_MAX, new HashMap<String, AddressSettings>());

  server.start();

  final int messageSize = 1024;

  final int numberOfMessages = 200;

  ServerLocator locator = createInVMNonHALocator();

  locator.setClientFailureCheckPeriod(1000);
  locator.setConnectionTTL(2000);
  locator.setReconnectAttempts(0);

  locator.setBlockOnNonDurableSend(true);
  locator.setBlockOnDurableSend(true);
  locator.setBlockOnAcknowledge(true);
  // Window size 0 so the client does not buffer ahead — needed for exact rollback behavior.
  locator.setConsumerWindowSize(0);

  ClientSessionFactory sf = createSessionFactory(locator);

  ClientSession session = sf.createSession(false, false, false);

  QueueImpl queue = (QueueImpl) server.createQueue(ADDRESS, ADDRESS, null, true, false);

  ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

  byte[] body = new byte[messageSize];

  ByteBuffer bb = ByteBuffer.wrap(body);

  for (int j = 1; j <= messageSize; j++) {
    bb.put(getSamplebyte(j));
  }

  // Send all messages tagged with an ordinal "id".
  for (int i = 0; i < numberOfMessages; i++) {
    ClientMessage message = session.createMessage(persistentMessages);

    HornetQBuffer bodyLocal = message.getBodyBuffer();

    bodyLocal.writeBytes(body);

    message.putIntProperty(new SimpleString("id"), i);

    producer.send(message);
    if (i % 1000 == 0) {
      session.commit();
    }
  }

  session.commit();

  session.close();

  session = sf.createSession(false, false, 0);

  session.start();

  ClientConsumer consumer = session.createConsumer(ADDRESS);

  // number of references without paging
  int numberOfRefs = queue.getNumberOfReferences();

  // consume all non-paged references
  for (int ref = 0; ref < numberOfRefs; ref++) {
    ClientMessage msg = consumer.receive(5000);
    assertNotNull(msg);
    msg.acknowledge();
  }

  session.commit();

  session.close();

  session = sf.createSession(false, false, 0);

  session.start();

  consumer = session.createConsumer(ADDRESS);

  // First paged message: remember its id, then roll it back.
  ClientMessage msg = consumer.receive(5000);
  assertNotNull(msg);
  int msgIDRolledBack = msg.getIntProperty("id").intValue();
  msg.acknowledge();

  session.rollback();

  // The rolled-back message must be redelivered with the same id.
  msg = consumer.receive(5000);
  assertNotNull(msg);
  assertEquals(msgIDRolledBack, msg.getIntProperty("id").intValue());

  session.rollback();

  session.close();

  sf.close();
  locator.close();

  // Full restart: the rolled-back message must survive and ordering must resume from it.
  server.stop();

  server.start();

  locator = createInVMNonHALocator();

  locator.setClientFailureCheckPeriod(1000);
  locator.setConnectionTTL(2000);
  locator.setReconnectAttempts(0);

  locator.setBlockOnNonDurableSend(true);
  locator.setBlockOnDurableSend(true);
  locator.setBlockOnAcknowledge(true);
  locator.setConsumerWindowSize(0);

  sf = createSessionFactory(locator);

  session = sf.createSession(false, false, 0);

  session.start();

  consumer = session.createConsumer(ADDRESS);

  for (int i = msgIDRolledBack; i < numberOfMessages; i++) {
    ClientMessage message = consumer.receive(5000);
    assertNotNull(message);
    assertEquals(i, message.getIntProperty("id").intValue());
    message.acknowledge();
  }

  session.commit();

  session.close();
}
/**
 * Verifies queue counters with two queues on the same address while a concurrent consumer
 * drains 100 messages from one of them (q1): after the consumer finishes, q1's message count
 * must drop by 100 while both queues report all messages as added.
 */
@Test
public void testPageCounter2() throws Throwable {
  boolean persistentMessages = true;

  Configuration config = createDefaultConfig();

  config.setJournalSyncNonTransactional(false);

  HornetQServer server =
      createServer(true, config, PAGE_SIZE, PAGE_MAX, new HashMap<String, AddressSettings>());

  server.start();

  final int messageSize = 1024;

  final int numberOfMessages = 500;

  ServerLocator locator = createInVMNonHALocator();

  locator.setClientFailureCheckPeriod(1000);
  locator.setConnectionTTL(2000);
  locator.setReconnectAttempts(0);

  locator.setBlockOnNonDurableSend(true);
  locator.setBlockOnDurableSend(true);
  locator.setBlockOnAcknowledge(true);
  locator.setConsumerWindowSize(1024 * 1024);

  ClientSessionFactory sf = createSessionFactory(locator);

  ClientSession session = sf.createSession(false, false, false);

  // Two queues on the same address; "inactive" (q2) is never consumed from.
  Queue q1 = server.createQueue(ADDRESS, ADDRESS, null, true, false);
  Queue q2 = server.createQueue(ADDRESS, new SimpleString("inactive"), null, true, false);

  ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

  byte[] body = new byte[messageSize];

  ByteBuffer bb = ByteBuffer.wrap(body);

  for (int j = 1; j <= messageSize; j++) {
    bb.put(getSamplebyte(j));
  }

  final AtomicInteger errors = new AtomicInteger(0);

  // Consumer thread: receives the first 100 messages from q1 (by address) in id order.
  Thread t1 =
      new Thread() {
        @Override
        public void run() {
          try {
            ServerLocator sl = createInVMNonHALocator();
            ClientSessionFactory sf = sl.createSessionFactory();
            ClientSession sess = sf.createSession(true, true, 0);
            sess.start();
            ClientConsumer cons = sess.createConsumer(ADDRESS);
            for (int i = 0; i < 100; i++) {
              ClientMessage msg = cons.receive(5000);
              assertNotNull(msg);
              assertEquals(i, msg.getIntProperty("id").intValue());
              msg.acknowledge();
            }
            sess.close();
            sl.close();
          } catch (Throwable e) {
            e.printStackTrace();
            errors.incrementAndGet();
          }
        }
      };

  // Send all messages, committing in batches of 20.
  for (int i = 0; i < numberOfMessages; i++) {
    ClientMessage message = session.createMessage(persistentMessages);

    HornetQBuffer bodyLocal = message.getBodyBuffer();

    bodyLocal.writeBytes(body);

    message.putIntProperty(new SimpleString("id"), i);

    producer.send(message);
    if (i % 20 == 0) {
      session.commit();
    }
  }

  session.commit();

  // Start the consumer only after everything is sent, and wait for it to finish.
  t1.start();
  t1.join();

  assertEquals(0, errors.get());

  // The count update is asynchronous — wait up to 10s for q1 to reflect the 100 consumed.
  long timeout = System.currentTimeMillis() + 10000;
  while (numberOfMessages - 100 != q1.getMessageCount() && System.currentTimeMillis() < timeout) {
    Thread.sleep(500);
  }

  assertEquals(numberOfMessages, q2.getMessageCount());
  assertEquals(numberOfMessages, q2.getMessagesAdded());
  assertEquals(numberOfMessages - 100, q1.getMessageCount());
  // FIX: the original repeated the q2.getMessagesAdded() assertion here; the parallel check
  // intended for q1 was missing. All messages were added to q1 even though 100 were consumed,
  // so its messages-added counter must equal numberOfMessages.
  assertEquals(numberOfMessages, q1.getMessagesAdded());
}
/**
 * Verifies queue counters with two queues on the same address while a concurrent consumer
 * fully drains one of them (q1), and that both queues' counters are restored correctly after
 * a server restart (messages-added resets to 0 for the drained queue on reload).
 */
@Test
public void testPageCounter() throws Throwable {
  boolean persistentMessages = true;

  Configuration config = createDefaultConfig();

  config.setJournalSyncNonTransactional(false);

  HornetQServer server =
      createServer(true, config, PAGE_SIZE, PAGE_MAX, new HashMap<String, AddressSettings>());

  server.start();

  final int messageSize = 1024;

  final int numberOfMessages = 500;

  ServerLocator locator = createInVMNonHALocator();

  locator.setClientFailureCheckPeriod(1000);
  locator.setConnectionTTL(2000);
  locator.setReconnectAttempts(0);

  locator.setBlockOnNonDurableSend(true);
  locator.setBlockOnDurableSend(true);
  locator.setBlockOnAcknowledge(true);
  locator.setConsumerWindowSize(1024 * 1024);

  ClientSessionFactory sf = createSessionFactory(locator);

  ClientSession session = sf.createSession(false, false, false);

  // Two queues on the same address; "inactive" (q2) is never consumed from.
  Queue q1 = server.createQueue(ADDRESS, ADDRESS, null, true, false);
  Queue q2 = server.createQueue(ADDRESS, new SimpleString("inactive"), null, true, false);

  ClientProducer producer = session.createProducer(PagingTest.ADDRESS);

  byte[] body = new byte[messageSize];

  ByteBuffer bb = ByteBuffer.wrap(body);

  for (int j = 1; j <= messageSize; j++) {
    bb.put(getSamplebyte(j));
  }

  final AtomicInteger errors = new AtomicInteger(0);

  // Consumer thread: drains ALL messages from q1 (by address) in id order, concurrently
  // with the sends below, then verifies the queue is empty.
  Thread t1 =
      new Thread() {
        @Override
        public void run() {
          try {
            ServerLocator sl = createInVMNonHALocator();
            ClientSessionFactory sf = sl.createSessionFactory();
            ClientSession sess = sf.createSession(true, true, 0);
            sess.start();
            ClientConsumer cons = sess.createConsumer(ADDRESS);
            for (int i = 0; i < numberOfMessages; i++) {
              ClientMessage msg = cons.receive(5000);
              assertNotNull(msg);
              assertEquals(i, msg.getIntProperty("id").intValue());
              msg.acknowledge();
            }

            assertNull(cons.receiveImmediate());
            sess.close();
            sl.close();
          } catch (Throwable e) {
            e.printStackTrace();
            errors.incrementAndGet();
          }
        }
      };

  // Consumer runs concurrently with the producer this time.
  t1.start();

  // Send all messages, committing in batches of 20.
  for (int i = 0; i < numberOfMessages; i++) {
    ClientMessage message = session.createMessage(persistentMessages);

    HornetQBuffer bodyLocal = message.getBodyBuffer();

    bodyLocal.writeBytes(body);

    message.putIntProperty(new SimpleString("id"), i);

    producer.send(message);
    if (i % 20 == 0) {
      session.commit();
    }
  }

  session.commit();

  t1.join();

  assertEquals(0, errors.get());

  // q2 keeps everything; q1 was fully drained but still counted every add.
  assertEquals(numberOfMessages, q2.getMessageCount());
  assertEquals(numberOfMessages, q2.getMessagesAdded());
  assertEquals(0, q1.getMessageCount());
  assertEquals(numberOfMessages, q1.getMessagesAdded());

  session.close();
  sf.close();
  locator.close();

  // Restart and re-resolve the queues from the post office bindings.
  server.stop();

  server.start();

  Bindings bindings = server.getPostOffice().getBindingsForAddress(ADDRESS);

  q1 = null;
  q2 = null;

  for (Binding bind : bindings.getBindings()) {
    if (bind instanceof LocalQueueBinding) {
      LocalQueueBinding qb = (LocalQueueBinding) bind;
      if (qb.getQueue().getName().equals(ADDRESS)) {
        q1 = qb.getQueue();
      }

      if (qb.getQueue().getName().equals(new SimpleString("inactive"))) {
        q2 = qb.getQueue();
      }
    }
  }

  assertNotNull(q1);

  assertNotNull(q2);

  assertEquals("q2 msg count", numberOfMessages, q2.getMessageCount());
  assertEquals("q2 msgs added", numberOfMessages, q2.getMessagesAdded());
  assertEquals("q1 msg count", 0, q1.getMessageCount());
  // 0, since nothing was sent to the queue after the server was restarted
  assertEquals("q1 msgs added", 0, q1.getMessagesAdded());
}