public void sendContinuations(
    final int packetSize, final long messageBodySize, final byte[] body, final boolean continues)
    throws Exception {
  if (currentLargeMessage == null) {
    throw HornetQMessageBundle.BUNDLE.largeMessageNotInitialised();
  }

  // Immediately release the credits for the continuations - these don't contribute to the
  // in-memory size of the message
  currentLargeMessage.addBytes(body);

  if (!continues) {
    currentLargeMessage.releaseResources();

    if (messageBodySize >= 0) {
      currentLargeMessage.putLongProperty(Message.HDR_LARGE_BODY_SIZE, messageBodySize);
    }

    doSend(currentLargeMessage, false);

    currentLargeMessage = null;
  }
}
public QueueQueryResult executeQueueQuery(final SimpleString name) throws Exception {
  if (name == null) {
    throw HornetQMessageBundle.BUNDLE.queueNameIsNull();
  }

  QueueQueryResult response;

  Binding binding = postOffice.getBinding(name);

  if (binding != null && binding.getType() == BindingType.LOCAL_QUEUE) {
    Queue queue = (Queue) binding.getBindable();

    Filter filter = queue.getFilter();

    SimpleString filterString = filter == null ? null : filter.getFilterString();

    response =
        new QueueQueryResult(
            name,
            binding.getAddress(),
            queue.isDurable(),
            queue.isTemporary(),
            filterString,
            queue.getConsumerCount(),
            queue.getMessageCount());
  }
  // make an exception for the management address (see HORNETQ-29)
  else if (name.equals(managementAddress)) {
    response = new QueueQueryResult(name, managementAddress, true, false, null, -1, -1);
  } else {
    response = new QueueQueryResult();
  }

  return response;
}
public void connectionCreated(
    final HornetQComponent component, final Connection connection, final String protocol) {
  if (connections.putIfAbsent((String) connection.getID(), connection) != null) {
    throw HornetQMessageBundle.BUNDLE.connectionExists(connection.getID());
  }

  listener.connectionCreated(component, connection, protocol);
}
public void createConsumer(
    final long consumerID,
    final SimpleString queueName,
    final SimpleString filterString,
    final boolean browseOnly)
    throws Exception {
  Binding binding = postOffice.getBinding(queueName);

  if (binding == null || binding.getType() != BindingType.LOCAL_QUEUE) {
    throw HornetQMessageBundle.BUNDLE.noSuchQueue(queueName);
  }

  securityStore.check(binding.getAddress(), CheckType.CONSUME, this);

  Filter filter = FilterImpl.createFilter(filterString);

  ServerConsumer consumer =
      new ServerConsumerImpl(
          consumerID,
          this,
          (QueueBinding) binding,
          filter,
          started,
          browseOnly,
          storageManager,
          callback,
          preAcknowledge,
          strictUpdateDeliveryCount,
          managementService);

  consumers.put(consumer.getID(), consumer);

  if (!browseOnly) {
    TypedProperties props = new TypedProperties();

    props.putSimpleStringProperty(ManagementHelper.HDR_ADDRESS, binding.getAddress());
    props.putSimpleStringProperty(ManagementHelper.HDR_CLUSTER_NAME, binding.getClusterName());
    props.putSimpleStringProperty(ManagementHelper.HDR_ROUTING_NAME, binding.getRoutingName());
    props.putIntProperty(ManagementHelper.HDR_DISTANCE, binding.getDistance());

    Queue theQueue = (Queue) binding.getBindable();
    props.putIntProperty(ManagementHelper.HDR_CONSUMER_COUNT, theQueue.getConsumerCount());

    if (filterString != null) {
      props.putSimpleStringProperty(ManagementHelper.HDR_FILTERSTRING, filterString);
    }

    Notification notification = new Notification(null, CONSUMER_CREATED, props);

    managementService.sendNotification(notification);
  }
}
public boolean changeMessagePriority(final long messageID, final int newPriority)
    throws Exception {
  checkStarted();

  clearIO();
  try {
    if (newPriority < 0 || newPriority > 9) {
      throw HornetQMessageBundle.BUNDLE.invalidNewPriority(newPriority);
    }

    return queue.changeReferencePriority(messageID, (byte) newPriority);
  } finally {
    blockOnIO();
  }
}
private Transformer instantiateTransformer(final String transformerClassName) {
  Transformer transformer = null;

  if (transformerClassName != null) {
    ClassLoader loader = Thread.currentThread().getContextClassLoader();
    try {
      Class<?> clz = loader.loadClass(transformerClassName);
      transformer = (Transformer) clz.newInstance();
    } catch (Exception e) {
      throw HornetQMessageBundle.BUNDLE.errorCreatingTransformerClass(e, transformerClassName);
    }
  }
  return transformer;
}
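// Illustrative sketch (not from the original sources): the kind of class that
// instantiateTransformer(...) above expects transformerClassName to name. It must implement
// org.hornetq.core.server.cluster.Transformer and have a public no-arg constructor, because it
// is created via Class.newInstance(). The class name and the property key below are made-up
// examples.
public class ExampleTimestampTransformer implements Transformer {
  @Override
  public ServerMessage transform(final ServerMessage message) {
    // Stamp each bridged/cluster-forwarded message with the time it passed through
    message.putLongProperty(new SimpleString("example-transformed-at"), System.currentTimeMillis());
    return message;
  }
}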
public int changeMessagesPriority(final String filterStr, final int newPriority)
    throws Exception {
  checkStarted();

  clearIO();
  try {
    if (newPriority < 0 || newPriority > 9) {
      throw HornetQMessageBundle.BUNDLE.invalidNewPriority(newPriority);
    }

    Filter filter = FilterImpl.createFilter(filterStr);

    return queue.changeReferencesPriority(filter, (byte) newPriority);
  } finally {
    blockOnIO();
  }
}
public void send(final ServerMessage message, final boolean direct) throws Exception {
  // A large message may come from a StompSession directly, in which case the id header
  // has already been generated.
  if (!message.isLargeMessage()) {
    long id = storageManager.generateUniqueID();

    message.setMessageID(id);
    message.encodeMessageIDToBuffer();
  }

  SimpleString address = message.getAddress();

  if (defaultAddress == null && address != null) {
    defaultAddress = address;
  }

  if (address == null) {
    if (message.isDurable()) {
      // We need to force a re-encode when the message gets persisted, otherwise when it gets
      // reloaded it will have no address
      message.setAddress(defaultAddress);
    } else {
      // We don't want to force a re-encode when the message gets sent to the consumer
      message.setAddressTransient(defaultAddress);
    }
  }

  if (isTrace) {
    HornetQServerLogger.LOGGER.trace(
        "send(message=" + message + ", direct=" + direct + ") being called");
  }

  if (message.getAddress() == null) {
    // This could happen with some tests that are ignoring messages
    throw HornetQMessageBundle.BUNDLE.noAddress();
  }

  if (message.getAddress().equals(managementAddress)) {
    // It's a management message
    handleManagementMessage(message, direct);
  } else {
    doSend(message, direct);
  }
}
public void acknowledge(final long consumerID, final long messageID) throws Exception {
  ServerConsumer consumer = consumers.get(consumerID);

  if (consumer == null) {
    throw HornetQMessageBundle.BUNDLE.consumerDoesntExist(consumerID);
  }

  if (tx != null && tx.getState() == State.ROLLEDBACK) {
    // JBPAPP-8845 - if we let messages be acked on a rolled-back TX, they would just be stuck
    // in limbo until the server is restarted.
    // The tx has already timed out, so we need to ack and rollback immediately.
    Transaction newTX = newTransaction();
    consumer.acknowledge(autoCommitAcks, newTX, messageID);
    newTX.rollback();
  } else {
    consumer.acknowledge(autoCommitAcks, tx, messageID);
  }
}
public boolean moveMessage(
    final long messageID, final String otherQueueName, final boolean rejectDuplicates)
    throws Exception {
  checkStarted();

  clearIO();
  try {
    Binding binding = postOffice.getBinding(new SimpleString(otherQueueName));

    if (binding == null) {
      throw HornetQMessageBundle.BUNDLE.noQueueFound(otherQueueName);
    }

    return queue.moveReference(messageID, binding.getAddress(), rejectDuplicates);
  } finally {
    blockOnIO();
  }
}
@Override
public void run() {
  while (!closed) {
    try {
      long now = System.currentTimeMillis();

      Set<Object> idsToRemove = new HashSet<Object>();

      for (ConnectionEntry entry : connections.values()) {
        RemotingConnection conn = entry.connection;

        boolean flush = true;

        if (entry.ttl != -1) {
          if (!conn.checkDataReceived()) {
            if (now >= entry.lastCheck + entry.ttl) {
              idsToRemove.add(conn.getID());

              flush = false;
            }
          } else {
            entry.lastCheck = now;
          }
        }

        if (flush) {
          conn.flush();
        }
      }

      for (Object id : idsToRemove) {
        RemotingConnection conn = removeConnection(id);
        if (conn != null) {
          conn.fail(HornetQMessageBundle.BUNDLE.clientExited(conn.getRemoteAddress()));
        }
      }

      if (latch.await(pauseInterval, TimeUnit.MILLISECONDS)) {
        return;
      }
    } catch (Throwable e) {
      HornetQServerLogger.LOGGER.errorOnFailureCheck(e);
    }
  }
}
private synchronized void initialise() throws HornetQException {
  if (readOnly) {
    return;
  }
  try {
    state = STATE.INITIALIZED;
    setThreadPools();

    instantiateLoadBalancingPolicy();

    if (discoveryGroupConfiguration != null) {
      InetAddress groupAddress =
          InetAddress.getByName(discoveryGroupConfiguration.getGroupAddress());

      InetAddress lbAddress;

      if (discoveryGroupConfiguration.getLocalBindAddress() != null) {
        lbAddress = InetAddress.getByName(discoveryGroupConfiguration.getLocalBindAddress());
      } else {
        lbAddress = null;
      }

      discoveryGroup =
          new DiscoveryGroupImpl(
              nodeID,
              discoveryGroupConfiguration.getName(),
              lbAddress,
              groupAddress,
              discoveryGroupConfiguration.getGroupPort(),
              discoveryGroupConfiguration.getRefreshTimeout());

      discoveryGroup.registerListener(this);

      discoveryGroup.start();
    }

    readOnly = true;
  } catch (Exception e) {
    state = null;
    throw HornetQMessageBundle.BUNDLE.failedToInitialiseSessionFactory(e);
  }
}
public void connectionCreated(
    final HornetQComponent component, final Connection connection, final String protocol) {
  if (server == null) {
    throw new IllegalStateException(
        "Unable to create connection, server hasn't finished starting up");
  }

  ProtocolManager pmgr = this.getProtocolManager(protocol);

  if (pmgr == null) {
    throw HornetQMessageBundle.BUNDLE.unknownProtocol(protocol);
  }

  ConnectionEntry entry = pmgr.createConnectionEntry((Acceptor) component, connection);

  if (isTrace) {
    HornetQServerLogger.LOGGER.trace("Connection created " + connection);
  }

  connections.put(connection.getID(), entry);
}
public int moveMessages(
    final String filterStr, final String otherQueueName, final boolean rejectDuplicates)
    throws Exception {
  checkStarted();

  clearIO();
  try {
    Filter filter = FilterImpl.createFilter(filterStr);

    Binding binding = postOffice.getBinding(new SimpleString(otherQueueName));

    if (binding == null) {
      throw HornetQMessageBundle.BUNDLE.noQueueFound(otherQueueName);
    }

    int retValue = queue.moveReferences(filter, binding.getAddress(), rejectDuplicates);

    return retValue;
  } finally {
    blockOnIO();
  }
}
public BindingQueryResult executeBindingQuery(final SimpleString address) throws Exception {
  if (address == null) {
    throw HornetQMessageBundle.BUNDLE.addressIsNull();
  }

  List<SimpleString> names = new ArrayList<SimpleString>();

  // make an exception for the management address (see HORNETQ-29)
  if (address.equals(managementAddress)) {
    return new BindingQueryResult(true, names);
  }

  Bindings bindings = postOffice.getMatchingBindings(address);

  for (Binding binding : bindings.getBindings()) {
    if (binding.getType() == BindingType.LOCAL_QUEUE
        || binding.getType() == BindingType.REMOTE_QUEUE) {
      names.add(binding.getUniqueName());
    }
  }

  return new BindingQueryResult(!names.isEmpty(), names);
}
public ClientSessionFactory createSessionFactory() throws HornetQException {
  assertOpen();

  initialise();

  if (initialConnectors == null && discoveryGroup != null) {
    // Wait for an initial broadcast to give us at least one node in the cluster
    long timeout =
        clusterConnection ? 0 : discoveryGroupConfiguration.getDiscoveryInitialWaitTimeout();
    boolean ok = discoveryGroup.waitForBroadcast(timeout);

    if (!ok) {
      throw HornetQMessageBundle.BUNDLE.connectionTimedOutInInitialBroadcast();
    }
  }

  ClientSessionFactoryInternal factory = null;

  synchronized (this) {
    boolean retry;
    int attempts = 0;
    do {
      retry = false;

      TransportConfiguration tc = selectConnector();
      if (tc == null) {
        throw HornetQMessageBundle.BUNDLE.noTCForSessionFactory();
      }

      // try each factory in the list until we find one which works
      try {
        factory =
            new ClientSessionFactoryImpl(
                this,
                tc,
                callTimeout,
                callFailoverTimeout,
                clientFailureCheckPeriod,
                connectionTTL,
                retryInterval,
                retryIntervalMultiplier,
                maxRetryInterval,
                reconnectAttempts,
                threadPool,
                scheduledThreadPool,
                interceptors);
        try {
          addToConnecting(factory);
          factory.connect(initialConnectAttempts, failoverOnInitialConnection);
        } finally {
          removeFromConnecting(factory);
        }
      } catch (HornetQException e) {
        factory.close();
        factory = null;
        if (e.getType() == HornetQExceptionType.NOT_CONNECTED) {
          attempts++;

          if (topologyArray != null && attempts == topologyArray.length) {
            throw HornetQMessageBundle.BUNDLE.cannotConnectToServers();
          }
          if (topologyArray == null
              && initialConnectors != null
              && attempts == initialConnectors.length) {
            throw HornetQMessageBundle.BUNDLE.cannotConnectToServers();
          }
          retry = true;
        } else {
          throw e;
        }
      }
    } while (retry);

    if (ha || clusterConnection) {
      final long timeout = System.currentTimeMillis() + 30000;
      while (!isClosed() && !receivedTopology && timeout > System.currentTimeMillis()) {
        // Now wait for the topology
        try {
          wait(1000);
        } catch (InterruptedException ignore) {
        }
      }

      if (System.currentTimeMillis() > timeout && !receivedTopology) {
        throw HornetQMessageBundle.BUNDLE.connectionTimedOutOnReceiveTopology(discoveryGroup);
      }
    }

    addFactory(factory);

    return factory;
  }
}
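// Hedged usage sketch: how a client typically ends up in createSessionFactory() above. The
// ServerLocator is obtained through the standard HornetQ client API (HornetQClient,
// TransportConfiguration, NettyConnectorFactory); the class name and session handling here are
// illustrative only, not part of the original file.
public final class ExampleSessionFactoryUsage {
  public static void main(String[] args) throws Exception {
    ServerLocator locator =
        HornetQClient.createServerLocatorWithoutHA(
            new TransportConfiguration(NettyConnectorFactory.class.getName()));
    ClientSessionFactory factory = locator.createSessionFactory(); // may retry or throw as above
    ClientSession session = factory.createSession();
    try {
      // ... produce or consume messages with the session ...
    } finally {
      session.close();
      factory.close();
      locator.close();
    }
  }
}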
public synchronized List<PagedMessage> read(StorageManager storage) throws Exception {
  if (isDebug) {
    HornetQServerLogger.LOGGER.debug(
        "reading page " + this.pageId + " on address = " + storeName);
  }

  if (!file.isOpen()) {
    throw HornetQMessageBundle.BUNDLE.invalidPageIO();
  }

  ArrayList<PagedMessage> messages = new ArrayList<PagedMessage>();

  size.set((int) file.size());

  // Using direct buffer, as described on https://jira.jboss.org/browse/HORNETQ-467
  ByteBuffer directBuffer = storage.allocateDirectBuffer((int) file.size());

  try {
    file.position(0);
    file.read(directBuffer);

    directBuffer.rewind();

    HornetQBuffer fileBuffer = HornetQBuffers.wrappedBuffer(directBuffer);
    fileBuffer.writerIndex(fileBuffer.capacity());

    while (fileBuffer.readable()) {
      final int position = fileBuffer.readerIndex();

      byte byteRead = fileBuffer.readByte();

      if (byteRead == Page.START_BYTE) {
        if (fileBuffer.readerIndex() + DataConstants.SIZE_INT < fileBuffer.capacity()) {
          int messageSize = fileBuffer.readInt();

          int oldPos = fileBuffer.readerIndex();

          if (fileBuffer.readerIndex() + messageSize < fileBuffer.capacity()
              && fileBuffer.getByte(oldPos + messageSize) == Page.END_BYTE) {
            PagedMessage msg = new PagedMessageImpl();
            msg.decode(fileBuffer);

            byte b = fileBuffer.readByte();

            if (b != Page.END_BYTE) {
              // Sanity check: this would only happen if there is a bug on decode or any
              // internal code, as this constraint was already checked above
              throw new IllegalStateException(
                  "Internal error, it wasn't possible to locate END_BYTE " + b);
            }

            msg.initMessage(storage);

            if (isTrace) {
              HornetQServerLogger.LOGGER.trace(
                  "Reading message "
                      + msg
                      + " on pageId="
                      + this.pageId
                      + " for address="
                      + storeName);
            }

            messages.add(msg);
          } else {
            markFileAsSuspect(file.getFileName(), position, messages.size());

            break;
          }
        }
      } else {
        markFileAsSuspect(file.getFileName(), position, messages.size());

        break;
      }
    }
  } finally {
    storage.freeDirectBuffer(directBuffer);
  }

  numberOfMessages.set(messages.size());

  return messages;
}
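// Companion sketch (assumed, not part of the original class): the record layout that the
// read(...) loop above expects for each paged message — START_BYTE, a 4-byte length of the
// encoded message, the encoded PagedMessage body, then END_BYTE. The method name and the 'out'
// buffer are illustrative; only the layout itself is taken from the parsing logic.
private void writePageRecordSketch(final HornetQBuffer out, final PagedMessage message) {
  out.writeByte(Page.START_BYTE);                           // 1-byte record start marker
  int sizePosition = out.writerIndex();
  out.writeInt(0);                                          // length placeholder, patched below
  int bodyStart = out.writerIndex();
  message.encode(out);                                      // encoded PagedMessage body
  out.setInt(sizePosition, out.writerIndex() - bodyStart);  // actual encoded length
  out.writeByte(Page.END_BYTE);                             // 1-byte record end marker
}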
public void acknowledge(final boolean autoCommitAcks, Transaction tx, final long messageID)
    throws Exception {
  if (browseOnly) {
    return;
  }

  // Acknowledge acknowledges all refs delivered by the consumer up to and including the one
  // explicitly acknowledged.

  // We use a transaction here: if the message is not found, we should roll back anything done
  // so far. This could eventually happen on retries during transactions, and we need to make
  // sure we don't ACK things we are not supposed to acknowledge.

  boolean startedTransaction = false;

  if (tx == null || autoCommitAcks) {
    startedTransaction = true;
    tx = new TransactionImpl(storageManager);
  }

  try {
    MessageReference ref;
    do {
      ref = deliveringRefs.poll();

      if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
        HornetQServerLogger.LOGGER.trace(
            "ACKing ref " + ref + " on tx= " + tx + ", consumer=" + this);
      }

      if (ref == null) {
        throw HornetQMessageBundle.BUNDLE.consumerNoReference(
            id, messageID, messageQueue.getName());
      }

      ref.getQueue().acknowledge(tx, ref);
    } while (ref.getMessage().getMessageID() != messageID);

    if (startedTransaction) {
      tx.commit();
    }
  } catch (HornetQException e) {
    if (startedTransaction) {
      tx.rollback();
    } else {
      tx.markAsRollbackOnly(e);
    }
    throw e;
  } catch (Throwable e) {
    HornetQServerLogger.LOGGER.errorAckingMessage((Exception) e);
    HornetQException hqex = new HornetQIllegalStateException(e.getMessage());
    if (startedTransaction) {
      tx.rollback();
    } else {
      tx.markAsRollbackOnly(hqex);
    }
    throw hqex;
  }
}
public synchronized void onMessage(final ClientMessage message) {
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug(
        "ClusterCommunication::Flow record on "
            + clusterConnector
            + " Receiving message "
            + message);
  }
  try {
    // Reset the bindings
    if (message.containsProperty(PostOfficeImpl.HDR_RESET_QUEUE_DATA)) {
      clearBindings();

      firstReset = true;

      return;
    }

    if (!firstReset) {
      return;
    }

    // TODO - optimise this by just passing an int in the header - but the filter needs to be
    // extended to support IN with a list of integers
    SimpleString type = message.getSimpleStringProperty(ManagementHelper.HDR_NOTIFICATION_TYPE);

    NotificationType ntype = NotificationType.valueOf(type.toString());

    switch (ntype) {
      case BINDING_ADDED: {
        doBindingAdded(message);

        break;
      }
      case BINDING_REMOVED: {
        doBindingRemoved(message);

        break;
      }
      case CONSUMER_CREATED: {
        doConsumerCreated(message);

        break;
      }
      case CONSUMER_CLOSED: {
        doConsumerClosed(message);

        break;
      }
      case PROPOSAL: {
        doProposalReceived(message);

        break;
      }
      case PROPOSAL_RESPONSE: {
        doProposalResponseReceived(message);

        break;
      }
      default: {
        throw HornetQMessageBundle.BUNDLE.invalidType(ntype);
      }
    }
  } catch (Exception e) {
    HornetQServerLogger.LOGGER.errorHandlingMessage(e);
  }
}
public synchronized void start() throws Exception {
  if (started) {
    return;
  }

  ClassLoader tccl =
      AccessController.doPrivileged(
          new PrivilegedAction<ClassLoader>() {
            public ClassLoader run() {
              return Thread.currentThread().getContextClassLoader();
            }
          });

  // The remoting service maintains its own thread pool for handling remoting traffic.
  // If OIO, each connection will have its own thread.
  // If NIO, these are capped at nio-remoting-threads, which defaults to num cores * 3.

  // This needs to be a different thread pool to the main thread pool, especially for OIO where
  // we may need to support many hundreds of connections, but the main thread pool must be kept
  // small for better performance.

  ThreadFactory tFactory =
      new HornetQThreadFactory(
          "HornetQ-remoting-threads-" + server.toString() + "-" + System.identityHashCode(this),
          false,
          tccl);

  threadPool = Executors.newCachedThreadPool(tFactory);

  ClassLoader loader = Thread.currentThread().getContextClassLoader();

  for (TransportConfiguration info : acceptorsConfig) {
    try {
      Class<?> clazz = loader.loadClass(info.getFactoryClassName());

      AcceptorFactory factory = (AcceptorFactory) clazz.newInstance();

      // Check valid properties
      if (info.getParams() != null) {
        Set<String> invalid =
            ConfigurationHelper.checkKeys(
                factory.getAllowableProperties(), info.getParams().keySet());

        if (!invalid.isEmpty()) {
          HornetQServerLogger.LOGGER.invalidAcceptorKeys(
              ConfigurationHelper.stringSetToCommaListString(invalid));

          continue;
        }
      }

      String protocol =
          ConfigurationHelper.getStringProperty(
              TransportConstants.PROTOCOL_PROP_NAME,
              TransportConstants.DEFAULT_PROTOCOL,
              info.getParams());

      ProtocolManager manager = protocolMap.get(protocol);
      if (manager == null) {
        throw HornetQMessageBundle.BUNDLE.noProtocolManagerFound(protocol);
      }

      ClusterConnection clusterConnection = lookupClusterConnection(info);

      Acceptor acceptor =
          factory.createAcceptor(
              clusterConnection,
              info.getParams(),
              new DelegatingBufferHandler(),
              manager,
              this,
              threadPool,
              scheduledThreadPool,
              manager);

      if (defaultInvmSecurityPrincipal != null && acceptor.isUnsecurable()) {
        acceptor.setDefaultHornetQPrincipal(defaultInvmSecurityPrincipal);
      }

      acceptors.add(acceptor);

      if (managementService != null) {
        acceptor.setNotificationService(managementService);

        managementService.registerAcceptor(acceptor, info);
      }
    } catch (Exception e) {
      HornetQServerLogger.LOGGER.errorCreatingAcceptor(e, info.getFactoryClassName());
    }
  }

  for (Acceptor a : acceptors) {
    a.start();
  }

  // This thread checks connections that need to be closed, and also flushes confirmations
  failureCheckAndFlushThread =
      new FailureCheckAndFlushThread(RemotingServiceImpl.CONNECTION_TTL_CHECK_INTERVAL);

  failureCheckAndFlushThread.start();

  started = true;
}
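// Hedged example (not part of the original class): the kind of TransportConfiguration entry
// that acceptorsConfig above typically holds — an acceptor factory class name plus a parameter
// map whose keys must appear in factory.getAllowableProperties(). The host and port values are
// placeholders, and the helper method itself is illustrative.
private static TransportConfiguration exampleNettyAcceptorConfig() {
  Map<String, Object> params = new HashMap<String, Object>();
  params.put(TransportConstants.HOST_PROP_NAME, "0.0.0.0");
  params.put(TransportConstants.PORT_PROP_NAME, 5445);
  return new TransportConfiguration(NettyAcceptorFactory.class.getName(), params);
}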
public void createConsumer(
    final long consumerID,
    final SimpleString queueName,
    final SimpleString filterString,
    final boolean browseOnly,
    final boolean supportLargeMessage,
    final Integer credits)
    throws Exception {
  Binding binding = postOffice.getBinding(queueName);

  if (binding == null || binding.getType() != BindingType.LOCAL_QUEUE) {
    throw HornetQMessageBundle.BUNDLE.noSuchQueue(queueName);
  }

  securityStore.check(binding.getAddress(), CheckType.CONSUME, this);

  Filter filter = FilterImpl.createFilter(filterString);

  ServerConsumer consumer =
      new ServerConsumerImpl(
          consumerID,
          this,
          (QueueBinding) binding,
          filter,
          started,
          browseOnly,
          storageManager,
          callback,
          preAcknowledge,
          strictUpdateDeliveryCount,
          managementService,
          supportLargeMessage,
          credits);

  consumers.put(consumer.getID(), consumer);

  if (!browseOnly) {
    TypedProperties props = new TypedProperties();

    props.putSimpleStringProperty(ManagementHelper.HDR_ADDRESS, binding.getAddress());
    props.putSimpleStringProperty(ManagementHelper.HDR_CLUSTER_NAME, binding.getClusterName());
    props.putSimpleStringProperty(ManagementHelper.HDR_ROUTING_NAME, binding.getRoutingName());
    props.putIntProperty(ManagementHelper.HDR_DISTANCE, binding.getDistance());

    Queue theQueue = (Queue) binding.getBindable();
    props.putIntProperty(ManagementHelper.HDR_CONSUMER_COUNT, theQueue.getConsumerCount());

    // HORNETQ-946
    props.putSimpleStringProperty(
        ManagementHelper.HDR_USER, SimpleString.toSimpleString(username));

    props.putSimpleStringProperty(
        ManagementHelper.HDR_REMOTE_ADDRESS,
        SimpleString.toSimpleString(this.remotingConnection.getRemoteAddress()));

    props.putSimpleStringProperty(
        ManagementHelper.HDR_SESSION_NAME, SimpleString.toSimpleString(name));

    if (filterString != null) {
      props.putSimpleStringProperty(ManagementHelper.HDR_FILTERSTRING, filterString);
    }

    Notification notification = new Notification(null, CONSUMER_CREATED, props);

    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug(
          "Session with user="
              + username
              + ", connection="
              + this.remotingConnection
              + " created a consumer on queue "
              + queueName
              + ", filter = "
              + filterString);
    }

    managementService.sendNotification(notification);
  }
}
public ClientSessionFactory connect() throws HornetQException {
  assertOpen();

  initialise();

  ClientSessionFactory csf = null;

  createConnectors();

  try {
    int retryNumber = 0;
    while (csf == null && !isClosed()) {
      retryNumber++;
      for (Connector conn : connectors) {
        if (HornetQLogger.LOGGER.isDebugEnabled()) {
          HornetQLogger.LOGGER.debug(this + "::Submitting connect towards " + conn);
        }

        csf = conn.tryConnect();

        if (csf != null) {
          csf.getConnection()
              .addFailureListener(
                  new FailureListener() {
                    // In case the node where the cluster connection was connected is gone, we
                    // need to restart the connection
                    public void connectionFailed(
                        HornetQException exception, boolean failedOver) {
                      if (clusterConnection
                          && exception.getType() == HornetQExceptionType.DISCONNECTED) {
                        try {
                          ServerLocatorImpl.this.start(startExecutor);
                        } catch (Exception e) {
                          // There isn't much to be done if this happens here
                          HornetQLogger.LOGGER.errorStartingLocator(e);
                        }
                      }
                    }

                    @Override
                    public String toString() {
                      return "FailureListener('restarts cluster connections')";
                    }
                  });

          if (HornetQLogger.LOGGER.isDebugEnabled()) {
            HornetQLogger.LOGGER.debug(
                "Returning "
                    + csf
                    + " after "
                    + retryNumber
                    + " retries on StaticConnector "
                    + ServerLocatorImpl.this);
          }

          return csf;
        }
      }

      if (initialConnectAttempts >= 0 && retryNumber > initialConnectAttempts) {
        break;
      }

      if (!isClosed()) {
        Thread.sleep(retryInterval);
      }
    }
  } catch (RejectedExecutionException e) {
    HornetQLogger.LOGGER.debug("Rejected execution", e);
    throw e;
  } catch (Exception e) {
    HornetQLogger.LOGGER.errorConnectingToNodes(e);
    throw HornetQMessageBundle.BUNDLE.cannotConnectToStaticConnectors(e);
  }

  if (!isClosed()) {
    HornetQLogger.LOGGER.errorConnectingToNodes(e);
    throw HornetQMessageBundle.BUNDLE.cannotConnectToStaticConnectors2();
  }

  return null;
}