/** This method is synchronized because we want it to be atomic with the cursors being used */ private long checkMinPage(Collection<PageSubscription> cursorList) { long minPage = Long.MAX_VALUE; for (PageSubscription cursor : cursorList) { long firstPage = cursor.getFirstPage(); if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( this.pagingStore.getAddress() + " has a cursor " + cursor + " with first page=" + firstPage); } // the cursor will return -1 if the cursor is empty if (firstPage >= 0 && firstPage < minPage) { minPage = firstPage; } } if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug(this.pagingStore.getAddress() + " has minPage=" + minPage); } return minPage; }
/**
 * Stops the remoting service: pauses acceptors so no new connections arrive, disconnects all
 * live connections, stops and clears acceptors, then shuts down the thread pool.
 *
 * @param criticalError true when stopping due to a critical failure; skips waiting for the
 *     thread pool to terminate
 * @throws Exception propagated from closing/stopping the underlying components
 */
public void stop(final boolean criticalError) throws Exception {
  if (!started) {
    return;
  }
  failureCheckAndFlushThread.close(criticalError);
  // We need to stop them accepting first so no new connections are accepted after we send the
  // disconnect message
  for (Acceptor acceptor : acceptors) {
    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug("Pausing acceptor " + acceptor);
    }
    acceptor.pause();
  }
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug("Sending disconnect on live connections");
  }
  // snapshot the live connections so disconnecting does not race with map mutation
  HashSet<ConnectionEntry> connectionEntries = new HashSet<ConnectionEntry>(connections.values());
  // Now we ensure that no connections will process any more packets after this method is complete
  // then send a disconnect packet
  for (ConnectionEntry entry : connectionEntries) {
    RemotingConnection conn = entry.connection;
    if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
      HornetQServerLogger.LOGGER.trace("Sending connection.disconnection packet to " + conn);
    }
    conn.disconnect(criticalError);
  }
  for (Acceptor acceptor : acceptors) {
    acceptor.stop();
  }
  acceptors.clear();
  connections.clear();
  if (managementService != null) {
    managementService.unregisterAcceptors();
  }
  threadPool.shutdown();
  if (!criticalError) {
    // allow in-flight tasks up to 10s to drain before logging a timeout warning
    boolean ok = threadPool.awaitTermination(10000, TimeUnit.MILLISECONDS);
    if (!ok) {
      HornetQServerLogger.LOGGER.timeoutRemotingThreadPool();
    }
  }
  started = false;
}
public void nodeDown(final long eventUID, final String nodeID) { if (stopping) { return; } if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( this + " receiving nodeDown for nodeID=" + nodeID, new Exception("trace")); } if (nodeID.equals(nodeManager.getNodeId().toString())) { return; } // Remove the flow record for that node MessageFlowRecord record = records.remove(nodeID); if (record != null) { try { if (isTrace) { HornetQServerLogger.LOGGER.trace("Closing clustering record " + record); } record.close(); } catch (Exception e) { HornetQServerLogger.LOGGER.errorClosingFlowRecord(e); } } }
/**
 * Flags this transaction as rollback-only and records the exception that caused it.
 *
 * @param exception1 the failure that forced the rollback-only state
 */
public void markAsRollbackOnly(final HornetQException exception1) {
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug(
        "Marking Transaction " + this.id + " as rollback only");
  }
  // record the cause first, then flip the state; the two writes are independent
  this.exception = exception1;
  state = State.ROLLBACK_ONLY;
}
/**
 * Stops this cluster connection: detaches the topology listener, closes all message flow
 * records, emits a CLUSTER_CONNECTION_STOPPED management notification, and asynchronously
 * closes the server locator.
 *
 * @throws Exception propagated from the management notification
 */
public void stop() throws Exception {
  if (!started) {
    return;
  }
  stopping = true;
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug(this + "::stopping ClusterConnection");
  }
  if (serverLocator != null) {
    serverLocator.removeClusterTopologyListener(this);
  }
  // FIX: guard the string-building debug call like everywhere else in this class, and add the
  // missing space after "node" in the message text
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug(
        "Cluster connection being stopped for node "
            + nodeManager.getNodeId()
            + ", server = "
            + this.server
            + " serverLocator = "
            + serverLocator);
  }
  synchronized (this) {
    for (MessageFlowRecord record : records.values()) {
      try {
        record.close();
      } catch (Exception ignore) {
        // best-effort close; a failing record must not prevent shutdown
      }
    }
  }
  if (managementService != null) {
    TypedProperties props = new TypedProperties();
    props.putSimpleStringProperty(new SimpleString("name"), name);
    Notification notification =
        new Notification(
            nodeManager.getNodeId().toString(),
            NotificationType.CLUSTER_CONNECTION_STOPPED,
            props);
    managementService.sendNotification(notification);
  }
  // release the locator off the calling thread; synchronized to serialize with activate()/stop()
  executor.execute(
      new Runnable() {
        public void run() {
          synchronized (ClusterConnectionImpl.this) {
            closeLocator(serverLocator);
            serverLocator = null;
          }
        }
      });
  started = false;
}
public void receiveCredits(final int credits) throws Exception { if (credits == -1) { if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( this + ":: FlowControl::Received disable flow control message"); } // No flow control availableCredits = null; // There may be messages already in the queue promptDelivery(); } else if (credits == 0) { // reset, used on slow consumers HornetQServerLogger.LOGGER.debug( this + ":: FlowControl::Received reset flow control message"); availableCredits.set(0); } else { int previous = availableCredits.getAndAdd(credits); if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( this + "::FlowControl::Received " + credits + " credits, previous value = " + previous + " currentValue = " + availableCredits.get()); } if (previous <= 0 && previous + credits > 0) { if (HornetQServerLogger.LOGGER.isTraceEnabled()) { HornetQServerLogger.LOGGER.trace( this + "::calling promptDelivery from receiving credits"); } promptDelivery(); } } }
/**
 * Creates a server locator over the configured static transport configurations.
 *
 * @return a cluster-enabled locator for {@code tcConfigs}, or {@code null} when no static
 *     connectors are configured
 */
public ServerLocatorInternal createServerLocator() {
  if (tcConfigs != null && tcConfigs.length > 0) {
    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      // FIX: added the missing separator space between the connection object and the text
      HornetQServerLogger.LOGGER.debug(
          ClusterConnectionImpl.this
              + " Creating a serverLocator for "
              + Arrays.toString(tcConfigs));
    }
    ServerLocatorImpl locator = new ServerLocatorImpl(topology, true, tcConfigs);
    locator.setClusterConnection(true);
    return locator;
  }
  return null;
}
public void createQueue( final SimpleString address, final SimpleString name, final SimpleString filterString, final boolean temporary, final boolean durable) throws Exception { if (durable) { // make sure the user has privileges to create this queue securityStore.check(address, CheckType.CREATE_DURABLE_QUEUE, this); } else { securityStore.check(address, CheckType.CREATE_NON_DURABLE_QUEUE, this); } Queue queue = server.createQueue(address, name, filterString, durable, temporary); if (temporary) { // Temporary queue in core simply means the queue will be deleted if // the remoting connection // dies. It does not mean it will get deleted automatically when the // session is closed. // It is up to the user to delete the queue when finished with it TempQueueCleanerUpper cleaner = new TempQueueCleanerUpper(server, name); remotingConnection.addCloseListener(cleaner); remotingConnection.addFailureListener(cleaner); tempQueueCleannerUppers.put(name, cleaner); } if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( "Queue " + name + " created on address " + name + " with filter=" + filterString + " temporary = " + temporary + " durable=" + durable + " on session user="******", connection=" + this.remotingConnection); } }
private void run() { try { if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug("deleting temporary queue " + bindingName); } try { server.destroyQueue(bindingName, null, false); } catch (HornetQException e) { // that's fine.. it can happen due to queue already been deleted HornetQServerLogger.LOGGER.debug(e.getMessage(), e); } } catch (Exception e) { HornetQServerLogger.LOGGER.errorRemovingTempQueue(e, bindingName); } }
/**
 * Topology callback: records an announced cluster member, either as a backup or as a live
 * member.
 *
 * @param uniqueEventID event id used to order topology updates
 * @param nodeID id of the announced node
 * @param nodeName symbolic name of the announced node
 * @param connectorPair live (A) and backup (B) transport configurations
 * @param backup true when the announcement concerns a backup server
 */
public void nodeAnnounced(
    final long uniqueEventID,
    final String nodeID,
    final String nodeName,
    final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
    final boolean backup) {
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug(
        this + "::NodeAnnounced, backup=" + backup + nodeID + connectorPair);
  }
  final TransportConfiguration liveConnector = connectorPair.getA();
  final TransportConfiguration backupConnector = connectorPair.getB();
  TopologyMemberImpl newMember =
      new TopologyMemberImpl(nodeID, nodeName, liveConnector, backupConnector);
  newMember.setUniqueEventID(uniqueEventID);
  if (backup) {
    // NOTE(review): a fresh member (without the unique event id) is passed to updateBackup,
    // mirroring the original behavior exactly — confirm this is intentional
    topology.updateBackup(
        new TopologyMemberImpl(nodeID, nodeName, liveConnector, backupConnector));
  } else {
    topology.updateMember(uniqueEventID, nodeID, newMember);
  }
}
/**
 * Registers a new page subscription (cursor) for this address.
 *
 * @param cursorID unique id of the cursor
 * @param filter optional message filter for the subscription
 * @param persistent whether the subscription state is persisted
 * @return the newly created subscription
 * @throws IllegalStateException when a cursor with the same id already exists
 */
public synchronized PageSubscription createSubscription(
    long cursorID, Filter filter, boolean persistent) {
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug(
        this.pagingStore.getAddress()
            + " creating subscription "
            + cursorID
            + " with filter "
            + filter,
        new Exception("trace"));
  }
  if (activeCursors.containsKey(cursorID)) {
    throw new IllegalStateException("Cursor " + cursorID + " had already been created");
  }
  final PageSubscription subscription =
      new PageSubscriptionImpl(
          this, pagingStore, storageManager, executor, filter, cursorID, persistent);
  activeCursors.put(cursorID, subscription);
  return subscription;
}
/**
 * Offers a message reference to this consumer.
 *
 * <p>Returns {@code HandleStatus.BUSY} when the consumer cannot take the message right now (no
 * credits, stopped/transferring, or a large message is still being delivered),
 * {@code HandleStatus.NO_MATCH} when the consumer's filter rejects it, and
 * {@code HandleStatus.HANDLED} when the reference is accepted — in which case the delivery read
 * lock is held by this thread on return.
 *
 * @param ref the message reference offered to this consumer
 * @return the handling outcome as described above
 * @throws Exception propagated from filter matching, storage, or acknowledge operations
 */
public HandleStatus handle(final MessageReference ref) throws Exception {
  // fast-path rejection when flow control is active and no credits remain
  if (availableCredits != null && availableCredits.get() <= 0) {
    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug(
          this
              + " is busy for the lack of credits. Current credits = "
              + availableCredits
              + " Can't receive reference "
              + ref);
    }
    return HandleStatus.BUSY;
  }
  // TODO - https://jira.jboss.org/browse/HORNETQ-533
  // if (!writeReady.get())
  // {
  // return HandleStatus.BUSY;
  // }
  synchronized (lock) {
    // If the consumer is stopped then we don't accept the message, it
    // should go back into the
    // queue for delivery later.
    if (!started || transferring) {
      return HandleStatus.BUSY;
    }
    // If there is a pendingLargeMessage we can't take another message
    // This has to be checked inside the lock as the set to null is done inside the lock
    if (largeMessageDeliverer != null) {
      if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
        HornetQServerLogger.LOGGER.debug(
            this
                + " is busy delivering large message "
                + largeMessageDeliverer
                + ", can't deliver reference "
                + ref);
      }
      return HandleStatus.BUSY;
    }
    final ServerMessage message = ref.getMessage();
    if (filter != null && !filter.match(message)) {
      if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
        HornetQServerLogger.LOGGER.trace(
            "Reference " + ref + " is a noMatch on consumer " + this);
      }
      return HandleStatus.NO_MATCH;
    }
    if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
      HornetQServerLogger.LOGGER.trace("Handling reference " + ref);
    }
    if (!browseOnly) {
      // with pre-ack there is no pending-delivery bookkeeping to do
      if (!preAcknowledge) {
        deliveringRefs.add(ref);
      }
      ref.handled();
      ref.incrementDeliveryCount();
      // If updateDeliveries = false (set by strict-update),
      // the updateDeliveryCount would still be updated after c
      // NOTE(review): the original comment above is truncated ("after c...") — presumably
      // "after cancellation"; confirm against project history
      if (strictUpdateDeliveryCount && !ref.isPaged()) {
        if (ref.getMessage().isDurable()
            && ref.getQueue().isDurable()
            && !ref.getQueue().isInternalQueue()
            && !ref.isPaged()) {
          storageManager.updateDeliveryCount(ref);
        }
      }
      if (preAcknowledge) {
        if (message.isLargeMessage()) {
          // we must hold one reference, or the file will be deleted before it could be delivered
          ((LargeServerMessage) message).incrementDelayDeletionCount();
        }
        // With pre-ack, we ack *before* sending to the client
        ref.getQueue().acknowledge(ref);
      }
    }
    if (message.isLargeMessage()) {
      // remember the in-flight large message; subsequent handle() calls return BUSY until done
      largeMessageDeliverer = new LargeMessageDeliverer((LargeServerMessage) message, ref);
    }
    // acquired here, released by the delivery path once the message has been written out
    lockDelivery.readLock().lock();
    return HandleStatus.HANDLED;
  }
}
private void createNewRecord( final long eventUID, final String targetNodeID, final TransportConfiguration connector, final SimpleString queueName, final Queue queue, final boolean start) throws Exception { String nodeId; synchronized (this) { if (!started) { return; } if (serverLocator == null) { return; } nodeId = serverLocator.getNodeID(); } final ServerLocatorInternal targetLocator = new ServerLocatorImpl(topology, true, connector); targetLocator.setReconnectAttempts(0); targetLocator.setInitialConnectAttempts(0); targetLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod); targetLocator.setConnectionTTL(connectionTTL); targetLocator.setInitialConnectAttempts(0); targetLocator.setConfirmationWindowSize(confirmationWindowSize); targetLocator.setBlockOnDurableSend(!useDuplicateDetection); targetLocator.setBlockOnNonDurableSend(!useDuplicateDetection); targetLocator.setRetryInterval(retryInterval); targetLocator.setMaxRetryInterval(maxRetryInterval); targetLocator.setRetryIntervalMultiplier(retryIntervalMultiplier); targetLocator.setMinLargeMessageSize(minLargeMessageSize); // No producer flow control on the bridges, as we don't want to lock the queues targetLocator.setProducerWindowSize(-1); targetLocator.setAfterConnectionInternalListener(this); targetLocator.setNodeID(nodeId); targetLocator.setClusterTransportConfiguration( serverLocator.getClusterTransportConfiguration()); if (retryInterval > 0) { targetLocator.setRetryInterval(retryInterval); } targetLocator.disableFinalizeCheck(); targetLocator.addIncomingInterceptor( new IncomingInterceptorLookingForExceptionMessage(manager, executorFactory.getExecutor())); MessageFlowRecordImpl record = new MessageFlowRecordImpl( targetLocator, eventUID, targetNodeID, connector, queueName, queue); ClusterConnectionBridge bridge = new ClusterConnectionBridge( this, manager, targetLocator, serverLocator, reconnectAttempts, retryInterval, retryIntervalMultiplier, maxRetryInterval, nodeManager.getUUID(), 
record.getEventUID(), record.getTargetNodeID(), record.getQueueName(), record.getQueue(), executorFactory.getExecutor(), null, null, scheduledExecutor, null, useDuplicateDetection, clusterUser, clusterPassword, server.getStorageManager(), managementService.getManagementAddress(), managementService.getManagementNotificationAddress(), record, record.getConnector()); targetLocator.setIdentity( "(Cluster-connection-bridge::" + bridge.toString() + "::" + this.toString() + ")"); if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( "creating record between " + this.connector + " and " + connector + bridge); } record.setBridge(bridge); records.put(targetNodeID, record); if (start) { bridge.start(); } }
@Override public void nodeUP(final TopologyMember topologyMember, final boolean last) { if (stopping) { return; } final String nodeID = topologyMember.getNodeId(); if (HornetQServerLogger.LOGGER.isDebugEnabled()) { String ClusterTestBase = "receiving nodeUP for nodeID="; HornetQServerLogger.LOGGER.debug( this + ClusterTestBase + nodeID + " connectionPair=" + topologyMember); } // discard notifications about ourselves unless its from our backup if (nodeID.equals(nodeManager.getNodeId().toString())) { if (HornetQServerLogger.LOGGER.isTraceEnabled()) { HornetQServerLogger.LOGGER.trace( this + "::informing about backup to itself, nodeUUID=" + nodeManager.getNodeId() + ", connectorPair=" + topologyMember + ", this = " + this); } return; } // if the node is more than 1 hop away, we do not create a bridge for direct cluster connection if (allowDirectConnectionsOnly && !allowableConnections.contains(topologyMember.getLive())) { return; } // FIXME required to prevent cluster connections w/o discovery group // and empty static connectors to create bridges... ulgy! if (serverLocator == null) { return; } /*we don't create bridges to backups*/ if (topologyMember.getLive() == null) { if (isTrace) { HornetQServerLogger.LOGGER.trace( this + " ignoring call with nodeID=" + nodeID + ", topologyMember=" + topologyMember + ", last=" + last); } return; } synchronized (recordsGuard) { try { MessageFlowRecord record = records.get(nodeID); if (record == null) { if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( this + "::Creating record for nodeID=" + nodeID + ", topologyMember=" + topologyMember); } // New node - create a new flow record final SimpleString queueName = new SimpleString("sf." + name + "." 
+ nodeID); Binding queueBinding = postOffice.getBinding(queueName); Queue queue; if (queueBinding != null) { queue = (Queue) queueBinding.getBindable(); } else { // Add binding in storage so the queue will get reloaded on startup and we can find it - // it's never // actually routed to at that address though queue = server.createQueue(queueName, queueName, null, true, false); } createNewRecord( topologyMember.getUniqueEventID(), nodeID, topologyMember.getLive(), queueName, queue, true); } else { if (isTrace) { HornetQServerLogger.LOGGER.trace( this + " ignored nodeUp record for " + topologyMember + " on nodeID=" + nodeID + " as the record already existed"); } } } catch (Exception e) { HornetQServerLogger.LOGGER.errorUpdatingTopology(e); } } }
/**
 * Activates this cluster connection: announces this node as live, creates and configures the
 * main server locator (when connectors are available), and emits a CLUSTER_CONNECTION_STARTED
 * management notification.
 *
 * @throws Exception propagated from locator startup or notification sending
 */
private synchronized void activate() throws Exception {
  if (!started) {
    return;
  }
  if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
    HornetQServerLogger.LOGGER.debug(
        "Activating cluster connection nodeID="
            + nodeManager.getNodeId()
            + " for server="
            + this.server);
  }
  // announce this node as live to the cluster before wiring up the locator
  liveNotifier = new LiveNotifier();
  liveNotifier.updateAsLive();
  liveNotifier.schedule();
  serverLocator = clusterConnector.createServerLocator();
  if (serverLocator != null) {
    if (!useDuplicateDetection) {
      HornetQServerLogger.LOGGER.debug(
          "DuplicateDetection is disabled, sending clustered messages blocked");
    }
    final TopologyMember currentMember = topology.getMember(manager.getNodeId());
    if (currentMember == null) {
      // sanity check only
      throw new IllegalStateException(
          "InternalError! The ClusterConnection doesn't know about its own node = " + this);
    }
    serverLocator.setNodeID(nodeManager.getNodeId().toString());
    serverLocator.setIdentity("(main-ClusterConnection::" + server.toString() + ")");
    serverLocator.setReconnectAttempts(0);
    serverLocator.setClusterConnection(true);
    serverLocator.setClusterTransportConfiguration(connector);
    // keep trying the initial connection forever
    serverLocator.setInitialConnectAttempts(-1);
    serverLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod);
    serverLocator.setConnectionTTL(connectionTTL);
    serverLocator.setConfirmationWindowSize(confirmationWindowSize);
    // if not using duplicate detection, we will send blocked
    serverLocator.setBlockOnDurableSend(!useDuplicateDetection);
    serverLocator.setBlockOnNonDurableSend(!useDuplicateDetection);
    serverLocator.setCallTimeout(callTimeout);
    serverLocator.setCallFailoverTimeout(callFailoverTimeout);
    // No producer flow control on the bridges, as we don't want to lock the queues
    serverLocator.setProducerWindowSize(-1);
    if (retryInterval > 0) {
      this.serverLocator.setRetryInterval(retryInterval);
    }
    addClusterTopologyListener(this);
    serverLocator.setAfterConnectionInternalListener(this);
    serverLocator.start(server.getExecutorFactory().getExecutor());
  }
  if (managementService != null) {
    TypedProperties props = new TypedProperties();
    props.putSimpleStringProperty(new SimpleString("name"), name);
    Notification notification =
        new Notification(
            nodeManager.getNodeId().toString(),
            NotificationType.CLUSTER_CONNECTION_STARTED,
            props);
    HornetQServerLogger.LOGGER.debug("sending notification: " + notification);
    managementService.sendNotification(notification);
  }
}
/**
 * A single page file of the paging subsystem: a sequence of framed, encoded messages on disk.
 *
 * <p>On-disk record layout: {@code START_BYTE ('{') | int encodedLength | message | END_BYTE
 * ('}')}. Truncated or corrupt records mark the page "suspicious"; such pages are renamed rather
 * than deleted on {@link #delete(PagedMessage[])}.
 *
 * @author <a href="mailto:[email protected]">Clebert Suconic</a>
 */
public final class Page implements Comparable<Page> {
  // Constants -----------------------------------------------------

  private static final boolean isTrace = HornetQServerLogger.LOGGER.isTraceEnabled();

  private static final boolean isDebug = HornetQServerLogger.LOGGER.isDebugEnabled();

  // framing overhead per record: start marker + length int + end marker
  public static final int SIZE_RECORD =
      DataConstants.SIZE_BYTE + DataConstants.SIZE_INT + DataConstants.SIZE_BYTE;

  private static final byte START_BYTE = (byte) '{';

  private static final byte END_BYTE = (byte) '}';

  // Attributes ----------------------------------------------------

  private final int pageId;

  // set when a corrupt/truncated record is found; delete() then renames instead of deleting
  private boolean suspiciousRecords = false;

  private final AtomicInteger numberOfMessages = new AtomicInteger(0);

  private final SequentialFile file;

  private final SequentialFileFactory fileFactory;

  /** The page cache that will be filled with data as we write more data */
  private volatile LivePageCache pageCache;

  // current byte size of the page file, kept in sync on open()/read()/write()
  private final AtomicInteger size = new AtomicInteger(0);

  private final StorageManager storageManager;

  private final SimpleString storeName;

  public Page(
      final SimpleString storeName,
      final StorageManager storageManager,
      final SequentialFileFactory factory,
      final SequentialFile file,
      final int pageId)
      throws Exception {
    this.pageId = pageId;
    this.file = file;
    fileFactory = factory;
    this.storageManager = storageManager;
    this.storeName = storeName;
  }

  public int getPageId() {
    return pageId;
  }

  public void setLiveCache(LivePageCache pageCache) {
    this.pageCache = pageCache;
  }

  /**
   * Reads and decodes every intact message record from the page file.
   *
   * <p>On the first malformed record the file is marked suspect and parsing stops, returning the
   * messages decoded so far.
   *
   * @param storage used to allocate/free the direct read buffer and to init decoded messages
   * @return the decoded messages, in file order
   * @throws Exception when the file is not open or reading/decoding fails
   */
  public synchronized List<PagedMessage> read(StorageManager storage) throws Exception {
    if (isDebug) {
      HornetQServerLogger.LOGGER.debug(
          "reading page " + this.pageId + " on address = " + storeName);
    }
    if (!file.isOpen()) {
      throw HornetQMessageBundle.BUNDLE.invalidPageIO();
    }
    ArrayList<PagedMessage> messages = new ArrayList<PagedMessage>();
    size.set((int) file.size());
    // Using direct buffer, as described on https://jira.jboss.org/browse/HORNETQ-467
    ByteBuffer directBuffer = storage.allocateDirectBuffer((int) file.size());
    try {
      file.position(0);
      file.read(directBuffer);
      directBuffer.rewind();
      HornetQBuffer fileBuffer = HornetQBuffers.wrappedBuffer(directBuffer);
      fileBuffer.writerIndex(fileBuffer.capacity());
      while (fileBuffer.readable()) {
        final int position = fileBuffer.readerIndex();
        byte byteRead = fileBuffer.readByte();
        if (byteRead == Page.START_BYTE) {
          // only proceed when the length int fits inside the remaining buffer
          if (fileBuffer.readerIndex() + DataConstants.SIZE_INT < fileBuffer.capacity()) {
            int messageSize = fileBuffer.readInt();
            int oldPos = fileBuffer.readerIndex();
            // the record is valid only if the body fits and is terminated by END_BYTE
            if (fileBuffer.readerIndex() + messageSize < fileBuffer.capacity()
                && fileBuffer.getByte(oldPos + messageSize) == Page.END_BYTE) {
              PagedMessage msg = new PagedMessageImpl();
              msg.decode(fileBuffer);
              byte b = fileBuffer.readByte();
              if (b != Page.END_BYTE) {
                // Sanity Check: This would only happen if there is a bug on decode or any internal
                // code, as
                // this
                // constraint was already checked
                throw new IllegalStateException(
                    "Internal error, it wasn't possible to locate END_BYTE " + b);
              }
              msg.initMessage(storage);
              if (isTrace) {
                HornetQServerLogger.LOGGER.trace(
                    "Reading message "
                        + msg
                        + " on pageId="
                        + this.pageId
                        + " for address="
                        + storeName);
              }
              messages.add(msg);
            } else {
              markFileAsSuspect(file.getFileName(), position, messages.size());
              break;
            }
          }
        } else {
          markFileAsSuspect(file.getFileName(), position, messages.size());
          break;
        }
      }
    } finally {
      storage.freeDirectBuffer(directBuffer);
    }
    numberOfMessages.set(messages.size());
    return messages;
  }

  /**
   * Appends one framed message record to the page file and mirrors it into the live page cache
   * when present. Silently ignored when the file is closed.
   *
   * @param message the message to append
   * @throws Exception propagated from encoding or the file write
   */
  public synchronized void write(final PagedMessage message) throws Exception {
    if (!file.isOpen()) {
      return;
    }
    ByteBuffer buffer = fileFactory.newBuffer(message.getEncodeSize() + Page.SIZE_RECORD);
    HornetQBuffer wrap = HornetQBuffers.wrappedBuffer(buffer);
    wrap.clear();
    wrap.writeByte(Page.START_BYTE);
    // length placeholder, patched below once the encoded size is known
    wrap.writeInt(0);
    int startIndex = wrap.writerIndex();
    message.encode(wrap);
    int endIndex = wrap.writerIndex();
    wrap.setInt(1, endIndex - startIndex); // The encoded length
    wrap.writeByte(Page.END_BYTE);
    buffer.rewind();
    file.writeDirect(buffer, false);
    if (pageCache != null) {
      pageCache.addLiveMessage(message);
    }
    numberOfMessages.incrementAndGet();
    size.addAndGet(buffer.limit());
    storageManager.pageWrite(message, pageId);
  }

  public void sync() throws Exception {
    file.sync();
  }

  public void open() throws Exception {
    if (!file.isOpen()) {
      file.open();
    }
    size.set((int) file.size());
    file.position(0);
  }

  public synchronized void close() throws Exception {
    if (storageManager != null) {
      storageManager.pageClosed(storeName, pageId);
    }
    if (pageCache != null) {
      pageCache.close();
      // leave it to the soft cache to decide when to release it now
      pageCache = null;
    }
    file.close();
  }

  /** A page is "live" while it still has an attached live page cache. */
  public boolean isLive() {
    return pageCache != null;
  }

  /**
   * Deletes (or, when suspicious records were found, renames) the page file, first releasing the
   * delay-deletion reference held on any large messages contained in it.
   *
   * @param messages the page's messages, or null when unknown
   * @return true on success, false when the file operation failed (logged, not thrown)
   * @throws Exception propagated from the storage-manager callback
   */
  public boolean delete(final PagedMessage[] messages) throws Exception {
    if (storageManager != null) {
      storageManager.pageDeleted(storeName, pageId);
    }
    if (isDebug) {
      HornetQServerLogger.LOGGER.debug("Deleting pageId=" + pageId + " on store " + storeName);
    }
    if (messages != null) {
      for (PagedMessage msg : messages) {
        if (msg.getMessage().isLargeMessage()) {
          LargeServerMessage lmsg = (LargeServerMessage) msg.getMessage();
          // Remember, cannot call delete directly here
          // Because the large-message may be linked to another message
          // or it may still being delivered even though it has been acked already
          lmsg.decrementDelayDeletionCount();
        }
      }
    }
    try {
      if (suspiciousRecords) {
        // keep suspect pages around (renamed) for post-mortem instead of destroying them
        HornetQServerLogger.LOGGER.pageInvalid(file.getFileName(), file.getFileName());
        file.renameTo(file.getFileName() + ".invalidPage");
      } else {
        file.delete();
      }
      return true;
    } catch (Exception e) {
      HornetQServerLogger.LOGGER.pageDeleteError(e);
      return false;
    }
  }

  public int getNumberOfMessages() {
    return numberOfMessages.intValue();
  }

  public int getSize() {
    return size.intValue();
  }

  @Override
  public String toString() {
    return "Page::pageID=" + this.pageId + ", file=" + this.file;
  }

  // NOTE(review): sorts in descending pageId order via int subtraction; subtraction can overflow
  // for extreme ids and the direction looks inverted — confirm callers rely on this ordering
  // before changing it
  public int compareTo(Page otherPage) {
    return otherPage.getPageId() - this.pageId;
  }

  @Override
  protected void finalize() {
    // last-resort close of the underlying file; errors are only logged
    try {
      if (file != null && file.isOpen()) {
        file.close();
      }
    } catch (Exception e) {
      HornetQServerLogger.LOGGER.pageFinaliseError(e);
    }
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + pageId;
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (obj == null) return false;
    if (getClass() != obj.getClass()) return false;
    Page other = (Page) obj;
    if (pageId != other.pageId) return false;
    return true;
  }

  /**
   * Marks this page as containing a corrupt/truncated record and logs the position.
   *
   * @param position
   * @param msgNumber
   */
  private void markFileAsSuspect(final String fileName, final int position, final int msgNumber) {
    HornetQServerLogger.LOGGER.pageSuspectFile(fileName, position, msgNumber);
    suspiciousRecords = true;
  }

  public SequentialFile getFile() {
    return file;
  }
}
public synchronized void onMessage(final ClientMessage message) { if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( "ClusterCommunication::Flow record on " + clusterConnector + " Receiving message " + message); } try { // Reset the bindings if (message.containsProperty(PostOfficeImpl.HDR_RESET_QUEUE_DATA)) { clearBindings(); firstReset = true; return; } if (!firstReset) { return; } // TODO - optimised this by just passing int in header - but filter needs to be extended to // support IN with // a list of integers SimpleString type = message.getSimpleStringProperty(ManagementHelper.HDR_NOTIFICATION_TYPE); NotificationType ntype = NotificationType.valueOf(type.toString()); switch (ntype) { case BINDING_ADDED: { doBindingAdded(message); break; } case BINDING_REMOVED: { doBindingRemoved(message); break; } case CONSUMER_CREATED: { doConsumerCreated(message); break; } case CONSUMER_CLOSED: { doConsumerClosed(message); break; } case PROPOSAL: { doProposalReceived(message); break; } case PROPOSAL_RESPONSE: { doProposalResponseReceived(message); break; } default: { throw HornetQMessageBundle.BUNDLE.invalidType(ntype); } } } catch (Exception e) { HornetQServerLogger.LOGGER.errorHandlingMessage(e); } }
/**
 * Cleans up fully-consumed page files for this address: computes the minimum page any cursor
 * still needs, depages everything below it, leaves page mode when possible, and finally deletes
 * the depaged files (releasing any pending large-message references first).
 */
public void cleanup() {
  ArrayList<Page> depagedPages = new ArrayList<Page>();
  // spin on the store lock in 100ms slices so a stopping store can bail out promptly
  while (true) {
    if (pagingStore.lock(100)) {
      break;
    }
    if (!pagingStore.isStarted()) return;
  }
  synchronized (this) {
    try {
      if (!pagingStore.isStarted()) {
        return;
      }
      if (pagingStore.getNumberOfPages() == 0) {
        return;
      }
      if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
        HornetQServerLogger.LOGGER.debug(
            "Asserting cleanup for address " + this.pagingStore.getAddress());
      }
      ArrayList<PageSubscription> cursorList = cloneSubscriptions();
      long minPage = checkMinPage(cursorList);
      // if the current page is being written...
      // on that case we need to move to verify it in a different way
      if (minPage == pagingStore.getCurrentWritingPage()
          && pagingStore.getCurrentPage().getNumberOfMessages() > 0) {
        boolean complete = true;
        // every cursor must have fully consumed the current page before we can leave page mode
        for (PageSubscription cursor : cursorList) {
          if (!cursor.isComplete(minPage)) {
            if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
              HornetQServerLogger.LOGGER.debug(
                  "Cursor " + cursor + " was considered incomplete at page " + minPage);
            }
            complete = false;
            break;
          } else {
            if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
              HornetQServerLogger.LOGGER.debug(
                  "Cursor " + cursor + "was considered **complete** at page " + minPage);
            }
          }
        }
        if (!pagingStore.isStarted()) {
          return;
        }
        // All the pages on the cursor are complete.. so we will cleanup everything and store a
        // bookmark
        if (complete) {
          if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
            HornetQServerLogger.LOGGER.debug(
                "Address "
                    + pagingStore.getAddress()
                    + " is leaving page mode as all messages are consumed and acknowledged from the page store");
          }
          // roll to a fresh page so the consumed one can be depaged, then bookmark and stop
          pagingStore.forceAnotherPage();
          Page currentPage = pagingStore.getCurrentPage();
          storeBookmark(cursorList, currentPage);
          pagingStore.stopPaging();
        }
      }
      // collect every page below the minimum any cursor still needs
      for (long i = pagingStore.getFirstPage(); i < minPage; i++) {
        Page page = pagingStore.depage();
        if (page == null) {
          break;
        }
        depagedPages.add(page);
      }
      if (pagingStore.getNumberOfPages() == 0
          || pagingStore.getNumberOfPages() == 1
              && pagingStore.getCurrentPage().getNumberOfMessages() == 0) {
        pagingStore.stopPaging();
      } else {
        if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
          HornetQServerLogger.LOGGER.trace(
              "Couldn't cleanup page on address "
                  + this.pagingStore.getAddress()
                  + " as numberOfPages == "
                  + pagingStore.getNumberOfPages()
                  + " and currentPage.numberOfMessages = "
                  + pagingStore.getCurrentPage().getNumberOfMessages());
        }
      }
    } catch (Exception ex) {
      HornetQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
      return;
    } finally {
      pagingStore.unlock();
    }
  }
  // delete the depaged files outside the store lock
  try {
    for (Page depagedPage : depagedPages) {
      PageCache cache;
      PagedMessage[] pgdMessages;
      synchronized (softCache) {
        cache = softCache.get((long) depagedPage.getPageId());
      }
      if (isTrace) {
        HornetQServerLogger.LOGGER.trace(
            "Removing page " + depagedPage.getPageId() + " from page-cache");
      }
      if (cache == null) {
        // The page is not on cache any more
        // We need to read the page-file before deleting it
        // to make sure we remove any large-messages pending
        storageManager.beforePageRead();
        List<PagedMessage> pgdMessagesList = null;
        try {
          depagedPage.open();
          pgdMessagesList = depagedPage.read(storageManager);
        } finally {
          try {
            depagedPage.close();
          } catch (Exception e) {
          }
          storageManager.afterPageRead();
        }
        depagedPage.close();
        pgdMessages = pgdMessagesList.toArray(new PagedMessage[pgdMessagesList.size()]);
      } else {
        pgdMessages = cache.getMessages();
      }
      depagedPage.delete(pgdMessages);
      onDeletePage(depagedPage);
      synchronized (softCache) {
        softCache.remove((long) depagedPage.getPageId());
      }
    }
  } catch (Exception ex) {
    HornetQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
    return;
  }
}
/**
 * Validates a cluster-connection configuration and, if valid, instantiates the
 * {@link ClusterConnectionImpl}, registers it with the management service and records it in
 * {@code clusterConnections}. Invalid or duplicate configurations are logged and silently
 * skipped (the method returns without throwing), so one bad entry does not abort deployment
 * of the remaining cluster connections.
 *
 * @param config the cluster connection configuration to deploy; must carry a unique name, a
 *     forward address and the name of a locally-defined connector
 * @throws Exception propagated from connector/discovery resolution or connection construction
 */
private void deployClusterConnection(final ClusterConnectionConfiguration config)
    throws Exception {
  // --- validation: each failure is logged and the config is skipped, not fatal ---
  if (config.getName() == null) {
    HornetQServerLogger.LOGGER.clusterConnectionNotUnique();
    return;
  }
  if (config.getAddress() == null) {
    HornetQServerLogger.LOGGER.clusterConnectionNoForwardAddress();
    return;
  }
  TransportConfiguration connector =
      configuration.getConnectorConfigurations().get(config.getConnectorName());
  if (connector == null) {
    HornetQServerLogger.LOGGER.clusterConnectionNoConnector(config.getConnectorName());
    return;
  }
  if (clusterConnections.containsKey(config.getName())) {
    // FIX: the duplicate key is the cluster-connection NAME (that is what the containsKey
    // check above tests), so report config.getName() — previously this logged the connector
    // name, which made the warning point at the wrong identifier.
    HornetQServerLogger.LOGGER.clusterConnectionAlreadyExists(config.getName());
    return;
  }

  ClusterConnectionImpl clusterConnection;
  DiscoveryGroupConfiguration dg;

  // Two construction paths: discovery-group based topology vs. a static connector list.
  // NOTE(review): the two constructor calls share an identical 28-argument tail; they are
  // kept inline because ClusterConnectionImpl only differs in its second parameter type
  // (DiscoveryGroupConfiguration vs. TransportConfiguration[]).
  if (config.getDiscoveryGroupName() != null) {
    dg = configuration.getDiscoveryGroupConfigurations().get(config.getDiscoveryGroupName());
    if (dg == null) {
      HornetQServerLogger.LOGGER.clusterConnectionNoDiscoveryGroup(
          config.getDiscoveryGroupName());
      return;
    }
    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug(
          this
              + " Starting a Discovery Group Cluster Connection, name="
              + config.getDiscoveryGroupName()
              + ", dg="
              + dg);
    }
    clusterConnection =
        new ClusterConnectionImpl(
            this,
            dg,
            connector,
            new SimpleString(config.getName()),
            new SimpleString(config.getAddress()),
            config.getMinLargeMessageSize(),
            config.getClientFailureCheckPeriod(),
            config.getConnectionTTL(),
            config.getRetryInterval(),
            config.getRetryIntervalMultiplier(),
            config.getMaxRetryInterval(),
            config.getReconnectAttempts(),
            config.getCallTimeout(),
            config.getCallFailoverTimeout(),
            config.isDuplicateDetection(),
            config.isForwardWhenNoConsumers(),
            config.getConfirmationWindowSize(),
            executorFactory,
            threadPool,
            server,
            postOffice,
            managementService,
            scheduledExecutor,
            config.getMaxHops(),
            nodeManager,
            backup,
            server.getConfiguration().getClusterUser(),
            server.getConfiguration().getClusterPassword(),
            config.isAllowDirectConnectionsOnly(),
            config.getClusterNotificationInterval(),
            config.getClusterNotificationAttempts());
  } else {
    // A null static-connector list is allowed: the connection then starts with no
    // pre-configured peers.
    TransportConfiguration[] tcConfigs =
        config.getStaticConnectors() != null
            ? connectorNameListToArray(config.getStaticConnectors())
            : null;
    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug(
          this + " defining cluster connection towards " + Arrays.toString(tcConfigs));
    }
    clusterConnection =
        new ClusterConnectionImpl(
            this,
            tcConfigs,
            connector,
            new SimpleString(config.getName()),
            new SimpleString(config.getAddress()),
            config.getMinLargeMessageSize(),
            config.getClientFailureCheckPeriod(),
            config.getConnectionTTL(),
            config.getRetryInterval(),
            config.getRetryIntervalMultiplier(),
            config.getMaxRetryInterval(),
            config.getReconnectAttempts(),
            config.getCallTimeout(),
            config.getCallFailoverTimeout(),
            config.isDuplicateDetection(),
            config.isForwardWhenNoConsumers(),
            config.getConfirmationWindowSize(),
            executorFactory,
            threadPool,
            server,
            postOffice,
            managementService,
            scheduledExecutor,
            config.getMaxHops(),
            nodeManager,
            backup,
            server.getConfiguration().getClusterUser(),
            server.getConfiguration().getClusterPassword(),
            config.isAllowDirectConnectionsOnly(),
            config.getClusterNotificationInterval(),
            config.getClusterNotificationAttempts());
  }

  // The first successfully deployed connection becomes the default.
  if (defaultClusterConnection == null) {
    defaultClusterConnection = clusterConnection;
  }

  managementService.registerCluster(clusterConnection, config);
  clusterConnections.put(config.getName(), clusterConnection);

  if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
    // The throwaway Exception is intentional: it captures a stack trace showing who
    // triggered the deployment.
    HornetQServerLogger.LOGGER.trace(
        "ClusterConnection.start at " + clusterConnection, new Exception("trace"));
  }
}
public void createConsumer( final long consumerID, final SimpleString queueName, final SimpleString filterString, final boolean browseOnly, final boolean supportLargeMessage, final Integer credits) throws Exception { Binding binding = postOffice.getBinding(queueName); if (binding == null || binding.getType() != BindingType.LOCAL_QUEUE) { throw HornetQMessageBundle.BUNDLE.noSuchQueue(queueName); } securityStore.check(binding.getAddress(), CheckType.CONSUME, this); Filter filter = FilterImpl.createFilter(filterString); ServerConsumer consumer = new ServerConsumerImpl( consumerID, this, (QueueBinding) binding, filter, started, browseOnly, storageManager, callback, preAcknowledge, strictUpdateDeliveryCount, managementService, supportLargeMessage, credits); consumers.put(consumer.getID(), consumer); if (!browseOnly) { TypedProperties props = new TypedProperties(); props.putSimpleStringProperty(ManagementHelper.HDR_ADDRESS, binding.getAddress()); props.putSimpleStringProperty(ManagementHelper.HDR_CLUSTER_NAME, binding.getClusterName()); props.putSimpleStringProperty(ManagementHelper.HDR_ROUTING_NAME, binding.getRoutingName()); props.putIntProperty(ManagementHelper.HDR_DISTANCE, binding.getDistance()); Queue theQueue = (Queue) binding.getBindable(); props.putIntProperty(ManagementHelper.HDR_CONSUMER_COUNT, theQueue.getConsumerCount()); // HORNETQ-946 props.putSimpleStringProperty( ManagementHelper.HDR_USER, SimpleString.toSimpleString(username)); props.putSimpleStringProperty( ManagementHelper.HDR_REMOTE_ADDRESS, SimpleString.toSimpleString(this.remotingConnection.getRemoteAddress())); props.putSimpleStringProperty( ManagementHelper.HDR_SESSION_NAME, SimpleString.toSimpleString(name)); if (filterString != null) { props.putSimpleStringProperty(ManagementHelper.HDR_FILTERSTRING, filterString); } Notification notification = new Notification(null, CONSUMER_CREATED, props); if (HornetQServerLogger.LOGGER.isDebugEnabled()) { HornetQServerLogger.LOGGER.debug( "Session with 
user="******", connection=" + this.remotingConnection + " created a consumer on queue " + queueName + ", filter = " + filterString); } managementService.sendNotification(notification); } }