/** {@inheritDoc} */
@Override
public int deliverMessageToSubscriptions(MessageDeliveryInfo messageDeliveryInfo) throws AndesException {
    Set<DeliverableAndesMetadata> messages = messageDeliveryInfo.getReadButUndeliveredMessages();
    int sentMessageCount = 0;
    Iterator<DeliverableAndesMetadata> iterator = messages.iterator();
    List<DeliverableAndesMetadata> droppedTopicMessagesList = new ArrayList<>();

    /*
     * Get all relevant types of subscriptions. This call does NOT return hierarchical
     * subscriptions for the destination. Messages are duplicated for each distinct subscribed
     * destination. For durable topic subscriptions this returns the queue subscription bound to
     * the unique queue derived from the subscription id.
     */
    Collection<LocalSubscription> subscriptions = subscriptionEngine.getActiveLocalSubscribers(
            messageDeliveryInfo.getDestination(),
            messageDeliveryInfo.getProtocolType(),
            messageDeliveryInfo.getDestinationType());

    while (iterator.hasNext()) {
        try {
            DeliverableAndesMetadata message = iterator.next();

            // All subscription filtering logic for topics goes here
            List<LocalSubscription> subscriptionsToDeliver = new ArrayList<>();

            for (LocalSubscription subscription : subscriptions) {
                /*
                 * Skip durable topic subscriptions here because they receive messages via the
                 * queue path. The arrival time of the message also matters: only topic
                 * subscribers that existed before this message was published should receive it.
                 */
                if (subscription.isDurable() || (subscription.getSubscribeTime() > message.getArrivalTime())) {
                    continue;
                }

                // Skip the subscriber if its selector does not match the message
                if (!subscription.isMessageAcceptedBySelector(message)) {
                    continue;
                }

                subscriptionsToDeliver.add(subscription);
            }

            if (subscriptionsToDeliver.isEmpty()) {
                iterator.remove(); // remove from the buffer
                droppedTopicMessagesList.add(message);
                continue; // skip this iteration; there are no subscriptions for the message
            }

            /*
             * For normal non-durable topics we pre-evaluate room for all subscribers and send
             * only if every subscriber can accept the message. This means delivery runs at the
             * speed of the slowest subscriber (to prevent OOM). If one subscriber is slow enough
             * to hold the others back, make it a durable topic subscriber instead.
             */
            boolean allTopicSubscriptionsHasRoom = true;
            for (LocalSubscription subscription : subscriptionsToDeliver) {
                if (!subscription.hasRoomToAcceptMessages()) {
                    allTopicSubscriptionsHasRoom = false;
                    break;
                }
            }

            if (allTopicSubscriptionsHasRoom) {
                message.markAsScheduledToDeliver(subscriptionsToDeliver);

                // Schedule the message to all matched subscribers
                for (LocalSubscription localSubscription : subscriptionsToDeliver) {
                    MessageFlusher.getInstance().deliverMessageAsynchronously(localSubscription, message);
                }

                iterator.remove();
                if (log.isDebugEnabled()) {
                    log.debug("Removing scheduled-to-send message from buffer. MsgId= "
                            + message.getMessageID());
                }
                sentMessageCount++;
            } else {
                if (log.isDebugEnabled()) {
                    log.debug("Some subscriptions for destination " + messageDeliveryInfo.getDestination()
                            + " have reached max unacked messages " + message.getDestination());
                }
                // If we continue, message order will break
                break;
            }
        } catch (NoSuchElementException ex) {
            /*
             * This exception can occur because the iterator of a ConcurrentSkipListSet reflects
             * an at-the-time snapshot: some records may be deleted by the time the iterator
             * reaches them. This can only happen at the tail of the collection, not in the
             * middle, and it would cause the loop to blindly check a batch of deleted records.
             * Break here so the send flusher can re-trigger the loop.
             */
            log.warn("NoSuchElementException thrown.", ex);
            break;
        }
    }

    /*
     * There is no orphaned-slot scenario here (no need to return the slot). If there are no
     * subscribers, the slot is consumed and the metadata is removed. Topic messages are
     * duplicated per node.
     */
    /*
     * Delete topic messages that were dropped because there were no subscriptions or no room to
     * enqueue them. The delete call blocks, and the slot message count is then decremented in
     * order.
     */
    MessagingEngine.getInstance().deleteMessages(droppedTopicMessagesList);

    return sentMessageCount;
}
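/*
 * A minimal sketch (not part of the Andes codebase) of the all-or-nothing room check used in
 * deliverMessageToSubscriptions above: a message is scheduled only when every matched subscriber
 * can accept it, so delivery runs at the pace of the slowest subscriber and message order is
 * preserved. LocalSubscription#hasRoomToAcceptMessages() is the real call used above; the helper
 * name and its use in isolation are illustrative assumptions.
 */
private static boolean allSubscribersHaveRoom(List<LocalSubscription> matchedSubscriptions) {
    for (LocalSubscription subscription : matchedSubscriptions) {
        if (!subscription.hasRoomToAcceptMessages()) {
            // One slow subscriber holds back the whole batch; the caller breaks out of the
            // delivery loop instead of skipping the message, which would break ordering.
            return false;
        }
    }
    return true;
}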
public void run() {
    SubscriptionStore subscriptionStore = MessagingEngine.getInstance().getSubscriptionStore();
    int queueWorkerWaitTime =
            ClusterResourceHolder.getInstance().getClusterConfiguration().getQueueWorkerInterval();
    int repeatedSleepingCounter = 0;
    Random random = new Random(); // hoisted out of the loop; one instance is enough
    while (true) {
        if (running) {
            try {
                /*
                 * Steps:
                 * 1) Poll the global queue and get a chunk of messages.
                 * 2) Put the messages one by one into node queues and delete them.
                 */
                QueueAddress sourceQueueAddress =
                        new QueueAddress(QueueAddress.QueueType.GLOBAL_QUEUE, globalQueueName);
                List<AndesMessageMetadata> messageList = messageStore.getNextNMessageMetadataFromQueue(
                        sourceQueueAddress, lastProcessedMessageID, messageCountToReadFromCasssandra);

                while (!messageList.isEmpty()) {
                    Iterator<AndesMessageMetadata> metadataIterator = messageList.iterator();
                    while (metadataIterator.hasNext()) {
                        AndesMessageMetadata metadata = metadataIterator.next();

                        /*
                         * Check whether the cluster has any subscriptions for this message and
                         * distribute it to the relevant node queues.
                         */
                        String destinationQueue = metadata.getDestination();
                        // TODO remove this list-to-set conversion
                        List<String> nodeQueuesHavingSubscriptionsForQueue = new ArrayList<String>(
                                subscriptionStore.getNodeQueuesHavingSubscriptionsForQueue(destinationQueue));
                        if (!nodeQueuesHavingSubscriptionsForQueue.isEmpty()) {
                            // Pick one node queue at random to spread load across the cluster
                            int index = random.nextInt(nodeQueuesHavingSubscriptionsForQueue.size());
                            String nodeQueue = nodeQueuesHavingSubscriptionsForQueue.get(index);
                            metadata.queueAddress =
                                    new QueueAddress(QueueAddress.QueueType.QUEUE_NODE_QUEUE, nodeQueue);
                            if (log.isDebugEnabled()) {
                                String msgID = (String) metadata.getMessageHeader("msgID");
                                log.debug("TRACING>> GQW " + globalQueueName + ">> copying message-"
                                        + (msgID == null ? "" : msgID) + " to " + nodeQueue
                                        + " message ID: " + metadata.getMessageID());
                            }
                        } else {
                            // There is no node queue to move the message to, so skip it
                            metadataIterator.remove();
                        }
                        lastProcessedMessageID = metadata.getMessageID();
                    }
                    messageStore.moveMessageMetaData(sourceQueueAddress, null, messageList);
                    PerformanceCounter.recordGlobalQueueMsgMove(messageList.size());
                    messageList = messageStore.getNextNMessageMetadataFromQueue(
                            sourceQueueAddress, lastProcessedMessageID, messageCountToReadFromCasssandra);
                }

                try {
                    Thread.sleep(queueWorkerWaitTime);
                    repeatedSleepingCounter++;
                    if (repeatedSleepingCounter > 1) {
                        resetMessageReading();
                    }
                } catch (InterruptedException e) {
                    // ignore
                }
            } catch (Exception e) {
                log.error("Error in moving messages from global queue to node queue", e);
            }
        } else {
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                // silently ignore
            }
        }
    }
}
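/*
 * A minimal sketch (illustrative, not part of the worker above) of the random node-queue
 * selection performed in run(): each message is copied to one node queue chosen uniformly at
 * random from the queues that have subscribers, which balances load across the cluster without
 * tracking per-node state. The helper name and the passed-in Random instance are assumptions
 * made for this example.
 */
private static String pickNodeQueue(List<String> candidateNodeQueues, Random random) {
    if (candidateNodeQueues.isEmpty()) {
        return null; // the caller drops the message when no node has a matching subscription
    }
    return candidateNodeQueues.get(random.nextInt(candidateNodeQueues.size()));
}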