private void doRegisterConsumer(String name, MessageChannel moduleInputChannel, Queue queue,
		RabbitPropertiesAccessor properties, boolean isPubSub) {
	// Fix for XD-2503
	// Temporarily override the thread context classloader with the one where the
	// SimpleMessageListenerContainer is defined.
	// This allows the proxying that happens while initializing the
	// SimpleMessageListenerContainer to work correctly.
	ClassLoader originalClassloader = Thread.currentThread().getContextClassLoader();
	try {
		ClassUtils.overrideThreadContextClassLoader(SimpleMessageListenerContainer.class.getClassLoader());
		SimpleMessageListenerContainer listenerContainer =
				new SimpleMessageListenerContainer(this.connectionFactory);
		listenerContainer.setAcknowledgeMode(properties.getAcknowledgeMode(this.defaultAcknowledgeMode));
		listenerContainer.setChannelTransacted(properties.getTransacted(this.defaultChannelTransacted));
		listenerContainer.setDefaultRequeueRejected(
				properties.getRequeueRejected(this.defaultDefaultRequeueRejected));
		if (!isPubSub) {
			// Concurrency only applies to point-to-point consumers; clamp to at least one consumer.
			int concurrency = properties.getConcurrency(this.defaultConcurrency);
			concurrency = concurrency > 0 ? concurrency : 1;
			listenerContainer.setConcurrentConsumers(concurrency);
			int maxConcurrency = properties.getMaxConcurrency(this.defaultMaxConcurrency);
			if (maxConcurrency > concurrency) {
				listenerContainer.setMaxConcurrentConsumers(maxConcurrency);
			}
		}
		listenerContainer.setPrefetchCount(properties.getPrefetchCount(this.defaultPrefetchCount));
		listenerContainer.setTxSize(properties.getTxSize(this.defaultTxSize));
		listenerContainer.setTaskExecutor(new SimpleAsyncTaskExecutor(queue.getName() + "-"));
		listenerContainer.setQueues(queue);
		int maxAttempts = properties.getMaxAttempts(this.defaultMaxAttempts);
		if (maxAttempts > 1 || properties.getRepublishToDLQ(this.defaultRepublishToDLQ)) {
			// Add a stateless retry interceptor when retries or DLQ republishing are enabled.
			RetryOperationsInterceptor retryInterceptor = RetryInterceptorBuilder.stateless()
					.maxAttempts(maxAttempts)
					.backOffOptions(properties.getBackOffInitialInterval(this.defaultBackOffInitialInterval),
							properties.getBackOffMultiplier(this.defaultBackOffMultiplier),
							properties.getBackOffMaxInterval(this.defaultBackOffMaxInterval))
					.recoverer(determineRecoverer(name, properties))
					.build();
			listenerContainer.setAdviceChain(new Advice[] { retryInterceptor });
		}
		listenerContainer.setAfterReceivePostProcessors(this.decompressingPostProcessor);
		listenerContainer.setMessagePropertiesConverter(
				RabbitMessageChannelBinder.inboundMessagePropertiesConverter);
		listenerContainer.afterPropertiesSet();
		AmqpInboundChannelAdapter adapter = new AmqpInboundChannelAdapter(listenerContainer);
		adapter.setBeanFactory(this.getBeanFactory());
		DirectChannel bridgeToModuleChannel = new DirectChannel();
		bridgeToModuleChannel.setBeanFactory(this.getBeanFactory());
		bridgeToModuleChannel.setBeanName(name + ".bridge");
		adapter.setOutputChannel(bridgeToModuleChannel);
		adapter.setBeanName("inbound." + name);
		DefaultAmqpHeaderMapper mapper = new DefaultAmqpHeaderMapper();
		mapper.setRequestHeaderNames(properties.getRequestHeaderPattens(this.defaultRequestHeaderPatterns));
		mapper.setReplyHeaderNames(properties.getReplyHeaderPattens(this.defaultReplyHeaderPatterns));
		adapter.setHeaderMapper(mapper);
		adapter.afterPropertiesSet();
		Binding consumerBinding = Binding.forConsumer(name, adapter, moduleInputChannel, properties);
		addBinding(consumerBinding);
		ReceivingHandler convertingBridge = new ReceivingHandler();
		convertingBridge.setOutputChannel(moduleInputChannel);
		convertingBridge.setBeanName(name + ".convert.bridge");
		convertingBridge.afterPropertiesSet();
		bridgeToModuleChannel.subscribe(convertingBridge);
		consumerBinding.start();
	}
	finally {
		Thread.currentThread().setContextClassLoader(originalClassloader);
	}
}
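/*
 * A minimal standalone sketch (not part of the binder) of what the stateless retry advice
 * configured above does: redelivery attempts are spaced by an exponential back-off of
 * initialInterval * multiplier^n, capped at maxInterval, and after maxAttempts the configured
 * recoverer (chosen by determineRecoverer, e.g. republishing to a DLQ) takes over. The numeric
 * values below are hypothetical stand-ins for what RabbitPropertiesAccessor would supply.
 */
import org.aopalliance.aop.Advice;
import org.springframework.amqp.rabbit.config.RetryInterceptorBuilder;
import org.springframework.retry.interceptor.RetryOperationsInterceptor;

public class RetryAdviceSketch {

	public static Advice[] buildAdviceChain() {
		// Assumed example values: 3 attempts, 1s initial back-off, x2 multiplier, 10s cap.
		RetryOperationsInterceptor retryInterceptor = RetryInterceptorBuilder.stateless()
				.maxAttempts(3)
				.backOffOptions(1000, 2.0, 10000)
				.build();
		return new Advice[] { retryInterceptor };
	}

}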
private Binding<MessageChannel> createKafkaConsumer(String name, final MessageChannel moduleInputChannel,
		Properties properties, String group, long referencePoint) {
	validateConsumerProperties(groupedName(name, group), properties, SUPPORTED_CONSUMER_PROPERTIES);
	KafkaPropertiesAccessor accessor = new KafkaPropertiesAccessor(properties);
	int maxConcurrency = accessor.getConcurrency(defaultConcurrency);
	String topic = escapeTopicName(name);
	int numPartitions = accessor.getNumberOfKafkaPartitionsForConsumer();
	Collection<Partition> allPartitions = ensureTopicCreated(topic, numPartitions, defaultReplicationFactor);
	Decoder<byte[]> valueDecoder = new DefaultDecoder(null);
	Decoder<byte[]> keyDecoder = new DefaultDecoder(null);
	Collection<Partition> listenedPartitions;
	int moduleCount = accessor.getCount();
	if (moduleCount == 1) {
		// A single module instance listens to all partitions.
		listenedPartitions = allPartitions;
	}
	else {
		// Divide partitions across module instances.
		listenedPartitions = new ArrayList<Partition>();
		for (Partition partition : allPartitions) {
			if (accessor.getPartitionIndex() != -1) {
				if ((partition.getId() % moduleCount) == accessor.getPartitionIndex()) {
					listenedPartitions.add(partition);
				}
			}
			else {
				int moduleSequence = accessor.getSequence();
				if (moduleCount == 0) {
					throw new IllegalArgumentException(
							"The Kafka transport does not support 0-count modules");
				}
				else {
					// module sequence numbers are 1-based, so convert to a zero-based index
					if ((partition.getId() % moduleCount) == (moduleSequence - 1)) {
						listenedPartitions.add(partition);
					}
				}
			}
		}
	}
	ReceivingHandler rh = new ReceivingHandler();
	rh.setOutputChannel(moduleInputChannel);
	final FixedSubscriberChannel bridge = new FixedSubscriberChannel(rh);
	bridge.setBeanName("bridge." + name);
	final KafkaMessageListenerContainer messageListenerContainer =
			createMessageListenerContainer(accessor, group, maxConcurrency, listenedPartitions, referencePoint);
	final KafkaMessageDrivenChannelAdapter kafkaMessageDrivenChannelAdapter =
			new KafkaMessageDrivenChannelAdapter(messageListenerContainer);
	kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
	kafkaMessageDrivenChannelAdapter.setKeyDecoder(keyDecoder);
	kafkaMessageDrivenChannelAdapter.setPayloadDecoder(valueDecoder);
	kafkaMessageDrivenChannelAdapter.setOutputChannel(bridge);
	kafkaMessageDrivenChannelAdapter.setAutoCommitOffset(
			accessor.getDefaultAutoCommitEnabled(this.defaultAutoCommitEnabled));
	kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
	kafkaMessageDrivenChannelAdapter.start();
	EventDrivenConsumer edc = new EventDrivenConsumer(bridge, rh) {

		@Override
		protected void doStop() {
			// Stop the channel adapter and dispose of the offset manager before unbinding,
			// so the upstream channel adapter has a chance to stop first.
			kafkaMessageDrivenChannelAdapter.stop();
			if (messageListenerContainer.getOffsetManager() instanceof DisposableBean) {
				try {
					((DisposableBean) messageListenerContainer.getOffsetManager()).destroy();
				}
				catch (Exception e) {
					logger.error("Error while closing the offset manager", e);
				}
			}
			super.doStop();
		}

	};
	String groupedName = groupedName(name, group);
	edc.setBeanName("inbound." + groupedName);
	Binding<MessageChannel> consumerBinding =
			Binding.forConsumer(name, group, edc, moduleInputChannel, accessor);
	addBinding(consumerBinding);
	consumerBinding.start();
	return consumerBinding;
}
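/*
 * A minimal standalone sketch (not part of the binder) of the partition-assignment rule used
 * above: when no explicit partitionIndex is configured, a module instance with 1-based sequence
 * s out of moduleCount instances listens to every partition p where p % moduleCount == s - 1.
 * The partition and instance counts below are hypothetical illustration values.
 */
public class PartitionAssignmentSketch {

	public static void main(String[] args) {
		int numPartitions = 8;
		int moduleCount = 3;
		for (int sequence = 1; sequence <= moduleCount; sequence++) {
			StringBuilder assigned = new StringBuilder();
			for (int partitionId = 0; partitionId < numPartitions; partitionId++) {
				// Same modulo test as in createKafkaConsumer, with the 1-based sequence shifted to zero-based.
				if (partitionId % moduleCount == sequence - 1) {
					assigned.append(partitionId).append(' ');
				}
			}
			System.out.println("module " + sequence + " of " + moduleCount
					+ " -> partitions: " + assigned.toString().trim());
		}
	}

}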