private Message<?> getAMessageFrom(KafkaMessageDrivenChannelAdapter adapter, Method toMessage) throws Exception {
	// Mock the Kafka metadata carried with each record: the topic/partition
	// it came from, its offset, and the next offset to fetch.
	KafkaMessageMetadata meta = mock(KafkaMessageMetadata.class);
	Partition partition = mock(Partition.class);
	when(partition.getTopic()).thenReturn("topic");
	when(partition.getId()).thenReturn(42);
	when(meta.getPartition()).thenReturn(partition);
	when(meta.getOffset()).thenReturn(1L);
	when(meta.getNextOffset()).thenReturn(2L);
	Acknowledgment ack = mock(Acknowledgment.class);
	// Invoke the adapter's private conversion method reflectively with
	// key "foo" and payload "bar".
	return (Message<?>) toMessage.invoke(adapter, "foo", "bar", meta, ack);
}
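// A minimal usage sketch for the helper above. It assumes the private
// "toMessage" method on KafkaMessageDrivenChannelAdapter takes
// (Object key, Object payload, KafkaMessageMetadata, Acknowledgment) --
// the name and parameter types are inferred from the invocation above,
// not a confirmed API -- and that Spring's ReflectionUtils is on the
// classpath. The method name exampleToMessageLookup is hypothetical.
private void exampleToMessageLookup(KafkaMessageDrivenChannelAdapter adapter) throws Exception {
	Method toMessage = ReflectionUtils.findMethod(KafkaMessageDrivenChannelAdapter.class, "toMessage",
			Object.class, Object.class, KafkaMessageMetadata.class, Acknowledgment.class);
	ReflectionUtils.makeAccessible(toMessage);
	Message<?> message = getAMessageFrom(adapter, toMessage);
	// the payload should carry the value passed above ("bar")
	assertEquals("bar", message.getPayload());
}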
private Binding<MessageChannel> createKafkaConsumer(String name, final MessageChannel moduleInputChannel,
		Properties properties, String group, long referencePoint) {
	validateConsumerProperties(groupedName(name, group), properties, SUPPORTED_CONSUMER_PROPERTIES);
	KafkaPropertiesAccessor accessor = new KafkaPropertiesAccessor(properties);
	int maxConcurrency = accessor.getConcurrency(defaultConcurrency);
	String topic = escapeTopicName(name);
	int numPartitions = accessor.getNumberOfKafkaPartitionsForConsumer();
	Collection<Partition> allPartitions = ensureTopicCreated(topic, numPartitions, defaultReplicationFactor);

	Decoder<byte[]> valueDecoder = new DefaultDecoder(null);
	Decoder<byte[]> keyDecoder = new DefaultDecoder(null);

	Collection<Partition> listenedPartitions;

	int moduleCount = accessor.getCount();
	if (moduleCount == 1) {
		// a single module instance listens to all partitions
		listenedPartitions = allPartitions;
	}
	else {
		// divide partitions across module instances
		listenedPartitions = new ArrayList<Partition>();
		for (Partition partition : allPartitions) {
			if (accessor.getPartitionIndex() != -1) {
				if ((partition.getId() % moduleCount) == accessor.getPartitionIndex()) {
					listenedPartitions.add(partition);
				}
			}
			else {
				int moduleSequence = accessor.getSequence();
				if (moduleCount == 0) {
					throw new IllegalArgumentException("The Kafka transport does not support 0-count modules");
				}
				else {
					// module sequence numbers are one-based; shift to match zero-based partition ids
					if ((partition.getId() % moduleCount) == (moduleSequence - 1)) {
						listenedPartitions.add(partition);
					}
				}
			}
		}
	}

	ReceivingHandler rh = new ReceivingHandler();
	rh.setOutputChannel(moduleInputChannel);

	final FixedSubscriberChannel bridge = new FixedSubscriberChannel(rh);
	bridge.setBeanName("bridge." + name);

	final KafkaMessageListenerContainer messageListenerContainer = createMessageListenerContainer(accessor,
			group, maxConcurrency, listenedPartitions, referencePoint);

	final KafkaMessageDrivenChannelAdapter kafkaMessageDrivenChannelAdapter =
			new KafkaMessageDrivenChannelAdapter(messageListenerContainer);
	kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
	kafkaMessageDrivenChannelAdapter.setKeyDecoder(keyDecoder);
	kafkaMessageDrivenChannelAdapter.setPayloadDecoder(valueDecoder);
	kafkaMessageDrivenChannelAdapter.setOutputChannel(bridge);
	kafkaMessageDrivenChannelAdapter.setAutoCommitOffset(
			accessor.getDefaultAutoCommitEnabled(this.defaultAutoCommitEnabled));
	kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
	kafkaMessageDrivenChannelAdapter.start();

	EventDrivenConsumer edc = new EventDrivenConsumer(bridge, rh) {

		@Override
		protected void doStop() {
			// stop the offset manager and the channel adapter before unbinding;
			// this means that the upstream channel adapter has a chance to stop
			kafkaMessageDrivenChannelAdapter.stop();
			if (messageListenerContainer.getOffsetManager() instanceof DisposableBean) {
				try {
					((DisposableBean) messageListenerContainer.getOffsetManager()).destroy();
				}
				catch (Exception e) {
					logger.error("Error while closing the offset manager", e);
				}
			}
			super.doStop();
		}

	};

	String groupedName = groupedName(name, group);
	edc.setBeanName("inbound." + groupedName);

	Binding<MessageChannel> consumerBinding = Binding.forConsumer(name, group, edc, moduleInputChannel, accessor);
	addBinding(consumerBinding);
	consumerBinding.start();
	return consumerBinding;
}
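// Standalone sketch of the partition-distribution rule used above: an
// instance listens to partition p iff (p % moduleCount) equals its
// zero-based index (the one-based "sequence" branch shifts by 1 first).
// The method name and the numbers below are illustrative only, and
// java.util.List/ArrayList imports are assumed.
private static List<Integer> examplePartitionAssignment() {
	int moduleCount = 3; // hypothetical number of module instances
	int moduleIndex = 1; // zero-based index of this instance
	List<Integer> listened = new ArrayList<Integer>();
	for (int partitionId = 0; partitionId < 10; partitionId++) {
		if ((partitionId % moduleCount) == moduleIndex) {
			listened.add(partitionId);
		}
	}
	// with 10 partitions and 3 instances, instance 1 gets [1, 4, 7]
	return listened;
}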