private ConsumerConfig getConsumerConfig(DestinationConfiguration destination) {
    String zkHost = properties.get("broker.zk.servers");
    String zkRoot = properties.get("broker.zk.root");
    ZkHosts zkHosts = new ZkHosts(zkHost, zkRoot);

    // The queue name encodes the destination hierarchy; grouped destinations
    // omit the sensor id so that all sensors in the group share one queue.
    String queue;
    if (!destination.isGrouped()) {
        queue = destination.getSite() + "." + destination.getSensor() + "."
                + destination.getSensorId() + "." + destination.getProperty("topic");
    } else {
        queue = destination.getSite() + "." + destination.getSensor() + "."
                + destination.getProperty("topic");
    }

    ConsumerConfig consumerConfig = new ConsumerConfig(zkHosts, queue, "/iot/broker", queue);

    // "broker.zk.servers" is a comma-separated list; split it into individual hosts.
    consumerConfig.zkServers = new ArrayList<String>();
    for (String server : Splitter.on(",").split(zkHost)) {
        consumerConfig.zkServers.add(server);
    }
    return consumerConfig;
}
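// A minimal, self-contained sketch of the queue names the method above produces.
// The site/sensor/topic values are hypothetical placeholders; the grouped flag
// only controls whether the sensor id appears in the name.
public class QueueNameDemo {
    public static void main(String[] args) {
        String site = "siteA", sensor = "temp", sensorId = "42", topic = "readings";
        // Ungrouped: one queue per sensor instance.
        System.out.println(site + "." + sensor + "." + sensorId + "." + topic); // siteA.temp.42.readings
        // Grouped: every sensor of this type shares one queue.
        System.out.println(site + "." + sensor + "." + topic);                  // siteA.temp.readings
    }
}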
private void connectZk() {
    Xnd.logConsumer("Connecting to ZK");
    logger.info("Connecting to zookeeper instance at " + config.getZkConnect());
    this.zkClient = new ZkClient(
        config.getZkConnect(),
        config.getZkSessionTimeoutMs(),
        config.getZkConnectionTimeoutMs());
    logger.info("Connected to zookeeper at " + config.getZkConnect());
}
public SimpleMessageConsumer(
        final MetaMessageSessionFactory messageSessionFactory,
        final RemotingClientWrapper remotingClient,
        final ConsumerConfig consumerConfig,
        final ConsumerZooKeeper consumerZooKeeper,
        final ProducerZooKeeper producerZooKeeper,
        final SubscribeInfoManager subscribeInfoManager,
        final RecoverManager recoverManager,
        final OffsetStorage offsetStorage,
        final LoadBalanceStrategy loadBalanceStrategy) {
    super();
    this.messageSessionFactory = messageSessionFactory;
    this.remotingClient = remotingClient;
    this.consumerConfig = consumerConfig;
    this.producerZooKeeper = producerZooKeeper;
    this.consumerZooKeeper = consumerZooKeeper;
    this.offsetStorage = offsetStorage;
    this.subscribeInfoManager = subscribeInfoManager;
    this.recoverStorageManager = recoverManager;
    this.fetchManager = new SimpleFetchManager(consumerConfig, this);
    this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
    this.loadBalanceStrategy = loadBalanceStrategy;
    // Periodically commit consumed offsets back through ZooKeeper.
    this.scheduledExecutorService.scheduleAtFixedRate(
        new Runnable() {
            @Override
            public void run() {
                SimpleMessageConsumer.this.consumerZooKeeper.commitOffsets(
                    SimpleMessageConsumer.this.fetchManager);
            }
        },
        consumerConfig.getCommitOffsetPeriodInMills(),
        consumerConfig.getCommitOffsetPeriodInMills(),
        TimeUnit.MILLISECONDS);
}
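// A self-contained sketch of the same fixed-rate scheduling pattern used by
// the constructor above. The period value and the task body are illustrative
// stand-ins for getCommitOffsetPeriodInMills() and the real commit call.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class FixedRateCommitDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        long periodMs = 1000L; // illustrative commit period
        // Initial delay and period use the same value, as in the constructor above.
        scheduler.scheduleAtFixedRate(
            new Runnable() {
                @Override
                public void run() {
                    System.out.println("commit offsets");
                }
            },
            periodMs,
            periodMs,
            TimeUnit.MILLISECONDS);
        Thread.sleep(3500);
        scheduler.shutdown();
    }
}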
public ZookeeperTopicEventWatcher(
        ConsumerConfig consumerConfig,
        TopicEventHandler<String> eventHandler,
        ServerStartable serverStartable) {
    super();
    this.eventHandler = eventHandler;
    this.serverStartable = serverStartable;
    this.zkClient = new ZkClient(
        consumerConfig.getZkConnect(),
        consumerConfig.getZkSessionTimeoutMs(),
        consumerConfig.getZkConnectionTimeoutMs(),
        ZkStringSerializer.getInstance());
    startWatchingTopicEvents();
}
public void close() throws IOException {
    if (isShuttingDown.compareAndSet(false, true)) {
        logger.info("ZkConsumerConnector shutting down");
        try {
            scheduler.shutdown();
            if (fetcher != null) {
                fetcher.stopConnectionsToAllBrokers();
            }
            sendShutdownToAllQueues();
            if (config.isAutoCommit()) {
                commitOffsets();
            }
            // Wait for the rebalance listeners to close, then shut down the zkClient.
            for (ZKRebalancerListener<?> listener : this.rebalancerListeners) {
                Closer.closeQuietly(listener);
            }
            if (this.zkClient != null) {
                this.zkClient.close();
                zkClient = null;
            }
        } catch (Exception e) {
            logger.error("error during consumer connector shutdown", e);
        }
        logger.info("ZkConsumerConnector shutdown completed");
    }
}
public void commitOffsets() {
    Xnd.logConsumer("AutoCommitTask auto-commit running...");
    if (zkClient == null) {
        logger.error("zk client is null. Cannot commit offsets");
        return;
    }
    for (Entry<String, Pool<Partition, PartitionTopicInfo>> e : topicRegistry.entrySet()) {
        ZkGroupTopicDirs topicDirs = new ZkGroupTopicDirs(config.getGroupId(), e.getKey());
        for (PartitionTopicInfo info : e.getValue().values()) {
            final long lastChanged = info.getConsumedOffsetChanged().get();
            if (lastChanged == 0) {
                logger.trace("consume offset not changed");
                continue;
            }
            final long newOffset = info.getConsumedOffset();
            // path: /consumers/<group>/offsets/<topic>/<brokerid-partition>
            final String path = topicDirs.consumerOffsetDir + "/" + info.partition.getName();
            try {
                ZkUtils.updatePersistentPath(zkClient, path, "" + newOffset);
            } catch (Throwable t) {
                // Log and move on: a failed commit will be retried on the next auto-commit cycle.
                logger.warn("exception during commitOffsets, path=" + path + ",offset=" + newOffset, t);
            } finally {
                info.resetComsumedOffsetChanged(lastChanged);
                if (logger.isDebugEnabled()) {
                    logger.debug("Committed [" + path + "] for topic " + info);
                }
            }
        }
    }
}
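// The commit loop above skips partitions whose consumed offset has not moved,
// then resets the change counter. A minimal, self-contained sketch of that
// pattern; the AtomicLong field and the compareAndSet-based reset are
// assumptions about how PartitionTopicInfo tracks changes internally.
import java.util.concurrent.atomic.AtomicLong;

public class OffsetChangeTrackerDemo {
    // Incremented every time the consumed offset advances (assumed behavior).
    private final AtomicLong consumedOffsetChanged = new AtomicLong(0);

    void onMessageConsumed() {
        consumedOffsetChanged.incrementAndGet();
    }

    // Mirrors the commit loop: read the counter, commit only if it moved,
    // then reset with compareAndSet so concurrent increments are not lost.
    void maybeCommit() {
        long lastChanged = consumedOffsetChanged.get();
        if (lastChanged == 0) {
            return; // nothing consumed since the last commit
        }
        // ... persist the offset to ZooKeeper here ...
        consumedOffsetChanged.compareAndSet(lastChanged, 0);
    }

    public static void main(String[] args) {
        OffsetChangeTrackerDemo demo = new OffsetChangeTrackerDemo();
        demo.onMessageConsumed();
        demo.maybeCommit();
    }
}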
public ZookeeperConsumerConnector(ConsumerConfig config, boolean enableFetcher) {
    Xnd.logConsumer("Creating ZookeeperConsumerConnector connection object");
    this.config = config;
    this.enableFetcher = enableFetcher;
    this.topicRegistry = new Pool<String, Pool<Partition, PartitionTopicInfo>>();
    this.queues = new Pool<StringTuple, BlockingQueue<FetchedDataChunk>>();
    connectZk();
    createFetcher();
    if (this.config.isAutoCommit()) {
        logger.info("starting auto committer every " + config.getAutoCommitIntervalMs() + " ms");
        Xnd.logConsumer("Starting auto-commit task");
        scheduler.scheduleWithRate(
            new AutoCommitTask(),
            config.getAutoCommitIntervalMs(),
            config.getAutoCommitIntervalMs());
    }
}
private KafkaConsumer(
        ConsumerConfig config,
        ConsumerRebalanceCallback callback,
        Deserializer<K> keyDeserializer,
        Deserializer<V> valueDeserializer) {
    log.trace("Starting the Kafka consumer");
    subscribedTopics = new HashSet<String>();
    subscribedPartitions = new HashSet<TopicPartition>();
    this.metrics = new Metrics(
        new MetricConfig(),
        Collections.singletonList((MetricsReporter) new JmxReporter("kafka.consumer.")),
        new SystemTime());
    this.metadataFetchTimeoutMs = config.getLong(ConsumerConfig.METADATA_FETCH_TIMEOUT_CONFIG);
    this.totalMemorySize = config.getLong(ConsumerConfig.TOTAL_BUFFER_MEMORY_CONFIG);
    List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(
        config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
    // Fall back to the configured deserializer classes when none are passed in directly.
    if (keyDeserializer == null) {
        this.keyDeserializer = config.getConfiguredInstance(
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class);
    } else {
        this.keyDeserializer = keyDeserializer;
    }
    if (valueDeserializer == null) {
        this.valueDeserializer = config.getConfiguredInstance(
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class);
    } else {
        this.valueDeserializer = valueDeserializer;
    }
    config.logUnused();
    log.debug("Kafka consumer started");
}
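// A minimal usage sketch of the public entry point that reaches the private
// constructor above, assuming a recent kafka-clients release where the
// consumer is built from a Properties map. The broker address and group id
// are placeholder values. Leaving the Deserializer arguments null, as here,
// exercises the fallback branch: the deserializers are instantiated from the
// configured class names.
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerConfigDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.close();
    }
}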
private <T> Map<String, List<MessageStream<T>>> consume(
        Map<String, Integer> topicCountMap, Decoder<T> decoder) {
    if (topicCountMap == null) {
        throw new IllegalArgumentException("topicCountMap is null");
    }
    ZkGroupDirs dirs = new ZkGroupDirs(config.getGroupId());
    Map<String, List<MessageStream<T>>> ret = new HashMap<String, List<MessageStream<T>>>();

    String consumerUuid = config.getConsumerId();
    if (consumerUuid == null) {
        consumerUuid = generateConsumerId();
    }
    logger.info(format(
        "create message stream by consumerid [%s] with groupid [%s]",
        consumerUuid, config.getGroupId()));

    // consumerIdString => groupid_consumerid
    final String consumerIdString = config.getGroupId() + "_" + consumerUuid;
    final TopicCount topicCount = new TopicCount(consumerIdString, topicCountMap);

    // For each topic, create one blocking queue and one message stream per consumer thread.
    for (Map.Entry<String, Set<String>> e : topicCount.getConsumerThreadIdsPerTopic().entrySet()) {
        final String topic = e.getKey();
        final Set<String> threadIdSet = e.getValue();
        final List<MessageStream<T>> streamList = new ArrayList<MessageStream<T>>();
        for (String threadId : threadIdSet) {
            LinkedBlockingQueue<FetchedDataChunk> stream =
                new LinkedBlockingQueue<FetchedDataChunk>(config.getMaxQueuedChunks());
            queues.put(new StringTuple(topic, threadId), stream);
            streamList.add(new MessageStream<T>(topic, stream, config.getConsumerTimeoutMs(), decoder));
        }
        ret.put(topic, streamList);
        logger.debug("adding topic " + topic + " and stream to map.");
    }

    // Listen for consumer and partition changes.
    ZKRebalancerListener<T> loadBalancerListener =
        new ZKRebalancerListener<T>(config.getGroupId(), consumerIdString, ret);
    this.rebalancerListeners.add(loadBalancerListener);
    loadBalancerListener.start();
    registerConsumerInZK(dirs, consumerIdString, topicCount);

    // Register a listener for the session-expired event.
    zkClient.subscribeStateChanges(
        new ZKSessionExpireListener<T>(dirs, consumerIdString, topicCount, loadBalancerListener));
    zkClient.subscribeChildChanges(dirs.consumerRegistryDir, loadBalancerListener);

    for (String topic : ret.keySet()) {
        // Register on broker partition path changes.
        final String partitionPath = ZkUtils.BrokerTopicsPath + "/" + topic;
        zkClient.subscribeChildChanges(partitionPath, loadBalancerListener);
    }

    // Explicitly trigger load balancing for this consumer.
    loadBalancerListener.syncedRebalance();
    return ret;
}
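// A caller-side sketch of how consume() is typically reached. The public
// method name (createMessageStreams), the StringDecoder, the topic name, and
// the thread count are all illustrative assumptions; the real entry point
// depends on the surrounding library, so the call itself is left commented.
import java.util.HashMap;
import java.util.Map;

public class ConsumeDemo {
    public static void main(String[] args) {
        // Ask for two consumer threads (and thus two streams) for "demo-topic".
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("demo-topic", 2);

        // Hypothetical call path:
        // Map<String, List<MessageStream<String>>> streams =
        //     connector.createMessageStreams(topicCountMap, new StringDecoder());
        // for (MessageStream<String> stream : streams.get("demo-topic")) {
        //     for (String message : stream) {
        //         System.out.println(message);
        //     }
        // }
    }
}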