public void commitOffsets() {
    Xnd.logConsumer("AutoCommitTask auto-commit running...");
    if (zkClient == null) {
        logger.error("zk client is null. Cannot commit offsets");
        return;
    }
    for (Entry<String, Pool<Partition, PartitionTopicInfo>> e : topicRegistry.entrySet()) {
        ZkGroupTopicDirs topicDirs = new ZkGroupTopicDirs(config.getGroupId(), e.getKey());
        for (PartitionTopicInfo info : e.getValue().values()) {
            final long lastChanged = info.getConsumedOffsetChanged().get();
            if (lastChanged == 0) {
                // nothing consumed since the last commit, skip the ZooKeeper write
                logger.trace("consume offset not changed");
                continue;
            }
            final long newOffset = info.getConsumedOffset();
            // path: /consumers/<group>/offsets/<topic>/<brokerid-partition>
            final String path = topicDirs.consumerOffsetDir + "/" + info.partition.getName();
            try {
                ZkUtils.updatePersistentPath(zkClient, path, "" + newOffset);
            } catch (Throwable t) {
                logger.warn("exception during commitOffsets, path=" + path + ", offset=" + newOffset, t);
            } finally {
                info.resetComsumedOffsetChanged(lastChanged);
                if (logger.isDebugEnabled()) {
                    logger.debug("Committed [" + path + "] for topic " + info);
                }
            }
        }
    }
}
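The key idea in commitOffsets() is a change marker: the offset is only written to ZooKeeper when something was consumed since the last commit, and the marker is reset afterwards. A minimal, self-contained sketch of that bookkeeping follows; the class and field names are illustrative, not the real PartitionTopicInfo API, and the ZooKeeper write is replaced by a print statement.

import java.util.concurrent.atomic.AtomicLong;

// Sketch of "commit only when the consumed offset changed"; names are hypothetical.
public class OffsetChangeMarker {
    private final AtomicLong consumedOffset = new AtomicLong(0);
    private final AtomicLong changedSinceLastCommit = new AtomicLong(0);

    public void consumed(long newOffset) {
        consumedOffset.set(newOffset);
        changedSinceLastCommit.incrementAndGet();
    }

    public void maybeCommit() {
        long lastChanged = changedSinceLastCommit.get();
        if (lastChanged == 0) {
            return; // nothing new consumed, skip the (expensive) external write
        }
        long offsetToWrite = consumedOffset.get();
        System.out.println("would persist offset " + offsetToWrite);
        // clear the marker only if no further consumption happened in between
        changedSinceLastCommit.compareAndSet(lastChanged, 0);
    }

    public static void main(String[] args) {
        OffsetChangeMarker marker = new OffsetChangeMarker();
        marker.maybeCommit();      // skipped, nothing consumed yet
        marker.consumed(42);
        marker.maybeCommit();      // persists 42 and resets the marker
        marker.maybeCommit();      // skipped again
    }
}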
public ZookeeperConsumerConnector(ConsumerConfig config, boolean enableFetcher) {
    Xnd.logConsumer("creating ZookeeperConsumerConnector instance");
    this.config = config;
    this.enableFetcher = enableFetcher;

    this.topicRegistry = new Pool<String, Pool<Partition, PartitionTopicInfo>>();
    this.queues = new Pool<StringTuple, BlockingQueue<FetchedDataChunk>>();

    connectZk();
    createFetcher();
    if (this.config.isAutoCommit()) {
        logger.info("starting auto committer every " + config.getAutoCommitIntervalMs() + " ms");
        Xnd.logConsumer("starting the auto-commit task");
        scheduler.scheduleWithRate(new AutoCommitTask(), config.getAutoCommitIntervalMs(),
                config.getAutoCommitIntervalMs());
    }
}
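The periodic commit is driven by the project's own scheduler via scheduleWithRate(). A rough equivalent using the standard JDK ScheduledExecutorService (an assumption for illustration only, not the scheduler used above) looks like this:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class AutoCommitSketch {
    public static void main(String[] args) throws InterruptedException {
        long intervalMs = 1000; // stands in for config.getAutoCommitIntervalMs()
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // run the commit task at a fixed rate, starting after one interval
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("commitOffsets() would run here"),
                intervalMs, intervalMs, TimeUnit.MILLISECONDS);
        Thread.sleep(3500);
        scheduler.shutdown();
    }
}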
private void connectZk() {
    Xnd.logConsumer("connecting to ZK");
    logger.info("Connecting to zookeeper instance at " + config.getZkConnect());
    this.zkClient = new ZkClient(config.getZkConnect(), config.getZkSessionTimeoutMs(),
            config.getZkConnectionTimeoutMs());
    logger.info("Connected to zookeeper at " + config.getZkConnect());
}
public void syncedRebalance() {
    Xnd.logConsumer("ZKRebalancerListener.syncedRebalance() synchronized rebalance starting");
    synchronized (rebalanceLock) {
        for (int i = 0; i < config.getMaxRebalanceRetries(); i++) {
            if (isShuttingDown.get()) {
                // do nothing while shutting down
                return;
            }
            logger.info(format("[%s] rebalancing starting. try #%d", consumerIdString, i));
            final long start = System.currentTimeMillis();
            boolean done = false;
            // look up all brokers in the cluster
            Cluster cluster = ZkUtils.getCluster(zkClient);
            try {
                done = rebalance(cluster);
            } catch (Exception e) {
                /*
                 * occasionally, we may hit a ZK exception because the ZK state is changing while we are
                 * iterating. For example, a ZK node can disappear between the time we get all children
                 * and the time we try to get the value of a child. Just let this go since another
                 * rebalance will be triggered.
                 */
                logger.info("exception during rebalance ", e);
            }
            logger.info(format("[%s] rebalanced %s. try #%d, cost %d ms",
                    consumerIdString, done ? "OK" : "FAILED", i, System.currentTimeMillis() - start));
            if (done) {
                return;
            } else {
                /*
                 * Here the cache is at risk of being stale. To make future rebalancing decisions
                 * correctly, we should clear the cache.
                 */
                logger.warn("Rebalancing attempt failed. Clearing the cache before the next rebalancing operation is triggered");
            }
            closeFetchersForQueues(cluster, messagesStreams, queues.values());
            try {
                Thread.sleep(config.getRebalanceBackoffMs());
            } catch (InterruptedException e) {
                logger.warn(e.getMessage());
            }
        }
    }
    throw new ConsumerRebalanceFailedException(consumerIdString + " can't rebalance after "
            + config.getMaxRebalanceRetries() + " retries");
}
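Structurally, syncedRebalance() is a bounded retry loop with a backoff sleep between attempts, giving up with an exception after the last try. The standalone sketch below shows just that skeleton; attempt() is a hypothetical stand-in for the real rebalance(cluster) call.

// Sketch of retry-with-backoff; attempt() is illustrative only.
public class RetryWithBackoff {
    public static void runWithRetries(int maxRetries, long backoffMs) throws InterruptedException {
        for (int i = 0; i < maxRetries; i++) {
            boolean done = false;
            try {
                done = attempt(i);
            } catch (Exception e) {
                // transient failures (e.g. state changing mid-iteration) are logged and retried
                System.out.println("attempt " + i + " failed: " + e.getMessage());
            }
            if (done) {
                return;
            }
            Thread.sleep(backoffMs); // back off before the next attempt
        }
        throw new IllegalStateException("could not finish after " + maxRetries + " retries");
    }

    private static boolean attempt(int i) {
        return i >= 2; // pretend the third attempt succeeds
    }

    public static void main(String[] args) throws InterruptedException {
        runWithRetries(4, 100);
    }
}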
public void start() {
    this.watcherExecutorThread.start();
    Xnd.logConsumer("ZKRebalancerListener starting the consumer rebalance watcher thread");
}