/**
 * Consumes messages from the given Kafka stream until the stream ends or waiting for ACKs
 * fails, dispatching each message payload to the configured handler.
 *
 * <p>Offsets are committed in batches: once the number of unacknowledged messages reaches the
 * configured maximum, the partition changes, or too many offsets are uncommitted, this method
 * blocks until all in-flight messages are acknowledged, then commits.
 *
 * @param stream the Kafka stream to consume; iteration blocks until a message arrives
 */
private void read(final KafkaStream<String, String> stream) {
    // Iterate the stream directly rather than calling stream.iterator() twice per loop
    // (once for hasNext, once for next), which only worked because KafkaStream caches
    // its iterator internally.
    for (final MessageAndMetadata<String, String> msg : stream) {
      final int phase = phaser.register();

      final long offset = msg.offset();
      final long partition = msg.partition();
      unacknowledgedOffsets.add(offset);
      // Seed the trackers on the first message only; subsequent CAS calls are no-ops.
      lastCommittedOffset.compareAndSet(0, offset);
      currentPartition.compareAndSet(-1, partition);

      final String jsonString = msg.message();

      handler.handle(
          configuration.getVertxAddress(),
          jsonString,
          () -> {
            // ACK callback: mark the message processed and release its phaser registration.
            unacknowledgedOffsets.remove(offset);
            phaser.arriveAndDeregister();
          });

      if (unacknowledgedOffsets.size() >= configuration.getMaxUnacknowledged()
          || partititionChanged(partition)
          || tooManyUncommittedOffsets(offset)) {
        LOG.info(
            "Got {} unacknowledged messages, waiting for ACKs in order to commit",
            unacknowledgedOffsets.size());
        if (!waitForAcks(phase)) {
          // Interrupted while waiting — abandon consumption entirely.
          return;
        }
        LOG.info("Continuing message processing");
        commitOffsetsIfAllAcknowledged(offset);
      }
    }
  }
  /**
   * Builds a {@link VertxKafkaConsumer} wired to a freshly created Kafka consumer connector.
   *
   * @param configuration supplies the ZooKeeper connection, group id, and timeout settings
   * @param handler invoked for every message the consumer reads
   * @return a consumer ready to be started
   */
  public static VertxKafkaConsumer create(
      final KafkaConfiguration configuration, final KafkaHandler handler) {
    final Properties consumerProperties =
        createProperties(
            configuration.getZookeeper(),
            configuration.getGroupId(),
            configuration.getZookeeperTimeout());
    final ConsumerConnector connector =
        Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProperties));
    return new VertxKafkaConsumer(connector, configuration, handler);
  }
  /**
   * Creates a single message stream for the configured topic and begins consuming it
   * asynchronously on the message-processor executor.
   */
  public void start() {
    final String topic = configuration.getKafkaTopic();
    final Map<String, List<KafkaStream<String, String>>> streamsByTopic =
        connector.createMessageStreams(
            ImmutableMap.of(topic, 1), new StringDecoder(null), new StringDecoder(null));
    // Exactly one stream is requested above, so exactly one must come back.
    final KafkaStream<String, String> stream =
        Iterables.getOnlyElement(streamsByTopic.get(topic));

    messageProcessorExececutor.submit(() -> read(stream));
  }
 /**
  * Blocks until all registered parties of the given phaser phase have arrived, i.e. until
  * every in-flight message has been acknowledged.
  *
  * <p>On timeout a diagnostic is logged and waiting resumes; waiting is abandoned only when
  * the thread is interrupted.
  *
  * @param phase the phaser phase to await advancement of
  * @return {@code true} once all ACKs have arrived, {@code false} if interrupted while waiting
  */
 private boolean waitForAcks(final int phase) {
   // Retry in a loop rather than recursing on timeout, so a permanently stalled consumer
   // cannot grow the call stack without bound.
   while (true) {
     try {
       // Wait for the configured timeout so the log message below matches the actual wait
       // (previously this hard-coded 10 minutes while logging the configured value).
       phaser.awaitAdvanceInterruptibly(
           phase, configuration.getAckTimeoutMinutes(), TimeUnit.MINUTES);
       return true;
     } catch (final InterruptedException e) {
       // Restore the interrupt flag so callers up the stack can observe the interruption.
       Thread.currentThread().interrupt();
       LOG.error("Interrupted while waiting for ACKs", e);
       return false;
     } catch (final TimeoutException e) {
       LOG.error(
           "Waited for {} ACKs for longer than {} minutes, not making any progress ({}/{})",
           unacknowledgedOffsets.size(),
           configuration.getAckTimeoutMinutes(),
           phase,
           phaser.getPhase());
     }
   }
 }
 /**
  * Reports whether the given offset has moved at least the configured maximum number of
  * offsets beyond the last committed offset.
  *
  * @param offset the offset of the message currently being processed
  * @return {@code true} when a commit should be forced before processing continues
  */
 private boolean tooManyUncommittedOffsets(final long offset) {
   final long commitThreshold =
       lastCommittedOffset.get() + configuration.getMaxUncommitedOffsets();
   return offset >= commitThreshold;
 }