// Emits one tuple for the given record, unless the record was already acked,
  // is currently emitted (pending ack/fail), or is scheduled for retry but not
  // yet ready to be retried.
  // @return true if a tuple was emitted for this record
  private boolean emitTupleIfNotEmitted(ConsumerRecord<K, V> record) {
    final TopicPartition tp = new TopicPartition(record.topic(), record.partition());
    final KafkaSpoutMessageId msgId = new KafkaSpoutMessageId(record);

    if (acked.containsKey(tp) && acked.get(tp).contains(msgId)) { // already acked
      LOG.trace("Tuple for record [{}] has already been acked. Skipping", record);
      return false;
    }
    if (emitted.contains(msgId)) { // already in flight, pending ack or fail
      LOG.trace("Tuple for record [{}] has already been emitted. Skipping", record);
      return false;
    }
    // Not scheduled <=> never failed (i.e. never emitted). A scheduled message
    // is only re-emitted once the retry service says it is ready.
    final boolean wasScheduled = retryService.isScheduled(msgId);
    if (wasScheduled && !retryService.isReady(msgId)) {
      return false;
    }
    final List<Object> tuple = tuplesBuilder.buildTuple(record);
    kafkaSpoutStreams.emit(collector, tuple, msgId);
    emitted.add(msgId);
    numUncommittedOffsets++;
    if (wasScheduled) { // was scheduled for retry and is now re-emitted; drop from schedule
      retryService.remove(msgId);
    }
    LOG.trace("Emitted tuple [{}] for record [{}]", tuple, record);
    return true;
  }
  // Rewinds the consumer on every partition that has messages scheduled for
  // retry, so the next poll re-fetches them. Seeks to the offset just past the
  // next committable offset when one exists, otherwise just past the last
  // committed offset.
  private void doSeekRetriableTopicPartitions() {
    for (TopicPartition rtp : retryService.retriableTopicPartitions()) {
      final OffsetAndMetadata nextCommitOffset = acked.get(rtp).findNextCommitOffset();
      final long seekOffset =
          nextCommitOffset != null
              ? nextCommitOffset.offset() + 1 // next offset ready to commit in the next commit cycle
              : acked.get(rtp).committedOffset + 1; // nothing committable pending; resume after last committed
      kafkaConsumer.seek(rtp, seekOffset);
    }
  }
 // Handles a failed tuple: either reschedules it for retry via the retry
 // service, or — once the retry budget is exhausted — acks it so its offset can
 // still be committed. Fails for tuples no longer tracked (e.g. after a
 // partition reassignment) are ignored.
 @Override
 public void fail(Object messageId) {
   final KafkaSpoutMessageId msgId = (KafkaSpoutMessageId) messageId;
   if (emitted.contains(msgId)) {
     if (msgId.numFails() < maxRetries) {
       // Take it out of the in-flight set and hand it to the retry service.
       emitted.remove(msgId);
       msgId.incrementNumFails();
       retryService.schedule(msgId);
     } else { // retry budget exhausted — treat as acked so the offset commits
       LOG.debug("Reached maximum number of retries. Message [{}] being marked as acked.", msgId);
       ack(msgId);
     }
   } else {
     LOG.debug(
         "Received fail for tuple this spout is no longer tracking. Partitions may have been reassigned. Ignoring message [{}]",
         msgId);
   }
 }