Example #1
  /**
   * Emits one tuple per record.
   *
   * @return true if the tuple was emitted
   */
  private boolean emitTupleIfNotEmitted(ConsumerRecord<K, V> record) {
    final TopicPartition tp = new TopicPartition(record.topic(), record.partition());
    final KafkaSpoutMessageId msgId = new KafkaSpoutMessageId(record);

    if (acked.containsKey(tp) && acked.get(tp).contains(msgId)) { // has been acked
      LOG.trace("Tuple for record [{}] has already been acked. Skipping", record);
    } else if (emitted.contains(msgId)) { // has been emitted and it's pending ack or fail
      LOG.trace("Tuple for record [{}] has already been emitted. Skipping", record);
    } else {
      boolean isScheduled = retryService.isScheduled(msgId);
      // not scheduled <=> never failed (i.e. never emitted), or ready to be retried
      if (!isScheduled || retryService.isReady(msgId)) {
        final List<Object> tuple = tuplesBuilder.buildTuple(record);
        kafkaSpoutStreams.emit(collector, tuple, msgId);
        emitted.add(msgId);
        numUncommittedOffsets++;
        if (isScheduled) { // Was scheduled for retry, now being re-emitted. Remove from schedule.
          retryService.remove(msgId);
        }
        LOG.trace("Emitted tuple [{}] for record [{}]", tuple, record);
        return true;
      }
    }
    return false;
  }
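
Read as a whole, the method makes a three-way decision: a record whose tuple was already acked is skipped, a record whose tuple is emitted and still pending ack or fail is skipped, and anything else is emitted only if it was never scheduled for retry (a fresh record) or its retry back-off has elapsed. Below is a minimal standalone sketch of that decision logic; it tracks offsets in plain sets, and every class, field, and method name in it is illustrative rather than part of the Storm API.

import java.util.HashSet;
import java.util.Set;

// Illustrative sketch only: offsets stand in for KafkaSpoutMessageId and
// plain sets stand in for the acked/emitted/retry bookkeeping above.
public class EmitOnceSketch {

  private final Set<Long> acked = new HashSet<>();      // already acknowledged
  private final Set<Long> emitted = new HashSet<>();    // emitted, pending ack or fail
  private final Set<Long> scheduled = new HashSet<>();  // failed, scheduled for retry
  private final Set<Long> retryReady = new HashSet<>(); // failed, back-off elapsed

  /** @return true if the offset was emitted by this call */
  public boolean emitIfNotEmitted(long offset) {
    if (acked.contains(offset) || emitted.contains(offset)) {
      return false; // already processed, or still in flight
    }
    boolean isScheduled = scheduled.contains(offset);
    if (!isScheduled || retryReady.contains(offset)) {
      emitted.add(offset);
      if (isScheduled) { // re-emitting a failed offset: clear its retry state
        scheduled.remove(offset);
        retryReady.remove(offset);
      }
      return true;
    }
    return false; // scheduled for retry, but back-off has not elapsed yet
  }
}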
Example #2
  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    kafkaSpoutStreams.declareOutputFields(declarer);
  }
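
Here declareOutputFields simply delegates to the configured kafkaSpoutStreams object, which knows the field layout of each stream the spout emits to. The following is a sketch of what a single-stream delegate could look like, assuming Storm 1.x package names and an illustrative field set of "topic", "partition", "offset", "key", "value"; neither the class name nor the field names are taken from the original source.

import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;

// Illustrative single-stream delegate: declares a fixed set of tuple fields
// on the default stream. Class name and field names are assumptions.
public class SingleStreamFields {

  private final Fields outputFields =
      new Fields("topic", "partition", "offset", "key", "value");

  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(outputFields); // one declaration per output stream
  }
}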