private void commitOffsetsForAckedTuples() {
    // Find offsets that are ready to be committed for every topic partition
    final Map<TopicPartition, OffsetAndMetadata> nextCommitOffsets = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetEntry> tpOffset : acked.entrySet()) {
        final OffsetAndMetadata nextCommitOffset = tpOffset.getValue().findNextCommitOffset();
        if (nextCommitOffset != null) {
            nextCommitOffsets.put(tpOffset.getKey(), nextCommitOffset);
        }
    }

    // Commit offsets that are ready to be committed for every topic partition
    if (!nextCommitOffsets.isEmpty()) {
        kafkaConsumer.commitSync(nextCommitOffsets);
        LOG.debug("Offsets successfully committed to Kafka [{}]", nextCommitOffsets);
        // Instead of iterating again, it would be possible to commit and update the state
        // for each TopicPartition in the prior loop, but the multiple network calls should
        // be more expensive than iterating twice over a small loop
        for (Map.Entry<TopicPartition, OffsetEntry> tpOffset : acked.entrySet()) {
            final OffsetEntry offsetEntry = tpOffset.getValue();
            offsetEntry.commit(nextCommitOffsets.get(tpOffset.getKey()));
        }
    } else {
        LOG.trace("No offsets to commit. {}", this);
    }
}
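The method above relies on OffsetEntry.findNextCommitOffset(), which is not shown. The key idea is that an offset is only safe to commit if every offset below it has also been acked; committing past a gap would silently drop the unacked tuple on restart. A minimal sketch of such a method, assuming hypothetical fields ackedMsgs (a sorted NavigableSet<Long> of acked offsets for the partition) and committedOffset (the offset of the last record already committed):

// Hypothetical sketch; the real OffsetEntry keeps more state. Assumes ackedMsgs
// is a sorted NavigableSet<Long> of acked offsets and committedOffset is the
// offset of the last record whose commit has already reached Kafka.
private OffsetAndMetadata findNextCommitOffset() {
    long nextCommitOffset = committedOffset;
    // Walk the acked offsets in ascending order and stop at the first gap:
    // committing past an unacked offset would break at-least-once delivery.
    for (long offset : ackedMsgs) {
        if (offset == nextCommitOffset + 1) {
            nextCommitOffset = offset;
        } else if (offset > nextCommitOffset + 1) {
            break; // gap found: some offset in between has not been acked yet
        }
    }
    // Kafka interprets a committed offset as "the next record to consume",
    // hence the +1. Return null if nothing new is ready to commit.
    return nextCommitOffset > committedOffset
            ? new OffsetAndMetadata(nextCommitOffset + 1)
            : null;
}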
private static void processRecords(KafkaConsumer<String, String> consumer) throws InterruptedException {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        long lastOffset = 0;
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
            lastOffset = record.offset();
        }
        System.out.println("lastOffset read: " + lastOffset);
        process(); // business logic for the polled records
        // The call below controls the offset commit. Invoke it only after the
        // business logic has finished processing the polled records in order to
        // get the at-least-once guarantee.
        consumer.commitSync();
    }
}
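For the loop above to control offsets itself, automatic offset committing must be disabled when the consumer is created; otherwise the client commits in the background and the commitSync() call no longer defines the delivery guarantee. A minimal setup sketch using the standard kafka-clients API; the broker address, group id, and topic name are placeholders:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public final class ManualCommitExample {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Disable auto-commit so that consumer.commitSync() is the only thing
        // that advances the committed offset.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("my-topic")); // placeholder topic
            processRecords(consumer);
        }
    }
}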