public void verifySequences(long fromOffset, long toOffset) throws Exception {
    populateTopicPartitionToOffsetToFiles();
    filterOffsets(fromOffset, toOffset);

    Iterator iterator = mTopicPartitionToOffsetToFiles.entrySet().iterator();
    while (iterator.hasNext()) {
        TreeSet<Long> offsets = new TreeSet<Long>();
        Map.Entry entry = (Map.Entry) iterator.next();
        TopicPartition topicPartition = (TopicPartition) entry.getKey();
        SortedMap<Long, HashSet<LogFilePath>> offsetToFiles =
            (SortedMap<Long, HashSet<LogFilePath>>) entry.getValue();
        // Collect all message offsets stored in the files of this topic partition.
        for (HashSet<LogFilePath> logFilePaths : offsetToFiles.values()) {
            for (LogFilePath logFilePath : logFilePaths) {
                getOffsets(logFilePath, offsets);
            }
        }
        // Offsets must form a contiguous sequence: each offset has to follow the previous
        // one by exactly 1.
        long lastOffset = -2;
        for (Long offset : offsets) {
            if (lastOffset != -2) {
                assert lastOffset + 1 == offset : Long.toString(lastOffset) + " + 1 == " +
                    offset + " for topic " + topicPartition.getTopic() + " partition " +
                    topicPartition.getPartition();
            }
            lastOffset = offset;
        }
    }
}
private long getCommittedTimestampMillis(TopicPartition topicPartition) throws Exception {
    Message message = mKafkaClient.getCommittedMessage(topicPartition);
    if (message == null) {
        // This will happen if no offset has been committed yet for the given topic partition.
        LOG.error("No message found for topic " + topicPartition.getTopic() + " partition " +
            topicPartition.getPartition());
        return -1;
    }
    return mMessageParser.extractTimestampMillis(message);
}
private long getLastTimestampMillis(TopicPartition topicPartition) throws Exception {
    Message message = mKafkaClient.getLastMessage(topicPartition);
    if (message == null) {
        // This will happen if no messages have been posted to the given topic partition.
        LOG.error("No message found for topic " + topicPartition.getTopic() + " partition " +
            topicPartition.getPartition());
        return -1;
    }
    return mMessageParser.extractTimestampMillis(message);
}
public void verifyCounts(long fromOffset, long toOffset, int numMessages) throws Exception {
    populateTopicPartitionToOffsetToFiles();
    filterOffsets(fromOffset, toOffset);

    Iterator iterator = mTopicPartitionToOffsetToFiles.entrySet().iterator();
    int aggregateMessageCount = 0;
    while (iterator.hasNext()) {
        long previousOffset = -2L;
        long previousMessageCount = -2L;
        Map.Entry entry = (Map.Entry) iterator.next();
        SortedMap<Long, HashSet<LogFilePath>> offsetToFiles =
            (SortedMap<Long, HashSet<LogFilePath>>) entry.getValue();
        for (HashSet<LogFilePath> logFilePaths : offsetToFiles.values()) {
            int messageCount = 0;
            long offset = -2;
            // All files registered under the same key must start at the same offset.
            for (LogFilePath logFilePath : logFilePaths) {
                assert offset == -2 || offset == logFilePath.getOffset() :
                    Long.toString(offset) + " == -2 || " + offset + " == " +
                    logFilePath.getOffset();
                messageCount += getMessageCount(logFilePath);
                offset = logFilePath.getOffset();
            }
            // The gap between adjacent starting offsets must match the number of messages
            // stored under the earlier offset.
            if (previousOffset != -2 && offset - previousOffset != previousMessageCount) {
                TopicPartition topicPartition = (TopicPartition) entry.getKey();
                throw new RuntimeException("Message count of " + previousMessageCount +
                    " in topic " + topicPartition.getTopic() + " partition " +
                    topicPartition.getPartition() + " does not agree with adjacent offsets " +
                    previousOffset + " and " + offset);
            }
            previousOffset = offset;
            previousMessageCount = messageCount;
            aggregateMessageCount += messageCount;
        }
    }
    if (numMessages != -1 && aggregateMessageCount != numMessages) {
        throw new RuntimeException("Message count " + aggregateMessageCount +
            " does not agree with the expected count " + numMessages);
    }
}
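// A minimal usage sketch, not part of the original verifier: it assumes an already
// constructed verifier instance and simply chains the two public checks above over a
// caller-supplied offset range. The method name runAllChecks and the expectedMessages
// convention are illustrative assumptions.
public void runAllChecks(long fromOffset, long toOffset, int expectedMessages)
        throws Exception {
    // Verify that message counts in adjacent files agree with their starting offsets;
    // passing -1 as expectedMessages skips the aggregate-count comparison.
    verifyCounts(fromOffset, toOffset, expectedMessages);
    // Verify that the offsets stored in the files form a contiguous sequence.
    verifySequences(fromOffset, toOffset);
}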