/**
 * Returns the usable offset range [smallest, largest] for the requested topic/partition.
 *
 * <p>Queries the broker for the earliest and latest offsets, then, if the request carries a
 * valid starting offset that lies inside the range, narrows the lower bound to it.
 *
 * @return a two-element array: {@code [0]} = start offset, {@code [1]} = end offset
 * @throws IOException if the broker returns anything other than exactly one offset
 */
protected long[] getOffsetRange() throws IOException {
  long[] range = new long[2];
  range[0] = fetchSingleOffset(kafka.api.OffsetRequest.EarliestTime(), "smallest");
  range[1] = fetchSingleOffset(kafka.api.OffsetRequest.LatestTime(), "latest");

  /* adjust range based on input offsets */
  if (_request.isValidOffset()) {
    long startOffset = _request.getOffset();
    if (startOffset > range[0]) {
      System.out.println("Update starting offset with " + startOffset);
      range[0] = startOffset;
    } else {
      System.out.println(
          "WARNING: given starting offset "
              + startOffset
              + " is smaller than the smallest one "
              + range[0]
              + ". Will ignore it.");
    }
  }
  System.out.println("Using offset range [" + range[0] + ", " + range[1] + "]");
  return range;
}

/**
 * Fetches exactly one offset for the requested topic/partition at the given time marker.
 *
 * @param time one of {@code kafka.api.OffsetRequest.EarliestTime()} / {@code LatestTime()}
 * @param desc label used in the error message ("smallest" or "latest")
 * @return the single broker-reported offset
 * @throws IOException if the broker does not return exactly one offset
 */
private long fetchSingleOffset(long time, String desc) throws IOException {
  TopicAndPartition topicAndPartition =
      new TopicAndPartition(_request.getTopic(), _request.getPartition());
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
      new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(time, 1));
  OffsetRequest request =
      new OffsetRequest(
          requestInfo,
          kafka.api.OffsetRequest.CurrentVersion(),
          kafka.api.OffsetRequest.DefaultClientId());
  long[] offsets =
      _consumer.getOffsetsBefore(request).offsets(_request.getTopic(), _request.getPartition());
  if (offsets.length != 1) {
    throw new IOException(
        "input:" + _input + " Expect one " + desc + " offset but get " + offsets.length);
  }
  return offsets[0];
}
/**
 * Computes the offset range [smallest, largest] for the requested topic/partition, optionally
 * raising the lower bound to the offset carried by the request.
 *
 * @return two-element array: index 0 = start offset, index 1 = end offset
 * @throws IOException if the broker does not return exactly one offset for either query
 */
protected long[] getOffsetRange() throws IOException {
  // Ask the broker for the single smallest offset of this topic/partition.
  long[] earliest =
      _consumer.getOffsetsBefore(
          _request.getTopic(), _request.getPartition(), OffsetRequest.EARLIEST_TIME(), 1);
  if (earliest.length != 1) {
    throw new IOException(
        "input:" + _input + " Expect one smallest offset but get " + earliest.length);
  }

  // Ask for the single largest offset.
  long[] latest =
      _consumer.getOffsetsBefore(
          _request.getTopic(), _request.getPartition(), OffsetRequest.LATEST_TIME(), 1);
  if (latest.length != 1) {
    throw new IOException(
        "input:" + _input + " Expect one latest offset but get " + latest.length);
  }

  long[] range = {earliest[0], latest[0]};

  // Narrow the lower bound with the request-supplied offset, when it is usable.
  if (_request.isValidOffset()) {
    long startOffset = _request.getOffset();
    if (startOffset > range[0]) {
      System.out.println("Update starting offset with " + startOffset);
      range[0] = startOffset;
    } else {
      System.out.println(
          "WARNING: given starting offset "
              + startOffset
              + " is smaller than the smallest one "
              + range[0]
              + ". Will ignore it.");
    }
  }
  return range;
}
/**
 * Tracks consumption state for a single Kafka partition.
 *
 * <p>Restores the committed offset from persisted state when available; otherwise (or when
 * {@code forceFromStart} is set) starts from the offset the broker reports for the configured
 * {@code startOffsetTime}.
 *
 * @param partition the partition index this manager is responsible for
 */
public PartitionManager(int partition) {
  _partition = partition;
  // Previously committed metadata; null when nothing has been committed yet.
  ZooMeta zooMeta = (ZooMeta) _state.getData(committedPath());
  SimpleConsumer consumer = _partitions.getConsumer(_partition);
  int hostPartition = _partitions.getHostPartition(_partition);

  // the id stuff makes sure the spout doesn't reset the offset if it restarts
  // if(zooMeta==null || (!_uuid.equals(zooMeta.id) && _spoutConfig.forceFromStart)) {
  // NOTE(review): the id check above is disabled, so forceFromStart alone discards any
  // committed offset on every restart — confirm this is intended.
  if (zooMeta == null || (_spoutConfig.forceFromStart)) {
    // No saved state (or forced restart): begin at the broker-reported offset for
    // the configured startOffsetTime.
    _committedTo =
        consumer
            .getOffsetsBefore(
                _spoutConfig.topic, hostPartition, _spoutConfig.startOffsetTime, 1)[0];
  } else {
    // Resume from the last committed offset.
    _committedTo = zooMeta.offset;
  }
  // Nothing emitted beyond the committed point yet.
  _emittedToOffset = _committedTo;
}
/**
 * Looks up a single offset for {@code topic}/{@code partition} at the given time marker.
 *
 * @param consumer connected {@code SimpleConsumer} to issue the request through
 * @param topic topic name
 * @param partition partition id
 * @param whichTime time marker, e.g. {@code kafka.api.OffsetRequest.EarliestTime()} or
 *     {@code LatestTime()}
 * @param clientName client id recorded on the request
 * @return the broker-reported offset, or 0 when the broker reports an error or returns no
 *     offsets (NOTE: 0 is also a legitimate offset, so callers cannot distinguish failure)
 */
public static long getLastOffset(
    SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
      new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
  kafka.javaapi.OffsetRequest request =
      new kafka.javaapi.OffsetRequest(
          requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    System.out.println(
        "Error fetching data Offset Data the Broker. Reason: "
            + response.errorCode(topic, partition));
    return 0;
  }
  long[] offsets = response.offsets(topic, partition);
  // Guard against an empty result; previously this threw ArrayIndexOutOfBoundsException.
  if (offsets.length == 0) {
    System.out.println("No offset returned by the broker for " + topic + ":" + partition);
    return 0;
  }
  return offsets[0];
}
/**
 * Fetches one offset for {@code topic}/{@code partition} at {@code startOffsetTime}.
 *
 * @param topic topic name
 * @param partition partition id
 * @param startOffsetTime time marker passed to the offset request
 * @return the offset, {@code -1} when no leader consumer is available, or {@code NO_OFFSET}
 *     when the broker returns no offsets
 */
public long getOffset(String topic, int partition, long startOffsetTime) {
  SimpleConsumer leader = findLeaderConsumer(partition);
  if (leader == null) {
    LOG.error("Error consumer is null get offset from partition:" + partition);
    return -1;
  }

  // Single-entry request map: one (topic, partition) asking for one offset.
  Map<TopicAndPartition, PartitionOffsetRequestInfo> perPartition =
      new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  perPartition.put(
      new TopicAndPartition(topic, partition),
      new PartitionOffsetRequestInfo(startOffsetTime, 1));
  OffsetRequest offsetRequest =
      new OffsetRequest(
          perPartition, kafka.api.OffsetRequest.CurrentVersion(), leader.clientId());

  long[] found = leader.getOffsetsBefore(offsetRequest).offsets(topic, partition);
  return found.length > 0 ? found[0] : NO_OFFSET;
}