Example #1
 public static List<kafka.javaapi.TopicMetadata> MetadataDump() {
   kafka.javaapi.consumer.SimpleConsumer consumer =
       new SimpleConsumer("0.0.0.0", 9092, 100000, 64 * 1024, "metadata");
   try {
     // an empty topic list asks the broker for metadata on every topic
     List<String> topics = new ArrayList<String>();
     TopicMetadataRequest req = new TopicMetadataRequest(topics);
     kafka.javaapi.TopicMetadataResponse res = consumer.send(req);
     return res.topicsMetadata();
   } finally {
     consumer.close(); // release the socket even if the request fails
   }
 }
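A minimal caller for the dump above might look like the sketch below. It is an illustration, not part of the original example: it assumes the hard-coded broker in MetadataDump() is reachable, and it uses only the topic() and partitionsMetadata() accessors that the old kafka.javaapi metadata classes provide.

  // Hedged usage sketch: print each topic and its partition count.
  for (kafka.javaapi.TopicMetadata metadata : MetadataDump()) {
    System.out.println(
        "topic=" + metadata.topic()
            + " partitions=" + metadata.partitionsMetadata().size());
  }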
Example #2
  /** Get offset ranges */
  protected long[] getOffsetRange() throws IOException {

    /* get smallest and largest offsets*/
    long[] range = new long[2];

    TopicAndPartition topicAndPartition =
        new TopicAndPartition(_request.getTopic(), _request.getPartition());
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(
        topicAndPartition,
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
    OffsetRequest request =
        new OffsetRequest(
            requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(),
            kafka.api.OffsetRequest.DefaultClientId());
    long[] startOffsets =
        _consumer.getOffsetsBefore(request).offsets(_request.getTopic(), _request.getPartition());
    if (startOffsets.length != 1)
      throw new IOException(
          "input:" + _input + " Expected exactly one earliest offset but got " + startOffsets.length);
    range[0] = startOffsets[0];

    requestInfo.clear();
    requestInfo.put(
        topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    request =
        new OffsetRequest(
            requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(),
            kafka.api.OffsetRequest.DefaultClientId());
    long[] endOffsets =
        _consumer.getOffsetsBefore(request).offsets(_request.getTopic(), _request.getPartition());
    if (endOffsets.length != 1)
      throw new IOException(
          "input:" + _input + " Expected exactly one latest offset but got " + endOffsets.length);
    range[1] = endOffsets[0];

    /*adjust range based on input offsets*/
    if (_request.isValidOffset()) {
      long startOffset = _request.getOffset();
      if (startOffset > range[0]) {
        System.out.println("Updating starting offset to " + startOffset);
        range[0] = startOffset;
      } else {
        System.out.println(
            "WARNING: given starting offset "
                + startOffset
                + " is smaller than the earliest available offset "
                + range[0]
                + ". Ignoring it.");
      }
    }
    System.out.println("Using offset range [" + range[0] + ", " + range[1] + "]");
    return range;
  }
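The two nearly identical request/response passes above could be folded into one helper. The following is a hedged consolidation sketch, not part of the original code; it reuses only the kafka.javaapi types the example already depends on, and the consumer parameter stands in for the example's _consumer field.

  // Look up a single offset (earliest or latest, depending on `time`)
  // for one topic/partition via the old kafka.javaapi request classes.
  private static long lookupOffset(
      kafka.javaapi.consumer.SimpleConsumer consumer, String topic, int partition, long time)
      throws IOException {
    TopicAndPartition tap = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> info =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    info.put(tap, new PartitionOffsetRequestInfo(time, 1));
    kafka.javaapi.OffsetRequest request =
        new kafka.javaapi.OffsetRequest(
            info,
            kafka.api.OffsetRequest.CurrentVersion(),
            kafka.api.OffsetRequest.DefaultClientId());
    long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length != 1) {
      throw new IOException("Expected exactly one offset but got " + offsets.length);
    }
    return offsets[0];
  }

With such a helper, range[0] and range[1] reduce to two calls with kafka.api.OffsetRequest.EarliestTime() and LatestTime().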
Example #3
  @Override
  public void run() {
    long offset = 0;
    while (isAlive) {
      // create a fetch request for topic "topic1", partition 1, the current offset,
      // and a fetch size of 1 MB
      FetchRequest fetchRequest =
          new FetchRequestBuilder()
              .clientId("default_client")
              .addFetch("topic1", 1, offset, 1000000)
              .build();

      // get the message set from the consumer and print them out
      ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
      Iterator<MessageAndOffset> itr = messages.iterator();

      while (itr.hasNext() && isAlive) {
        MessageAndOffset msg = itr.next();
        // advance to the offset after this message so the next fetch
        // doesn't re-read the message we just consumed
        offset = msg.nextOffset();
        logger.debug(
            "consumed: {} offset: {}",
            byteBufferToString(msg.message().payload()),
            msg.offset());
        receiveCount++;
      }
    }
  }
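The loop above calls a byteBufferToString helper that the listing does not show. A minimal sketch, assuming the payload holds UTF-8 text, could be:

  // Hypothetical helper (not in the original listing): copy the remaining
  // bytes out of the payload buffer and decode them as UTF-8. duplicate()
  // keeps the original buffer's position untouched.
  private static String byteBufferToString(java.nio.ByteBuffer buffer) {
    byte[] bytes = new byte[buffer.remaining()];
    buffer.duplicate().get(bytes);
    return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
  }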
Example #4
 //    @Ignore
 @After
 public void tearDown() throws IOException {
   kafkaSink.stop();
   simpleConsumer.close();
   //        kafkaServer.shutdown();
   //        zookeeperServer.shutdown();
 }
Example #5
  public Map<Partition, Broker> findPartBrokers(List<String> topics) throws IOException {
    Set<Broker> newMetaBrokers = new HashSet<Broker>();
    Map<Partition, Broker> partBrokers = null;
    String error = null;

    for (Broker metaBroker : metaBrokers) {
      SimpleConsumer consumer = null;
      try {
        consumer = clusterConsumer.connections.getConsumer(metaBroker);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);

        List<TopicMetadata> metadatas = response.topicsMetadata();
        Map<Partition, Broker> _partBrokers = new TreeMap<Partition, Broker>();

        for (TopicMetadata metadata : metadatas) {
          for (PartitionMetadata partitionMetadata : metadata.partitionsMetadata()) {
            Partition partition = new Partition(metadata.topic(), partitionMetadata.partitionId());
            Broker broker = new Broker(partitionMetadata.leader());
            _partBrokers.put(partition, broker);
            if (LOG.isDebugEnabled()) {
              LOG.debug("find partition=" + partition + " at broker=" + broker);
            }

            if (!metaBrokers.contains(broker)) {
              LOG.info("find new meta broker=" + broker);
              newMetaBrokers.add(broker);
            }
          }
        }
        partBrokers = _partBrokers;
        break;
      } catch (Exception e) {
        LOG.warn("topic meta data request fail=" + topics + " at broker=" + metaBroker, e);
        error = e.getMessage();
      } finally {
        clusterConsumer.connections.returnConsumer(metaBroker, consumer);
      }
    }

    metaBrokers.addAll(newMetaBrokers);
    if (partBrokers != null) {
      return partBrokers;
    } else {
      throw new IOException("meta request fail=" + error + " for topics=" + topics);
    }
  }
Example #6
  private PartitionMetadata findLeader(
      List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
    PartitionMetadata returnMetaData = null;
    loop:
    for (String seed : a_seedBrokers) {
      SimpleConsumer consumer = null;
      try {
        consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
        List<String> topics = Collections.singletonList(a_topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == a_partition) {
              returnMetaData = part;
              break loop;
            }
          }
        }
      } catch (Exception e) {
        System.out.println(
            "Error communicating with Broker ["
                + seed
                + "] to find Leader for ["
                + a_topic
                + ", "
                + a_partition
                + "] Reason: "
                + e);
      } finally {
        if (consumer != null) {
          consumer.close();
        }
      }
    }
    if (returnMetaData != null) {
      m_replicaBrokers.clear();
      for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
        m_replicaBrokers.add(replica.host());
      }
    }
    return returnMetaData;
  }
Example #7
    public PartitionManager(int partition) {
      _partition = partition;
      ZooMeta zooMeta = (ZooMeta) _state.getData(committedPath());
      SimpleConsumer consumer = _partitions.getConsumer(_partition);
      int hostPartition = _partitions.getHostPartition(_partition);

      // the id stuff makes sure the spout doesn't reset the offset if it restarts
      // if(zooMeta==null || (!_uuid.equals(zooMeta.id) && _spoutConfig.forceFromStart)) {
      if (zooMeta == null || (_spoutConfig.forceFromStart)) {
        _committedTo =
            consumer
                .getOffsetsBefore(
                    _spoutConfig.topic, hostPartition, _spoutConfig.startOffsetTime, 1)[0];
      } else {
        _committedTo = zooMeta.offset;
      }
      _emittedToOffset = _committedTo;
    }
Example #8
    /** Consume messages from Kafka into the waiting-to-emit list. */
    private void fill() {
      SimpleConsumer consumer = _partitions.getConsumer(_partition);
      int hostPartition = _partitions.getHostPartition(_partition);
      ByteBufferMessageSet msgs =
          consumer.fetch(
              new FetchRequest(
                  _spoutConfig.topic,
                  hostPartition,
                  _emittedToOffset,
                  _spoutConfig.fetchSizeBytes));

      // LOG.info("partition fill msgs " + msgs);
      for (MessageAndOffset msg : msgs) {
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        // in the 0.7-era API used here, msg.offset() is the position of the
        // *next* message, so this advances the fetch offset
        _emittedToOffset = msg.offset();
        // LOG.info("partition fill msg " + msg + " offset" + _emittedToOffset);
      }
    }
Example #9
  public void close() throws IOException {
    if (_consumer != null) _consumer.close();

    String topic = _request.getTopic();
    long endTime = System.currentTimeMillis();
    _reporter.incrCounter(topic, "read-time(ms)", endTime - _startTime);
    _reporter.incrCounter(topic, "request-time(ms)", _requestTime);

    // in the 0.7-era API, offsets are byte positions, so their difference
    // is the number of bytes read
    long bytesRead = _offset - _offsetRange[0];
    double megaRead = bytesRead / (1024.0 * 1024.0);
    _reporter.incrCounter(topic, "data-read(mb)", (long) megaRead);
    _reporter.incrCounter(topic, "event-count", _count);
  }
Example #10
  /** Get offset ranges */
  protected long[] getOffsetRange() throws IOException {

    /* get smallest and largest offsets*/
    long[] range = new long[2];

    long[] offsets =
        _consumer.getOffsetsBefore(
            _request.getTopic(), _request.getPartition(), OffsetRequest.EARLIEST_TIME(), 1);
    if (offsets.length != 1)
      throw new IOException(
          "input:" + _input + " Expected exactly one earliest offset but got " + offsets.length);
    range[0] = offsets[0];

    offsets =
        _consumer.getOffsetsBefore(
            _request.getTopic(), _request.getPartition(), OffsetRequest.LATEST_TIME(), 1);
    if (offsets.length != 1)
      throw new IOException(
          "input:" + _input + " Expected exactly one latest offset but got " + offsets.length);
    range[1] = offsets[0];

    /*adjust range based on input offsets*/
    if (_request.isValidOffset()) {
      long startOffset = _request.getOffset();
      if (startOffset > range[0]) {
        System.out.println("Updating starting offset to " + startOffset);
        range[0] = startOffset;
      } else {
        System.out.println(
            "WARNING: given starting offset "
                + startOffset
                + " is smaller than the earliest available offset "
                + range[0]
                + ". Ignoring it.");
      }
    }
    return range;
  }
Example #11
  public boolean fetchMore() throws IOException {
    if (!hasMore()) return false;

    FetchRequest fetchRequest =
        new FetchRequest(_request.getTopic(), _request.getPartition(), _offset, _bufferSize);
    List<FetchRequest> array = new ArrayList<FetchRequest>();
    array.add(fetchRequest);

    long tempTime = System.currentTimeMillis();
    _response = _consumer.multifetch(array);
    _requestTime += (System.currentTimeMillis() - tempTime);

    return true;
  }
Example #12
  public long getOffset(String topic, int partition, long startOffsetTime) {
    SimpleConsumer simpleConsumer = findLeaderConsumer(partition);

    if (simpleConsumer == null) {
      LOG.error("Error consumer is null get offset from partition:" + partition);
      return -1;
    }

    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request =
        new OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());

    long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
      return offsets[0];
    } else {
      return NO_OFFSET;
    }
  }
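A hedged call-site sketch for getOffset(); the topic name is a placeholder, and NO_OFFSET is the project-specific sentinel the method already returns for an empty response.

  // Hypothetical usage: ask for the earliest retained offset of partition 0.
  long earliest = getOffset("my-topic", 0, kafka.api.OffsetRequest.EarliestTime());
  if (earliest == -1 || earliest == NO_OFFSET) {
    LOG.warn("no offset available yet for partition 0");
  }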
Example #13
  public static long getLastOffset(
      SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request =
        new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);

    if (response.hasError()) {
      System.out.println(
          "Error fetching offset data from the Broker. Reason: "
              + response.errorCode(topic, partition));
      return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
  }
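For reference, a hedged call site for getLastOffset(); it assumes consumer is already connected to the partition's leader, as in the run() loop of Example #19 below, and the topic and client names are placeholders.

  // Hypothetical usage: start reading from the oldest message still on the broker.
  long readOffset =
      getLastOffset(consumer, "my-topic", 0, kafka.api.OffsetRequest.EarliestTime(), "my-client");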
Example #14
  //    @Ignore
  @Test
  public void test() throws EventDeliveryException, UnsupportedEncodingException {
    Transaction tx = channel.getTransaction();
    tx.begin();

    ObjectNode jsonBody = new ObjectNode(JsonNodeFactory.instance);
    jsonBody.put("myString", "foo");
    jsonBody.put("myInt32", 32);

    Map<String, String> headers = new HashMap<String, String>();
    headers.put("myString", "bar");
    headers.put("myInt64", "64");
    headers.put("myBoolean", "true");
    headers.put("myDouble", "1.0");
    headers.put("myNull", "foobar");

    Event event = EventBuilder.withBody(jsonBody.toString().getBytes(Charsets.UTF_8), headers);
    channel.put(event);

    tx.commit();
    tx.close();

    kafkaSink.process();

    kafka.api.FetchRequest req =
        new FetchRequestBuilder().clientId(CLIENT_ID).addFetch("test", 0, 0L, 100).build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = fetchResponse.messageSet("test", 0);

    // guard against a vacuous pass: the loop below never runs on an empty set
    Assert.assertTrue(messageSet.sizeInBytes() > 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
      ByteBuffer payload = messageAndOffset.message().payload();
      byte[] bytes = new byte[payload.limit()];
      payload.get(bytes);
      String message = new String(bytes, "UTF-8");
      Assert.assertNotNull(message);
      Assert.assertEquals(message, "{\"myString\":\"foo\",\"myInt32\":32}");
    }
  }
Example #15
  public boolean fetchMore() throws IOException {
    if (!hasMore()) return false;

    FetchRequest fetchRequest =
        builder
            .clientId(_request.clientId())
            .addFetch(_request.getTopic(), _request.getPartition(), _offset, _bufferSize)
            .build();

    long tempTime = System.currentTimeMillis();
    _response = _consumer.fetch(fetchRequest);
    if (_response != null) {
      // wrap the single message set in an iterator without the
      // double-brace ArrayList idiom
      _respIterator =
          Collections.singletonList(
                  _response.messageSet(_request.getTopic(), _request.getPartition()))
              .iterator();
    }
    _requestTime += (System.currentTimeMillis() - tempTime);

    return true;
  }
Example #16
  public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

    String topic = config.topic;
    FetchRequest req =
        new FetchRequestBuilder()
            .clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs)
            .build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
      simpleConsumer = findLeaderConsumer(partition);
      if (simpleConsumer == null) {
        LOG.warn("no leader consumer found for partition " + partition);
        return null;
      }
      fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
      if (e instanceof ConnectException
          || e instanceof SocketTimeoutException
          || e instanceof IOException
          || e instanceof UnresolvedAddressException) {
        LOG.warn("Network error when fetching messages:", e);
        if (simpleConsumer != null) {
          String host = simpleConsumer.host();
          int port = simpleConsumer.port();
          simpleConsumer = null;
          throw new KafkaException(
              "Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(),
              e);
        }

      } else {
        throw new RuntimeException(e);
      }
    }
    if (fetchResponse == null) {
      // the catch block above can swallow a network error without setting
      // a response; treat that like a failed fetch
      return null;
    }
    if (fetchResponse.hasError()) {
      short code = fetchResponse.errorCode(topic, partition);
      if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
        long startOffset = getOffset(topic, partition, config.startOffsetTime);
        offset = startOffset;
      }
      if (leaderBroker != null) {
        LOG.error(
            "fetch data from kafka topic["
                + config.topic
                + "] host["
                + leaderBroker.host()
                + ":"
                + leaderBroker.port()
                + "] partition["
                + partition
                + "] error:"
                + code);
      }
      return null;
    }
    return fetchResponse.messageSet(topic, partition);
  }
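Because fetchMessages() returns null both when no leader is known and when the fetch response carries an error, callers typically retry before giving up. A hedged sketch built on the method above (the retry count is an arbitrary assumption):

  // Hypothetical wrapper: retry a few times, since null signals either a
  // missing leader or an errored fetch response.
  public ByteBufferMessageSet fetchWithRetry(int partition, long offset) throws IOException {
    for (int attempt = 0; attempt < 3; attempt++) {
      ByteBufferMessageSet msgs = fetchMessages(partition, offset);
      if (msgs != null) {
        return msgs;
      }
    }
    LOG.warn("giving up on partition " + partition + " at offset " + offset);
    return null;
  }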
Example #17
 public void close() {
   if (consumer != null) {
     consumer.close();
   }
 }
Example #18
  protected PartitionMetadata findLeader(int partition) {
    PartitionMetadata returnMetaData = null;
    int errors = 0;
    int size = brokerList.size();

    Host brokerHost = brokerList.get(brokerIndex);
    try {
      if (consumer == null) {
        consumer =
            new SimpleConsumer(
                brokerHost.getHost(),
                brokerHost.getPort(),
                config.socketTimeoutMs,
                config.socketReceiveBufferBytes,
                config.clientId);
      }
    } catch (Exception e) {
      LOG.warn(e.getMessage(), e);
      consumer = null;
    }
    int i = brokerIndex;
    int attempts = 0;
    // bound the scan by an attempt counter: i wraps around modulo size, so
    // "i < size" alone can never terminate the loop when every metadata
    // response succeeds but none of them contains the partition
    loop:
    while (attempts++ < size && errors < size + 1) {
      Host host = brokerList.get(i);
      i = (i + 1) % size;
      brokerIndex = i; // next index
      try {

        if (consumer == null) {
          consumer =
              new SimpleConsumer(
                  host.getHost(),
                  host.getPort(),
                  config.socketTimeoutMs,
                  config.socketReceiveBufferBytes,
                  config.clientId);
        }
        List<String> topics = Collections.singletonList(config.topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse resp = null;
        try {
          resp = consumer.send(req);
        } catch (Exception e) {
          errors += 1;

          LOG.error(
              "findLeader error, broker:"
                  + host.toString()
                  + ", will change to next broker index:"
                  + i, // i already points at the next index here
              e);
          if (consumer != null) {
            consumer.close();
            consumer = null;
          }
          continue;
        }

        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == partition) {
              returnMetaData = part;
              break loop;
            }
          }
        }

      } catch (Exception e) {
        LOG.error(
            "Error communicating with Broker:"
                + host.toString()
                + " while finding leader for partition:"
                + partition,
            e);
      } finally {
        if (consumer != null) {
          consumer.close();
          consumer = null;
        }
      }
    }

    return returnMetaData;
  }
Example #19
  public void run(
      long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, int a_port)
      throws Exception {
    // find the meta data about the topic and partition we are interested in
    //
    PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
    if (metadata == null) {
      System.out.println("Can't find metadata for Topic and Partition. Exiting");
      return;
    }
    if (metadata.leader() == null) {
      System.out.println("Can't find Leader for Topic and Partition. Exiting");
      return;
    }
    String leadBroker = metadata.leader().host();
    String clientName = "Client_" + a_topic + "_" + a_partition;

    SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
    long readOffset =
        getLastOffset(
            consumer, a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (a_maxReads > 0) {
      if (consumer == null) {
        consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
      }
      FetchRequest req =
          new FetchRequestBuilder()
              .clientId(clientName)
              // Note: this fetchSize of 100000 might need to be increased
              // if large batches are written to Kafka
              .addFetch(a_topic, a_partition, readOffset, 100000)
              .build();
      FetchResponse fetchResponse = consumer.fetch(req);

      if (fetchResponse.hasError()) {
        numErrors++;
        // Something went wrong!
        short code = fetchResponse.errorCode(a_topic, a_partition);
        System.out.println(
            "Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
        if (numErrors > 5) {
          break;
        }
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
          // We asked for an invalid offset. For simple case ask for the last element to reset
          readOffset =
              getLastOffset(
                  consumer, a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
          continue;
        }
        consumer.close();
        consumer = null;
        leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
        continue;
      }
      numErrors = 0;

      long numRead = 0;
      for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
          System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
          continue;
        }
        readOffset = messageAndOffset.nextOffset();
        ByteBuffer payload = messageAndOffset.message().payload();

        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(messageAndOffset.offset() + ": " + new String(bytes, "UTF-8"));
        numRead++;
        a_maxReads--;
      }

      if (numRead == 0) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt(); // preserve the interrupt flag
        }
      }
    }
    if (consumer != null) {
      consumer.close();
    }
  }
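A hedged driver for run(); the enclosing class name SimpleConsumerExample, the broker host, and the topic are placeholders rather than part of the original listing.

  // Hypothetical entry point: read up to 10 messages from partition 0,
  // seeding leader discovery with a single local broker.
  public static void main(String[] args) throws Exception {
    SimpleConsumerExample example = new SimpleConsumerExample();
    List<String> seeds = new ArrayList<String>();
    seeds.add("localhost");
    example.run(10, "my-topic", 0, seeds, 9092);
  }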
Example #20
 public void close() {
   consumer.close();
 }