Example no. 1
 //    @Ignore
 @After
 public void tearDown() throws IOException {
   // Stop the sink under test and release the consumer used for verification.
   kafkaSink.stop();
   simpleConsumer.close();
   // The embedded Kafka/ZooKeeper servers are left running; their shutdown
   // calls are commented out.
   //        kafkaServer.shutdown();
   //        zookeeperServer.shutdown();
 }
Example no. 2
  public void close() throws IOException {
    if (_consumer != null) _consumer.close();

    String topic = _request.getTopic();
    long endTime = System.currentTimeMillis();
    _reporter.incrCounter(topic, "read-time(ms)", endTime - _startTime);
    _reporter.incrCounter(topic, "request-time(ms)", _requestTime);

    // Report the offset delta as data read, converted to megabytes.
    long bytesRead = _offset - _offsetRange[0];
    double megaRead = bytesRead / (1024.0 * 1024.0);
    _reporter.incrCounter(topic, "data-read(mb)", (long) megaRead);
    _reporter.incrCounter(topic, "event-count", _count);
  }
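Note: the data-read(mb) counter treats the offset delta as a byte count. That holds for the old Kafka 0.7 wire format, where offsets were byte positions; with the logical per-message offsets introduced in 0.8, _offset - _offsetRange[0] would count messages rather than bytes.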
Example no. 3
  private PartitionMetadata findLeader(
      List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
    PartitionMetadata returnMetaData = null;
    loop:
    for (String seed : a_seedBrokers) {
      SimpleConsumer consumer = null;
      try {
        // Query this seed broker for the topic's metadata.
        consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
        List<String> topics = Collections.singletonList(a_topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

        // Scan the returned metadata for the requested partition.
        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == a_partition) {
              returnMetaData = part;
              break loop;
            }
          }
        }
      } catch (Exception e) {
        System.out.println(
            "Error communicating with Broker ["
                + seed
                + "] to find Leader for ["
                + a_topic
                + ", "
                + a_partition
                + "] Reason: "
                + e);
      } finally {
        if (consumer != null) {
          consumer.close();
        }
      }
    }
    if (returnMetaData != null) {
      // Remember the replica hosts so a new leader can be found on failover.
      m_replicaBrokers.clear();
      for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
        m_replicaBrokers.add(replica.host());
      }
    }
    return returnMetaData;
  }
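For reference, a minimal sketch of how this lookup is typically invoked (compare the run method in Example no. 7 below); the broker host, port, topic, and partition values are placeholders, not taken from the original project:

  // Placeholder values for illustration only.
  List<String> seedBrokers = Collections.singletonList("broker1.example.com");
  PartitionMetadata metadata = findLeader(seedBrokers, 9092, "my-topic", 0);
  if (metadata == null || metadata.leader() == null) {
    System.out.println("Can't find a leader for the topic/partition. Exiting");
  } else {
    System.out.println("Leader is " + metadata.leader().host());
  }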
Example no. 4
 public void close() {
   if (consumer != null) {
     consumer.close();
   }
 }
Example no. 5
  protected PartitionMetadata findLeader(int partition) {
    PartitionMetadata returnMetaData = null;
    int errors = 0;
    int size = brokerList.size();

    // Eagerly connect to the broker at the current index; on failure, the
    // loop below retries against the remaining brokers.
    Host brokerHost = brokerList.get(brokerIndex);
    try {
      if (consumer == null) {
        consumer =
            new SimpleConsumer(
                brokerHost.getHost(),
                brokerHost.getPort(),
                config.socketTimeoutMs,
                config.socketReceiveBufferBytes,
                config.clientId);
      }
    } catch (Exception e) {
      LOG.warn(e.getMessage(), e);
      consumer = null;
    }
    int i = brokerIndex;
    loop:
    // Walk the broker list, giving up after size + 1 failed metadata requests.
    while (i < size && errors < size + 1) {
      Host host = brokerList.get(i);
      i = (i + 1) % size;
      brokerIndex = i; // remember where the next lookup should start
      try {

        if (consumer == null) {
          consumer =
              new SimpleConsumer(
                  host.getHost(),
                  host.getPort(),
                  config.socketTimeoutMs,
                  config.socketReceiveBufferBytes,
                  config.clientId);
        }
        List<String> topics = Collections.singletonList(config.topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse resp = null;
        try {
          resp = consumer.send(req);
        } catch (Exception e) {
          errors += 1;

          // i was already advanced above, so it is the next broker index.
          LOG.error(
              "findLeader error, broker:"
                  + host.toString()
                  + ", will change to next broker index:"
                  + i,
              e);
          if (consumer != null) {
            consumer.close();
            consumer = null;
          }
          continue;
        }

        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == partition) {
              returnMetaData = part;
              break loop;
            }
          }
        }

      } catch (Exception e) {
        LOG.error(
            "Error communicating with Broker:"
                + host.toString()
                + ", find Leader for partition:"
                + partition,
            e);
      } finally {
        if (consumer != null) {
          consumer.close();
          consumer = null;
        }
      }
    }

    return returnMetaData;
  }
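A caller would typically use the returned metadata to connect a consumer to the partition leader, along these lines (a sketch reusing the snippet's config fields; the enclosing class and the leaderConsumer name are assumptions):

  // Hypothetical caller; partition and config come from the enclosing class.
  PartitionMetadata metadata = findLeader(partition);
  if (metadata != null && metadata.leader() != null) {
    SimpleConsumer leaderConsumer =
        new SimpleConsumer(
            metadata.leader().host(),
            metadata.leader().port(),
            config.socketTimeoutMs,
            config.socketReceiveBufferBytes,
            config.clientId);
  }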
Example no. 6
 public void close() {
   consumer.close();
 }
Example no. 7
  public void run(
      long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, int a_port)
      throws Exception {
    // Find the metadata for the topic and partition we are interested in.
    PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
    if (metadata == null) {
      System.out.println("Can't find metadata for Topic and Partition. Exiting");
      return;
    }
    if (metadata.leader() == null) {
      System.out.println("Can't find Leader for Topic and Partition. Exiting");
      return;
    }
    String leadBroker = metadata.leader().host();
    String clientName = "Client_" + a_topic + "_" + a_partition;

    SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
    long readOffset =
        getLastOffset(
            consumer, a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (a_maxReads > 0) {
      // The consumer is closed and nulled out when the leader changes; reconnect.
      if (consumer == null) {
        consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
      }
      // Note: this fetchSize of 100000 might need to be increased if large
      // batches are written to Kafka.
      FetchRequest req =
          new FetchRequestBuilder()
              .clientId(clientName)
              .addFetch(a_topic, a_partition, readOffset, 100000)
              .build();
      FetchResponse fetchResponse = consumer.fetch(req);

      if (fetchResponse.hasError()) {
        numErrors++;
        // Something went wrong!
        short code = fetchResponse.errorCode(a_topic, a_partition);
        System.out.println(
            "Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
        if (numErrors > 5) {
          break;
        }
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
          // We asked for an invalid offset; for this simple case, reset to the latest offset.
          readOffset =
              getLastOffset(
                  consumer, a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
          continue;
        }
        consumer.close();
        consumer = null;
        leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
        continue;
      }
      numErrors = 0;

      long numRead = 0;
      for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
          System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
          continue;
        }
        readOffset = messageAndOffset.nextOffset();
        ByteBuffer payload = messageAndOffset.message().payload();

        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(
            String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
        numRead++;
        a_maxReads--;
      }

      if (numRead == 0) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          // Restore the interrupt flag so callers can observe the interruption.
          Thread.currentThread().interrupt();
        }
      }
    }
    if (consumer != null) {
      consumer.close();
    }
  }
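run relies on getLastOffset and findNewLeader helpers that are not shown above. A sketch of getLastOffset, modeled on the standard offset lookup against the old kafka.javaapi (treat the exact types and signatures as assumptions tied to Kafka 0.8.x):

  import java.util.HashMap;
  import java.util.Map;
  import kafka.api.PartitionOffsetRequestInfo;
  import kafka.common.TopicAndPartition;
  import kafka.javaapi.OffsetResponse;

  public static long getLastOffset(
      SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    // Ask for a single offset at or before whichTime (EarliestTime/LatestTime).
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request =
        new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      System.out.println(
          "Error fetching offset data from the Broker. Reason: "
              + response.errorCode(topic, partition));
      return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
  }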