Code Example #1: findLeaderConsumer — returns the cached SimpleConsumer if one is already connected, otherwise looks up the partition leader and opens a connection to it.
  private SimpleConsumer findLeaderConsumer(int partition) {
    try {
      // Reuse the cached consumer if one is already connected to the leader.
      if (consumer != null) {
        return consumer;
      }
      // Otherwise look up the current leader for this partition and connect to it.
      PartitionMetadata metadata = findLeader(partition);
      if (metadata == null) {
        leaderBroker = null;
        consumer = null;
        return null;
      }
      leaderBroker = metadata.leader();
      consumer =
          new SimpleConsumer(
              leaderBroker.host(),
              leaderBroker.port(),
              config.socketTimeoutMs,
              config.socketReceiveBufferBytes,
              config.clientId);

      return consumer;
    } catch (Exception e) {
      LOG.error(e.getMessage(), e);
    }
    return null;
  }
Code Example #2: fetch — walks the topic metadata and records partition, leader, replica, and ISR details per topic.
  public static void fetch() {
    HashMap<String, HashMap<String, String>> brokerDetails = new HashMap<>();
    List<kafka.javaapi.TopicMetadata> data = MetadataDump();
    String topic;

    for (kafka.javaapi.TopicMetadata item : data) {
      topic = item.topic();
      // Use a fresh map per topic so details are not shared between topics.
      HashMap<String, String> topicDetails = new HashMap<>();
      for (kafka.javaapi.PartitionMetadata part : item.partitionsMetadata()) {
        StringBuilder replicas = new StringBuilder();
        StringBuilder isr = new StringBuilder();
        for (kafka.cluster.Broker replica : part.replicas()) {
          replicas.append(" ").append(replica.host());
        }
        for (kafka.cluster.Broker replica : part.isr()) {
          isr.append(" ").append(replica.host());
        }
        // Note: each iteration overwrites these keys, so only the last
        // partition's details are kept for the topic.
        topicDetails.put("partition", String.valueOf(part.partitionId()));
        topicDetails.put("leader", part.leader().host());
        topicDetails.put("replicas", "[" + replicas + "]");
        topicDetails.put("isr", "[" + isr + "]");
      }

      brokerDetails.put(topic, topicDetails);
    }
    current = brokerDetails;
  }
Code Example #3: findLeader — asks each seed broker for topic metadata until it finds the requested partition, and records that partition's replica hosts.
  private PartitionMetadata findLeader(
      List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
    PartitionMetadata returnMetaData = null;
    // Query each seed broker in turn for topic metadata until the partition is found.
    loop:
    for (String seed : a_seedBrokers) {
      SimpleConsumer consumer = null;
      try {
        consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
        List<String> topics = Collections.singletonList(a_topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == a_partition) {
              returnMetaData = part;
              break loop;
            }
          }
        }
      } catch (Exception e) {
        System.out.println(
            "Error communicating with Broker ["
                + seed
                + "] to find Leader for ["
                + a_topic
                + ", "
                + a_partition
                + "] Reason: "
                + e);
      } finally {
        if (consumer != null) {
          consumer.close();
        }
      }
    }
    if (returnMetaData != null) {
      // Remember the replica hosts; they can serve as seeds if the leader later changes.
      m_replicaBrokers.clear();
      for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
        m_replicaBrokers.add(replica.host());
      }
    }
    return returnMetaData;
  }
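
In the standard SimpleConsumer pattern, findLeader is usually paired with a retry helper that re-queries the recorded replica brokers after a leader failure. A minimal sketch follows; findNewLeader is a hypothetical name, and the only identifiers reused from the example above are findLeader and m_replicaBrokers.

  private String findNewLeader(String oldLeader, String topic, int partition, int port)
      throws Exception {
    for (int i = 0; i < 3; i++) {
      // Re-run the metadata lookup against the replica brokers recorded by findLeader.
      PartitionMetadata metadata = findLeader(m_replicaBrokers, port, topic, partition);
      if (metadata != null
          && metadata.leader() != null
          && !(oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0)) {
        return metadata.leader().host();
      }
      // Give the cluster a moment to elect a new leader before retrying.
      Thread.sleep(1000);
    }
    throw new Exception("Unable to find new leader after broker failure");
  }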
Code Example #4: create — resolves the coordinator broker for a consumer group and builds a BlockingChannel to it.
  public BlockingChannel create(ConsumerGroupId consumerGroupId) {
    ConsumerMetadataResponse metadataResponse = readConsumerMetadata(consumerGroupId);

    if (metadataResponse.errorCode() != ErrorMapping.NoError()) {
      throw new ReadingConsumerMetadataException(metadataResponse.errorCode());
    }

    Broker coordinator = metadataResponse.coordinator();

    // Build a blocking channel pointed at the group's coordinator broker.
    // Note: the channel is returned unconnected; the caller is expected to call connect().
    BlockingChannel blockingChannel =
        new BlockingChannel(
            coordinator.host(),
            coordinator.port(),
            BlockingChannel.UseDefaultBufferSize(),
            BlockingChannel.UseDefaultBufferSize(),
            readTimeout);

    return blockingChannel;
  }
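
The returned channel still has to be connected before use. The sketch below assumes Kafka 0.8.2's kafka.javaapi offset-management classes (OffsetFetchRequest, OffsetFetchResponse, TopicAndPartition, OffsetMetadataAndError, where channel.receive().buffer() exposes the raw response) and uses made-up values for the group name, topic, correlation id, and client id; it shows how such a channel is typically used to read a committed offset.

  // Sketch only: "my-group", "my-topic", correlationId and clientId are illustrative.
  BlockingChannel channel = create(consumerGroupId);
  channel.connect();
  TopicAndPartition partition = new TopicAndPartition("my-topic", 0);
  OffsetFetchRequest fetchRequest = new OffsetFetchRequest(
      "my-group", Collections.singletonList(partition), (short) 1, correlationId, clientId);
  channel.send(fetchRequest.underlying());
  OffsetFetchResponse fetchResponse =
      OffsetFetchResponse.readFrom(channel.receive().buffer());
  OffsetMetadataAndError committed = fetchResponse.offsets().get(partition);
  long committedOffset = committed.offset();
  channel.disconnect();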
Code Example #5: fetchMessages — sends a FetchRequest to the partition leader and returns the resulting ByteBufferMessageSet, handling network and fetch errors along the way.
  public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

    String topic = config.topic;
    FetchRequest req =
        new FetchRequestBuilder()
            .clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs)
            .build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
      simpleConsumer = findLeaderConsumer(partition);
      if (simpleConsumer == null) {
        LOG.error("Cannot find leader consumer for partition " + partition + " of topic " + topic);
        return null;
      }
      fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
      if (e instanceof ConnectException
          || e instanceof SocketTimeoutException
          || e instanceof IOException
          || e instanceof UnresolvedAddressException) {
        LOG.warn("Network error when fetching messages:", e);
        if (simpleConsumer != null) {
          String host = simpleConsumer.host();
          int port = simpleConsumer.port();
          simpleConsumer = null;
          throw new KafkaException(
              "Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(),
              e);
        }

      } else {
        throw new RuntimeException(e);
      }
    }
    if (fetchResponse == null) {
      // No response was obtained (exception path that did not rethrow); nothing to decode.
      return null;
    }
    if (fetchResponse.hasError()) {
      short code = fetchResponse.errorCode(topic, partition);
      if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
        // Fall back to the configured start offset when the requested offset is out of range.
        // Note: this only updates the local copy; callers still receive the null return below.
        long startOffset = getOffset(topic, partition, config.startOffsetTime);
        offset = startOffset;
      }
      if (leaderBroker != null) {
        LOG.error(
            "fetch data from kafka topic["
                + config.topic
                + "] host["
                + leaderBroker.host()
                + ":"
                + leaderBroker.port()
                + "] partition["
                + partition
                + "] error:"
                + code);
      } else {
        LOG.error(
            "fetch data from kafka topic[" + config.topic + "] partition[" + partition
                + "] error:" + code + " (leader broker unknown)");
      }
      return null;
    } else {
      ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
      return msgs;
    }
  }
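
The ByteBufferMessageSet returned by fetchMessages is normally drained as kafka.message.MessageAndOffset entries. A minimal usage sketch, with process() standing in as a hypothetical handler for each raw payload and nextOffset() used to advance the fetch position:

  ByteBufferMessageSet msgs = fetchMessages(partition, offset);
  if (msgs != null) {
    for (MessageAndOffset messageAndOffset : msgs) {
      // Copy the message value out of the backing ByteBuffer.
      ByteBuffer payload = messageAndOffset.message().payload();
      byte[] bytes = new byte[payload.limit()];
      payload.get(bytes);
      process(bytes); // hypothetical handler for the message value
      // Remember the offset of the next message for the following fetch.
      offset = messageAndOffset.nextOffset();
    }
  }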