Example #1
  public static void fetch() {
    HashMap<String, HashMap<String, String>> brokerDetails = new HashMap<>();
    List<kafka.javaapi.TopicMetadata> data = MetadataDump();
    String topic;

    for (kafka.javaapi.TopicMetadata item : data) {
      topic = item.topic();
      // Fresh map per topic so topics do not share (and overwrite) the same details.
      HashMap<String, String> topicDetails = new HashMap<>();
      for (kafka.javaapi.PartitionMetadata part : item.partitionsMetadata()) {
        // Collect the replica and in-sync-replica hosts for this partition.
        StringBuilder replicas = new StringBuilder();
        StringBuilder isr = new StringBuilder();
        for (kafka.cluster.Broker replica : part.replicas()) {
          replicas.append(" ").append(replica.host());
        }
        for (kafka.cluster.Broker replica : part.isr()) {
          isr.append(" ").append(replica.host());
        }
        // Each partition overwrites these keys, so the map ends up holding
        // the details of the topic's last partition.
        topicDetails.put("partition", String.valueOf(part.partitionId()));
        topicDetails.put("leader", part.leader().host());
        topicDetails.put("replicas", "[" + replicas + "]");
        topicDetails.put("isr", "[" + isr + "]");
      }

      brokerDetails.put(topic, topicDetails);
    }
    current = brokerDetails;
  }
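A minimal usage sketch for the snapshot above, assuming MetadataDump() and the static current field are defined in the same class and the usual java.util imports are present; the printing loop itself is purely illustrative:

  // Hypothetical call site: refresh the snapshot and print leader/ISR per topic.
  fetch();
  for (Map.Entry<String, HashMap<String, String>> entry : current.entrySet()) {
    System.out.println("topic=" + entry.getKey()
        + " leader=" + entry.getValue().get("leader")
        + " isr=" + entry.getValue().get("isr"));
  }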
Example #2
  public Map<Partition, Broker> findPartBrokers(List<String> topics) throws IOException {
    Set<Broker> newMetaBrokers = new HashSet<Broker>();
    Map<Partition, Broker> partBrokers = null;
    String error = null;

    // Try each known metadata broker until one of them answers the request.
    for (Broker metaBroker : metaBrokers) {
      SimpleConsumer consumer = null;
      try {
        consumer = clusterConsumer.connections.getConsumer(metaBroker);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);

        List<TopicMetadata> metadatas = response.topicsMetadata();
        Map<Partition, Broker> _partBrokers = new TreeMap<Partition, Broker>();

        for (TopicMetadata metadata : metadatas) {
          for (PartitionMetadata partitionMetadata : metadata.partitionsMetadata()) {
            Partition partition = new Partition(metadata.topic(), partitionMetadata.partitionId());
            Broker broker = new Broker(partitionMetadata.leader());
            _partBrokers.put(partition, broker);
            if (LOG.isDebugEnabled()) {
              LOG.debug("find partition=" + partition + " at broker=" + broker);
            }

            if (!metaBrokers.contains(broker)) {
              LOG.info("find new meta broker=" + broker);
              newMetaBrokers.add(broker);
            }
          }
        }
        // Success: keep this mapping and stop querying further brokers.
        partBrokers = _partBrokers;
        break;
      } catch (Exception e) {
        LOG.warn("topic meta data request fail=" + topics + " at broker=" + metaBroker, e);
        error = e.getMessage();
      } finally {
        clusterConsumer.connections.returnConsumer(metaBroker, consumer);
      }
    }

    // Register any newly discovered brokers for future metadata lookups.
    metaBrokers.addAll(newMetaBrokers);
    if (partBrokers != null) {
      return partBrokers;
    } else {
      throw new IOException("meta request fail=" + error + " for topics=" + topics);
    }
  }
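A hedged usage sketch, assuming the enclosing class already provides the metaBrokers set and the clusterConsumer connection pool used above; the topic names below are placeholders:

  // Hypothetical call site: resolve the leader broker of every partition of two topics.
  Map<Partition, Broker> leaders = findPartBrokers(Arrays.asList("topic-a", "topic-b"));
  for (Map.Entry<Partition, Broker> entry : leaders.entrySet()) {
    System.out.println("partition=" + entry.getKey() + " leader=" + entry.getValue());
  }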
Example #3
  private PartitionMetadata findLeader(
      List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
    PartitionMetadata returnMetaData = null;
    // Ask each seed broker in turn for the topic's metadata.
    loop:
    for (String seed : a_seedBrokers) {
      SimpleConsumer consumer = null;
      try {
        consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
        List<String> topics = Collections.singletonList(a_topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == a_partition) {
              // Found the partition we are looking for; stop searching.
              returnMetaData = part;
              break loop;
            }
          }
        }
      } catch (Exception e) {
        System.out.println(
            "Error communicating with Broker ["
                + seed
                + "] to find Leader for ["
                + a_topic
                + ", "
                + a_partition
                + "] Reason: "
                + e);
      } finally {
        if (consumer != null) {
          consumer.close();
        }
      }
    }
    if (returnMetaData != null) {
      // Remember the replica hosts so they can be used for leader fail-over.
      m_replicaBrokers.clear();
      for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
        m_replicaBrokers.add(replica.host());
      }
    }
    return returnMetaData;
  }
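A short usage sketch, assuming m_replicaBrokers is a field of the enclosing class (as in the stock SimpleConsumer example); the seed hosts, port and topic are placeholders:

  // Hypothetical call site: find the leader of partition 0 and read its host.
  List<String> seeds = Arrays.asList("broker1.example.com", "broker2.example.com");
  PartitionMetadata metadata = findLeader(seeds, 9092, "my-topic", 0);
  if (metadata == null || metadata.leader() == null) {
    throw new RuntimeException("Can't find leader for my-topic/0");
  }
  String leadBroker = metadata.leader().host();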
Example #4
  protected PartitionMetadata findLeader(int partition) {
    PartitionMetadata returnMetaData = null;
    int errors = 0;
    int size = brokerList.size();

    // Re-use the cached consumer if possible; otherwise connect to the current broker.
    Host brokerHost = brokerList.get(brokerIndex);
    try {
      if (consumer == null) {
        consumer =
            new SimpleConsumer(
                brokerHost.getHost(),
                brokerHost.getPort(),
                config.socketTimeoutMs,
                config.socketReceiveBufferBytes,
                config.clientId);
      }
    } catch (Exception e) {
      LOG.warn(e.getMessage(), e);
      consumer = null;
    }
    int i = brokerIndex;
    // Walk the broker list, wrapping around, until the partition leader is
    // found or too many metadata requests have failed.
    loop:
    while (i < size && errors < size + 1) {
      Host host = brokerList.get(i);
      i = (i + 1) % size;
      brokerIndex = i; // next index
      try {
        if (consumer == null) {
          consumer =
              new SimpleConsumer(
                  host.getHost(),
                  host.getPort(),
                  config.socketTimeoutMs,
                  config.socketReceiveBufferBytes,
                  config.clientId);
        }
        List<String> topics = Collections.singletonList(config.topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        kafka.javaapi.TopicMetadataResponse resp = null;
        try {
          resp = consumer.send(req);
        } catch (Exception e) {
          errors += 1;

          // `i` was already advanced above, so it is the next index that will be tried.
          LOG.error(
              "findLeader error, broker:"
                  + host.toString()
                  + ", will change to next broker index:"
                  + i,
              e);
          if (consumer != null) {
            consumer.close();
            consumer = null;
          }
          continue;
        }

        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == partition) {
              // Found the requested partition; stop the broker search.
              returnMetaData = part;
              break loop;
            }
          }
        }

      } catch (Exception e) {
        LOG.error(
            "Error communicating with Broker:"
                + host.toString()
                + ", find Leader for partition:"
                + partition,
            e);
      } finally {
        if (consumer != null) {
          consumer.close();
          consumer = null;
        }
      }
    }

    return returnMetaData;
  }
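A brief usage sketch, assuming config, brokerList, brokerIndex and consumer are fields of the enclosing class as the example implies; the partition number is a placeholder:

  // Hypothetical call site: look up the leader for partition 0, cycling through brokers.
  PartitionMetadata meta = findLeader(0);
  if (meta != null && meta.leader() != null) {
    String leaderHost = meta.leader().host();
    int leaderPort = meta.leader().port();
    // A fetch consumer could now be opened against leaderHost:leaderPort.
  }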