예제 #1
0
  /**
   * Fetches topic metadata from Kafka and rebuilds the broker-details snapshot.
   *
   * <p>For each topic, records partition id, leader host, replica hosts, and the
   * in-sync-replica (ISR) hosts, then publishes the result into {@code current}.
   *
   * <p>NOTE(review): only the LAST partition of each topic survives, because the
   * same keys ("partition", "leader", ...) are overwritten for every partition —
   * this matches the original behavior; confirm whether per-partition detail is
   * actually wanted by callers of {@code current}.
   */
  public static void fetch() {
    HashMap<String, HashMap<String, String>> brokerDetails = new HashMap<>();
    List<kafka.javaapi.TopicMetadata> data = MetadataDump();

    for (kafka.javaapi.TopicMetadata item : data) {
      String topic = item.topic();
      // BUG FIX: allocate a fresh map per topic. The original reused a single
      // HashMap created before the loop, so every value in brokerDetails was the
      // SAME object and all topics reported the last topic's metadata.
      HashMap<String, String> topicDetails = new HashMap<>();
      for (kafka.javaapi.PartitionMetadata part : item.partitionsMetadata()) {
        // StringBuilder avoids O(n^2) string concatenation inside the loops.
        StringBuilder replicas = new StringBuilder();
        StringBuilder isr = new StringBuilder();
        for (kafka.cluster.Broker replica : part.replicas()) {
          replicas.append(' ').append(replica.host());
        }
        for (kafka.cluster.Broker replica : part.isr()) {
          isr.append(' ').append(replica.host());
        }
        topicDetails.put("partition", String.valueOf(part.partitionId()));
        // Guard against a leaderless partition (leader() can be null while a
        // leader election is in progress); the original threw an NPE here.
        kafka.cluster.Broker leader = part.leader();
        topicDetails.put("leader", leader == null ? "" : leader.host());
        topicDetails.put("replicas", "[" + replicas + "]");
        topicDetails.put("isr", "[" + isr + "]");
      }

      brokerDetails.put(topic, topicDetails);
    }
    current = brokerDetails;
  }
예제 #2
0
  /**
   * Resolves the leader broker for every partition of the given topics.
   *
   * <p>Tries each known meta broker in turn; the first successful metadata
   * response wins. Any previously unknown leader brokers discovered in the
   * response are added to {@code metaBrokers} for future lookups.
   *
   * @param topics topics whose partition leaders should be resolved
   * @return a map from partition to its current leader broker
   * @throws IOException if every meta broker failed to answer the request;
   *     the last failure is attached as the cause
   */
  public Map<Partition, Broker> findPartBrokers(List<String> topics) throws IOException {
    Set<Broker> newMetaBrokers = new HashSet<Broker>();
    Map<Partition, Broker> partBrokers = null;
    // BUG FIX: keep the whole exception, not just getMessage(), so the stack
    // trace of the last failure is preserved when we give up below.
    Exception lastError = null;

    for (Broker metaBroker : metaBrokers) {
      SimpleConsumer consumer = null;
      try {
        consumer = clusterConsumer.connections.getConsumer(metaBroker);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);

        List<TopicMetadata> metadatas = response.topicsMetadata();
        Map<Partition, Broker> _partBrokers = new TreeMap<Partition, Broker>();

        for (TopicMetadata metadata : metadatas) {
          for (PartitionMetadata partitionMetadata : metadata.partitionsMetadata()) {
            Partition partition = new Partition(metadata.topic(), partitionMetadata.partitionId());
            Broker broker = new Broker(partitionMetadata.leader());
            _partBrokers.put(partition, broker);
            if (LOG.isDebugEnabled()) {
              LOG.debug("find partition=" + partition + " at broker=" + broker);
            }

            // Remember leaders we did not already know as meta brokers.
            if (!metaBrokers.contains(broker)) {
              LOG.info("find new meta broker=" + broker);
              newMetaBrokers.add(broker);
            }
          }
        }
        partBrokers = _partBrokers;
        break; // first successful broker wins; stop retrying
      } catch (Exception e) {
        // Best-effort retry against the next meta broker; log and remember
        // the failure so it can be surfaced if ALL brokers fail.
        LOG.warn("topic meta data request fail=" + topics + " at broker=" + metaBroker, e);
        lastError = e;
      } finally {
        // NOTE(review): consumer may be null when getConsumer() itself threw —
        // assumes returnConsumer tolerates null, as the original code did.
        clusterConsumer.connections.returnConsumer(metaBroker, consumer);
      }
    }

    // Deferred until after iteration to avoid mutating metaBrokers mid-loop.
    metaBrokers.addAll(newMetaBrokers);
    if (partBrokers != null) {
      return partBrokers;
    }
    throw new IOException(
        "meta request fail=" + (lastError == null ? null : lastError.getMessage())
            + " for topics=" + topics,
        lastError);
  }
 /**
  * Refreshes the cached topic metadata for the given topics.
  *
  * <p>Parses the configured broker list, fetches fresh metadata via the Kafka
  * client utilities, merges the error-free entries into the metadata cache, and
  * logs every topic whose metadata could not be retrieved.
  *
  * @param topics topics whose metadata should be refreshed
  * @see ConnectionFactory#refreshMetadata(Collection)
  */
 @Override
 public void refreshMetadata(Collection<String> topics) {
   // BUG FIX: acquire the lock BEFORE entering the try block. If lock() were to
   // fail inside the try, the finally would call unlock() on a lock this thread
   // does not hold, throwing IllegalMonitorStateException and masking the
   // original error. (Standard ReentrantReadWriteLock idiom.)
   this.lock.writeLock().lock();
   try {
     String brokerAddressesAsString =
         ListIterate.collect(this.configuration.getBrokerAddresses(), Functions.getToString())
             .makeString(",");
     Seq<Broker> brokers = null;
     try {
       brokers = ClientUtils$.MODULE$.parseBrokerList(brokerAddressesAsString);
     } catch (Exception e) {
       // Preserve the parse failure as the cause with the offending input.
       throw new IllegalStateException(
           "Can not parse Kafka Brokers for: [" + brokerAddressesAsString + "]", e);
     }
     TopicMetadataResponse topicMetadataResponse =
         new TopicMetadataResponse(
             ClientUtils$.MODULE$.fetchTopicMetadata(
                 JavaConversions.asScalaSet(new HashSet<>(topics)),
                 brokers,
                 this.configuration.getClientId(),
                 this.configuration.getFetchMetadataTimeout(),
                 0));
     // Split the response into error-free metadata (merged into the cache) and
     // rejected entries (logged below).
     PartitionIterable<TopicMetadata> selectWithoutErrors =
         Iterate.partition(
             topicMetadataResponse.topicsMetadata(), errorlessTopicMetadataPredicate);
     this.metadataCacheHolder.set(
         this.metadataCacheHolder.get().merge(selectWithoutErrors.getSelected()));
     if (log.isInfoEnabled()) {
       for (TopicMetadata topicMetadata : selectWithoutErrors.getRejected()) {
         // Surface the broker-side error code as a mapped exception for context.
         log.info(
             String.format("No metadata could be retrieved for '%s'", topicMetadata.topic()),
             ErrorMapping.exceptionFor(topicMetadata.errorCode()));
       }
     }
   } finally {
     this.lock.writeLock().unlock();
   }
 }