Example #1
  /**
   * Ensures an initialized kafka {@link ConsumerConnector} is present.
   *
   * @param config The storm configuration passed to {@link #open(Map, TopologyContext,
   *     SpoutOutputCollector)}.
   * @throws IllegalArgumentException When a required configuration parameter is missing or a sanity
   *     check fails.
   */
  protected void createConsumer(final Map<String, Object> config) {
    final Properties consumerConfig = createKafkaConfig(config);

    LOG.info(
        "connecting kafka client to zookeeper at {} as client group {}",
        consumerConfig.getProperty("zookeeper.connect"),
        consumerConfig.getProperty("group.id"));
    _consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerConfig));
  }
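The createKafkaConfig helper referenced above is not shown. A minimal sketch of what it might look like, assuming the storm configuration carries "kafka.zookeeper.connect" and "kafka.group.id" entries (both key names are assumptions), wired to the IllegalArgumentException contract from the Javadoc:

  // Hypothetical sketch; the real createKafkaConfig is not part of this example,
  // and the config key names are assumptions.
  private Properties createKafkaConfig(final Map<String, Object> config) {
    final Properties props = new Properties();
    final Object zkConnect = config.get("kafka.zookeeper.connect");
    final Object groupId = config.get("kafka.group.id");
    if (zkConnect == null || groupId == null) {
      // matches the @throws contract documented on createConsumer
      throw new IllegalArgumentException(
          "kafka.zookeeper.connect and kafka.group.id are required");
    }
    props.setProperty("zookeeper.connect", zkConnect.toString());
    props.setProperty("group.id", groupId.toString());
    return props;
  }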
Example #2
  private static List<KafkaStream<String, String>> openKafkaStream(
      String zkConnect, String group, String topic) {
    consumer = Consumer.createJavaConsumerConnector(getConsumerConfig(zkConnect, group));

    StringDecoder decoder = new StringDecoder(null);
    Map<String, Integer> topicCountMap = ImmutableMap.of(topic, 1);
    Map<String, List<KafkaStream<String, String>>> consumerMap =
        consumer.createMessageStreams(topicCountMap, decoder, decoder);

    return consumerMap.get(topic);
  }
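A caller would typically drain the returned streams with a ConsumerIterator. A minimal usage sketch (the printing is only a placeholder for real message handling):

  List<KafkaStream<String, String>> streams = openKafkaStream(zkConnect, group, topic);
  ConsumerIterator<String, String> it = streams.get(0).iterator();
  while (it.hasNext()) {
    MessageAndMetadata<String, String> record = it.next();
    System.out.printf(
        "partition=%d offset=%d value=%s%n",
        record.partition(), record.offset(), record.message());
  }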
Example #3
 Kafka_consumer() {
   Properties props = new Properties();
   props.put("zookeeper.connect", "localhost:2181");
   props.put("group.id", "0");
   props.put("zookeeper.session.timeout.ms", "4000");
   props.put("zookeeper.sync.time.ms", "200");
   props.put("auto.commit.interval.ms", "1000");
   ConsumerConfig config = new ConsumerConfig(props);
   // configure the consumer values
   consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
 }
Example #4
  public KafkaMessageConsumerConnector(String group, String zkConnectUrls) {
    Properties props = new Properties();
    props.put("group.id", group);
    props.put("zookeeper.connect", zkConnectUrls);
    props.put("zookeeper.session.timeout.ms", "400");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    // props.put("auto.commit.enable", "false");
    props.put("auto.offset.reset", "smallest");

    ConsumerConfig config = new ConsumerConfig(props);
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
  }
Example #5
  public static VertxKafkaConsumer create(
      final KafkaConfiguration configuration, final KafkaHandler handler) {
    final Properties properties =
        createProperties(
            configuration.getZookeeper(),
            configuration.getGroupId(),
            configuration.getZookeeperTimeout());

    final ConsumerConfig config = new ConsumerConfig(properties);

    final ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

    return new VertxKafkaConsumer(connector, configuration, handler);
  }
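createProperties is elided here. A plausible sketch, assuming it only maps the three configuration values onto the old high-level consumer's property names (the method shape and the timeout type are assumptions):

  // Hypothetical reconstruction of the elided helper.
  private static Properties createProperties(
      final String zookeeper, final String groupId, final int zookeeperTimeoutMs) {
    final Properties properties = new Properties();
    properties.setProperty("zookeeper.connect", zookeeper);
    properties.setProperty("group.id", groupId);
    properties.setProperty("zookeeper.session.timeout.ms", String.valueOf(zookeeperTimeoutMs));
    return properties;
  }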
Example #6
  private void start(String topic) {
    consumer = Consumer.createJavaConsumerConnector(config);

    /* We tell Kafka how many threads will read each topic. We have one topic and one thread */
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);

    /* We use a decoder so Kafka converts messages to Strings. The relevant property is
     * deserializer.encoding, which sets the charset to use; the default is UTF-8, which
     * works for us. */
    StringDecoder decoder = new StringDecoder(new VerifiableProperties());

    /* Kafka will give us a list of streams of messages for each topic.
     * In this case, it's just one topic with a list of a single stream. */
    stream = consumer.createMessageStreams(topicCountMap, decoder, decoder).get(topic).get(0);
  }
Example #7
  @Override
  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    logger.info("Opened");
    this.collector = collector;
    logger.info(" topic = " + kafkaSpoutConfig.kafkaConsumerConfiguration.getTopic());
    this.spoutName = String.format("%s-%d", context.getThisComponentId(), context.getThisTaskId());

    Properties kafkaProperties =
        KafkaConsumerProperties.createKafkaProperties(kafkaSpoutConfig.kafkaConsumerConfiguration);
    // We have to use a different consumer.id for each spout, so we use the storm taskId.
    // Otherwise, ZooKeeper complains about a conflicting ephemeral node when more than one
    // spout reads from the topic.
    kafkaProperties.setProperty("consumer.id", String.valueOf(context.getThisTaskId()));
    ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProperties);
    this.consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
  }
Example #8
  @Override
  public Firehose connect() throws IOException {
    final ConsumerConnector connector =
        Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));

    final Map<String, List<KafkaStream<Message>>> streams =
        connector.createMessageStreams(ImmutableMap.of(feed, 1));

    final List<KafkaStream<Message>> streamList = streams.get(feed);
    if (streamList == null || streamList.size() != 1) {
      return null;
    }

    final KafkaStream<Message> stream = streamList.get(0);

    return new DefaultFirehose(connector, stream, parser);
  }
Example #9
 public AvroClicksSessionizer(
     String zookeeper,
     String groupId,
     String inputTopic,
     String outputTopic,
     String url,
     int sessionLengthMs) {
   this.consumer =
       kafka.consumer.Consumer.createJavaConsumerConnector(
           new ConsumerConfig(createConsumerConfig(zookeeper, groupId, url)));
   this.producer = getProducer(url);
   this.zookeeper = zookeeper;
   this.groupId = groupId;
   this.inputTopic = inputTopic;
   this.outputTopic = outputTopic;
   this.url = url;
   this.sessionLengthMs = sessionLengthMs;
 }
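createConsumerConfig is elided. Since the class consumes and produces Avro records, the url parameter is presumably a schema registry address; a hedged sketch (treating url as schema.registry.url is an assumption about the decoder in use):

  // Hypothetical sketch of the elided helper. Only the zookeeper/group entries
  // are standard old-consumer settings; schema.registry.url is an assumption.
  private static Properties createConsumerConfig(String zookeeper, String groupId, String url) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zookeeper);
    props.put("group.id", groupId);
    props.put("schema.registry.url", url);
    props.put("auto.offset.reset", "smallest");
    return props;
  }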
Example #10
    public KafkaConsumerConnector(String zk, String groupName) {
      // Derive a group id that is unique per table, so offsets stay clean across multiple
      // runs.
      String groupId = "voltdb-" + groupName;
      // TODO: Should get this from properties file or something as override?
      Properties props = new Properties();
      props.put("zookeeper.connect", zk);
      props.put("group.id", groupId);
      props.put("zookeeper.session.timeout.ms", "400");
      props.put("zookeeper.sync.time.ms", "200");
      props.put("auto.commit.interval.ms", "1000");
      props.put("auto.commit.enable", "true");
      props.put("auto.offset.reset", "smallest");
      props.put("rebalance.backoff.ms", "10000");

      m_consumerConfig = new ConsumerConfig(props);

      m_consumer = kafka.consumer.Consumer.createJavaConsumerConnector(m_consumerConfig);
    }
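A connector like this usually needs a matching shutdown method so the consumer commits its offsets and releases its ZooKeeper session cleanly. A minimal sketch (the method name stop is an assumption):

    public void stop() {
      if (m_consumer != null) {
        m_consumer.shutdown();
      }
    }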
Example #11
  public void open() {
    Properties props = new Properties();
    try {
      InputStream inputStream =
          ConfigUtil.getConfigReader().getResourceAsStream("/kafka.properties");
      props.load(inputStream);
      // Check which topics should be received in broadcast mode
      String broadcast_topics = props.getProperty("broadcast.topics");
      if (null != broadcast_topics && !broadcast_topics.equals("")) {
        if (broadcast_topics.indexOf(topic) >= 0) {
          String groupId = props.getProperty("group.id");
          props.setProperty("group.id", groupId + "_" + Tools.getHostAddress());
        }
      }

      consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
      FetchMessageThread thread = new FetchMessageThread(topic, consumer, handler);
      thread.start();
    } catch (IOException e) {
      e.printStackTrace();
      logger.error(String.format("读取kafka配置文件失败。 ErrorMsg:[%s]", e.getMessage()), e);
    }
  }
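FetchMessageThread is not included in the snippet. A hypothetical reconstruction, assuming the handler exposes a single onMessage(String) callback (the handler interface and the String decoding are both assumptions):

  // Hypothetical reconstruction; the real FetchMessageThread is not shown.
  class FetchMessageThread extends Thread {
    private final String topic;
    private final ConsumerConnector consumer;
    private final MessageHandler handler; // assumed interface: void onMessage(String)

    FetchMessageThread(String topic, ConsumerConnector consumer, MessageHandler handler) {
      this.topic = topic;
      this.consumer = consumer;
      this.handler = handler;
    }

    @Override
    public void run() {
      StringDecoder decoder = new StringDecoder(new VerifiableProperties());
      KafkaStream<String, String> stream =
          consumer
              .createMessageStreams(Collections.singletonMap(topic, 1), decoder, decoder)
              .get(topic)
              .get(0);
      // KafkaStream is iterable; this loop blocks until messages arrive.
      for (MessageAndMetadata<String, String> record : stream) {
        handler.onMessage(record.message());
      }
    }
  }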
Example #12
  public KafkaStream<byte[], byte[]> createMessageStream(String topic) throws Exception {

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);

    final ConsumerConnector consumer =
        kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(propsConsumer));

    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
        consumer.createMessageStreams(topicCountMap);
    Runtime.getRuntime()
        .addShutdownHook(
            new Thread() {

              // A shutdown hook should do its work in run(); the JVM starts each
              // registered hook thread itself at shutdown.
              @Override
              public void run() {
                LOG.info("Pigeon Shutdown - Closing kafka consumer on topic [" + topic + "]");
                consumer.shutdown();
              }
            });

    return consumerMap.get(topic).get(0);
  }
Example #13
  @Override
  public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(this.consumerConfig);
    // We request only one stream per consumer instance. Kafka will make sure that each
    // consumer group will see each message only once.
    Map<String, Integer> topicCountMap = Collections.singletonMap(topicName, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams =
        consumer.createMessageStreams(topicCountMap);
    if (streams.size() != 1) {
      throw new RuntimeException("Expected only one message stream but got " + streams.size());
    }
    List<KafkaStream<byte[], byte[]>> kafkaStreams = streams.get(topicName);
    if (kafkaStreams == null) {
      throw new RuntimeException(
          "Requested stream not available. Available streams: " + streams.toString());
    }
    if (kafkaStreams.size() != 1) {
      throw new RuntimeException(
          "Requested 1 stream from Kafka, bot got " + kafkaStreams.size() + " streams");
    }
    LOG.info(
        "Opening Consumer instance for topic '{}' on group '{}'",
        topicName,
        consumerConfig.groupId());
    this.iteratorToRead = kafkaStreams.get(0).iterator();
    this.consumer = consumer;

    zkClient =
        new ZkClient(
            consumerConfig.zkConnect(),
            consumerConfig.zkSessionTimeoutMs(),
            consumerConfig.zkConnectionTimeoutMs(),
            new KafkaZKStringSerializer());

    // most likely the number of offsets we're going to store here will be lower than the number of
    // partitions.
    int numPartitions = getNumberOfPartitions();
    LOG.debug("The topic {} has {} partitions", topicName, numPartitions);
    this.lastOffsets =
        getRuntimeContext().getOperatorState("offset", new long[numPartitions], false);
    this.commitedOffsets = new long[numPartitions];
    // check if there are offsets to restore
    if (!Arrays.equals(lastOffsets.getState(), new long[numPartitions])) {
      if (lastOffsets.getState().length != numPartitions) {
        throw new IllegalStateException(
            "There are "
                + lastOffsets.getState().length
                + " offsets to restore for topic "
                + topicName
                + " but "
                + "there are only "
                + numPartitions
                + " in the topic");
      }

      LOG.info("Setting restored offsets {} in ZooKeeper", Arrays.toString(lastOffsets.getState()));
      setOffsetsInZooKeeper(lastOffsets.getState());
    } else {
      // initialize empty offsets
      Arrays.fill(this.lastOffsets.getState(), -1);
    }
    Arrays.fill(this.commitedOffsets, 0); // explicitly start committed offsets at zero

    running = true;
  }
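KafkaZKStringSerializer is not shown. Kafka stores plain UTF-8 strings in ZooKeeper, so a compatible zkclient serializer can be sketched like this (the class body is an assumption):

  // Hypothetical sketch: a ZkSerializer that reads and writes plain UTF-8
  // strings, matching how Kafka stores offsets in ZooKeeper.
  public class KafkaZKStringSerializer implements ZkSerializer {
    @Override
    public byte[] serialize(Object data) throws ZkMarshallingError {
      return ((String) data).getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public Object deserialize(byte[] bytes) throws ZkMarshallingError {
      return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
    }
  }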
Example #14
 public Consumer(String topic) {
   consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
   this.topic = topic;
 }
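The no-argument createConsumerConfig() is elided. A minimal sketch mirroring the settings from Example #3 (the localhost address and group id are assumptions):

  // Hypothetical reconstruction; the values mirror Example #3.
  private static ConsumerConfig createConsumerConfig() {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181");
    props.put("group.id", "0");
    props.put("auto.commit.interval.ms", "1000");
    return new ConsumerConfig(props);
  }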