Example #1
  public void produce(SimplifiedLog message, String topicName) {
    // Lazily create the producer on first use, backed by the embedded ZooKeeper instance.
    if (producer == null)
      producer =
          KafkaUtils.createProducer(
              KafkaUtils.createZkClient(zkServer.getConnectString()), ASYNC, false);
    // Keying by host name routes all messages from one host to the same partition.
    producer.send(new KeyedMessage<>(topicName, message.getHostName(), message));
   LOGGER.debug("Sent message: {}", message);
 }
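This produce() lazily initializes a single shared producer on first use. As a usage illustration only, a hypothetical test helper for sending a batch of messages could look like the sketch below; SimplifiedLog's constructor is not shown above, so its arguments here are assumed:

  // Hypothetical helper: send `count` messages, cycling the key across a few host names.
  public void produceBatch(String topicName, int count) {
    for (int i = 0; i < count; i++) {
      SimplifiedLog message = new SimplifiedLog("host-" + (i % 3), "payload-" + i); // assumed constructor
      produce(message, topicName);
    }
  }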
Example #2
  public void start() throws Exception {
    // Start an embedded ZooKeeper (Curator TestingServer) and connect a client to it.
    zkServer = new TestingServer(zkPort, true);
    zkClient = KafkaUtils.createZkClient(zkServer.getConnectString());

    // Boot a single embedded Kafka broker against that ZooKeeper instance.
    kafkaServer = new KafkaServerStartable(new KafkaConfig(getServerProperties()));
    kafkaServer.startup();
    LOGGER.debug("Started Kafka server at port {}", kafkaPort);
  }
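A fixture started like this usually needs a symmetric teardown. A minimal sketch of a matching stop(), assuming the same fields and the standard ZkClient.close(), KafkaServerStartable.shutdown()/awaitShutdown(), and Curator TestingServer.close() calls:

  public void stop() throws Exception {
    // Shut down in reverse order of startup: broker first, then ZooKeeper.
    kafkaServer.shutdown();
    kafkaServer.awaitShutdown();
    zkClient.close();
    zkServer.close();
  }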
Example #3
  public List<SimplifiedLog> consume(String topicName, int topicPartitions, int expectedMsg)
      throws InterruptedException {
    ConsumerConnector consumer =
        KafkaUtils.createConsumer(zkServer.getConnectString(), "test_group", "1");
    List<KafkaStream<String, SimplifiedLog>> streams =
        KafkaUtils.getConsumerStreams(consumer, topicName, topicPartitions);

    // One consumer task per stream; each drains messages until expectedMsg have arrived.
    List<Callable<List<SimplifiedLog>>> tasks = new ArrayList<>();
    streams.forEach(stream -> tasks.add(createConsumerThread(stream.iterator(), expectedMsg)));

    // Run all tasks in parallel, allowing five seconds per expected message before timing out.
    ExecutorService executor = Executors.newFixedThreadPool(streams.size());
    List<Future<List<SimplifiedLog>>> futures =
        executor.invokeAll(tasks, 5 * expectedMsg, TimeUnit.SECONDS);

    List<SimplifiedLog> received = getResultsFromFutures(futures);
    consumer.shutdown();
    executor.shutdown();
    return received;
  }
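consume() delegates to two helpers that are not shown. A plausible sketch of createConsumerThread, assuming the old high-level consumer's ConsumerIterator API (hasNext()/next().message()) and that getResultsFromFutures merely flattens the completed futures:

  // Sketch (assumed implementation): block on the iterator until expectedMsg messages arrive.
  private Callable<List<SimplifiedLog>> createConsumerThread(
      ConsumerIterator<String, SimplifiedLog> iterator, int expectedMsg) {
    return () -> {
      List<SimplifiedLog> received = new ArrayList<>();
      while (received.size() < expectedMsg && iterator.hasNext()) {
        received.add(iterator.next().message());
      }
      return received;
    };
  }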
Example #4
  public void start() {
    List<String> zkServers = _consumerConfig.zkServers;
    String servers = Joiner.on(",").join(zkServers);
    _state = new ZkState(servers, _consumerConfig.zkRoot);
    _connections =
        new DynamicPartitionConnections(
            _consumerConfig, KafkaUtils.makeBrokerReader(_consumerConfig));

    // using TransactionalState like this is a hack
    _coordinator =
        new ZkCoordinator(
            _connections, _consumerConfig, _state, _taskIndex, _totalTasks, _uuid, _site);

    // Run the worker loop on its own thread so start() returns immediately.
    Thread t = new Thread(new Worker());
    t.start();
  }
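The Worker class is not shown, so neither is shutdown. One hedged possibility for a matching stop(), assuming a volatile _running flag polled by Worker plus the storm-kafka-style DynamicPartitionConnections.clear() and ZkState.close() methods (all assumptions):

  // Sketch only; _running is an assumed volatile flag that Worker's loop would check.
  public void stop() {
    _running = false;       // ask the worker loop to exit
    _connections.clear();   // close all broker connections
    _state.close();         // release the ZooKeeper-backed state
  }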
Example #5
  @Test
  public void testKafkaStream() throws InterruptedException {
    final String topic1 = "topic1";
    final String topic2 = "topic2";
    // hold a reference to the current offset ranges, so it can be used downstream
    final AtomicReference<OffsetRange[]> offsetRanges = new AtomicReference<>();

    String[] topic1data = createTopicAndSendData(topic1);
    String[] topic2data = createTopicAndSendData(topic2);

    Set<String> sent = new HashSet<>();
    sent.addAll(Arrays.asList(topic1data));
    sent.addAll(Arrays.asList(topic2data));

    Map<String, String> kafkaParams = new HashMap<>();
    kafkaParams.put("metadata.broker.list", kafkaTestUtils.brokerAddress());
    // "smallest" makes the direct stream start from each partition's earliest offset.
    kafkaParams.put("auto.offset.reset", "smallest");

    JavaDStream<String> stream1 =
        KafkaUtils.createDirectStream(
                ssc,
                String.class,
                String.class,
                StringDecoder.class,
                StringDecoder.class,
                kafkaParams,
                topicToSet(topic1))
            .transformToPair(
                // Make sure you can get offset ranges from the rdd
                new Function<JavaPairRDD<String, String>, JavaPairRDD<String, String>>() {
                  @Override
                  public JavaPairRDD<String, String> call(JavaPairRDD<String, String> rdd) {
                    OffsetRange[] offsets = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
                    offsetRanges.set(offsets);
                    Assert.assertEquals(topic1, offsets[0].topic());
                    return rdd;
                  }
                })
            .map(
                new Function<Tuple2<String, String>, String>() {
                  @Override
                  public String call(Tuple2<String, String> kv) {
                    return kv._2();
                  }
                });

    JavaDStream<String> stream2 =
        KafkaUtils.createDirectStream(
            ssc,
            String.class,
            String.class,
            StringDecoder.class,
            StringDecoder.class,
            String.class,
            kafkaParams,
            topicOffsetToMap(topic2, 0L),
            new Function<MessageAndMetadata<String, String>, String>() {
              @Override
              public String call(MessageAndMetadata<String, String> msgAndMd) {
                return msgAndMd.message();
              }
            });
    JavaDStream<String> unifiedStream = stream1.union(stream2);

    final Set<String> result = Collections.synchronizedSet(new HashSet<String>());
    unifiedStream.foreachRDD(
        new VoidFunction<JavaRDD<String>>() {
          @Override
          public void call(JavaRDD<String> rdd) {
            result.addAll(rdd.collect());
            for (OffsetRange o : offsetRanges.get()) {
              System.out.println(
                  o.topic() + " " + o.partition() + " " + o.fromOffset() + " " + o.untilOffset());
            }
          }
        });
    ssc.start();
    // Poll for up to 20 seconds until the received set matches everything that was sent.
    long startTime = System.currentTimeMillis();
    boolean matches = false;
    while (!matches && System.currentTimeMillis() - startTime < 20000) {
      matches = sent.size() == result.size();
      Thread.sleep(50);
    }
    Assert.assertEquals(sent, result);
    ssc.stop();
  }
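The test relies on the small helpers topicToSet and topicOffsetToMap. A sketch of what they plausibly do, assuming the explicit starting offset applies to partition 0 of a single-partition topic (TopicAndPartition is kafka.common.TopicAndPartition):

  private static Set<String> topicToSet(String topic) {
    Set<String> set = new HashSet<>();
    set.add(topic);
    return set;
  }

  // Assumed single-partition topic: map partition 0 to the given starting offset.
  private static Map<TopicAndPartition, Long> topicOffsetToMap(String topic, Long offsetToStart) {
    Map<TopicAndPartition, Long> map = new HashMap<>();
    map.put(new TopicAndPartition(topic, 0), offsetToStart);
    return map;
  }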