@Before
  public void setUp() throws Exception {
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    zkUtils =
        ZkUtils.apply(
            zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());
    zkClient = zkUtils.zkClient();

    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
      final Option<java.io.File> noFile = scala.Option.apply(null);
      final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
      Properties props =
          TestUtils.createBrokerConfig(
              i,
              zkConnect,
              false,
              false,
              TestUtils.RandomPort(),
              noInterBrokerSecurityProtocol,
              noFile,
              true,
              false,
              TestUtils.RandomPort(),
              false,
              TestUtils.RandomPort(),
              false,
              TestUtils.RandomPort());
      props.setProperty("auto.create.topics.enable", "true");
      props.setProperty("num.partitions", "1");
      // We *must* override this to use the port we allocated (Kafka currently allocates
      // one port that it always uses for ZK).
      props.setProperty("zookeeper.connect", this.zkConnect);
      KafkaConfig config = new KafkaConfig(props);
      configs.add(config);

      KafkaServer server = TestUtils.createServer(config, SystemTime$.MODULE$);
      servers.add(server);
    }

    brokerList =
        TestUtils.getBrokerListStrFromServers(
            JavaConversions.asScalaBuffer(servers), SecurityProtocol.PLAINTEXT);

    if (setupRestApp) {
      restApp = new RestApp(choosePort(), zkConnect, KAFKASTORE_TOPIC, compatibilityType);
      restApp.start();
    }
  }

  @After
  public void tearDown() throws Exception {
    if (restApp != null) {
      restApp.stop();
    }

    if (servers != null) {
      for (KafkaServer server : servers) {
        server.shutdown();
      }

      // Remove any persistent data
      for (KafkaServer server : servers) {
        for (String logDir : JavaConversions.asJavaCollection(server.config().logDirs())) {
          CoreUtils.rm(logDir);
        }
      }
    }

    if (zkUtils != null) {
      zkUtils.close();
    }

    if (zookeeper != null) {
      zookeeper.shutdown();
    }
  }
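
The choosePort() helper referenced in setUp() is not shown above. A minimal sketch, assuming it simply binds a socket to port 0 so the OS hands back a free ephemeral port:

  // Hypothetical choosePort() helper: bind to port 0 so the OS assigns a free
  // ephemeral port, then close the socket and return that port.
  private static int choosePort() throws java.io.IOException {
    try (java.net.ServerSocket socket = new java.net.ServerSocket(0)) {
      socket.setReuseAddress(true);
      return socket.getLocalPort();
    }
  }

There is a small window in which another process could claim the port after the socket closes, which is usually an acceptable risk in tests.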
Example #3
 public static long getOffset(ZkClient zkClient, String groupId, String topic, int partition) {
   TopicAndPartition tap = new TopicAndPartition(topic, partition);
   ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupId, tap.topic());
   // The offset is stored in ZooKeeper as a decimal string under the group's offsets dir.
   scala.Tuple2<String, Stat> data =
       ZkUtils.readData(zkClient, topicDirs.consumerOffsetDir() + "/" + tap.partition());
   return Long.parseLong(data._1());
 }
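
A hypothetical call site, assuming a ZkClient connected to the same ZooKeeper the brokers use (group and topic names are placeholders):

 // Hypothetical usage: read the committed offset for partition 0 of "my-topic"
 // in group "my-group". ZKStringSerializer$.MODULE$ is Kafka's Scala string
 // serializer accessed from Java; without it the znode contents will not decode.
 ZkClient zkClient =
     new ZkClient("127.0.0.1:2181", 10000, 10000, kafka.utils.ZKStringSerializer$.MODULE$);
 long offset = getOffset(zkClient, "my-group", "my-topic", 0);
 zkClient.close();

Note that the static ZkUtils.readData() used inside getOffset() throws if the offset node does not exist yet, so this is only safe once the group has committed at least once.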
  /**
   * Creates a Kafka topic if needed, or tries to increase its partition count to the desired
   * number.
   */
  private Collection<Partition> ensureTopicCreated(
      final String topicName, final int numPartitions, int replicationFactor) {

    final int sessionTimeoutMs = 10000;
    final int connectionTimeoutMs = 10000;
    final ZkClient zkClient =
        new ZkClient(zkAddress, sessionTimeoutMs, connectionTimeoutMs, utf8Serializer);
    try {
      // The following is basically copy/paste from AdminUtils.createTopic() with
      // createOrUpdateTopicPartitionAssignmentPathInZK(..., update=true)
      final Properties topicConfig = new Properties();
      Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
      final scala.collection.Map<Object, Seq<Object>> replicaAssignment =
          AdminUtils.assignReplicasToBrokers(brokerList, numPartitions, replicationFactor, -1, -1);
      retryOperations.execute(
          new RetryCallback<Object, RuntimeException>() {

            @Override
            public Object doWithRetry(RetryContext context) throws RuntimeException {
              AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(
                  zkClient, topicName, replicaAssignment, topicConfig, true);
              return null;
            }
          });
      try {
        Collection<Partition> partitions =
            retryOperations.execute(
                new RetryCallback<Collection<Partition>, Exception>() {

                  @Override
                  public Collection<Partition> doWithRetry(RetryContext context) throws Exception {
                    connectionFactory.refreshMetadata(Collections.singleton(topicName));
                    Collection<Partition> partitions = connectionFactory.getPartitions(topicName);
                    if (partitions.size() < numPartitions) {
                      throw new IllegalStateException(
                          "Expected "
                              + numPartitions
                              + " partitions, but found "
                              + partitions.size());
                    }
                    connectionFactory.getLeaders(partitions);
                    return partitions;
                  }
                });
        return partitions;
      } catch (Exception e) {
        logger.error("Cannot initialize Binder", e);
        throw new BinderException("Cannot initialize binder", e);
      }

    } finally {
      zkClient.close();
    }
  }
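
The retryOperations field is assumed to be a Spring Retry RetryOperations (the RetryCallback signatures above match that library); a minimal sketch of a plausible configuration:

  // Hypothetical construction of the retryOperations field: up to 3 attempts
  // with a fixed 1-second pause between them (Spring Retry).
  // import org.springframework.retry.backoff.FixedBackOffPolicy;
  // import org.springframework.retry.policy.SimpleRetryPolicy;
  // import org.springframework.retry.support.RetryTemplate;
  private static RetryTemplate buildRetryOperations() {
    RetryTemplate retryTemplate = new RetryTemplate();
    retryTemplate.setRetryPolicy(new SimpleRetryPolicy(3));
    FixedBackOffPolicy backOff = new FixedBackOffPolicy();
    backOff.setBackOffPeriod(1000L);
    retryTemplate.setBackOffPolicy(backOff);
    return retryTemplate;
  }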
Example #5
 /** This method's code is based on ZookeeperConsumerConnector.commitOffsetToZooKeeper() */
 public static void setOffset(
     ZkClient zkClient, String groupId, String topic, int partition, long offset) {
   LOG.info(
       "Setting offset for partition {} of topic {} in group {} to offset {}",
       partition,
       topic,
       groupId,
       offset);
   TopicAndPartition tap = new TopicAndPartition(topic, partition);
   ZKGroupTopicDirs topicDirs = new ZKGroupTopicDirs(groupId, tap.topic());
   ZkUtils.updatePersistentPath(
       zkClient, topicDirs.consumerOffsetDir() + "/" + tap.partition(), Long.toString(offset));
 }
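
A hypothetical round trip pairing this with the getOffset() helper from Example #3 (names are placeholders):

 // Hypothetical usage: commit offset 42 for partition 0, then read it back.
 setOffset(zkClient, "my-group", "my-topic", 0, 42L);
 long committed = getOffset(zkClient, "my-group", "my-topic", 0); // == 42

Unlike readData(), updatePersistentPath() creates the znode when it is missing, so the write side works even for a group that has never committed before.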
Example #6
 private int getNumberOfPartitions() {
   scala.collection.immutable.List<String> topics =
       JavaConversions.asScalaBuffer(Collections.singletonList(topicName)).toList();
   scala.collection.mutable.Map<String, Seq<Object>> partitionMap =
       ZkUtils.getPartitionsForTopics(zkClient, topics);
   Option<Seq<Object>> topicOption = partitionMap.get(topicName);
   if (topicOption.isEmpty()) {
     throw new IllegalArgumentException(
         "Unable to get number of partitions for topic " + topicName + " from " + partitionMap);
   }
   Seq<Object> partitions = topicOption.get();
   return partitions.size();
 }
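
A hypothetical initialization of the zkClient and topicName fields this method depends on, assuming zkConnect points at the ensemble:

 // Hypothetical field setup; Kafka's ZKStringSerializer is needed so the
 // topic metadata stored in ZooKeeper deserializes correctly.
 this.topicName = "my-topic";
 this.zkClient = new ZkClient(zkConnect, 10000, 10000, kafka.utils.ZKStringSerializer$.MODULE$);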
  @Test
  public void testKafkaConsumer09Read() throws IOException, StageException {
    int zkConnectionTimeout = 6000;
    int zkSessionTimeout = 6000;

    EmbeddedZookeeper zookeeper = new EmbeddedZookeeper();
    String zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    ZkUtils zkUtils =
        ZkUtils.apply(
            zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());

    int port = TestUtil.getFreePort();
    KafkaServer kafkaServer = TestUtil.createKafkaServer(port, zkConnect);

    final String topic = "TestKafkaConsumer09_1";
    final String message = "Hello StreamSets";

    Source.Context sourceContext =
        ContextInfoCreator.createSourceContext(
            "s", false, OnRecordError.TO_ERROR, ImmutableList.of("a"));

    Map<String, Object> props = new HashMap<>();
    props.put("auto.commit.interval.ms", "1000");
    props.put("auto.offset.reset", "earliest");
    props.put("session.timeout.ms", "30000");
    SdcKafkaConsumer sdcKafkaConsumer =
        createSdcKafkaConsumer("localhost:" + port, topic, 1000, sourceContext, props, "test");
    sdcKafkaConsumer.validate(new ArrayList<Stage.ConfigIssue>(), sourceContext);
    sdcKafkaConsumer.init();

    // produce some messages to topic
    produce(topic, "localhost:" + port, message);

    // read
    List<MessageAndOffset> read = new ArrayList<>();
    while (read.size() < 10) {
      MessageAndOffset messageAndOffset = sdcKafkaConsumer.read();
      if (messageAndOffset != null) {
        read.add(messageAndOffset);
      }
    }
    // verify
    Assert.assertNotNull(read);
    Assert.assertEquals(10, read.size());
    verify(read, message);

    // delete topic and shutdown
    AdminUtils.deleteTopic(zkUtils, topic);
    kafkaServer.shutdown();
    zookeeper.shutdown();
  }
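
The produce() helper is not shown in this snippet; a minimal sketch with the Kafka 0.9 producer API, assuming it sends the message ten times to match the assertion above:

  // Hypothetical produce() helper: send the same message 10 times, since the
  // test above reads and asserts exactly 10 records.
  private static void produce(String topic, String bootstrapServers, String message) {
    Properties props = new Properties();
    props.put("bootstrap.servers", bootstrapServers);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);
    for (int i = 0; i < 10; i++) {
      producer.send(new ProducerRecord<>(topic, message));
    }
    producer.close(); // flushes pending sends before returning
  }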