@After
  public void tearDown() throws Exception {
    if (restApp != null) {
      restApp.stop();
    }

    if (servers != null) {
      // First pass: stop every broker before touching its on-disk state.
      for (KafkaServer broker : servers) {
        broker.shutdown();
      }
      // Second pass: wipe each stopped broker's persisted log directories.
      for (KafkaServer broker : servers) {
        for (String dir : JavaConversions.asJavaCollection(broker.config().logDirs())) {
          CoreUtils.rm(dir);
        }
      }
    }

    // Tear down ZooKeeper resources last: the client utils, then the server itself.
    if (zkUtils != null) {
      zkUtils.close();
    }
    if (zookeeper != null) {
      zookeeper.shutdown();
    }
  }
// Example 2
 /**
  * Creates {@code topic} with the given partition/replication settings if it does not
  * already exist, then blocks until partition 0 has an elected leader.
  *
  * @param topic name of the topic to create
  * @param numPartitions number of partitions for the new topic
  * @param replicationFactor replication factor for the new topic
  * @param topicConfig extra per-topic configuration properties
  */
 public void createTopic(
     String topic, int numPartitions, int replicationFactor, Properties topicConfig) {
   if (!AdminUtils.topicExists(server.zkClient(), topic)) {
     AdminUtils.createTopic(
         server.zkClient(), topic, numPartitions, replicationFactor, topicConfig);
     // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
     awaitPropagation(topic, 0, 2000L);
   }
 }
  /**
   * End-to-end read test: starts an embedded ZooKeeper and Kafka broker, produces
   * messages to a topic, and verifies the SDC consumer reads exactly 10 of them.
   *
   * <p>Broker/ZooKeeper shutdown happens in a finally block so a failing assertion
   * cannot leak the embedded servers into subsequent tests.
   */
  @Test
  public void testKafkaConsumer09Read() throws IOException, StageException {
    int zkConnectionTimeout = 6000;
    int zkSessionTimeout = 6000;

    EmbeddedZookeeper zookeeper = new EmbeddedZookeeper();
    String zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    ZkUtils zkUtils =
        ZkUtils.apply(
            zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());

    int port = TestUtil.getFreePort();
    KafkaServer kafkaServer = TestUtil.createKafkaServer(port, zkConnect);

    try {
      final String topic = "TestKafkaConsumer09_1";
      final String message = "Hello StreamSets";

      Source.Context sourceContext =
          ContextInfoCreator.createSourceContext(
              "s", false, OnRecordError.TO_ERROR, ImmutableList.of("a"));

      Map<String, Object> props = new HashMap<>();
      props.put("auto.commit.interval.ms", "1000");
      props.put("auto.offset.reset", "earliest");
      props.put("session.timeout.ms", "30000");
      SdcKafkaConsumer sdcKafkaConsumer =
          createSdcKafkaConsumer("localhost:" + port, topic, 1000, sourceContext, props, "test");
      sdcKafkaConsumer.validate(new ArrayList<Stage.ConfigIssue>(), sourceContext);
      sdcKafkaConsumer.init();

      // produce some messages to topic
      produce(topic, "localhost:" + port, message);

      // read until the expected number of messages has arrived
      List<MessageAndOffset> read = new ArrayList<>();
      while (read.size() < 10) {
        MessageAndOffset messageAndOffset = sdcKafkaConsumer.read();
        if (messageAndOffset != null) {
          read.add(messageAndOffset);
        }
      }
      // verify
      Assert.assertEquals(10, read.size());
      verify(read, message);

      // delete topic
      AdminUtils.deleteTopic(zkUtils, topic);
    } finally {
      // Always release external resources, even when an assertion above fails;
      // otherwise a leaked broker/ZK instance can poison subsequent tests.
      // zkUtils was also never closed before this change.
      zkUtils.close();
      kafkaServer.shutdown();
      zookeeper.shutdown();
    }
  }
 /** Configures and starts the embedded Kafka broker on {@code kafkaPort}. */
 @Override
 public void start() throws Exception {
   LOG.info("KAFKA: Starting Kafka on port: " + kafkaPort);
   // Build kafkaConfig (and any other prerequisites) before constructing the server.
   configure();
   kafkaServer = new KafkaServer(kafkaConfig, new LocalSystemTime());
   kafkaServer.startup();
 }
// Example 5
  /**
   * Builds an embedded Kafka broker backed by an embedded ZooKeeper instance.
   *
   * <p>Starts from test-friendly defaults (single broker on 127.0.0.1:9092, temporary
   * log dir, eager log flushing, leader rebalancing disabled) which {@code customProps}
   * may override, waits for ZooKeeper to come up, then starts the broker.
   *
   * @param customProps broker settings that override the defaults
   * @throws Exception if ZooKeeper does not come up in time or the wait is interrupted
   */
  public EmbeddedKafka(Map<String, String> customProps) throws Exception {
    super();
    Map<String, String> defaultProps = Maps.newHashMap();
    defaultProps.put("broker.id", "0");
    defaultProps.put("host.name", "127.0.0.1");
    defaultProps.put("port", "9092");
    defaultProps.put("advertised.host.name", "127.0.0.1");
    defaultProps.put("advertised.port", "9092");
    defaultProps.put("log.dir", createTempDir().getAbsolutePath());
    defaultProps.put("zookeeper.connect", package$.MODULE$.ZookeeperConnectionString());
    defaultProps.put("replica.high.watermark.checkpoint.interval.ms", "5000");
    defaultProps.put("log.flush.interval.messages", "1");
    defaultProps.put("replica.socket.timeout.ms", "500");
    defaultProps.put("controlled.shutdown.enable", "false");
    defaultProps.put("auto.leader.rebalance.enable", "false");

    Properties props = new Properties();
    props.putAll(defaultProps);
    props.putAll(customProps);

    final KafkaConfig kafkaConfig = new KafkaConfig(props);

    // getProperty() instead of a raw get()+cast: same String value, no unchecked cast.
    zookeeper = new EmbeddedZookeeper(props.getProperty("zookeeper.connect"));
    awaitCond(aVoid -> zookeeper.isRunning(), 3000, 100);

    server = new KafkaServer(kafkaConfig, SystemTime$.MODULE$);
    // (Removed a Thread.sleep(2000) here: the server had not been started yet,
    // so the wait accomplished nothing.)

    log.info("Starting the Kafka server at {}", kafkaConfig.zkConnect());
    server.startup();
    // Give the broker a moment to register before callers start using it.
    Thread.sleep(2000);
  }
 /** Stops the broker and deletes its on-disk log data and temp folder. */
 public void stop() {
   log.debug(
       "Shutting down embedded Kafka broker at {} (with ZK ensemble at {}) ...",
       brokerList(),
       zookeeperConnect());
   kafka.shutdown();
   kafka.awaitShutdown();
   log.debug("Removing logs.dir at {} ...", logDir);
   // Hand the single log dir to Kafka's Scala-side delete helper.
   CoreUtils.delete(
       scala.collection.JavaConversions.asScalaBuffer(
               Collections.singletonList(logDir.getAbsolutePath()))
           .seq());
   tmpFolder.delete();
   log.debug(
       "Shutdown of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
       brokerList(),
       zookeeperConnect());
 }
  /**
   * Stops the embedded Kafka broker.
   *
   * @param cleanUp whether to also remove the broker's on-disk state after shutdown
   * @throws Exception if shutdown or cleanup fails
   */
  @Override
  public void stop(boolean cleanUp) throws Exception {
    LOG.info("KAFKA: Stopping Kafka on port: " + kafkaPort);
    kafkaServer.shutdown();

    if (cleanUp) {
      cleanUp();
    }
  }
  /**
   * Starts a local Kafka broker with the given id, retrying on port conflicts.
   *
   * <p>Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL
   * licensed).
   *
   * @param brokerId numeric broker id to assign
   * @param tmpFolder directory to use as the broker's log dir
   * @return the started {@link KafkaServer}
   * @throws Exception if the broker could not be started after the retry budget is
   *     exhausted (the last bind failure is attached as the cause)
   */
  protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
    LOG.info("Starting broker with id {}", brokerId);
    Properties kafkaProperties = new Properties();

    // properties have to be Strings
    kafkaProperties.put("advertised.host.name", KAFKA_HOST);
    kafkaProperties.put("broker.id", Integer.toString(brokerId));
    kafkaProperties.put("log.dir", tmpFolder.toString());
    kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
    kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
    kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));

    // for CI stability, increase zookeeper session timeout
    kafkaProperties.put("zookeeper.session.timeout.ms", "30000");
    kafkaProperties.put("zookeeper.connection.timeout.ms", "30000");
    if (additionalServerProperties != null) {
      kafkaProperties.putAll(additionalServerProperties);
    }

    final int numTries = 5;
    KafkaException lastBindFailure = null;

    for (int i = 1; i <= numTries; i++) {
      // Probe a fresh free port on every attempt.
      int kafkaPort = NetUtils.getAvailablePort();
      kafkaProperties.put("port", Integer.toString(kafkaPort));
      KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);

      try {
        KafkaServer server = new KafkaServer(kafkaConfig, new KafkaLocalSystemTime());
        server.startup();
        return server;
      } catch (KafkaException e) {
        if (e.getCause() instanceof BindException) {
          // port conflict, retry...
          LOG.info("Port conflict when starting Kafka Broker. Retrying...");
          lastBindFailure = e;
        } else {
          throw e;
        }
      }
    }

    // Attach the last bind failure as the cause so the conflict remains debuggable.
    throw new Exception(
        "Could not start Kafka after " + numTries + " retries due to port conflicts.",
        lastBindFailure);
  }
 /**
  * Loads the broker configuration from the configured path and starts the embedded
  * Kafka server, normalizing the {@code log.dirs} path separators for the current
  * platform first.
  *
  * @throws Exception if the configuration cannot be loaded or the broker fails to start
  */
 public void start() throws Exception {
   ServiceConfig config = getServiceConfig();
   String kafkaConfigPath = config.getParameter("kafkaConfigPath", null);
   Properties props = loadKafkaProperties(kafkaConfigPath);
   String logDir = props.getProperty("log.dirs");
   // Guard against a missing "log.dirs" entry: previously this NPE'd on replace().
   if (logDir != null) {
     props.setProperty("log.dirs", logDir.replace("/", File.separator));
   }
   server = new KafkaServer(new KafkaConfig(props), new SystemTime());
   server.startup();
 }
// Example 10
 /**
  * Stops the Kafka broker and the embedded ZooKeeper, removing the broker's log
  * directories on the way down.
  *
  * @throws Exception if interrupted while waiting for shutdown to settle
  */
 public void shutdown() throws Exception {
   log.info("Shutting down Kafka server");
   // https://issues.apache.org/jira/browse/KAFKA-1887
   server.kafkaController().shutdown();
   server.shutdown();
   server.awaitShutdown();
   final Seq<String> logDirs = server.config().logDirs();
   for (String f : JavaConversions.asJavaCollection(logDirs)) {
     try {
       deleteRecursively(new File(f));
     } catch (IOException e) {
       // Pass the exception itself (not just its message) so the stack trace is kept.
       log.warn("Cannot delete file: " + f, e);
     }
   }
   zookeeper.shutdown();
   awaitCond(aVoid -> !zookeeper.isRunning(), 2000, 100);
   log.info("ZooKeeper server shut down.");
   Thread.sleep(2000);
 }
  /** Stops all brokers, then ZooKeeper, and finally wipes the temporary directories. */
  @Override
  public void shutdown() {
    if (brokers != null) {
      for (KafkaServer broker : brokers) {
        if (broker == null) {
          continue;
        }
        broker.shutdown();
      }
      brokers.clear();
    }

    if (zookeeper != null) {
      try {
        zookeeper.stop();
        zookeeper.close();
      } catch (Exception e) {
        LOG.warn("ZK.stop() failed", e);
      } finally {
        zookeeper = null;
      }
    }

    // clean up the temp spaces — best-effort, failures deliberately ignored
    deleteQuietly(tmpKafkaParent);
    deleteQuietly(tmpZkDir);
  }

  /** Deletes {@code dir} recursively if it exists, swallowing any failure. */
  private static void deleteQuietly(File dir) {
    if (dir != null && dir.exists()) {
      try {
        FileUtils.deleteDirectory(dir);
      } catch (Exception ignored) {
        // ignore — cleanup is best-effort
      }
    }
  }
// Example 12
  /**
   * Creates an instance of kafka on the given broker_port using the supplied zookeeper.
   *
   * @param zookeeper_connection ZooKeeper connect string the broker should use
   * @param broker_port port the local broker listens on
   * @throws IOException if the temporary log location cannot be created
   */
  protected void setupServer(final String zookeeper_connection, final int broker_port)
      throws IOException {
    Properties props = new Properties();
    this.broker_port = broker_port;
    props.put("port", "" + broker_port);
    props.put("broker.id", "1");
    // System.getProperty("java.io.tmpdir") + File.pathSeparator + "kafka_local_temp_" +
    // System.currentTimeMillis());
    props.put(
        "log.dir",
        File.createTempFile("kafka_local_temp_", "").getAbsolutePath() + File.pathSeparator);
    logger.debug("MockKafkaBroker log dir is: " + props.getProperty("log.dir"));
    String zk = zookeeper_connection;
    logger.debug("ZOOKEEPER: " + zk);
    props.put("zookeeper.connect", zk);
    props.put("auto.create.topics.enable", "true");
    props.put("delete.topic.enable", "true");
    KafkaConfig config = new KafkaConfig(props);

    // NOTE: scala version won't work here for some reason, copied same implementation as {@link
    // kafka.utils.SystemTime}
    kafka_server =
        new KafkaServer(
            config,
            new Time() {

              @Override
              public void sleep(long arg0) {
                try {
                  Thread.sleep(arg0);
                } catch (InterruptedException e) {
                  // Restore the interrupt flag instead of swallowing it so callers
                  // can still observe the interruption.
                  Thread.currentThread().interrupt();
                }
              }

              @Override
              public long nanoseconds() {
                return System.nanoTime();
              }

              @Override
              public long milliseconds() {
                return System.currentTimeMillis();
              }
            });
    kafka_server.startup();
    logger.debug("local kafka is a go");
  }
// Example 13
 /**
  * Blocks until the given topic-partition has an elected leader (leader id >= 0) in the
  * broker's metadata cache, polling every 100 ms.
  *
  * @param topic topic to wait for
  * @param partition partition index to wait for
  * @param timeout maximum time to wait, in milliseconds (passed through to awaitCond)
  */
 private void awaitPropagation(String topic, int partition, long timeout) {
   awaitCond(
       aVoid ->
           server
               .apis()
               .metadataCache()
               .getPartitionInfo(topic, partition)
               .exists(
                   // Anonymous AbstractFunction1: Scala-interop predicate over the
                   // partition state; true once a non-negative leader id exists.
                   new AbstractFunction1<PartitionStateInfo, Object>() {
                     @Override
                     public Object apply(PartitionStateInfo info) {
                       return info.leaderIsrAndControllerEpoch().leaderAndIsr().leader() >= 0;
                     }
                   }),
       timeout,
       100);
 }
// Example 14
  /**
   * Starts the local Kafka broker, either registered with ZooKeeper or standalone,
   * depending on the {@code useZookeeper} flag.
   */
  public void startKafkaServer() {
    Properties props = new Properties();
    // Settings common to both modes.
    props.setProperty("brokerid", "1");
    props.setProperty("log.dir", kafkalogdir);
    if (useZookeeper) {
      props.setProperty("enable.zookeeper", "true");
      props.setProperty("zk.connect", "localhost:2182");
      props.setProperty("topic", "topic1");
      // Number of messages accumulated per topic partition before the data is flushed
      // to disk and made available to consumers.
      props.setProperty("log.flush.interval", "10");
      // props.setProperty("log.default.flush.scheduler.interval.ms", "100"); // optional if we
      // have the flush.interval
    } else {
      props.setProperty("enable.zookeeper", "false");
      props.setProperty("hostname", "localhost");
      props.setProperty("port", "2182");
    }

    kserver = new KafkaServer(new KafkaConfig(props));
    kserver.startup();
  }
// Example 15
 /** Builds a broker from the given properties, starts it, and returns it. */
 private static KafkaServer startBroker(Properties props) {
   final KafkaConfig config = new KafkaConfig(props);
   final KafkaServer broker = new KafkaServer(config, new SystemTime());
   broker.startup();
   return broker;
 }
// Example 16
 /** Shuts down every broker, then the shared ZooKeeper instance. */
 public void shutdown() {
   for (KafkaServer b : brokers) {
     b.shutdown();
   }
   zookeeper.shutdown();
 }
 /**
  * Returns the numeric broker id of the given server, as reported by its socket server.
  *
  * @param server broker to inspect
  * @return the broker id
  */
 @Override
 public int getBrokerId(KafkaServer server) {
   return server.socketServer().brokerId();
 }
 /** Shuts down the wrapped Kafka server. */
 public void stop() {
   server.shutdown();
 }
// Example 19
 /** Stops the local broker, waits for shutdown to finish, then removes its log dir. */
 public void stopKafkaServer() {
   kserver.shutdown();
   kserver.awaitShutdown();
   Utils.rm(kafkalogdir);
 }
// Example 20
 /** Shuts down the local Kafka instance started by {@code setupServer}. */
 public void stop() {
   kafka_server.shutdown();
   logger.debug("local kafka is a stop");
 }
 /**
  * This broker's `metadata.broker.list` value. Example: `127.0.0.1:9092`.
  *
  * <p>You can use this to tell Kafka producers and consumers how to connect to this instance.
  */
 public String brokerList() {
   final String host = kafka.config().hostName();
   final int port = kafka.boundPort(SecurityProtocol.PLAINTEXT);
   return host + ":" + port;
 }