@After
  public void tearDown() throws Exception {
    if (restApp != null) {
      restApp.stop();
    }

    if (servers != null) {
      // Stop every broker before touching its on-disk state.
      for (KafkaServer broker : servers) {
        broker.shutdown();
      }

      // With all brokers down, wipe the log directories they left behind.
      for (KafkaServer broker : servers) {
        for (String dir : JavaConversions.asJavaCollection(broker.config().logDirs())) {
          CoreUtils.rm(dir);
        }
      }
    }

    if (zkUtils != null) {
      zkUtils.close();
    }

    if (zookeeper != null) {
      zookeeper.shutdown();
    }
  }
  @Override
  public void stop(boolean cleanUp) throws Exception {
    LOG.info("KAFKA: Stopping Kafka on port: " + kafkaPort);
    kafkaServer.shutdown();

    // Guard clause: nothing more to do unless the caller asked for cleanup.
    if (!cleanUp) {
      return;
    }
    cleanUp();
  }
  @Test
  public void testKafkaConsumer09Read() throws IOException, StageException {
    int zkConnectionTimeout = 6000;
    int zkSessionTimeout = 6000;

    EmbeddedZookeeper zookeeper = new EmbeddedZookeeper();
    String zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    ZkUtils zkUtils =
        ZkUtils.apply(
            zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());

    int port = TestUtil.getFreePort();
    KafkaServer kafkaServer = TestUtil.createKafkaServer(port, zkConnect);

    final String topic = "TestKafkaConsumer09_1";
    final String message = "Hello StreamSets";

    try {
      Source.Context sourceContext =
          ContextInfoCreator.createSourceContext(
              "s", false, OnRecordError.TO_ERROR, ImmutableList.of("a"));

      Map<String, Object> props = new HashMap<>();
      props.put("auto.commit.interval.ms", "1000");
      props.put("auto.offset.reset", "earliest");
      props.put("session.timeout.ms", "30000");
      SdcKafkaConsumer sdcKafkaConsumer =
          createSdcKafkaConsumer("localhost:" + port, topic, 1000, sourceContext, props, "test");
      sdcKafkaConsumer.validate(new ArrayList<Stage.ConfigIssue>(), sourceContext);
      sdcKafkaConsumer.init();

      // produce some messages to topic
      produce(topic, "localhost:" + port, message);

      // read until 10 messages arrive (read() returns null when nothing is available yet)
      List<MessageAndOffset> read = new ArrayList<>();
      while (read.size() < 10) {
        MessageAndOffset messageAndOffset = sdcKafkaConsumer.read();
        if (messageAndOffset != null) {
          read.add(messageAndOffset);
        }
      }
      // verify
      Assert.assertNotNull(read);
      Assert.assertEquals(10, read.size());
      verify(read, message);

      AdminUtils.deleteTopic(zkUtils, topic);
    } finally {
      // Previously zkUtils was never closed (resource leak) and the broker/ZK shutdowns
      // were skipped whenever an assertion or read failed; run all teardown unconditionally.
      zkUtils.close();
      kafkaServer.shutdown();
      zookeeper.shutdown();
    }
  }
 /** Stops the embedded broker and removes its on-disk log directory. */
 public void stop() {
   log.debug(
       "Shutting down embedded Kafka broker at {} (with ZK ensemble at {}) ...",
       brokerList(),
       zookeeperConnect());
   kafka.shutdown();
   kafka.awaitShutdown();

   // Wipe the broker's data directory so repeated runs start clean.
   log.debug("Removing logs.dir at {} ...", logDir);
   final List<String> dirs = Collections.singletonList(logDir.getAbsolutePath());
   CoreUtils.delete(scala.collection.JavaConversions.asScalaBuffer(dirs).seq());
   tmpFolder.delete();

   log.debug(
       "Shutdown of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
       brokerList(),
       zookeeperConnect());
 }
// Example #5
// 0
 /**
  * Shuts down the embedded Kafka broker and ZooKeeper server and deletes the broker's
  * log directories.
  *
  * @throws Exception if waiting for the ZooKeeper shutdown condition fails
  */
 public void shutdown() throws Exception {
   log.info("Shutting down Kafka server");
   // Shut the controller down explicitly first to avoid a shutdown hang; see
   // https://issues.apache.org/jira/browse/KAFKA-1887
   server.kafkaController().shutdown();
   server.shutdown();
   server.awaitShutdown();

   // Remove the broker's persisted log segments; best-effort per directory.
   final Seq<String> logDirs = server.config().logDirs();
   for (String f : JavaConversions.asJavaCollection(logDirs)) {
     try {
       deleteRecursively(new File(f));
     } catch (IOException e) {
       // Pass the Throwable itself so the stack trace is logged — the original passed
       // e.getMessage(), which filled the Throwable slot with a String and lost the trace.
       log.warn("Cannot delete file: " + f, e);
     }
   }
   zookeeper.shutdown();
   awaitCond(aVoid -> !zookeeper.isRunning(), 2000, 100);
   log.info("ZooKeeper server shut down.");
   // NOTE(review): fixed pause — presumably lets lingering ZK/broker threads release
   // ports before the next test starts; confirm whether it is still needed.
   Thread.sleep(2000);
 }
  @Override
  public void shutdown() {
    // Take down every broker, then forget them.
    if (brokers != null) {
      for (KafkaServer server : brokers) {
        if (server != null) {
          server.shutdown();
        }
      }
      brokers.clear();
    }

    // Stop ZooKeeper; a failure here is logged but does not abort the shutdown.
    if (zookeeper != null) {
      try {
        zookeeper.stop();
        zookeeper.close();
      } catch (Exception e) {
        LOG.warn("ZK.stop() failed", e);
      }
      zookeeper = null;
    }

    // Best-effort removal of the temporary directories; failures are deliberately ignored.
    if (tmpKafkaParent != null && tmpKafkaParent.exists()) {
      try {
        FileUtils.deleteDirectory(tmpKafkaParent);
      } catch (Exception ignored) {
        // temp-dir cleanup is not critical
      }
    }
    if (tmpZkDir != null && tmpZkDir.exists()) {
      try {
        FileUtils.deleteDirectory(tmpZkDir);
      } catch (Exception ignored) {
        // temp-dir cleanup is not critical
      }
    }
  }
// Example #7
// 0
 /** Stops every broker in the cluster, then the ZooKeeper instance backing them. */
 public void shutdown() {
   for (KafkaServer server : brokers) {
     server.shutdown();
   }
   zookeeper.shutdown();
 }
// Example #8
// 0
 /** Shuts down the local kafka instance. */
 public void stop() {
   kafka_server.shutdown();
   // Fixed garbled log text (was "local kafka is a stop").
   logger.debug("local kafka has stopped");
 }
 /** Stops the embedded Kafka server. */
 public void stop() {
   server.shutdown();
 }
 /** Shuts down the Kafka broker, waits for completion, and removes its log directory. */
 public void stopKafkaServer() {
   kserver.shutdown();
   kserver.awaitShutdown();
   // Delete on-disk logs so subsequent test runs start from a clean state.
   Utils.rm(kafkalogdir);
 }