public void shutdown() throws IOException {
    kafka.shutdown();
    if (zookeeper.getState().equals(CuratorFrameworkState.STARTED)) {
        zookeeper.close();
    }
    server.close();
    FileUtils.deleteQuietly(new File(getLogDir()));
}
public void start() throws Exception {
    zkServer = new TestingServer(zkPort, true);
    zkClient = KafkaUtils.createZkClient(zkServer.getConnectString());
    kafkaServer = new KafkaServerStartable(new KafkaConfig(getServerProperties()));
    kafkaServer.startup();
    LOGGER.debug("Started Kafka server at port {}", kafkaPort);
}
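// The snippet above relies on a getServerProperties() helper that is not shown here.
// A minimal sketch of what it might return, assuming the same broker settings used by
// the embedded-broker variant further below (broker.id, host.name, port, log.dir,
// log.flush.interval.messages) and the zkServer/kafkaPort fields of the surrounding
// class; requires java.nio.file.Files, java.io.File, and java.util.Properties imports.
private Properties getServerProperties() throws IOException {
    File logs = Files.createTempDirectory("kafka_tmp").toFile();
    logs.deleteOnExit();
    Properties serverProperties = new Properties();
    serverProperties.put("zookeeper.connect", zkServer.getConnectString());
    serverProperties.put("broker.id", "1");
    serverProperties.put("host.name", "localhost");
    serverProperties.put("port", String.valueOf(kafkaPort));
    serverProperties.put("log.dir", logs.getAbsolutePath());
    serverProperties.put("log.flush.interval.messages", "1");
    return serverProperties;
}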
public void stop() throws IOException {
    if (zkClient != null) {
        zkClient.close();
    }
    if (kafkaServer != null) {
        kafkaServer.shutdown();
    }
    if (zkServer != null) {
        zkServer.stop();
    }
}
@AfterClass
public static void shutdown() {
    kafkaGateway.stop();
    pluginRegistry.stop();
    simpleKafkaGateway.stop();
    pluginRegistry2.stop();
    try {
        if (kafkaServer != null) {
            kafkaServer.shutdown();
            kafkaServer.awaitShutdown();
        }
        zkTestServer.shutdownNetwork();
    } finally {
        try {
            FileUtils.deleteDirectory(kafkaLogFile);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
public KafkaTestBroker() {
    try {
        server = new TestingServer();
        String zookeeperConnectionString = server.getConnectString();
        ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
        zookeeper = CuratorFrameworkFactory.newClient(zookeeperConnectionString, retryPolicy);
        zookeeper.start();
        port = InstanceSpec.getRandomPort();
        kafka.server.KafkaConfig config = buildKafkaConfig(zookeeperConnectionString);
        kafka = new KafkaServerStartable(config);
        kafka.startup();
    } catch (Exception ex) {
        throw new RuntimeException("Could not start test broker", ex);
    }
}
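// buildKafkaConfig(...) is referenced above but not shown. A minimal sketch, assuming
// a single-broker configuration analogous to the other embedded brokers in this
// section (temp log directory, the random port chosen above); the exact properties
// used by the original class may differ.
private kafka.server.KafkaConfig buildKafkaConfig(String zookeeperConnectionString) throws IOException {
    File logDir = Files.createTempDirectory("kafka-test-broker").toFile();
    logDir.deleteOnExit();
    Properties props = new Properties();
    props.setProperty("zookeeper.connect", zookeeperConnectionString);
    props.setProperty("broker.id", "1");
    props.setProperty("host.name", "localhost");
    props.setProperty("port", Integer.toString(port));
    props.setProperty("log.dir", logDir.getAbsolutePath());
    return new kafka.server.KafkaConfig(props);
}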
public void start() throws Exception {
    // Start an in-process ZooKeeper server and a ZkClient bound to it.
    zkServer = new TestingServer(zkPort, true);
    zkClient = new ZkClient(zkServer.getConnectString(), 10000, 10000, ZKStringSerializer$.MODULE$);

    // Use a throwaway log directory so the broker leaves nothing behind.
    File logs = Files.createTempDirectory("kafka_tmp").toFile();
    logs.deleteOnExit();
    LOGGER.debug("Created temp log dir: {}", logs.getAbsolutePath());

    Properties serverProperties = new Properties();
    serverProperties.put("zookeeper.connect", zkServer.getConnectString());
    serverProperties.put("broker.id", "1");
    serverProperties.put("host.name", "localhost");
    serverProperties.put("port", String.valueOf(kafkaPort));
    serverProperties.put("log.dir", logs.getAbsolutePath());
    serverProperties.put("log.flush.interval.messages", "1");

    kafkaServer = new KafkaServerStartable(new KafkaConfig(serverProperties));
    kafkaServer.startup();

    // Pre-create the test topic with the requested partition count and replication factor 1.
    AdminUtils.createTopic(zkClient, topicName, topicPartitions, 1, new Properties());
}
public void stop() throws IOException {
    if (zkClient != null) {
        zkClient.close();
    }
    if (kafkaServer != null) {
        kafkaServer.shutdown();
    }
    if (zkServer != null) {
        zkServer.stop();
    }
    LOGGER.debug("Zookeeper / Kafka services stopped!");
}
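// A hedged usage sketch showing how a JUnit test might drive the start()/stop() pair
// above. EmbeddedKafkaFixture and its constructor parameters are illustrative
// assumptions, not the original class; only the start()/stop() contract comes from
// the snippets in this section.
public class EmbeddedKafkaExampleTest {
    private static EmbeddedKafkaFixture fixture;

    @BeforeClass
    public static void setUp() throws Exception {
        // Hypothetical constructor: ZooKeeper port, Kafka port, topic name, partition count.
        fixture = new EmbeddedKafkaFixture(2181, 9092, "test", 1);
        fixture.start();
    }

    @AfterClass
    public static void tearDown() throws IOException {
        fixture.stop();
    }

    @Test
    public void brokerIsReachable() {
        // Produce to and consume from localhost:9092 here.
    }
}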
@BeforeClass
public static void init() throws Exception {
    // Start an in-process ZooKeeper test server on an ephemeral port.
    final ShutdownRegistryImpl shutdownRegistry = new ShutdownRegistryImpl();
    try {
        zkTestServer = new ZooKeeperTestServer(0, shutdownRegistry, ZooKeeperTestServer.DEFAULT_SESSION_TIMEOUT);
        port = zkTestServer.startNetwork();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    // Start an embedded Kafka broker on port 9092, backed by the local ZooKeeper.
    Properties kafkaProps = new Properties();
    kafkaProps.setProperty("num.partitions", "1");
    kafkaProps.setProperty("port", "9092");
    kafkaProps.setProperty("broker.id", "0");
    kafkaProps.setProperty("log.dir", "/tmp/sensei-gateway-test-kafka-logs");
    // Override to point at the locally running ZooKeeper test server.
    kafkaProps.setProperty("zookeeper.connect", "localhost:" + port);

    kafkaLogFile = new File(kafkaProps.getProperty("log.dir"));
    FileUtils.deleteDirectory(kafkaLogFile);

    KafkaConfig kafkaConfig = new KafkaConfig(kafkaProps);
    kafkaServer = new KafkaServerStartable(kafkaConfig);
    kafkaServer.startup();

    // Build and start the ZooKeeper-based Kafka gateway.
    Configuration config = new PropertiesConfiguration();
    config.addProperty("sensei.gateway.class", "com.senseidb.gateway.kafka.KafkaDataProviderBuilder");
    config.addProperty("sensei.gateway.kafka.group.id", "1");
    config.addProperty("sensei.gateway.kafka.zookeeper.connect", "localhost:" + port);
    config.addProperty("sensei.gateway.kafka.auto.offset.reset", "smallest");
    config.addProperty("sensei.gateway.kafka.topic", "test");
    config.addProperty("sensei.gateway.provider.batchSize", "1");
    pluginRegistry = SenseiPluginRegistry.build(config);
    pluginRegistry.start();
    kafkaGateway = pluginRegistry.getBeanByFullPrefix("sensei.gateway", SenseiGateway.class);
    kafkaGateway.start();

    // Build and start the simple gateway that talks to the broker directly.
    config = new PropertiesConfiguration();
    config.addProperty("sensei.gateway.class", "com.senseidb.gateway.kafka.SimpleKafkaGateway");
    config.addProperty("sensei.gateway.kafka.host", "localhost");
    config.addProperty("sensei.gateway.kafka.port", "9092");
    config.addProperty("sensei.gateway.kafka.topic", "test");
    config.addProperty("sensei.gateway.kafka.timeout", "3000");
    config.addProperty("sensei.gateway.provider.batchSize", "1");
    pluginRegistry2 = SenseiPluginRegistry.build(config);
    pluginRegistry2.start();
    simpleKafkaGateway = pluginRegistry2.getBeanByFullPrefix("sensei.gateway", SenseiGateway.class);
    simpleKafkaGateway.start();

    // Publish the test documents to the "test" topic.
    Properties props = new Properties();
    props.put("metadata.broker.list", "localhost:9092");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    ProducerConfig producerConfig = new ProducerConfig(props);
    Producer<String, String> kafkaProducer = new Producer<String, String>(producerConfig);
    for (JSONObject jsonObj : BaseGatewayTestUtil.dataList) {
        KeyedMessage<String, String> data = new KeyedMessage<String, String>("test", jsonObj.toString());
        kafkaProducer.send(data);
    }
}