public void createTopic(
    String topic, int numPartitions, int replicationFactor, Properties topicConfig) {
  // Only create the topic if it does not exist yet, then wait for the metadata to propagate.
  if (!AdminUtils.topicExists(server.zkClient(), topic)) {
    AdminUtils.createTopic(
        server.zkClient(), topic, numPartitions, replicationFactor, topicConfig);
    awaitPropagation(topic, 0, 2000L);
  }
}
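The awaitPropagation(...) helper is not shown in this snippet. Below is a minimal sketch of what such a helper might look like, assuming a (topic, partition, timeoutMs) signature and the same Scala AdminUtils metadata API used in the other examples on this page; it is an illustrative assumption, not the original implementation.

// Hypothetical sketch only: poll topic metadata until the given partition has a leader
// or the timeout expires. Assumes access to the same ZkClient used by createTopic().
private void awaitPropagation(String topic, int partition, long timeoutMs) throws Exception {
  final long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    scala.collection.Seq<PartitionMetadata> partitions =
        AdminUtils.fetchTopicMetadataFromZk(topic, server.zkClient()).partitionsMetadata();
    if (partitions.size() > partition && partitions.apply(partition).leader().isDefined()) {
      return; // a leader has been elected, so the topic metadata has propagated
    }
    Thread.sleep(100);
  }
  throw new IllegalStateException("Topic " + topic + " did not propagate in time");
}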
@Override
public void createTestTopic(
    String topic, int numberOfPartitions, int replicationFactor, Properties topicConfig) {
  // create topic with one client
  LOG.info("Creating topic {}", topic);

  ZkClient creator = createZkClient();

  AdminUtils.createTopic(creator, topic, numberOfPartitions, replicationFactor, topicConfig);
  creator.close();

  // validate that the topic has been created
  final long deadline = System.currentTimeMillis() + 30000;
  do {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      // restore interrupted state
      Thread.currentThread().interrupt();
    }
    List<KafkaTopicPartitionLeader> partitions =
        FlinkKafkaConsumer08.getPartitionsForTopic(
            Collections.singletonList(topic), standardProps);
    if (partitions != null && partitions.size() > 0) {
      return;
    }
  } while (System.currentTimeMillis() < deadline);
  fail("Test topic could not be created");
}
@Override
public void deleteTestTopic(String topic) {
  LOG.info("Deleting topic {}", topic);

  ZkClient zk = createZkClient();
  AdminUtils.deleteTopic(zk, topic);
  zk.close();
}
/**
 * Creates a Kafka topic if needed, or tries to increase its partition count to the desired number.
 */
private Collection<Partition> ensureTopicCreated(
    final String topicName, final int numPartitions, int replicationFactor) {
  final int sessionTimeoutMs = 10000;
  final int connectionTimeoutMs = 10000;
  final ZkClient zkClient =
      new ZkClient(zkAddress, sessionTimeoutMs, connectionTimeoutMs, utf8Serializer);
  try {
    // The following is basically copy/paste from AdminUtils.createTopic() with
    // createOrUpdateTopicPartitionAssignmentPathInZK(..., update=true)
    final Properties topicConfig = new Properties();
    Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
    final scala.collection.Map<Object, Seq<Object>> replicaAssignment =
        AdminUtils.assignReplicasToBrokers(brokerList, numPartitions, replicationFactor, -1, -1);
    retryOperations.execute(
        new RetryCallback<Object, RuntimeException>() {
          @Override
          public Object doWithRetry(RetryContext context) throws RuntimeException {
            AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(
                zkClient, topicName, replicaAssignment, topicConfig, true);
            return null;
          }
        });
    try {
      Collection<Partition> partitions =
          retryOperations.execute(
              new RetryCallback<Collection<Partition>, Exception>() {
                @Override
                public Collection<Partition> doWithRetry(RetryContext context) throws Exception {
                  connectionFactory.refreshMetadata(Collections.singleton(topicName));
                  Collection<Partition> partitions = connectionFactory.getPartitions(topicName);
                  if (partitions.size() < numPartitions) {
                    throw new IllegalStateException(
                        "The number of expected partitions was: "
                            + numPartitions
                            + ", but "
                            + partitions.size()
                            + " have been found instead");
                  }
                  connectionFactory.getLeaders(partitions);
                  return partitions;
                }
              });
      return partitions;
    } catch (Exception e) {
      logger.error("Cannot initialize Binder", e);
      throw new BinderException("Cannot initialize binder:", e);
    }
  } finally {
    zkClient.close();
  }
}
@Test
public void testKafkaConsumer09Read() throws IOException, StageException {
  int zkConnectionTimeout = 6000;
  int zkSessionTimeout = 6000;

  EmbeddedZookeeper zookeeper = new EmbeddedZookeeper();
  String zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
  ZkUtils zkUtils = ZkUtils.apply(
      zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());

  int port = TestUtil.getFreePort();
  KafkaServer kafkaServer = TestUtil.createKafkaServer(port, zkConnect);

  final String topic = "TestKafkaConsumer09_1";
  final String message = "Hello StreamSets";

  Source.Context sourceContext = ContextInfoCreator.createSourceContext(
      "s", false, OnRecordError.TO_ERROR, ImmutableList.of("a"));

  Map<String, Object> props = new HashMap<>();
  props.put("auto.commit.interval.ms", "1000");
  props.put("auto.offset.reset", "earliest");
  props.put("session.timeout.ms", "30000");
  SdcKafkaConsumer sdcKafkaConsumer =
      createSdcKafkaConsumer("localhost:" + port, topic, 1000, sourceContext, props, "test");
  sdcKafkaConsumer.validate(new ArrayList<Stage.ConfigIssue>(), sourceContext);
  sdcKafkaConsumer.init();

  // produce some messages to topic
  produce(topic, "localhost:" + port, message);

  // read
  List<MessageAndOffset> read = new ArrayList<>();
  while (read.size() < 10) {
    MessageAndOffset messageAndOffset = sdcKafkaConsumer.read();
    if (messageAndOffset != null) {
      read.add(messageAndOffset);
    }
  }

  // verify
  Assert.assertNotNull(read);
  Assert.assertEquals(10, read.size());
  verify(read, message);

  // delete topic and shutdown
  AdminUtils.deleteTopic(zkUtils, topic);
  kafkaServer.shutdown();
  zookeeper.shutdown();
}
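The produce(...) helper referenced in the test above is not part of this snippet. Below is a minimal sketch of what it might look like, assuming it sends ten copies of the message with the standard Kafka producer client; the helper body and the message count are assumptions chosen to match the test's assertion, not the original implementation.

// Hypothetical sketch of the produce(...) helper used in the test above.
// Assumes org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} are imported.
private static void produce(String topic, String bootstrapServers, String message) {
  Properties props = new Properties();
  props.put("bootstrap.servers", bootstrapServers);
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
    // send ten records so the consuming test can assert on a count of 10
    for (int i = 0; i < 10; i++) {
      producer.send(new ProducerRecord<>(topic, message));
    }
    producer.flush();
  }
}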
@Override
public int getLeaderToShutDown(String topic) throws Exception {
  ZkClient zkClient = createZkClient();
  PartitionMetadata firstPart = null;
  do {
    if (firstPart != null) {
      LOG.info("Unable to find leader. error code {}", firstPart.errorCode());
      // not the first try. Sleep a bit
      Thread.sleep(150);
    }
    Seq<PartitionMetadata> partitionMetadata =
        AdminUtils.fetchTopicMetadataFromZk(topic, zkClient).partitionsMetadata();
    firstPart = partitionMetadata.head();
  } while (firstPart.errorCode() != 0);

  zkClient.close();
  return firstPart.leader().get().id();
}
public void start() throws Exception {
  // start an embedded ZooKeeper server and connect a client to it
  zkServer = new TestingServer(zkPort, true);
  zkClient = new ZkClient(zkServer.getConnectString(), 10000, 10000, ZKStringSerializer$.MODULE$);

  // temporary directory for the broker's log segments
  File logs = Files.createTempDirectory("kafka_tmp").toFile();
  logs.deleteOnExit();
  LOGGER.debug("Created temp log dir: {}", logs.getAbsolutePath());

  // minimal single-broker configuration
  Properties serverProperties = new Properties();
  serverProperties.put("zookeeper.connect", zkServer.getConnectString());
  serverProperties.put("broker.id", "1");
  serverProperties.put("host.name", "localhost");
  serverProperties.put("port", String.valueOf(kafkaPort));
  serverProperties.put("log.dir", logs.getAbsolutePath());
  serverProperties.put("log.flush.interval.messages", "1");
  kafkaServer = new KafkaServerStartable(new KafkaConfig(serverProperties));
  kafkaServer.startup();

  // create the test topic with a replication factor of 1
  AdminUtils.createTopic(zkClient, topicName, topicPartitions, 1, new Properties());
}
public static void main(String[] args) {
  EmbeddedZookeeper embeddedZookeeper = new EmbeddedZookeeper(MyConstants.KAFKA_ZK_PORT);

  List<Integer> kafkaPorts = new ArrayList<Integer>();
  // use a fixed broker port here; -1 would pick any available port
  kafkaPorts.add(9092);
  EmbeddedKafkaCluster embeddedKafkaCluster =
      new EmbeddedKafkaCluster(embeddedZookeeper.getConnection(), new Properties(), kafkaPorts);

  try {
    embeddedZookeeper.startup();
  } catch (IOException e) {
    e.printStackTrace();
  }
  System.out.println("### Embedded Zookeeper connection: " + embeddedZookeeper.getConnection());

  embeddedKafkaCluster.startup();
  System.out.println(
      "### Embedded Kafka cluster broker list: " + embeddedKafkaCluster.getBrokerList());
  // embeddedKafkaCluster.shutdown();
  // embeddedZookeeper.shutdown();

  // Create a ZooKeeper client
  int sessionTimeoutMs = 10000;
  int connectionTimeoutMs = 10000;
  ZkClient zkClient = new ZkClient(
      "localhost:" + MyConstants.KAFKA_ZK_PORT,
      sessionTimeoutMs,
      connectionTimeoutMs,
      ZKStringSerializer$.MODULE$);

  // Create a topic named "gps" with N partitions and a replication factor of 1
  String topicName = MyConstants.TOPIC_NAME;
  int numPartitions = MyConstants.NUM_PARTITIONS;
  int replicationFactor = 1;
  Properties topicConfig = new Properties();
  AdminUtils.createTopic(zkClient, topicName, numPartitions, replicationFactor, topicConfig);
}
/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic The name of the topic.
 * @param partitions The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
public void createTopic(String topic, int partitions, int replication, Properties topicConfig) {
  log.debug(
      "Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
      topic,
      partitions,
      replication,
      topicConfig);

  // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
  // createTopic() will only seem to work (it will return without error). The topic will exist
  // only in ZooKeeper and will be returned when listing topics, but Kafka itself does not
  // create the topic.
  ZkClient zkClient = new ZkClient(
      zookeeperConnect(),
      DEFAULT_ZK_SESSION_TIMEOUT_MS,
      DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
      ZKStringSerializer$.MODULE$);

  boolean isSecure = false;
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure);
  AdminUtils.createTopic(
      zkUtils, topic, partitions, replication, topicConfig, RackAwareMode.Enforced$.MODULE$);
  zkClient.close();
}
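A minimal usage sketch for the method above, assuming an embedded-cluster helper that exposes createTopic() and zookeeperConnect() as in the snippet; the cluster variable, topic name, and config values here are illustrative assumptions.

// Hypothetical caller: create a compacted topic and verify that Kafka can see it.
Properties config = new Properties();
config.put("cleanup.policy", "compact");
cluster.createTopic("user-profiles", 3, 1, config);

// Existence can be checked through a ZkUtils handle built from the same connect string.
ZkUtils zkUtils = ZkUtils.apply(cluster.zookeeperConnect(), 10000, 8000, false);
boolean exists = AdminUtils.topicExists(zkUtils, "user-profiles");
zkUtils.close();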
public boolean isTopicAvailable() {
  return AdminUtils.topicExists(zkClient, topicName);
}
public void createTopic(String topicName, int topicPartitions) {
  AdminUtils.createTopic(zkClient, topicName, topicPartitions, 1, new Properties());
  LOGGER.debug("Created topic '{}' with {} partitions", topicName, topicPartitions);
}