  @Test
  public void testInitialization() {
    KafkaStore<String, String> kafkaStore =
        StoreUtils.createAndInitSSLKafkaStoreInstance(
            zkConnect, zkClient, clientSslConfigs, requireSSLClientAuth());
    kafkaStore.close();
  }

  @Test(expected = TimeoutException.class)
  public void testInitializationWithoutClientAuth() {
    // With the broker requiring SSL client auth, a client that does not present a
    // certificate should fail to initialize and time out.
    KafkaStore<String, String> kafkaStore =
        StoreUtils.createAndInitSSLKafkaStoreInstance(
            zkConnect, zkClient, clientSslConfigs, false);
    kafkaStore.close();

    // TODO: make the timeout shorter so the test fails quicker.
  }

  @Test
  public void testDoubleInitialization() {
    KafkaStore<String, String> kafkaStore =
        StoreUtils.createAndInitSSLKafkaStoreInstance(
            zkConnect, zkClient, clientSslConfigs, requireSSLClientAuth());
    try {
      kafkaStore.init();
      fail("Kafka store repeated initialization should fail");
    } catch (StoreInitializationException e) {
      // this is expected
    }
    kafkaStore.close();
  }

  @Test
  public void testSimplePut() throws InterruptedException {
    KafkaStore<String, String> kafkaStore =
        StoreUtils.createAndInitSSLKafkaStoreInstance(
            zkConnect, zkClient, clientSslConfigs, requireSSLClientAuth());
    String key = "Kafka";
    String value = "Rocks";
    try {
      try {
        kafkaStore.put(key, value);
      } catch (StoreException e) {
        throw new RuntimeException("Kafka store put(Kafka, Rocks) operation failed", e);
      }
      String retrievedValue = null;
      try {
        retrievedValue = kafkaStore.get(key);
      } catch (StoreException e) {
        throw new RuntimeException("Kafka store get(Kafka) operation failed", e);
      }
      assertEquals("Retrieved value should match entered value", value, retrievedValue);
    } finally {
      kafkaStore.close();
    }
  }
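
  // Illustrative sketch (not part of the original tests): the SSL-backed store exercised
  // above is driven by SchemaRegistryConfig properties along these lines. Names follow
  // the kafkastore.* convention used by SchemaRegistryConfig; verify them against the
  // version in use, and treat the paths and passwords as placeholders.
  //
  //   kafkastore.security.protocol=SSL
  //   kafkastore.ssl.truststore.location=/path/to/truststore.jks
  //   kafkastore.ssl.truststore.password=changeit
  //   kafkastore.ssl.keystore.location=/path/to/keystore.jks
  //   kafkastore.ssl.keystore.password=changeit
  //   kafkastore.ssl.key.password=changeit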

  public KafkaStoreReaderThread(
      String bootstrapBrokers,
      String topic,
      String groupId,
      StoreUpdateHandler<K, V> storeUpdateHandler,
      Serializer<K, V> serializer,
      Store<K, V> localStore,
      K noopKey,
      SchemaRegistryConfig config) {
    super("kafka-store-reader-thread-" + topic, false); // this thread is not interruptible
    offsetUpdateLock = new ReentrantLock();
    offsetReachedThreshold = offsetUpdateLock.newCondition();
    this.topic = topic;
    this.groupId = groupId;
    this.storeUpdateHandler = storeUpdateHandler;
    this.serializer = serializer;
    this.localStore = localStore;
    this.noopKey = noopKey;

    // The reader consumes raw bytes and always replays the topic from the beginning;
    // offsets are tracked in-process, so auto-commit is disabled.
    Properties consumerProps = new Properties();
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, this.groupId);
    consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, "KafkaStore-reader-" + this.topic);
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    consumerProps.put(
        ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
        org.apache.kafka.common.serialization.ByteArrayDeserializer.class);
    consumerProps.put(
        ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
        org.apache.kafka.common.serialization.ByteArrayDeserializer.class);

    // Propagate the security protocol (e.g. SSL) and related settings from the
    // schema registry config to the consumer.
    consumerProps.put(
        CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
        config.getString(SchemaRegistryConfig.KAFKASTORE_SECURITY_PROTOCOL_CONFIG));
    KafkaStore.addSecurityConfigsToClientProperties(config, consumerProps);

    this.consumer = new KafkaConsumer<>(consumerProps);

    // The backing topic must exist and must have exactly one partition so that
    // all reads are totally ordered.
    List<PartitionInfo> partitions = this.consumer.partitionsFor(this.topic);
    if (partitions == null || partitions.size() < 1) {
      throw new IllegalArgumentException(
          "Unable to subscribe to the Kafka topic "
              + topic
              + " backing this data store. Topic may not exist.");
    } else if (partitions.size() > 1) {
      throw new IllegalStateException(
          "Unexpected number of partitions in the "
              + topic
              + " topic. Expected 1 and instead got "
              + partitions.size());
    }

    // Manually assign the single partition (no consumer-group rebalancing) and
    // start from the beginning of the topic.
    this.topicPartition = new TopicPartition(topic, 0);
    this.consumer.assign(Arrays.asList(this.topicPartition));
    this.consumer.seekToBeginning(Arrays.asList(this.topicPartition));

    // offsetInSchemasTopic still holds its field default here; it advances as
    // records are consumed.
    log.info("Initialized last consumed offset to " + offsetInSchemasTopic);
    log.debug("Kafka store reader thread started with consumer properties " + consumerProps);
  }
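
  // Illustrative sketch only (the actual run loop is not shown in this excerpt): one way
  // the fields wired up in the constructor above could be used. The doWork() override,
  // the Serializer method names, and the Store update semantics are assumptions for
  // illustration, not the real implementation; imports (e.g. java.time.Duration,
  // ConsumerRecord/ConsumerRecords) are elided as elsewhere in this excerpt.
  public void doWork() {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<byte[], byte[]> record : records) {
      try {
        K key = serializer.deserializeKey(record.key());
        // Noop records only advance the offset; they carry no store update.
        if (!noopKey.equals(key)) {
          V value =
              record.value() == null ? null : serializer.deserializeValue(key, record.value());
          if (value == null) {
            localStore.delete(key); // a null value acts as a tombstone
          } else {
            localStore.put(key, value);
          }
          storeUpdateHandler.handleUpdate(key, value);
        }
      } catch (Exception e) {
        throw new RuntimeException("Failed to apply record from topic " + topic, e);
      }
      // Record the new offset and wake any threads blocked waiting for it.
      offsetUpdateLock.lock();
      try {
        offsetInSchemasTopic = record.offset();
        offsetReachedThreshold.signalAll();
      } finally {
        offsetUpdateLock.unlock();
      }
    }
  }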