  @Before
  public void setUp() throws Exception {
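    // Start an in-process ZooKeeper and open a ZkUtils handle against it.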
    zookeeper = new EmbeddedZookeeper();
    zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    zkUtils =
        ZkUtils.apply(
            zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());
    zkClient = zkUtils.zkClient();

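    // Create and start numBrokers Kafka brokers, each on randomly allocated ports.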
    configs = new Vector<>();
    servers = new Vector<>();
    for (int i = 0; i < numBrokers; i++) {
      final Option<java.io.File> noFile = scala.Option.apply(null);
      final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
      Properties props =
          TestUtils.createBrokerConfig(
              i,
              zkConnect,
              false,
              false,
              TestUtils.RandomPort(),
              noInterBrokerSecurityProtocol,
              noFile,
              true,
              false,
              TestUtils.RandomPort(),
              false,
              TestUtils.RandomPort(),
              false,
              TestUtils.RandomPort());
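      // Allow tests to auto-create topics, with a single partition each.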
      props.setProperty("auto.create.topics.enable", "true");
      props.setProperty("num.partitions", "1");
      // We *must* override this to use the ZooKeeper port we allocated (Kafka currently
      // allocates one port that it always uses for ZK).
      props.setProperty("zookeeper.connect", this.zkConnect);
      KafkaConfig config = new KafkaConfig(props);
      configs.add(config);

      KafkaServer server = TestUtils.createServer(config, SystemTime$.MODULE$);
      servers.add(server);
    }

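    // Build the PLAINTEXT bootstrap broker list from the running servers.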
    brokerList =
        TestUtils.getBrokerListStrFromServers(
            JavaConversions.asScalaBuffer(servers), SecurityProtocol.PLAINTEXT);

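    // Optionally start the REST application under test on a free port.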
    if (setupRestApp) {
      restApp = new RestApp(choosePort(), zkConnect, KAFKASTORE_TOPIC, compatibilityType);
      restApp.start();
    }
  }

  @Test
  public void testKafkaConsumer09Read() throws IOException, StageException {
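    // ZooKeeper connection/session timeouts (in milliseconds) used by ZkUtils below.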
    int zkConnectionTimeout = 6000;
    int zkSessionTimeout = 6000;

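    // Stand up a single-node embedded ZooKeeper for this test.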
    EmbeddedZookeeper zookeeper = new EmbeddedZookeeper();
    String zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    ZkUtils zkUtils =
        ZkUtils.apply(
            zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());

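    // Start one Kafka broker on a free port, backed by the embedded ZooKeeper.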
    int port = TestUtil.getFreePort();
    KafkaServer kafkaServer = TestUtil.createKafkaServer(port, zkConnect);

    final String topic = "TestKafkaConsumer09_1";
    final String message = "Hello StreamSets";

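    // Minimal StreamSets Source.Context used to drive the SDC Kafka consumer.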
    Source.Context sourceContext =
        ContextInfoCreator.createSourceContext(
            "s", false, OnRecordError.TO_ERROR, ImmutableList.of("a"));

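    // Consumer configuration; auto.offset.reset=earliest ensures the produced messages are read.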
    Map<String, Object> props = new HashMap<>();
    props.put("auto.commit.interval.ms", "1000");
    props.put("auto.offset.reset", "earliest");
    props.put("session.timeout.ms", "30000");
    SdcKafkaConsumer sdcKafkaConsumer =
        createSdcKafkaConsumer("localhost:" + port, topic, 1000, sourceContext, props, "test");
    sdcKafkaConsumer.validate(new ArrayList<Stage.ConfigIssue>(), sourceContext);
    sdcKafkaConsumer.init();

    // produce some messages to topic
    produce(topic, "localhost:" + port, message);

    // read
    List<MessageAndOffset> read = new ArrayList<>();
    while (read.size() < 10) {
      MessageAndOffset messageAndOffset = sdcKafkaConsumer.read();
      if (messageAndOffset != null) {
        read.add(messageAndOffset);
      }
    }
    // verify
    Assert.assertNotNull(read);
    Assert.assertEquals(10, read.size());
    verify(read, message);

    // delete topic and shutdown
    AdminUtils.deleteTopic(zkUtils, topic);
    kafkaServer.shutdown();
    zookeeper.shutdown();
  }
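
  // The produce(...) helper used above is not shown; the following is a minimal sketch of what it
  // could look like, assuming it writes ten String messages with the plain KafkaProducer API.
  // The message count, key scheme, and serializer choices here are assumptions for illustration,
  // not taken from the original helper.
  private void produce(String topic, String bootstrapServers, String message) {
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", bootstrapServers);
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    try (org.apache.kafka.clients.producer.KafkaProducer<String, String> producer =
        new org.apache.kafka.clients.producer.KafkaProducer<>(producerProps)) {
      for (int i = 0; i < 10; i++) {
        // Send the same payload ten times so the consumer side has a known number of records.
        producer.send(
            new org.apache.kafka.clients.producer.ProducerRecord<>(topic, String.valueOf(i), message));
      }
      producer.flush();
    }
  }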