  @Test
  public void testProduceAMessage() throws TimeoutException {

    // Produce a message so we can check new offsets.
    ProducerConfig conf = kafkaRule.producerConfigWithStringEncoder();
    Producer<String, String> producer = new Producer<>(conf);
    producer.send(new KeyedMessage<>(testTopicName, "key", "value"));
    producer.close();

    // Verify publish
    List<String> messages = kafkaRule.readStringMessages(testTopicName, 1);
    assertThat(messages, is(notNullValue()));
    assertThat(messages.size(), is(1));
    assertThat(messages.get(0), is("value"));
  }

  @Before
  public void setUp() throws Exception {

    File tempDir = Files.createTempDir();
    tempDir.deleteOnExit();

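    // Create a local HBase testing utility; its mini cluster will host the offset table.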
    htu = HBaseTestingUtility.createLocalHTU();
    try {
      htu.cleanupTestDir();

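      // Start an in-process ZooKeeper ensemble and a single-master, single-region-server HBase mini cluster.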
      htu.startMiniZKCluster();
      htu.startMiniHBaseCluster(1, 1);

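      // Make sure the offset table starts empty: drop any leftover copy, then recreate it.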
      try {
        htu.deleteTable(Bytes.toBytes(tableName));
      } catch (Exception e) {
        Log.info(" - no table " + tableName + " found");
      }
      htu.createTable(Bytes.toBytes(tableName), colFam);

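      // Build the HBase-backed offset store against the mini cluster's configuration and the test table.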
      dao =
          new Hbase1OffsetStore.Builder()
              .setHbaseConfiguration(htu.getConfiguration())
              .setOffsetTable(tableName)
              .build();

    } catch (Exception e1) {
      throw new RuntimeException(e1);
    }

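    // Wire the offset manager to the HBase-backed store and the embedded Kafka broker provided by kafkaRule.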
    KOM =
        new KafkaOffsetManager.Builder()
            .setOffsetManager(dao)
            .setKafkaBrokerList("localhost:" + kafkaRule.kafkaBrokerPort())
            .setGroupID(testGroupID)
            .setTopic(testTopicName)
            .build();
  }