// NOTE(review): disabled (@Ignore) and then fully commented-out JUnit teardown,
// kept for reference; re-expanded here for readability. The in-process
// kafka/zookeeper shutdown calls were already commented out before disabling.
// @Ignore
// @After
// public void tearDown() throws IOException {
//     kafkaSink.stop();
//     simpleConsumer.close();
//     // kafkaServer.shutdown();
//     // zookeeperServer.shutdown();
// }
// NOTE(review): disabled (@Ignore) and then fully commented-out end-to-end test,
// re-expanded here for readability. Flow: put a JSON event (body + headers) on
// the channel in a transaction, run the sink once, then fetch from the "test"
// topic (partition 0, offset 0) via SimpleConsumer and compare the round-tripped
// payload. Header values are never asserted — only the JSON body is checked.
// NOTE(review): Assert.assertEquals(message, "...") puts the actual value first;
// JUnit's convention is assertEquals(expected, actual) — swap before re-enabling
// so failure messages read correctly.
// NOTE(review): the messageSet.sizeInBytes() > 0 assertion is commented out, so
// an empty fetch (loop body never runs) would pass silently — confirm intent.
// @Ignore
// @Test
// public void test() throws EventDeliveryException, UnsupportedEncodingException {
//     Transaction tx = channel.getTransaction();
//     tx.begin();
//     ObjectNode jsonBody = new ObjectNode(JsonNodeFactory.instance);
//     jsonBody.put("myString", "foo");
//     jsonBody.put("myInt32", 32);
//     Map<String, String> headers = new HashMap<String, String>();
//     headers.put("myString", "bar");
//     headers.put("myInt64", "64");
//     headers.put("myBoolean", "true");
//     headers.put("myDouble", "1.0");
//     headers.put("myNull", "foobar");
//     Event event = EventBuilder.withBody(jsonBody.toString().getBytes(Charsets.UTF_8), headers);
//     channel.put(event);
//     tx.commit();
//     tx.close();
//     kafkaSink.process();
//     kafka.api.FetchRequest req =
//             new FetchRequestBuilder().clientId(CLIENT_ID).addFetch("test", 0, 0L, 100).build();
//     FetchResponse fetchResponse = simpleConsumer.fetch(req);
//     ByteBufferMessageSet messageSet = fetchResponse.messageSet("test", 0);
//     // Assert.assertTrue(messageSet.sizeInBytes() > 0);
//     for (MessageAndOffset messageAndOffset : messageSet) {
//         ByteBuffer payload = messageAndOffset.message().payload();
//         byte[] bytes = new byte[payload.limit()];
//         payload.get(bytes);
//         String message = new String(bytes, "UTF-8");
//         Assert.assertNotNull(message);
//         Assert.assertEquals(message, "{\"myString\":\"foo\",\"myInt32\":32}");
//     }
// }
// NOTE(review): disabled (@Ignore) and then fully commented-out JUnit setup,
// re-expanded here for readability. Flow: read zookeeper/kafka host lists from
// Typesafe config into comma-joined strings, open a SimpleConsumer against the
// kafka host, then wire a KafkaSink to a started MemoryChannel. The embedded
// in-process zookeeper/kafka server startup was already commented out.
// NOTE(review): the second LOGGER.debug prints KAFKA_HOSTS but its label still
// says "Zookeeper hosts" — copy/paste typo to fix if this is re-enabled.
// NOTE(review): KAFKA_HOSTS.split(":") assumes a single host:port entry; with a
// multi-host list ("h1:9092,h2:9092") connection[1] would be "9092,h2" and
// Integer.parseInt would throw — confirm the config only ever has one host.
// NOTE(review): ZOOKEEPER_HOSTS/KAFKA_HOSTS are appended with += each run;
// presumably they start empty ("") — if they are static fields, repeated setUp
// calls would accumulate hosts. Verify against the (unseen) field declarations.
// @Ignore
// @Before
// public void setUp() {
//     conf = ConfigFactory.load();
//     List<String> zkHosts = conf.getStringList("zookeeper.hosts");
//     for (String host : zkHosts) {
//         ZOOKEEPER_HOSTS += host + ",";
//     }
//     ZOOKEEPER_HOSTS = ZOOKEEPER_HOSTS.substring(0, ZOOKEEPER_HOSTS.length() - 1);
//     List<String> kafkaHosts = conf.getStringList("kafka.hosts");
//     for (String host : kafkaHosts) {
//         KAFKA_HOSTS += host + ",";
//     }
//     KAFKA_HOSTS = KAFKA_HOSTS.substring(0, KAFKA_HOSTS.length() - 1);
//     LOGGER.debug("Using Zookeeper hosts: " + ZOOKEEPER_HOSTS);
//     LOGGER.debug("Using Zookeeper hosts: " + KAFKA_HOSTS);
//     // try {
//     //     zookeeperServer = new ZookeeperServer();
//     //     zookeeperServer.start();
//     // } catch (Exception e) {
//     //     e.printStackTrace();
//     // }
//     // try {
//     //     kafkaServer = new KafkaServer();
//     //     kafkaServer.start();
//     // } catch (Exception e) {
//     //     e.printStackTrace();
//     // }
//     String[] connection = KAFKA_HOSTS.split(":");
//     // simpleConsumer = new SimpleConsumer("localhost", 9092, 60000, 1024, CLIENT_ID);
//     simpleConsumer = new SimpleConsumer(connection[0], Integer.parseInt(connection[1]), 60000, 1024, CLIENT_ID);
//     kafkaSink = new KafkaSink();
//     Context kafkaContext = new Context();
//     kafkaContext.put("topic", "test");
//     kafkaContext.put("writeBody", "false");
//     kafkaContext.put("kafka.metadata.broker.list", KAFKA_HOSTS);
//     kafkaContext.put("kafka.serializer.class", "kafka.serializer.StringEncoder");
//     Configurables.configure(kafkaSink, kafkaContext);
//     Context channelContext = new Context();
//     channelContext.put("capacity", "10000");
//     channelContext.put("transactionCapacity", "200");
//     channel = new MemoryChannel();
//     channel.setName("junitChannel");
//     Configurables.configure(channel, channelContext);
//     kafkaSink.setChannel(channel);
//     channel.start();
//     kafkaSink.start();
// }