Example #1
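A basic consumer loop built with FetchRequestBuilder: each pass requests up to 1 MB from one partition of "topic1", iterates the returned ByteBufferMessageSet, logs every payload at debug level, and advances the offset used for the next fetch.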
  @Override
  public void run() {
    long offset = 0;
    while (isAlive) {
      // create a fetch request for topic "topic1", partition 0, the current offset, and a fetch
      // size of 1 MB
      FetchRequest fetchRequest =
          new FetchRequestBuilder()
              .clientId("default_client")
              .addFetch("topic1", 0, offset, 1000000)
              .build();

      //      FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);

      // get the message set from the consumer and print them out
      ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 0);
      Iterator<MessageAndOffset> itr = messages.iterator();

      while (itr.hasNext() && isAlive) {
        MessageAndOffset msg = itr.next();
        // advance past this message so the next fetch starts at the following offset
        offset = msg.nextOffset();
        logger.debug(
            "consumed: {} offset: {}",
            byteBufferToString(msg.message().payload()),
            offset);
        receiveCount++;
      }
    }
  }
Example #2
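A Hadoop-ETL-style reader step: it pulls the next MessageAndOffset from an iterator, copies the raw message bytes into the BytesWritable value, records the index, offset, and checksum in the key, and advances to the offset of the following message. It returns true when a message was consumed and false once the iterator is exhausted.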
  protected boolean get(KafkaETLKey key, BytesWritable value) throws IOException {
    if (_messageIt != null && _messageIt.hasNext()) {
      MessageAndOffset messageAndOffset = _messageIt.next();

      ByteBuffer buf = messageAndOffset.message().buffer();
      int origSize = buf.remaining();
      byte[] bytes = new byte[origSize];
      buf.get(bytes, 0, origSize); // copy into the destination array starting at index 0
      value.set(bytes, 0, origSize);

      key.set(_index, _offset, messageAndOffset.message().checksum());

      _offset = messageAndOffset.nextOffset(); // increase offset
      _count++; // increase count

      return true;
    } else return false;
  }
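Example #3
A spout-style fill() method: it fetches a batch from the partition's SimpleConsumer using the older FetchRequest constructor, queues each message for later emission, and keeps the position for the next fetch in _emittedToOffset.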
    /** Consume data from Kafka into the pending/emit lists. */
    private void fill() {
      SimpleConsumer consumer = _partitions.getConsumer(_partition);
      int hostPartition = _partitions.getHostPartition(_partition);
      ByteBufferMessageSet msgs =
          consumer.fetch(
              new FetchRequest(
                  _spoutConfig.topic,
                  hostPartition,
                  _emittedToOffset,
                  _spoutConfig.fetchSizeBytes));

      // LOG.info("partition fill msgs " + msgs);
      for (MessageAndOffset msg : msgs) {
        // record the offset this message was fetched at, queue it for emission,
        // then advance the fetch position for the next request
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        _emittedToOffset = msg.offset();
        // LOG.info("partition fill msg " + msg + " offset " + _emittedToOffset);
      }
    }
Example #4
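A Flume-style sink test: a JSON event is committed to the channel, the sink's process() call delivers it to Kafka, and a SimpleConsumer fetch from partition 0 of topic "test" verifies that the payload read back matches the original JSON body.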
  //    @Ignore
  @Test
  public void test() throws EventDeliveryException, UnsupportedEncodingException {
    Transaction tx = channel.getTransaction();
    tx.begin();

    ObjectNode jsonBody = new ObjectNode(JsonNodeFactory.instance);
    jsonBody.put("myString", "foo");
    jsonBody.put("myInt32", 32);

    Map<String, String> headers = new HashMap<String, String>();
    headers.put("myString", "bar");
    headers.put("myInt64", "64");
    headers.put("myBoolean", "true");
    headers.put("myDouble", "1.0");
    headers.put("myNull", "foobar");

    Event event = EventBuilder.withBody(jsonBody.toString().getBytes(Charsets.UTF_8), headers);
    channel.put(event);

    tx.commit();
    tx.close();

    kafkaSink.process();

    kafka.api.FetchRequest req =
        new FetchRequestBuilder().clientId(CLIENT_ID).addFetch("test", 0, 0L, 100).build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = fetchResponse.messageSet("test", 0);

    //        Assert.assertTrue(messageSet.sizeInBytes() > 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
      ByteBuffer payload = messageAndOffset.message().payload();
      byte[] bytes = new byte[payload.limit()];
      payload.get(bytes);
      String message = new String(bytes, "UTF-8");
      Assert.assertNotNull(message);
      Assert.assertEquals("{\"myString\":\"foo\",\"myInt32\":32}", message);
    }
  }
Example #5
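An Avro decoding hook: decodeRecord() extracts the record schema and a Decoder from the message payload, deserializes a GenericRecord, converts it to the reader's target schema, and logs and rethrows any IOException raised during decoding.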
 @Override
 protected GenericRecord decodeRecord(MessageAndOffset messageAndOffset) throws IOException {
   byte[] payload = getBytes(messageAndOffset.message().payload());
   Schema recordSchema = getRecordSchema(payload);
   Decoder decoder = getDecoder(payload);
   this.reader.get().setSchema(recordSchema);
   try {
     GenericRecord record = this.reader.get().read(null, decoder);
     record = AvroUtils.convertRecordSchema(record, this.schema.get());
     return record;
   } catch (IOException e) {
     log.error(
         String.format(
             "Error during decoding record for partition %s", this.getCurrentPartition()),
         e);
     throw e;
   }
 }
Example #6
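A complete SimpleConsumer read loop: it locates the partition leader, starts from the earliest offset, fetches in a loop, handles fetch errors by resetting out-of-range offsets or finding a new leader, and prints each message until a_maxReads messages have been consumed.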
  public void run(
      long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, int a_port)
      throws Exception {
    // find the meta data about the topic and partition we are interested in
    //
    PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
    if (metadata == null) {
      System.out.println("Can't find metadata for Topic and Partition. Exiting");
      return;
    }
    if (metadata.leader() == null) {
      System.out.println("Can't find Leader for Topic and Partition. Exiting");
      return;
    }
    String leadBroker = metadata.leader().host();
    String clientName = "Client_" + a_topic + "_" + a_partition;

    SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
    long readOffset =
        getLastOffset(
            consumer, a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (a_maxReads > 0) {
      if (consumer == null) {
        consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
      }
      FetchRequest req =
          new FetchRequestBuilder()
              .clientId(clientName)
              // Note: this fetchSize of 100000 might need to be increased
              // if large batches are written to Kafka
              .addFetch(a_topic, a_partition, readOffset, 100000)
              .build();
      FetchResponse fetchResponse = consumer.fetch(req);

      if (fetchResponse.hasError()) {
        numErrors++;
        // Something went wrong!
        short code = fetchResponse.errorCode(a_topic, a_partition);
        System.out.println(
            "Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
        if (numErrors > 5) {
          break;
        }
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
          // We asked for an invalid offset. For the simple case, reset to the latest offset.
          readOffset =
              getLastOffset(
                  consumer, a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
          continue;
        }
        consumer.close();
        consumer = null;
        leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
        continue;
      }
      numErrors = 0;

      long numRead = 0;
      for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
          System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
          continue;
        }
        readOffset = messageAndOffset.nextOffset();
        ByteBuffer payload = messageAndOffset.message().payload();

        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(
            String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
        numRead++;
        a_maxReads--;
      }

      if (numRead == 0) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
        }
      }
    }
    if (consumer != null) {
      consumer.close();
    }
  }