Example #1
  @Override
  public void run() {
    long offset = 0;
    while (isAlive) {
      // create a fetch request for topic "topic1", partition 1, the current offset, and a fetch
      // size of 1 MB
      FetchRequest fetchRequest =
          new FetchRequestBuilder()
              .clientId("default_client")
              .addFetch("topic1", 1, offset, 1000000)
              .build();

      // get the message set from the consumer and log each message
      ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
      Iterator<MessageAndOffset> itr = messages.iterator();

      while (itr.hasNext() && isAlive) {
        MessageAndOffset msg = itr.next();
        // advance past the consumed message; nextOffset() returns offset + 1
        offset = msg.nextOffset();
        logger.debug(
            "consumed: {} offset: {}",
            byteBufferToString(msg.message().payload()),
            offset);
        receiveCount++;
      }
    }
  }
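The run() method above references a byteBufferToString helper that is not part of the snippet. A minimal sketch, assuming the payload holds UTF-8 text (the helper body is an assumption, not the original implementation):

  // hypothetical helper: decode the remaining bytes of a payload buffer as UTF-8
  private static String byteBufferToString(ByteBuffer buffer) {
    byte[] bytes = new byte[buffer.remaining()];
    buffer.duplicate().get(bytes); // read through a duplicate so the buffer's position is untouched
    return new String(bytes, StandardCharsets.UTF_8);
  }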
Example #2

    /** Consume data from Kafka into the pending and waiting-to-emit lists. */
    private void fill() {
      SimpleConsumer consumer = _partitions.getConsumer(_partition);
      int hostPartition = _partitions.getHostPartition(_partition);
      ByteBufferMessageSet msgs =
          consumer.fetch(
              new FetchRequest(
                  _spoutConfig.topic,
                  hostPartition,
                  _emittedToOffset,
                  _spoutConfig.fetchSizeBytes));

      // LOG.info("partition fill msgs " + msgs);
      for (MessageAndOffset msg : msgs) {
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        _emittedToOffset = msg.offset();
        // LOG.info("parition fill msg " + msg + " offset" + _emittedToOffset);
      }
    }
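Note that fill() uses the older 0.7-style FetchRequest constructor rather than the FetchRequestBuilder shown above. In that API, MessageAndOffset.offset() already reports the offset to use for the next fetch, which appears to be why the loop assigns it to _emittedToOffset directly instead of incrementing it.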
Example #3
  @Test
  public void test() throws EventDeliveryException, UnsupportedEncodingException {
    Transaction tx = channel.getTransaction();
    tx.begin();

    ObjectNode jsonBody = new ObjectNode(JsonNodeFactory.instance);
    jsonBody.put("myString", "foo");
    jsonBody.put("myInt32", 32);

    Map<String, String> headers = new HashMap<String, String>();
    headers.put("myString", "bar");
    headers.put("myInt64", "64");
    headers.put("myBoolean", "true");
    headers.put("myDouble", "1.0");
    headers.put("myNull", "foobar");

    Event event = EventBuilder.withBody(jsonBody.toString().getBytes(Charsets.UTF_8), headers);
    channel.put(event);

    tx.commit();
    tx.close();

    kafkaSink.process();

    kafka.api.FetchRequest req =
        new FetchRequestBuilder().clientId(CLIENT_ID).addFetch("test", 0, 0L, 100).build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = fetchResponse.messageSet("test", 0);

    // the sink should have delivered at least one message; without this guard
    // the loop below would pass vacuously on an empty message set
    Assert.assertTrue(messageSet.sizeInBytes() > 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
      ByteBuffer payload = messageAndOffset.message().payload();
      byte[] bytes = new byte[payload.limit()];
      payload.get(bytes);
      String message = new String(bytes, "UTF-8");
      Assert.assertNotNull(message);
      Assert.assertEquals("{\"myString\":\"foo\",\"myInt32\":32}", message);
    }
  }
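The test relies on channel, kafkaSink, simpleConsumer, and CLIENT_ID fixtures that are set up outside this snippet. A minimal sketch of the consumer side, assuming a local broker on port 9092 (the host, port, timeouts, and client id are assumptions, not from the original):

  // hypothetical fixture: a SimpleConsumer pointed at the broker under test
  private static final String CLIENT_ID = "flume-test-client";
  private final SimpleConsumer simpleConsumer =
      new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, CLIENT_ID);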
Example #4

  public boolean fetchMore() throws IOException {
    if (!hasMore()) return false;

    FetchRequest fetchRequest =
        builder
            .clientId(_request.clientId())
            .addFetch(_request.getTopic(), _request.getPartition(), _offset, _bufferSize)
            .build();

    long tempTime = System.currentTimeMillis();
    _response = _consumer.fetch(fetchRequest);
    if (_response != null) {
      // wrap the single message set in a one-element iterator; this replaces the
      // double-brace ArrayList idiom, which allocates an anonymous inner class
      _respIterator =
          Collections.singletonList(
                  _response.messageSet(_request.getTopic(), _request.getPartition()))
              .iterator();
    }
    _requestTime += (System.currentTimeMillis() - tempTime);

    return true;
  }
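Wrapping the response's single ByteBufferMessageSet in a one-element iterator lets the enclosing reader drain fetches and message sets through the same iteration logic, while _requestTime accumulates the milliseconds spent in fetch calls, presumably for job-level reporting.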
Example #5
  public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

    String topic = config.topic;
    FetchRequest req =
        new FetchRequestBuilder()
            .clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs)
            .build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
      simpleConsumer = findLeaderConsumer(partition);
      if (simpleConsumer == null) {
        // no leader consumer could be found for this partition; skip this fetch
        return null;
      }
      fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
      // ConnectException and SocketTimeoutException are subclasses of IOException,
      // so a single IOException check covers them; UnresolvedAddressException is not.
      if (e instanceof IOException || e instanceof UnresolvedAddressException) {
        LOG.warn("Network error when fetching messages:", e);
        if (simpleConsumer != null) {
          String host = simpleConsumer.host();
          int port = simpleConsumer.port();
          simpleConsumer = null;
          throw new KafkaException(
              "Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(),
              e);
        }
      } else {
        throw new RuntimeException(e);
      }
    }
    if (fetchResponse == null) {
      // the fetch failed without a rethrow (no consumer to report against)
      return null;
    }
    if (fetchResponse.hasError()) {
      short code = fetchResponse.errorCode(topic, partition);
      if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
        // reset to the configured start offset; this updates only the local
        // variable, so the caller must retry the fetch to pick it up
        offset = getOffset(topic, partition, config.startOffsetTime);
      }
      if (leaderBroker != null) {
        LOG.error(
            "fetch data from kafka topic["
                + config.topic
                + "] host["
                + leaderBroker.host()
                + ":"
                + leaderBroker.port()
                + "] partition["
                + partition
                + "] error:"
                + code);
      } else {
        // leader is unknown; log the error without host details
        LOG.error(
            "fetch data from kafka topic[" + config.topic + "] partition[" + partition + "] error:" + code);
      }
      return null;
    }
    return fetchResponse.messageSet(topic, partition);
  }
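A sketch of a polling loop a caller might build around fetchMessages, assuming an instance named consumer of the enclosing class (the loop, running flag, and process(...) placeholder are illustrative, not from the original):

  long offset = 0L;
  while (running) {
    ByteBufferMessageSet msgs = consumer.fetchMessages(partition, offset);
    if (msgs == null) {
      continue; // no leader yet or fetch error; retry
    }
    for (MessageAndOffset msg : msgs) {
      process(msg.message().payload()); // process(...) is a placeholder
      offset = msg.nextOffset();        // advance past the consumed message
    }
  }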
Example #6

  public void run(
      long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, int a_port)
      throws Exception {
    // find the metadata for the topic and partition we are interested in
    PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
    if (metadata == null) {
      System.out.println("Can't find metadata for Topic and Partition. Exiting");
      return;
    }
    if (metadata.leader() == null) {
      System.out.println("Can't find Leader for Topic and Partition. Exiting");
      return;
    }
    String leadBroker = metadata.leader().host();
    String clientName = "Client_" + a_topic + "_" + a_partition;

    SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
    long readOffset =
        getLastOffset(
            consumer, a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

    int numErrors = 0;
    while (a_maxReads > 0) {
      if (consumer == null) {
        consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
      }
      FetchRequest req =
          new FetchRequestBuilder()
              .clientId(clientName)
              .addFetch(a_topic, a_partition, readOffset, 100000)
              // note: this fetch size of 100000 bytes may need to be increased
              // if large batches are written to Kafka
              .build();
      FetchResponse fetchResponse = consumer.fetch(req);

      if (fetchResponse.hasError()) {
        numErrors++;
        // Something went wrong!
        short code = fetchResponse.errorCode(a_topic, a_partition);
        System.out.println(
            "Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
        if (numErrors > 5) {
          break;
        }
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
          // We asked for an invalid offset. For simple case ask for the last element to reset
          readOffset =
              getLastOffset(
                  consumer, a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
          continue;
        }
        consumer.close();
        consumer = null;
        leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
        continue;
      }
      numErrors = 0;

      long numRead = 0;
      for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
          System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
          continue;
        }
        readOffset = messageAndOffset.nextOffset();
        ByteBuffer payload = messageAndOffset.message().payload();

        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(messageAndOffset.offset() + ": " + new String(bytes, "UTF-8"));
        numRead++;
        a_maxReads--;
      }

      if (numRead == 0) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          // restore the interrupt flag rather than swallowing the exception
          Thread.currentThread().interrupt();
        }
      }
    }
    if (consumer != null) {
      consumer.close();
    }
  }
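The run() method calls getLastOffset, findLeader, and findNewLeader helpers that are not shown in the snippet. A sketch of getLastOffset following the standard 0.8 SimpleConsumer pattern (the method body is a reconstruction under that assumption, with minimal error handling):

  public static long getLastOffset(
      SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
    // ask the broker for the offset before the given time (EarliestTime/LatestTime)
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request =
        new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      System.out.println(
          "Error fetching offset data. Reason: " + response.errorCode(topic, partition));
      return 0;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
  }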