  // Hamcrest-style diagnosing matcher: succeeds when the record's value equals the
  // expected payload; on a mismatch, the offending record is appended to the description.
  @Override
  protected boolean matches(Object item, Description mismatchDescription) {
    @SuppressWarnings("unchecked")
    ConsumerRecord<Object, V> record = (ConsumerRecord<Object, V>) item;
    // payload.equals(...) rather than record.value().equals(...) avoids an NPE on
    // tombstone records whose value is null.
    boolean matches = record != null && this.payload.equals(record.value());
    if (!matches) {
      mismatchDescription.appendText("is ").appendValue(record);
    }
    return matches;
  }
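
// Hedged sketch (a standalone file, not part of the original listing): one way the matcher
// above could be packaged and used. The class name and the hasValue(...) factory are
// hypothetical; only the overridden matches(...) logic mirrors the listing.
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.hamcrest.Description;
import org.hamcrest.DiagnosingMatcher;
import org.hamcrest.Matcher;

final class ConsumerRecordValueMatcher<V> extends DiagnosingMatcher<ConsumerRecord<?, V>> {

  private final V payload;

  ConsumerRecordValueMatcher(V payload) {
    this.payload = payload;
  }

  static <V> Matcher<ConsumerRecord<?, V>> hasValue(V payload) {
    return new ConsumerRecordValueMatcher<>(payload);
  }

  @Override
  public void describeTo(Description description) {
    description.appendText("a ConsumerRecord with value ").appendValue(this.payload);
  }

  @Override
  protected boolean matches(Object item, Description mismatchDescription) {
    @SuppressWarnings("unchecked")
    ConsumerRecord<Object, V> record = (ConsumerRecord<Object, V>) item;
    boolean matches = record != null && this.payload.equals(record.value());
    if (!matches) {
      mismatchDescription.appendText("is ").appendValue(record);
    }
    return matches;
  }
}

// Usage in a test (assumes Hamcrest on the test classpath):
//   ConsumerRecord<String, String> record = new ConsumerRecord<>("topic", 0, 0L, "k", "hello");
//   org.hamcrest.MatcherAssert.assertThat(record, ConsumerRecordValueMatcher.hasValue("hello"));
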
  private static void processRecords(KafkaConsumer<String, String> consumer)
      throws InterruptedException {

    while (true) {

      ConsumerRecords<String, String> records = consumer.poll(100);
      long lastOffset = 0;

      for (ConsumerRecord<String, String> record : records) {
        System.out.printf(
            "offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        lastOffset = record.offset();
      }

      System.out.println("lastOffset read: " + lastOffset);

      process(); // application-specific business processing (defined elsewhere)

      // Committing only after the business processing has finished is what gives the
      // at-least-once delivery guarantee: records are never marked as consumed before
      // they have actually been processed.

      consumer.commitSync();
    }
  }
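
  // Hedged sketch (not in the original listing): commitSync() with no arguments commits the
  // offsets of everything returned by the last poll(). For finer-grained at-least-once
  // control, an explicit offset can be committed per partition; the committed offset is the
  // position of the *next* record to read, hence the +1.
  // Requires: java.util.Collections, java.util.List,
  //           org.apache.kafka.common.TopicPartition,
  //           org.apache.kafka.clients.consumer.OffsetAndMetadata
  private static void processAndCommitPerPartition(KafkaConsumer<String, String> consumer) {
    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(100);
      for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
        for (ConsumerRecord<String, String> record : partitionRecords) {
          System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
        }
        process(); // same application-specific processing as above
        long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
        // Commit only this partition, and only after its records have been processed.
        consumer.commitSync(
            Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
      }
    }
  }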
  @Override
  public void doWork() {
    try {
      ConsumerRecords<byte[], byte[]> records = consumer.poll(Long.MAX_VALUE);
      for (ConsumerRecord<byte[], byte[]> record : records) {
        K messageKey = null;
        try {
          messageKey = this.serializer.deserializeKey(record.key());
        } catch (SerializationException e) {
          log.error("Failed to deserialize the schema or config key", e);
          continue;
        }

        if (messageKey.equals(noopKey)) {
          // If it's a noop, update local offset counter and do nothing else
          // Standard lock idiom: acquire the lock outside the try so unlock() only
          // runs once the lock has actually been taken.
          offsetUpdateLock.lock();
          try {
            offsetInSchemasTopic = record.offset();
            offsetReachedThreshold.signalAll();
          } finally {
            offsetUpdateLock.unlock();
          }
        } else {
          V message = null;
          try {
            message =
                record.value() == null
                    ? null
                    : serializer.deserializeValue(messageKey, record.value());
          } catch (SerializationException e) {
            log.error("Failed to deserialize a schema or config update", e);
            continue;
          }
          try {
            log.trace("Applying update (" + messageKey + "," + message + ") to the local store");
            if (message == null) {
              localStore.delete(messageKey);
            } else {
              localStore.put(messageKey, message);
            }
            this.storeUpdateHandler.handleUpdate(messageKey, message);
            // Same lock idiom as above: acquire before entering the try block.
            offsetUpdateLock.lock();
            try {
              offsetInSchemasTopic = record.offset();
              offsetReachedThreshold.signalAll();
            } finally {
              offsetUpdateLock.unlock();
            }
          } catch (StoreException se) {
            log.error("Failed to add record from the Kafka topic" + topic + " the local store");
          }
        }
      }
    } catch (WakeupException we) {
      // do nothing because the thread is closing -- see shutdown()
    } catch (RecordTooLargeException rtle) {
      throw new IllegalStateException(
          "Consumer threw RecordTooLargeException. A schema has been written that "
              + "exceeds the default maximum fetch size.",
          rtle);
    } catch (RuntimeException e) {
      log.error("KafkaStoreReader thread has died for an unknown reason.");
      throw new RuntimeException(e);
    }
  }
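
  // Hedged sketch (names are illustrative, not the original implementation): the shutdown()
  // referred to in the WakeupException handler above typically just wakes the consumer up.
  // KafkaConsumer.wakeup() is the one consumer method that is safe to call from another
  // thread; it makes a blocking poll() throw WakeupException, which doWork() deliberately
  // swallows so the reader thread can exit and be closed cleanly.
  public void shutdown() {
    consumer.wakeup();
  }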