/** * The main constructor for creating a FlinkKafkaProducer. * * @param defaultTopicId The default topic to write data to * @param serializationSchema A serializable serialization schema for turning user objects into a * kafka-consumable byte[] supporting key/value messages * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is * the only required argument. * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. * Passing null will use Kafka's partitioner */ public FlinkKafkaProducerBase( String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, KafkaPartitioner<IN> customPartitioner) { requireNonNull(defaultTopicId, "TopicID not set"); requireNonNull(serializationSchema, "serializationSchema not set"); requireNonNull(producerConfig, "producerConfig not set"); ClosureCleaner.ensureSerializable(customPartitioner); ClosureCleaner.ensureSerializable(serializationSchema); this.defaultTopicId = defaultTopicId; this.schema = serializationSchema; this.producerConfig = producerConfig; // set the producer configuration properties. if (!producerConfig.contains(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) { this.producerConfig.put( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName()); } else { LOG.warn( "Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); } if (!producerConfig.contains(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) { this.producerConfig.put( ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName()); } else { LOG.warn( "Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); } // create a local KafkaProducer to get the list of partitions. // this will also ensure locally that all required ProducerConfig values are set. 
try (Producer<Void, IN> getPartitionsProd = getKafkaProducer(this.producerConfig)) { List<PartitionInfo> partitionsList = getPartitionsProd.partitionsFor(defaultTopicId); this.partitions = new int[partitionsList.size()]; for (int i = 0; i < partitions.length; i++) { partitions[i] = partitionsList.get(i).partition(); } getPartitionsProd.close(); } this.partitioner = customPartitioner; this.producerId = UUID.randomUUID().toString(); }
/**
 * Publishes an event to the "events" topic and blocks until the broker acknowledges it.
 *
 * @param event the serialized event payload
 * @param id the record key (drives Kafka's partition assignment for keyed records)
 * @throws ExecutionException if the send fails on the broker side
 * @throws InterruptedException if the calling thread is interrupted while awaiting the ack
 */
public void publish(byte[] event, String id) throws ExecutionException, InterruptedException {
    logger.debug("Send message");
    // send() is asynchronous; get() blocks until the record metadata is available.
    RecordMetadata metadata =
            kafkaProducer.send(new ProducerRecord<String, byte[]>("events", id, event)).get();
    // Fixed: diagnostics went to System.out; route them through the existing logger
    // with parameterized messages instead of stdout/string concatenation.
    logger.debug("Message produced, offset: {}", metadata.offset());
    logger.debug("Message produced, partition : {}", metadata.partition());
    logger.debug("Message produced, topic: {}", metadata.topic());
}
private static void test(Producer<String, String> producer) { for (int i = testNumber; i > 0; i--) { // partion 随机 // ProducerRecord record = new ProducerRecord<String, String>(TopicOne.TOPIC, i%2, // i+"", i+""); ProducerRecord record = new ProducerRecord<String, String>(TopicOne.TOPIC, 0, i + "", i + ""); producer.send(record); } }
/**
 * Twitter stream callback: parses the incoming status into a {@code Tweet} and
 * forwards it to Kafka on the configured topic (no record key).
 *
 * @param status the raw status received from the Twitter stream
 */
public void onStatus(Status status) {
    Tweet parsedTweet = TwitterUtils.parseTweet(status);
    producer.send(new ProducerRecord<String, Tweet>(topic, parsedTweet));
}