Example No. 1
  /**
   * The main constructor for creating a FlinkKafkaProducer.
   *
   * @param defaultTopicId The default topic to write data to
   * @param serializationSchema A serializable serialization schema for turning user objects into a
   *     kafka-consumable byte[] supporting key/value messages
   * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is
   *     the only required property.
   * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
   *     Passing null will use Kafka's partitioner
   */
  public FlinkKafkaProducerBase(
      String defaultTopicId,
      KeyedSerializationSchema<IN> serializationSchema,
      Properties producerConfig,
      KafkaPartitioner<IN> customPartitioner) {
    requireNonNull(defaultTopicId, "TopicID not set");
    requireNonNull(serializationSchema, "serializationSchema not set");
    requireNonNull(producerConfig, "producerConfig not set");
    ClosureCleaner.ensureSerializable(customPartitioner);
    ClosureCleaner.ensureSerializable(serializationSchema);

    this.defaultTopicId = defaultTopicId;
    this.schema = serializationSchema;
    this.producerConfig = producerConfig;

    // set the producer configuration properties for the kafka record key/value serializers.

    if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
      this.producerConfig.put(
          ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName());
    } else {
      LOG.warn(
          "Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
      this.producerConfig.put(
          ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
          ByteArraySerializer.class.getCanonicalName());
    } else {
      LOG.warn(
          "Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    // create a local KafkaProducer to get the list of partitions.
    // this will also ensure locally that all required ProducerConfig values are set.
    try (Producer<Void, IN> getPartitionsProd = getKafkaProducer(this.producerConfig)) {
      List<PartitionInfo> partitionsList = getPartitionsProd.partitionsFor(defaultTopicId);

      this.partitions = new int[partitionsList.size()];
      for (int i = 0; i < partitions.length; i++) {
        partitions[i] = partitionsList.get(i).partition();
      }
    }

    this.partitioner = customPartitioner;
    this.producerId = UUID.randomUUID().toString();
  }
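A minimal configuration sketch for the constructor above, with hypothetical broker addresses: because the constructor falls back to ByteArraySerializer for both record serializers, 'bootstrap.servers' is the only property the caller has to supply.

  import java.util.Properties;

  public class ProducerConfigSketch {
    public static void main(String[] args) {
      // 'bootstrap.servers' is the only required property. The constructor
      // above fills in ByteArraySerializer for 'key.serializer' and
      // 'value.serializer' when they are absent, and logs a warning when
      // the caller overrides them.
      Properties producerConfig = new Properties();
      producerConfig.setProperty("bootstrap.servers", "broker1:9092,broker2:9092");
    }
  }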
Example No. 2
  /**
   * Create a new FlinkKinesisProducer. This constructor supports {@link
   * KinesisSerializationSchema}.
   *
   * @param schema Kinesis serialization schema for the data type
   * @param configProps The properties used to configure AWS credentials and AWS region
   */
  public FlinkKinesisProducer(KinesisSerializationSchema<OUT> schema, Properties configProps) {
    this.configProps = checkNotNull(configProps, "configProps cannot be null");

    // check the configuration properties for any conflicting settings
    KinesisConfigUtil.validateProducerConfiguration(this.configProps);

    ClosureCleaner.ensureSerializable(Objects.requireNonNull(schema));
    this.schema = schema;
  }
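A usage sketch under stated assumptions: KinesisSerializationSchema exposes serialize(...) and getTargetStream(...), returning null from the latter falls back to the producer's default stream, and 'aws.region' is one of the properties checked by validateProducerConfiguration. The stream region and class name here are hypothetical.

  import java.nio.ByteBuffer;
  import java.nio.charset.StandardCharsets;
  import java.util.Properties;

  import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisProducer;
  import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisSerializationSchema;

  public class KinesisProducerSketch {
    public static void main(String[] args) {
      // Anonymous schema: ClosureCleaner.ensureSerializable(...) in the
      // constructor verifies that this instance (and anything it captures)
      // can be serialized and shipped with the job.
      KinesisSerializationSchema<String> schema = new KinesisSerializationSchema<String>() {
        @Override
        public ByteBuffer serialize(String element) {
          return ByteBuffer.wrap(element.getBytes(StandardCharsets.UTF_8));
        }

        @Override
        public String getTargetStream(String element) {
          return null; // assumption: null falls back to the default stream
        }
      };

      Properties configProps = new Properties();
      configProps.setProperty("aws.region", "us-east-1"); // hypothetical region

      FlinkKinesisProducer<String> producer = new FlinkKinesisProducer<>(schema, configProps);
    }
  }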
Example No. 3
 public AccumulatingAlignedProcessingTimeWindowOperatorTest() {
   ClosureCleaner.clean(identitySelector, false);
   ClosureCleaner.clean(validatingIdentityFunction, false);
 }
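Why these test constructors call ClosureCleaner.clean(obj, false): anonymous inner classes keep a synthetic reference to their enclosing instance, and clean(...) nulls that reference so the function can be serialized on its own; passing false skips the follow-up serializability check. A minimal sketch of the same pattern, assuming the two-argument clean(Object, boolean) overload used above (class and field names are hypothetical):

  import org.apache.flink.api.java.ClosureCleaner;
  import org.apache.flink.api.java.functions.KeySelector;

  public class ClosureCleanerSketch {
    // Non-serializable state on the enclosing class.
    private final Object heavyState = new Object();

    public KeySelector<String, String> buildSelector() {
      // The anonymous class captures a hidden this$0 reference to the
      // enclosing ClosureCleanerSketch instance, which getKey() never uses.
      // clean(..., false) nulls that reference so the selector can be
      // serialized; 'false' skips the serializability check that
      // ensureSerializable(...) would perform.
      KeySelector<String, String> selector = new KeySelector<String, String>() {
        @Override
        public String getKey(String value) {
          return value;
        }
      };
      ClosureCleaner.clean(selector, false);
      return selector;
    }
  }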
Example No. 4
 public void setCustomPartitioner(KinesisPartitioner<OUT> partitioner) {
   Objects.requireNonNull(partitioner);
   ClosureCleaner.ensureSerializable(partitioner);
   this.customPartitioner = partitioner;
 }
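A sketch of a partitioner that would pass the checks above, assuming KinesisPartitioner is a serializable base class with a getPartitionId(T) method; the class name is hypothetical. Because setCustomPartitioner() runs ensureSerializable(...), the partitioner must not capture non-serializable state.

  import org.apache.flink.streaming.connectors.kinesis.KinesisPartitioner;

  // Derives the Kinesis partition key from the record itself; keeping the
  // class free of captured state is what lets ensureSerializable(...) pass.
  public class HashCodePartitioner<T> extends KinesisPartitioner<T> {
    @Override
    public String getPartitionId(T element) {
      return String.valueOf(element.hashCode());
    }
  }

With this sketch, the call is simply producer.setCustomPartitioner(new HashCodePartitioner<>()).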
Example No. 5
 public AggregatingAlignedProcessingTimeWindowOperatorTest() {
   ClosureCleaner.clean(fieldOneSelector, false);
   ClosureCleaner.clean(sumFunction, false);
 }