@Override
public void run(SourceContext<Long> ctx) throws Exception {
    final Object checkpointLock = ctx.getCheckpointLock();

    RuntimeContext context = getRuntimeContext();

    // Each parallel subtask emits an arithmetic subsequence of [start, end]:
    // the values congruent to (start + subtaskIndex) modulo the parallelism.
    final long stepSize = context.getNumberOfParallelSubtasks();
    final long congruence = start + context.getIndexOfThisSubtask();

    // Number of elements this subtask is responsible for; the first
    // (end - start + 1) % stepSize subtasks take one extra element.
    final long toCollect =
            ((end - start + 1) % stepSize > (congruence - start))
                    ? ((end - start + 1) / stepSize + 1)
                    : ((end - start + 1) / stepSize);

    while (isRunning && collected < toCollect) {
        // Emit the element and advance the checkpointed counter atomically,
        // under the checkpoint lock, so recovery neither loses nor repeats elements.
        synchronized (checkpointLock) {
            ctx.collect(collected * stepSize + congruence);
            collected++;
        }
    }
}
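For intuition, the modular arithmetic above partitions the closed range [start, end] evenly across the parallel subtasks. The following standalone sketch reproduces only that assignment logic outside of Flink; the class name, the range 0..9, and the parallelism of 4 are illustrative choices, not part of the source above.

public class SequencePartitioningDemo {
    public static void main(String[] args) {
        final long start = 0, end = 9;  // assumed range
        final long parallelism = 4;     // assumed parallelism

        for (int subtask = 0; subtask < parallelism; subtask++) {
            final long stepSize = parallelism;
            final long congruence = start + subtask;
            final long total = end - start + 1;

            // Mirrors the toCollect computation in run(): the first
            // (total % stepSize) subtasks emit one extra element.
            final long toCollect =
                    (total % stepSize > (congruence - start))
                            ? (total / stepSize + 1)
                            : (total / stepSize);

            StringBuilder emitted = new StringBuilder();
            for (long collected = 0; collected < toCollect; collected++) {
                emitted.append(collected * stepSize + congruence).append(' ');
            }
            System.out.println("subtask " + subtask + " emits: " + emitted);
        }
    }
}

With these values, subtask 0 emits 0 4 8, subtask 1 emits 1 5 9, and subtasks 2 and 3 emit 2 6 and 3 7 respectively, covering the range exactly once.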
/** Initializes the connection to Kafka. */
@Override
public void open(Configuration configuration) {
    producer = getKafkaProducer(this.producerConfig);

    RuntimeContext ctx = getRuntimeContext();

    if (partitioner != null) {
        partitioner.open(
                ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks(), partitions);
    }

    LOG.info(
            "Starting FlinkKafkaProducer ({}/{}) to produce into topic {}",
            ctx.getIndexOfThisSubtask(),
            ctx.getNumberOfParallelSubtasks(),
            defaultTopicId);

    // register Kafka metrics to Flink accumulators
    if (!Boolean.valueOf(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
        Map<MetricName, ? extends Metric> metrics = this.producer.metrics();

        if (metrics == null) {
            // MapR's Kafka implementation returns null here.
            LOG.info("Producer implementation does not support metrics");
        } else {
            for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
                String name = producerId + "-producer-" + metric.getKey().name();
                DefaultKafkaMetricAccumulator kafkaAccumulator =
                        DefaultKafkaMetricAccumulator.createFor(metric.getValue());

                // best effort: we only add the accumulator if available.
                if (kafkaAccumulator != null) {
                    getRuntimeContext().addAccumulator(name, kafkaAccumulator);
                }
            }
        }
    }

    if (flushOnCheckpoint
            && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn(
                "Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        flushOnCheckpoint = false;
    }

    if (flushOnCheckpoint) {
        pendingRecordsLock = new Object();
    }

    if (logFailuresOnly) {
        // Log-and-continue: failed sends are logged but do not fail the job.
        callback =
                new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception e) {
                        if (e != null) {
                            LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
                        }
                        acknowledgeMessage();
                    }
                };
    } else {
        // Fail-fast: remember the first asynchronous exception so the main
        // task thread can rethrow it later.
        callback =
                new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null && asyncException == null) {
                            asyncException = exception;
                        }
                        acknowledgeMessage();
                    }
                };
    }
}
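For context, here is a minimal sketch of wiring this producer into a job. It assumes the FlinkKafkaProducer09 variant of this base class and a local broker; the topic name and broker address are placeholders, and while setLogFailuresOnly and setFlushOnCheckpoint are the setters that drive the flags read in open() above, verify them against your connector version.

import java.util.Properties;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer09;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class ProducerUsageSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Without checkpointing, open() logs a warning and disables flush-on-checkpoint.
        env.enableCheckpointing(5_000);

        DataStream<String> stream = env.fromElements("a", "b", "c");

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker

        FlinkKafkaProducer09<String> producer =
                new FlinkKafkaProducer09<>("demo-topic", new SimpleStringSchema(), props);
        producer.setLogFailuresOnly(false);  // selects the fail-fast callback branch in open()
        producer.setFlushOnCheckpoint(true); // makes open() create pendingRecordsLock

        stream.addSink(producer);
        env.execute("kafka-producer-sketch");
    }
}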