Example #1
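A lazily computed accessor: the topic's partition count is looked up once, cached, and reported as the origin's parallelism.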
 // This API is used by ClusterKafkaSource.
 public int getParallelism() throws IOException {
   if (originParallelism == 0) {
     // Origin parallelism has not been computed yet; look up the topic's
     // partition count once and cache it.
     originParallelism = KafkaUtil.getPartitionCount(metadataBrokerList, topic, 3, 1000);
   }
   return originParallelism;
 }
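
For reference, here is a minimal sketch of how a cluster-mode caller might consume this accessor to size its readers to the topic's partition count. The ParallelismProbe class, the KafkaSource type name, and the error handling are illustrative assumptions, not part of the original source.

 import java.io.IOException;

 // Hypothetical caller: one reader per Kafka partition.
 public final class ParallelismProbe {
   private final KafkaSource source;

   public ParallelismProbe(KafkaSource source) {
     this.source = source;
   }

   public int readerCount() {
     try {
       // The source caches the partition count after the first successful lookup.
       return source.getParallelism();
     } catch (IOException e) {
       throw new IllegalStateException("Unable to determine Kafka partition count", e);
     }
   }
 }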
Example #2
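A stage initializer that validates the Kafka configuration and accumulates every problem as a ConfigIssue instead of failing on the first error.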
  @Override
  protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = new ArrayList<>();
    if (topic == null || topic.isEmpty()) {
      issues.add(
          getContext().createConfigIssue(Groups.KAFKA.name(), "topic", KafkaErrors.KAFKA_05));
    }
    // maxWaitTime must be at least 1
    if (maxWaitTime < 1) {
      issues.add(
          getContext().createConfigIssue(Groups.KAFKA.name(), "maxWaitTime", KafkaErrors.KAFKA_35));
    }

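    // Each data format contributes its own validation rules below; formats with
    // no extra constraints (SDC_JSON, BINARY) intentionally fall through empty.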
    switch (dataFormat) {
      case JSON:
        if (jsonMaxObjectLen < 1) {
          issues.add(
              getContext()
                  .createConfigIssue(Groups.JSON.name(), "maxJsonObjectLen", KafkaErrors.KAFKA_38));
        }
        break;
      case TEXT:
        if (textMaxLineLen < 1) {
          issues.add(
              getContext()
                  .createConfigIssue(Groups.TEXT.name(), "maxLogLineLength", KafkaErrors.KAFKA_38));
        }
        break;
      case DELIMITED:
        if (csvMaxObjectLen < 1) {
          issues.add(
              getContext()
                  .createConfigIssue(
                      Groups.DELIMITED.name(), "csvMaxObjectLen", KafkaErrors.KAFKA_38));
        }
        break;
      case XML:
        if (produceSingleRecordPerMessage) {
          issues.add(
              getContext()
                  .createConfigIssue(
                      Groups.KAFKA.name(), "produceSingleRecordPerMessage", KafkaErrors.KAFKA_40));
        }
        if (xmlMaxObjectLen < 1) {
          issues.add(
              getContext()
                  .createConfigIssue(Groups.XML.name(), "maxXmlObjectLen", KafkaErrors.KAFKA_38));
        }
        if (xmlRecordElement != null
            && !xmlRecordElement.isEmpty()
            && !XMLChar.isValidName(xmlRecordElement)) {
          issues.add(
              getContext()
                  .createConfigIssue(
                      Groups.XML.name(),
                      "xmlRecordElement",
                      KafkaErrors.KAFKA_36,
                      xmlRecordElement));
        }
        break;
      case SDC_JSON:
      case BINARY:
        break;
      case LOG:
        logDataFormatValidator =
            new LogDataFormatValidator(
                logMode,
                logMaxObjectLen,
                logRetainOriginalLine,
                customLogFormat,
                regex,
                grokPatternDefinition,
                grokPattern,
                enableLog4jCustomLogFormat,
                log4jCustomLogFormat,
                onParseError,
                maxStackTraceLines,
                Groups.LOG.name(),
                getFieldPathToGroupMap(fieldPathsToGroupName));
        logDataFormatValidator.validateLogFormatConfig(issues, getContext());
        break;
      case AVRO:
        if (!messageHasSchema && (avroSchema == null || avroSchema.isEmpty())) {
          issues.add(
              getContext()
                  .createConfigIssue(
                      Groups.AVRO.name(), "avroSchema", KafkaErrors.KAFKA_43, avroSchema));
        }
        break;
      default:
        issues.add(
            getContext()
                .createConfigIssue(
                    Groups.KAFKA.name(), "dataFormat", KafkaErrors.KAFKA_39, dataFormat));
    }

    validateParserFactoryConfigs(issues);

    // Validate broker config
    try {
      int partitionCount = KafkaUtil.getPartitionCount(metadataBrokerList, topic, 3, 1000);
      if (partitionCount < 1) {
        issues.add(
            getContext()
                .createConfigIssue(Groups.KAFKA.name(), "topic", KafkaErrors.KAFKA_42, topic));
      } else {
        // Cache the partition count as the parallelism for later calls to getParallelism()
        originParallelism = partitionCount;
      }
    } catch (IOException e) {
      issues.add(
          getContext()
              .createConfigIssue(
                  Groups.KAFKA.name(), "topic", KafkaErrors.KAFKA_41, topic, e.toString(), e));
    }

    // Validate ZooKeeper config
    List<KafkaBroker> kafkaBrokers =
        KafkaUtil.validateZkConnectionString(
            issues, zookeeperConnect, Groups.KAFKA.name(), "zookeeperConnect", getContext());

    // Validate that we can connect to Kafka
    if (kafkaBrokers != null && !kafkaBrokers.isEmpty() && topic != null && !topic.isEmpty()) {
      kafkaConsumer =
          new KafkaConsumer(
              zookeeperConnect,
              topic,
              consumerGroup,
              maxBatchSize,
              maxWaitTime,
              kafkaConsumerConfigs,
              getContext());
      kafkaConsumer.validate(issues, getContext());
    }

    // consumerGroup is required
    if (consumerGroup == null || consumerGroup.isEmpty()) {
      issues.add(
          getContext()
              .createConfigIssue(Groups.KAFKA.name(), "consumerGroup", KafkaErrors.KAFKA_33));
    }
    return issues;
  }
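
The method follows the Data Collector convention of accumulating every problem into the returned list instead of throwing on the first failure, so a single validation pass surfaces all configuration errors at once. A minimal sketch of a harness driving that contract follows; it assumes a same-package helper class (init() is protected), a no-arg constructor, and a visible ConfigIssue type, all of which are illustrative assumptions. In practice the framework itself invokes init() during pipeline validation.

  import java.util.List;

  // Hypothetical same-package harness: an empty issue list means the stage
  // is safe to start; otherwise every problem is reported in one pass.
  public final class KafkaSourceValidationCheck {
    public static void main(String[] args) {
      KafkaSource source = new KafkaSource(); // assumes a no-arg constructor
      List<ConfigIssue> issues = source.init();
      for (ConfigIssue issue : issues) {
        System.err.println("Configuration error: " + issue);
      }
      if (!issues.isEmpty()) {
        throw new IllegalStateException(issues.size() + " configuration issue(s) found");
      }
    }
  }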