@Before
  public void setup() {
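    // Stub the Flume Context so the component under test reads these
    // JDBC-source style properties instead of a real configuration file.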

    when(context.getString("status.file.name")).thenReturn("statusFileName.txt");
    when(context.getString("connection.url")).thenReturn("jdbc:mysql://host:3306/database");
    when(context.getString("table")).thenReturn("table");
    when(context.getString("incremental.column.name")).thenReturn("incrementalColumName");
    when(context.getString("user")).thenReturn("user");
    when(context.getString("password")).thenReturn("password");
    when(context.getString("status.file.path", "/var/lib/flume")).thenReturn("/tmp/flume");
    when(context.getString("columns.to.select", "*")).thenReturn("*");
    when(context.getInteger("run.query.delay", 10000)).thenReturn(10000);
    when(context.getInteger("batch.size", 100)).thenReturn(100);
    when(context.getInteger("max.rows", 10000)).thenReturn(10000);
    when(context.getLong("incremental.value", 0L)).thenReturn(0L);
  }
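
The same stubbing can be expressed without Mockito by building a real org.apache.flume.Context and handing it to the component's configure() method; SQLSource below is a hypothetical stand-in for whichever org.apache.flume.conf.Configurable the test targets.

  Context context = new Context();
  context.put("status.file.name", "statusFileName.txt");
  context.put("connection.url", "jdbc:mysql://host:3306/database");
  context.put("table", "table");
  context.put("user", "user");
  context.put("password", "password");
  // Hypothetical component under test; any Configurable implementation works here.
  Configurable source = new SQLSource();
  source.configure(context);
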
  /** {@inheritDoc} */
  @Override
  public void configure(final Context context) {
    hostName = context.getString(HOSTNAME_CONFIG_PROP_NAME);
    port = context.getInteger(PORT_CONFIG_PROP_NAME);
    batchSize = context.getInteger(BATCH_SIZE_PROP_NAME, DEFAULT_BATCH_SIZE);

    if (sinkCounter == null) {
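      // First configuration: create the counter; a later reconfigure keeps the existing one.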
      sinkCounter = new SinkCounter(getName());
    }

    LOGGER.info(
        "Configuring ZipkinSpanCollectorSink. hostname: {}, port: {}, batchsize: {}",
        hostName,
        port,
        batchSize);
  }
  @Override
  public synchronized void configure(Context context) {
    spoolDirectory = context.getString(SPOOL_DIRECTORY);
    Preconditions.checkState(
        spoolDirectory != null, "Configuration must specify a spooling directory");

    completedSuffix = context.getString(SPOOLED_FILE_SUFFIX, DEFAULT_SPOOLED_FILE_SUFFIX);
    deletePolicy = context.getString(DELETE_POLICY, DEFAULT_DELETE_POLICY);
    fileHeader = context.getBoolean(FILENAME_HEADER, DEFAULT_FILE_HEADER);
    fileHeaderKey = context.getString(FILENAME_HEADER_KEY, DEFAULT_FILENAME_HEADER_KEY);
    basenameHeader = context.getBoolean(BASENAME_HEADER, DEFAULT_BASENAME_HEADER);
    basenameHeaderKey = context.getString(BASENAME_HEADER_KEY, DEFAULT_BASENAME_HEADER_KEY);
    batchSize = context.getInteger(BATCH_SIZE, DEFAULT_BATCH_SIZE);
    inputCharset = context.getString(INPUT_CHARSET, DEFAULT_INPUT_CHARSET);
    decodeErrorPolicy =
        DecodeErrorPolicy.valueOf(
            context
                .getString(DECODE_ERROR_POLICY, DEFAULT_DECODE_ERROR_POLICY)
                .toUpperCase(Locale.ENGLISH));

    ignorePattern = context.getString(IGNORE_PAT, DEFAULT_IGNORE_PAT);
    trackerDirPath = context.getString(TRACKER_DIR, DEFAULT_TRACKER_DIR);

    deserializerType = context.getString(DESERIALIZER, "ZipDeserializer");
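    // Keys prefixed with the deserializer name are collected into the deserializer's own Context.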
    deserializerContext = new Context(context.getSubProperties(DESERIALIZER + "."));

    consumeOrder =
        ConsumeOrder.valueOf(
            context
                .getString(CONSUME_ORDER, DEFAULT_CONSUME_ORDER.toString())
                .toUpperCase(Locale.ENGLISH));

    // "Hack" to support backwards compatibility with previous generation of
    // spooling directory source, which did not support deserializers
    Integer bufferMaxLineLength = context.getInteger(BUFFER_MAX_LINE_LENGTH);
    if (bufferMaxLineLength != null
        && deserializerType != null
        && deserializerType.equalsIgnoreCase(DEFAULT_DESERIALIZER)) {
      deserializerContext.put(LineDeserializer.MAXLINE_KEY, bufferMaxLineLength.toString());
    }

    maxBackoff = context.getInteger(MAX_BACKOFF, DEFAULT_MAX_BACKOFF);
    if (sourceCounter == null) {
      sourceCounter = new SourceCounter(getName());
    }
  }
 @Override
 public void configure(Context context) {
   /*
    * Default is to listen on UDP port 162 on all IPv4 interfaces.
    * Since 162 is a privileged port, snmptrapd must typically be run as root.
    * Or change to non-privileged port > 1024.
    */
   bindAddress = context.getString("bind", DEFAULT_BIND);
   bindPort = context.getInteger("port", DEFAULT_PORT);
 }
  @Override
  public void configure(Context context) {
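    // Give each instance a unique name by appending a shared, incrementing counter to NAME_PREFIX.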
    setName(NAME_PREFIX + counter.getAndIncrement());

    host = context.getString(HOST, DEFAULT_HOST);
    port = context.getInteger(PORT, DEFAULT_PORT);
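    // No defaults for credentials: getString(...) returns null when the property is absent.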
    username = context.getString(USERNAME);
    password = context.getString(PASSWORD);
    model = CollectionModel.valueOf(context.getString(MODEL, CollectionModel.single.name()));
    dbName = context.getString(DB_NAME, DEFAULT_DB);
    collectionName = context.getString(COLLECTION, DEFAULT_COLLECTION);
    batchSize = context.getInteger(BATCH_SIZE, DEFAULT_BATCH);
    autoWrap = context.getBoolean(AUTO_WRAP, DEFAULT_AUTO_WRAP);
    wrapField = context.getString(WRAP_FIELD, DEFAULT_WRAP_FIELD);

    logger.info(
        "MongoSink {} context { host:{}, port:{}, username:{}, password:{}, model:{}, dbName:{}, collectionName:{}, batch: {} }",
        new Object[] {
          getName(), host, port, username, password, model, dbName, collectionName, batchSize
        });
  }
 @Override
 public void configure(Context context) throws FlumeException {
   preserveExisting = context.getBoolean(PRESERVE, PRESERVE_DEFAULT);
   key = context.getString(KEY, KEY_DEFAULT);
   file = context.getString(FILE);
   period = context.getInteger(PERIOD, Integer.valueOf(PERIOD_DEFAULT));
   if (file != null) {
     value = readHeader(file);
   } else {
     logger.error("CSVHeaderInterceptor - file not specified");
     throw new FlumeException("CSVHeaderInterceptor - file not specified");
   }
 }
  @Override
  public void configure(Context context) {
    logger.info("Configuring thrift source.");
    port = context.getInteger(CONFIG_PORT);
    Preconditions.checkNotNull(port, "Port must be specified for Thrift Source.");
    bindAddress = context.getString(CONFIG_BIND);
    Preconditions.checkNotNull(
        bindAddress, "Bind address must be specified for Thrift Source.");

    try {
      maxThreads = context.getInteger(CONFIG_THREADS, 0);
    } catch (NumberFormatException e) {
      logger.warn(
          "Thrift source's \"threads\" property must specify an integer value: "
              + context.getString(CONFIG_THREADS));
    }

    if (sourceCounter == null) {
      sourceCounter = new SourceCounter(getName());
    }
  }
  @Override
  public void configure(Context context) {
    port = Integer.parseInt(context.getString("port"));
    bindAddress = context.getString("bind");
    try {
      maxThreads = context.getInteger(THREADS, 0);
    } catch (NumberFormatException e) {
      logger.warn(
          "AVRO source's \"threads\" property must specify an integer value: {}",
          context.getString(THREADS));
    }

    if (sourceCounter == null) {
      sourceCounter = new SourceCounter(getName());
    }
  }
 @Override
 public void configure(Context context) {
   batchSize = context.getInteger(Constants.BATCH_SIZE, Constants.DEFAULT_BATCH_SIZE);
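   // Pre-size the buffer so a full batch of messages fits without resizing.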
   messageList = new ArrayList<KeyedMessage<String, byte[]>>(batchSize);
   log.debug("Using batch size: {}", batchSize);
   topic = context.getString(Constants.TOPIC, Constants.DEFAULT_TOPIC);
   if (topic.equals(Constants.DEFAULT_TOPIC)) {
     log.warn(
         "The property 'topic' is not set .  Using the default topic name ["
             + Constants.DEFAULT_TOPIC
             + "]");
   } else {
     log.info(
         "Using the configured topic:[" + topic + "] this may be over-ridden by event headers");
   }
   kafkaProps = KafkaUtil.getKafkaConfig(context);
   if (log.isDebugEnabled()) {
     log.debug("Kafka producer properties : " + kafkaProps);
   }
 }