Example #1
  @Override
  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    if (_local_drpc_id == null) {
      _backround = Executors.newCachedThreadPool();
      _futures = new LinkedList<Future<Void>>();

      int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
      int index = context.getThisTaskIndex();

      int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
      List<String> servers = (List<String>) conf.get(Config.DRPC_SERVERS);
      if (servers == null || servers.isEmpty()) {
        throw new RuntimeException("No DRPC servers configured for topology");
      }

      if (numTasks < servers.size()) {
        for (String s : servers) {
          _futures.add(_backround.submit(new Adder(s, port, conf)));
        }
      } else {
        int i = index % servers.size();
        _futures.add(_backround.submit(new Adder(servers.get(i), port, conf)));
      }
    }
  }
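
The interesting part of Example #1 is the branch at the end: when there are fewer spout tasks than DRPC servers, every task polls all servers; otherwise each task picks exactly one server via `index % servers.size()`. A minimal, self-contained sketch of that modulo assignment (class name and values are invented for illustration):

import java.util.Arrays;
import java.util.List;

public class DrpcAssignmentDemo {
  public static void main(String[] args) {
    List<String> servers = Arrays.asList("drpc-a", "drpc-b", "drpc-c");
    int numTasks = 5; // more tasks than servers, so each task polls one server
    for (int index = 0; index < numTasks; index++) {
      // same rule as the snippet above: task index modulo server count
      System.out.println("task " + index + " -> " + servers.get(index % servers.size()));
    }
  }
}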
Example #2
  @Override
  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    Map stateConf = new HashMap(conf);

    List<String> zkServers = _spoutConfig.zkServers;
    if (zkServers == null) zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);

    Integer zkPort = _spoutConfig.zkPort;
    if (zkPort == null) zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();

    String zkRoot = _spoutConfig.zkRoot;

    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, zkServers);
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, zkPort);
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, zkRoot);

    Config componentConf = new Config();
    componentConf.registerSerialization(ZooMeta.class);

    // using TransactionalState like this is a hack
    _state = TransactionalState.newUserState(stateConf, _spoutConfig.id, componentConf);
    _partitions = new KafkaPartitionConnections(_spoutConfig);

    int totalPartitions = _spoutConfig.partitionsPerHost * _spoutConfig.hosts.size();
    int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
    for (int p = context.getThisTaskIndex(); p < totalPartitions; p += numTasks) {
      _managedPartitions.add(p);
      _managers.put(p, new PartitionManager(p));
    }
  }
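
The final loop of Example #2 is a round-robin partition assignment: task i manages partitions i, i + numTasks, i + 2 * numTasks, and so on, so every partition gets exactly one owner. A small sketch that prints the resulting ownership (the counts here are made up):

public class PartitionAssignmentDemo {
  public static void main(String[] args) {
    int totalPartitions = 8;
    int numTasks = 3;
    for (int taskIndex = 0; taskIndex < numTasks; taskIndex++) {
      StringBuilder owned = new StringBuilder();
      // same stride as the snippet: p = taskIndex; p < totalPartitions; p += numTasks
      for (int p = taskIndex; p < totalPartitions; p += numTasks) {
        owned.append(p).append(' ');
      }
      System.out.println("task " + taskIndex + " -> " + owned.toString().trim());
    }
  }
}

Output: task 0 owns 0 3 6, task 1 owns 1 4 7, task 2 owns 2 5.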
Example #3
  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    _fieldLocations = new HashMap<String, GlobalStreamId>();
    _collector = collector;
    int timeout = ((Number) conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)).intValue();
    _pending =
        new TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>>(timeout, new ExpireCallback());
    _numSources = context.getThisSources().size();
    Set<String> idFields = null;
    for (GlobalStreamId source : context.getThisSources().keySet()) {
      Fields fields =
          context.getComponentOutputFields(source.get_componentId(), source.get_streamId());
      Set<String> setFields = new HashSet<String>(fields.toList());
      if (idFields == null) idFields = setFields;
      else idFields.retainAll(setFields);

      for (String outfield : _outFields) {
        for (String sourcefield : fields) {
          if (outfield.equals(sourcefield)) {
            _fieldLocations.put(outfield, source);
          }
        }
      }
    }
    _idFields = new Fields(new ArrayList<String>(idFields));

    if (_fieldLocations.size() != _outFields.size()) {
      throw new RuntimeException("Cannot find all outfields among sources");
    }
  }
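
Example #3 derives its join key by intersecting the output fields of all input streams with `retainAll`, so only fields emitted by every source survive as `_idFields`. A self-contained sketch of that intersection (field names invented):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class JoinKeyDemo {
  public static void main(String[] args) {
    List<Set<String>> sourceFields = Arrays.<Set<String>>asList(
        new HashSet<>(Arrays.asList("id", "name", "age")),
        new HashSet<>(Arrays.asList("id", "city")));
    Set<String> idFields = null;
    for (Set<String> fields : sourceFields) {
      if (idFields == null) idFields = new HashSet<>(fields);
      else idFields.retainAll(fields); // keep only fields common to all sources
    }
    System.out.println(idFields); // prints [id]
  }
}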
Example #4
  @Override
  public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;

    tpsCounter = new TpsCounter(context.getThisComponentId() + ":" + context.getThisTaskId());

    LOG.info("Finished preparation");
  }
Example #5
  @Override
  public final void run(final SourceContext<OUT> ctx) throws Exception {
    final GlobalJobParameters config =
        super.getRuntimeContext().getExecutionConfig().getGlobalJobParameters();
    StormConfig stormConfig = new StormConfig();

    if (config != null) {
      if (config instanceof StormConfig) {
        stormConfig = (StormConfig) config;
      } else {
        stormConfig.putAll(config.toMap());
      }
    }

    final TopologyContext stormTopologyContext =
        WrapperSetupHelper.createTopologyContext(
            (StreamingRuntimeContext) super.getRuntimeContext(),
            this.spout,
            this.name,
            this.stormTopology,
            stormConfig);

    SpoutCollector<OUT> collector =
        new SpoutCollector<OUT>(this.numberOfAttributes, stormTopologyContext.getThisTaskId(), ctx);

    this.spout.open(stormConfig, stormTopologyContext, new SpoutOutputCollector(collector));
    this.spout.activate();

    if (numberOfInvocations == null) {
      if (this.spout instanceof FiniteSpout) {
        final FiniteSpout finiteSpout = (FiniteSpout) this.spout;

        while (this.isRunning && !finiteSpout.reachedEnd()) {
          finiteSpout.nextTuple();
        }
      } else {
        while (this.isRunning) {
          this.spout.nextTuple();
        }
      }
    } else {
      int counter = this.numberOfInvocations;
      if (counter >= 0) {
        while ((--counter >= 0) && this.isRunning) {
          this.spout.nextTuple();
        }
      } else {
        do {
          collector.tupleEmitted = false;
          this.spout.nextTuple();
        } while (collector.tupleEmitted && this.isRunning);
      }
    }
  }
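
Example #5 runs the wrapped spout in one of three modes: unbounded (with a fast path for FiniteSpout), a fixed number of nextTuple() calls when numberOfInvocations is non-negative, and, for negative values, "run until a call emits nothing", detected through the collector's tupleEmitted flag. A stripped-down sketch of that last mode (the spout and collector types here are invented stand-ins, not the Flink wrapper classes):

public class EmitDrivenLoopDemo {
  static class DemoCollector { boolean tupleEmitted; }
  static class DemoSpout {
    int remaining = 3;
    void nextTuple(DemoCollector c) { c.tupleEmitted = remaining-- > 0; }
  }

  public static void main(String[] args) {
    DemoCollector collector = new DemoCollector();
    DemoSpout spout = new DemoSpout();
    int calls = 0;
    do {
      collector.tupleEmitted = false; // reset, then see whether the call emits
      spout.nextTuple(collector);
      calls++;
    } while (collector.tupleEmitted);
    System.out.println("nextTuple() called " + calls + " times"); // 4: three emits plus one empty call
  }
}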
Example #6
 public String topologyStatus() {
   try {
     if (localCluster != null) {
       return localCluster.getTopologyInfo(topologyContext.getStormId()).get_status();
     } else {
       NimbusClient client = NimbusClient.getConfiguredClient(stormConf);
       return client.getClient().getTopologyInfo(topologyContext.getStormId()).get_status();
     }
   } catch (Exception e) {
     System.out.println("topologyStatus() exception: " + e);
     return null;
   }
 }
Example #7
  private void handleMetrics(ShellMsg shellMsg) {
    // get metric name
    String name = shellMsg.getMetricName();
    if (name.isEmpty()) {
      throw new RuntimeException("Receive Metrics name is empty");
    }

    // get metric by name
    IMetric iMetric = _context.getRegisteredMetricByName(name);
    if (iMetric == null) {
      throw new RuntimeException("Could not find metric by name[" + name + "] ");
    }
    if (!(iMetric instanceof IShellMetric)) {
      throw new RuntimeException("Metric[" + name + "] is not IShellMetric, can not call by RPC");
    }
    IShellMetric iShellMetric = (IShellMetric) iMetric;

    // call updateMetricFromRPC with params
    Object paramsObj = shellMsg.getMetricParams();
    try {
      iShellMetric.updateMetricFromRPC(paramsObj);
    } catch (RuntimeException re) {
      throw re;
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
Example #8
  @Override
  public void prepare(
      @SuppressWarnings("rawtypes") Map stormConf,
      TopologyContext context,
      OutputCollector collector) {
    this.stormConf = stormConf;
    this.context = context;
    this.collector = collector;
    totalShards =
        context.getRawTopology().get_bolts().get("search").get_common().get_parallelism_hint();
    TimerTask t =
        new TimerTask() {
          @Override
          public void run() {
            ArrayList<Merger> mergers;

            synchronized (inCourse) {
              mergers = new ArrayList<MergeBolt.Merger>(inCourse.values());
            }

            for (Merger merger : mergers) {
              if (merger.getAge() > 1000) finish(merger);
            }
          }
        };
    Timer timer = new Timer();
    timer.scheduleAtFixedRate(t, 1000, 1000);
  }
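
Example #8's timer task copies the map's values while holding the lock and then calls finish(...) on the copy, so the expensive work runs off-lock. The same copy-then-iterate pattern with the more modern ScheduledExecutorService might look like this (a sketch; the names and the 1000 ms age threshold are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class CopyThenIterateDemo {
  private final Map<String, Long> inCourse = new HashMap<>();
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();

  void start() {
    scheduler.scheduleAtFixedRate(() -> {
      List<Long> snapshot;
      synchronized (inCourse) { // hold the lock only long enough to copy
        snapshot = new ArrayList<>(inCourse.values());
      }
      for (Long startedAt : snapshot) { // expensive work happens off-lock
        if (System.currentTimeMillis() - startedAt > 1000) {
          // finish(...) would go here
        }
      }
    }, 1, 1, TimeUnit.SECONDS);
  }

  public static void main(String[] args) throws InterruptedException {
    CopyThenIterateDemo demo = new CopyThenIterateDemo();
    demo.inCourse.put("merge-1", System.currentTimeMillis());
    demo.start();
    Thread.sleep(3000);
    demo.scheduler.shutdown();
  }
}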
Example #9
 public static void createBoltIdentifyingFiles(TopologyContext topologyContext) {
   String componentName = topologyContext.getThisComponentId();
   Long ts = System.currentTimeMillis();
   String fileName = "bolt-" + ts + "-" + componentName + ".log";
   File file =
       new File(
           in.dream_lab.stream.eventgen.utils.GlobalConstants.defaultBoltDirectory + fileName);
   try {
     FileWriter fw = new FileWriter(file);
     BufferedWriter bw = new BufferedWriter(fw);
     String rowString =
         InetAddress.getLocalHost().getHostName()
             + ","
             + Thread.currentThread().getName()
             + ","
             + componentName
             + ","
             + ts;
     bw.write(rowString);
     bw.flush();
     bw.close();
   } catch (IOException e) {
     // UnknownHostException is an IOException, so a single handler covers both;
     // the identifying file is best-effort, so just log and continue
     e.printStackTrace();
   }
   }
 }
Example #10
  public Number launch(Map conf, TopologyContext context) throws IOException {
    ProcessBuilder builder = new ProcessBuilder(command);
    builder.directory(new File(context.getCodeDir()));
    _subprocess = builder.start();

    processIn = new DataOutputStream(_subprocess.getOutputStream());
    processOut = new BufferedReader(new InputStreamReader(_subprocess.getInputStream()));
    processErrorStream = _subprocess.getErrorStream();

    JSONObject setupInfo = new JSONObject();
    setupInfo.put("pidDir", context.getPIDDir());
    setupInfo.put("conf", conf);
    setupInfo.put("context", context);
    writeMessage(setupInfo);

    return (Number) readMessage().get("pid");
  }
Example #11
  @Override
  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this._collector = collector;

    // initialize ZooKeeper connection settings (servers and port)
    Map stateConf = new HashMap(conf);

    List<String> zkServers = this._configParser.zkServers;
    // System.out.println("$$$$$$$$$$$$$$$$$$$$$$" + zkServers.toString());
    if (zkServers == null) {
      zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
    }
    Integer zkPort = this._configParser.zkPort;
    if (zkPort == null) {
      // go through Number.intValue(): the config value may be an Integer or a Long
      zkPort = ((Number) conf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
    }
    String zkRoot = this._configParser.zkRoot;
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, zkServers);
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, zkPort);
    stateConf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, zkRoot);
    // init zk operator object
    _zkState = new ZkState(stateConf);

    // init kafka consumer connection
    _connections = new DynamicPartitionConnections(this._configParser);

    // using Transactions
    int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
    // System.out.println("!!!!!!!!!!!!!!!" + totalTasks);
    this._pmKeeper =
        new DynamicPartitionManagerKeeper(
            _connections,
            _zkState,
            this._configParser,
            stateConf,
            context.getThisTaskId(),
            totalTasks,
            CommonUtils.getUUID());
  }
Example #12
  public Number connect(Map conf, TopologyContext context) throws IOException, NoOutputException {
    JSONObject setupInfo = new JSONObject();
    setupInfo.put("pidDir", context.getPIDDir());
    setupInfo.put("conf", conf);
    setupInfo.put("context", context);
    writeMessage(setupInfo);

    Number pid = (Number) ((JSONObject) readMessage()).get("pid");
    return pid;
  }
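
Examples #10 and #12 perform the same multilang handshake: the parent writes a JSON setup message (PID directory, configuration, context) and the subprocess answers with its PID so Storm can manage it later. Assuming the JSONObject here is json-simple's (which is what Storm's multilang support uses), building such a message looks like this (values invented):

import org.json.simple.JSONObject;

public class SetupMessageDemo {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    JSONObject setupInfo = new JSONObject();
    setupInfo.put("pidDir", "/tmp/worker-pids"); // hypothetical path
    setupInfo.put("conf", new JSONObject());
    System.out.println(setupInfo.toJSONString());
  }
}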
Example #13
  @Override
  public void open(
      @SuppressWarnings("rawtypes") Map stormConf,
      TopologyContext context,
      SpoutOutputCollector collector) {
    LOG.info(
        "open({}[{}]) TaskId: {}, ThisComponetTasks: {}, ThisWorkerTasks: {}",
        context.getThisComponentId(),
        context.getThisTaskIndex(),
        context.getThisTaskId(),
        context.getComponentTasks(context.getThisComponentId()),
        context.getThisWorkerTasks());

    @SuppressWarnings("unchecked")
    GungnirConfig config = GungnirConfig.wrap((Map<String, Object>) stormConf.get(GUNGNIR_CONFIG));

    topologyContext = context;

    if (config.getBoolean(TOPOLOGY_METRICS_ENABLED) && metricsMap != null) {
      for (Map.Entry<String, Metrics> entry : metricsMap.entrySet()) {
        topologyContext.registerMetric(
            entry.getKey(), entry.getValue(), config.getInteger(TOPOLOGY_METRICS_INTERVAL_SECS));
      }
    }

    this.context.setComponent(this);

    incomingOperator.doPrepare(config, this.context);

    for (PartitionOperator partitionOperator : outgoingOperators) {
      SpoutDispatcher spoutDispatcher = new SpoutDispatcher();
      spoutDispatcher.collector = collector;

      Dispatcher dispatcher = partitionOperator.getDispatcher();
      if (dispatcher == null) {
        partitionOperator.setDispatcher(spoutDispatcher);
      } else if (dispatcher instanceof MultiDispatcher) {
        for (Dispatcher d : ((MultiDispatcher) dispatcher).getDispatchers()) {
          if (d instanceof FilterDispatcher) {
            ((FilterDispatcher) d).setDispatcher(spoutDispatcher);
          }
        }
      } else if (dispatcher instanceof FilterDispatcher) {
        ((FilterDispatcher) dispatcher).setDispatcher(spoutDispatcher);
      }
    }

    // TODO: parallelization
    // snapshotTimer = new SnapshotTimer(config.getInteger(COMPONENT_SNAPSHOT_QUEUE_SIZE),
    //     config.getInteger(COMPONENT_SNAPSHOT_PARALLELISM));
    snapshotTimer = new SnapshotTimer(getName() + "_" + topologyContext.getThisTaskIndex());
  }
Example #14
  @Override
  public void prepare(@SuppressWarnings("rawtypes") Map stormConf, TopologyContext context) {

    this.thisTaskId = context.getThisTaskId();
    this.recorder = Recorder.getInstance(stormConf);

    this.recordMessages = true;
    // only record messages that are not from a "source" component:
    // this.recordMessages = !context.getThisComponentId().toLowerCase().contains("source");

  }
Example #15
 @Override
 public void internalPrepare(
     OutputCollector collector,
     IMetadataChangeNotifyService coordinatorService,
     Config config,
     TopologyContext context) {
   coordinatorService.registerListener(this);
   coordinatorService.init(config, MetadataType.ALERT_PUBLISH_BOLT);
   this.alertPublisher.init(config, stormConf);
   streamContext =
       new StreamContextImpl(
           config, context.registerMetric("eagle.publisher", new MultiCountMetric(), 60), context);
 }
Example #16
  /**
   * This method prepares the configurator bolt by initializing all the appropriate fields and
   * obtaining awareness over the rest of the topology.
   */
  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.context = context;
    taskId = context.getThisTaskId();
    changed = changed1 = false;
    commandHistory = new int[10];
    lvl0 = new ArrayList<>();
    lvl1 = new ArrayList<>();
    state = new ConfiguratorStateKeeper();

    Map<Integer, String> map = context.getTaskToComponent();
    for (Map.Entry<Integer, String> entry : map.entrySet()) {
      if (entry.getValue().equals("node_0_lvl_0")) {
        lvl0.add(entry.getKey());
      }
      if (entry.getValue().equals("node_0_lvl_1")) {
        lvl1.add(entry.getKey());
      }
    }
    all = new ArrayList<>();
    all.addAll(lvl0);
    all.addAll(lvl1);
    intialConfig();
  }
Example #17
  @Override
  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    _rand = new Random();
    if (pwEnable) {
      try {
        String completeFileName =
            path + "-Spout-" + String.valueOf(context.getThisTaskId()) + ".txt";
        pw = ReadWriteGod.writer(completeFileName, true);
        pw.write("Start Time: " + String.valueOf(System.currentTimeMillis()) + "\n");

      } catch (IOException e) {
        // opening the log file is best-effort; report the failure and keep running
        e.printStackTrace();
      }
      }
    }
  }
Example #18
  @SuppressWarnings({"rawtypes", "unchecked"})
  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;

    eventCounter =
        context.registerMetric(this.getClass().getSimpleName(), new MultiCountMetric(), 10);

    parseFilters = ParseFilters.emptyParseFilter;

    String parseconfigfile =
        ConfUtils.getString(conf, "parsefilters.config.file", "parsefilters.json");
    if (parseconfigfile != null) {
      try {
        parseFilters = new ParseFilters(conf, parseconfigfile);
      } catch (IOException e) {
        LOG.error("Exception caught while loading the ParseFilters");
        throw new RuntimeException("Exception caught while loading the ParseFilters", e);
      }
    }

    urlFilters = URLFilters.emptyURLFilters;
    emitOutlinks = ConfUtils.getBoolean(conf, "parser.emitOutlinks", true);

    if (emitOutlinks) {
      String urlconfigfile = ConfUtils.getString(conf, "urlfilters.config.file", "urlfilters.json");

      if (urlconfigfile != null) {
        try {
          urlFilters = new URLFilters(conf, urlconfigfile);
        } catch (IOException e) {
          LOG.error("Exception caught while loading the URLFilters");
          throw new RuntimeException("Exception caught while loading the URLFilters", e);
        }
      }
    }

    trackAnchors = ConfUtils.getBoolean(conf, "track.anchors", true);

    robots_noFollow_strict = ConfUtils.getBoolean(conf, RobotsTags.ROBOTS_NO_FOLLOW_STRICT, true);

    metadataTransfer = MetadataTransfer.getInstance(conf);
  }
Example #19
  public void initialize(
      String topologyName,
      Map stormConf,
      TopologyContext topologyContext,
      Map<String, Integer> startingParallelism) {
    this.topologyName = topologyName;
    this.stormConf = stormConf;
    this.topologyContext = topologyContext;
    this.startingParallelism = startingParallelism;

    newThroughputs = new ArrayList<Double>();
    newThroughput = 0;
    updateCounter = 0;

    // load previous data from zookeeper
    basePath = "/feedback/" + topologyContext.getStormId();
    zookeeper =
        Utils.newCurator(
            stormConf,
            (List<String>) stormConf.get(Config.STORM_ZOOKEEPER_SERVERS),
            stormConf.get(Config.STORM_ZOOKEEPER_PORT),
            (String) stormConf.get(Config.STORM_ZOOKEEPER_ROOT),
            new ZookeeperAuthInfo(stormConf));
    zookeeper.start();
    load();

    if (parallelism == null) {
      parallelism = startingParallelism;
    }

    if (iteration == null) {
      iteration = 0;
    }

    LOG.info("zookeeper path: " + basePath);
    LOG.info("parallelism rebalance " + System.currentTimeMillis());
    for (String c : parallelism.keySet()) {
      LOG.info("parallelism " + c + " " + parallelism.get(c));
    }
  }
Example #20
  public BoltCollector(
      int message_timeout_secs,
      ITaskReportErr report_error,
      TaskSendTargets _send_fn,
      Map _storm_conf,
      TaskTransfer _transfer_fn,
      TopologyContext _topology_context,
      Integer task_id,
      RotatingMap<Tuple, Long> tuple_start_times,
      CommonStatsRolling _task_stats) {

    this.rotateTime = 1000L * message_timeout_secs / (Acker.TIMEOUT_BUCKET_NUM - 1);
    this.reportError = report_error;
    this.sendTargets = _send_fn;
    this.storm_conf = _storm_conf;
    this.taskTransfer = _transfer_fn;
    this.topologyContext = _topology_context;
    this.task_id = task_id;
    this.task_stats = _task_stats;

    this.pending_acks = new RotatingMap<Tuple, Long>(Acker.TIMEOUT_BUCKET_NUM);
    // this.pending_acks = new TimeCacheMap<Tuple,
    // Long>(message_timeout_secs,
    // Acker.TIMEOUT_BUCKET_NUM);
    this.tuple_start_times = tuple_start_times;

    this.ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));

    String componentId = topologyContext.getThisComponentId();
    timer =
        Metrics.registerTimer(
            JStormServerUtils.getName(componentId, task_id),
            MetricDef.EMIT_TIME,
            String.valueOf(task_id),
            Metrics.MetricType.TASK);
    random = new Random();
    random.setSeed(System.currentTimeMillis());
  }
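
A detail worth noting in Example #20: rotateTime spreads the message timeout over the rotating map's buckets, so a pending tuple only expires after surviving a full rotation. Assuming Acker.TIMEOUT_BUCKET_NUM is 3 (its usual value), a 30-second timeout rotates every 15 seconds:

public class RotateTimeDemo {
  public static void main(String[] args) {
    int messageTimeoutSecs = 30;
    int timeoutBucketNum = 3; // assumed value of Acker.TIMEOUT_BUCKET_NUM
    long rotateTimeMs = 1000L * messageTimeoutSecs / (timeoutBucketNum - 1);
    System.out.println(rotateTimeMs + " ms"); // 15000 ms
  }
}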
Example #21
  public SpoutCollector(
      Integer task_id,
      backtype.storm.spout.ISpout spout,
      CommonStatsRolling task_stats,
      TaskSendTargets sendTargets,
      Map _storm_conf,
      TaskTransfer _transfer_fn,
      TimeOutMap<Long, TupleInfo> pending,
      TopologyContext topology_context,
      DisruptorQueue disruptorAckerQueue,
      ITaskReportErr _report_error) {
    this.sendTargets = sendTargets;
    this.storm_conf = _storm_conf;
    this.transfer_fn = _transfer_fn;
    this.pending = pending;
    this.topology_context = topology_context;

    this.disruptorAckerQueue = disruptorAckerQueue;

    this.task_stats = task_stats;
    this.spout = spout;
    this.task_id = task_id;
    this.report_error = _report_error;

    ackerNum = JStormUtils.parseInt(storm_conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
    isDebug = JStormUtils.parseBoolean(storm_conf.get(Config.TOPOLOGY_DEBUG), false);

    random = new Random();
    random.setSeed(System.currentTimeMillis());

    String componentId = topology_context.getThisComponentId();
    emitTotalTimer =
        Metrics.registerTimer(
            JStormServerUtils.getName(componentId, task_id),
            MetricDef.EMIT_TIME,
            String.valueOf(task_id),
            Metrics.MetricType.TASK);
  }
Example #22
  @SuppressWarnings("rawtypes")
  public void open(
      final Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
    this.collector = collector;

    this.topologyName = (String) conf.get(Config.TOPOLOGY_NAME);

    if (mqClient == null) {
      try {
        config.setInstanceName(String.valueOf(context.getThisTaskId()));
        mqClient = new MessagePushConsumer(config);

        mqClient.start(buildMessageListener());
      } catch (Throwable e) {
        LOG.error("Failed to init consumer !", e);
        throw new RuntimeException(e);
      }
    }

    LOG.info(
        "Topology {} opened {} spout successfully!",
        new Object[] {topologyName, config.getTopic()});
  }
Example #23
 @SuppressWarnings("rawtypes")
 @Override
 public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
   this.collector = collector;
   this.index = context.getThisTaskIndex();
 }
Example #24
 @Override
 public void prepare(Map stormConf, TopologyContext context) {
   this.boltName = context.getThisComponentId();
   cluster = HFactory.getOrCreateCluster(cluster_name, cluster_host);
   keyspace = HFactory.createKeyspace(keyspace_name, cluster);
 }
Example #25
 @Override
 public void prepare(@SuppressWarnings("rawtypes") Map stormConf, TopologyContext context) {
   this.name = context.getThisComponentId();
   this.id = context.getThisTaskId();
   this.received = new ArrayList<TimedBindings>();
 }
Example #26
 public void prepare(Map conf, TopologyContext context) {
   tpsCounter = new TpsCounter(context.getThisComponentId() + ":" + context.getThisTaskId());
   LOG.info("Successfully do parepare " + context.getThisComponentId());
 }
Example #27
  @SuppressWarnings({"rawtypes", "unchecked"})
  @Override
  public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {

    _collector = collector;
    this.conf = new Config();
    this.conf.putAll(stormConf);

    checkConfiguration();

    this.taskIndex = context.getThisTaskIndex();

    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.ENGLISH);
    long start = System.currentTimeMillis();
    LOG.info("[Fetcher #{}] : starting at {}", taskIndex, sdf.format(start));

    // Register a "MultiCountMetric" to count different events in this bolt
    // Storm will emit the counts every n seconds to a special bolt via a
    // system stream
    // The data can be accessed by registering a "MetricConsumer" in the
    // topology
    this.eventCounter = context.registerMetric("fetcher_counter", new MultiCountMetric(), 10);

    this.averagedMetrics =
        context.registerMetric("fetcher_average", new MultiReducedMetric(new MeanReducer()), 10);

    this.perSecMetrics =
        context.registerMetric(
            "fetcher_average_persec", new MultiReducedMetric(new PerSecondReducer()), 10);

    protocolFactory = new ProtocolFactory(conf);

    String urlconfigfile = ConfUtils.getString(conf, "urlfilters.config.file", "urlfilters.json");

    if (urlconfigfile != null) {
      try {
        urlFilters = new URLFilters(conf, urlconfigfile);
      } catch (IOException e) {
        LOG.error("Exception caught while loading the URLFilters");
        throw new RuntimeException("Exception caught while loading the URLFilters", e);
      }
    }

    metadataTransfer = MetadataTransfer.getInstance(stormConf);

    allowRedirs =
        ConfUtils.getBoolean(
            stormConf, com.digitalpebble.storm.crawler.Constants.AllowRedirParamName, true);

    sitemapsAutoDiscovery = ConfUtils.getBoolean(stormConf, "sitemap.discovery", false);

    queueMode = ConfUtils.getString(conf, "fetcher.queue.mode", QUEUE_MODE_HOST);
    // check that the mode is known
    if (!queueMode.equals(QUEUE_MODE_IP)
        && !queueMode.equals(QUEUE_MODE_DOMAIN)
        && !queueMode.equals(QUEUE_MODE_HOST)) {
      LOG.error("Unknown partition mode : {} - forcing to byHost", queueMode);
      queueMode = QUEUE_MODE_HOST;
    }
    LOG.info("Using queue mode : {}", queueMode);

    this.crawlDelay = (long) (ConfUtils.getFloat(conf, "fetcher.server.delay", 1.0f) * 1000);

    this.maxCrawlDelay = (long) ConfUtils.getInt(conf, "fetcher.max.crawl.delay", 30) * 1000;
  }
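
The queue-mode check at the end of Example #27 is a common defensive-configuration pattern: accept only a known set of values and force a default otherwise. A compact variant using a set (the literal mode strings are assumptions, not taken from the snippet):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class QueueModeDemo {
  private static final Set<String> KNOWN_MODES =
      new HashSet<>(Arrays.asList("byHost", "byDomain", "byIP"));

  static String normalize(String requested) {
    // unknown values fall back to the default rather than failing the topology
    return KNOWN_MODES.contains(requested) ? requested : "byHost";
  }

  public static void main(String[] args) {
    System.out.println(normalize("byDomain")); // byDomain
    System.out.println(normalize("bogus"));    // byHost
  }
}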