public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    List<String> zks = new ArrayList<String>();
    zks.add("127.0.0.1");

    List<String> cFs = new ArrayList<String>();
    cFs.add("personal");
    cFs.add("company");

    builder.setSpout("hbase_spout", new HbaseSpout(), 2);
    builder
        .setBolt("hbase_bolt", new HbaseBolt("user", cFs, zks, 2181), 2)
        .shuffleGrouping("hbase_spout");

    Config config = new Config();
    config.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("hbase_topology", config, builder.createTopology());

    try {
      Thread.sleep(20000);
    } catch (InterruptedException e) {
      System.out.println("Thread interrupted:" + e);
    }

    System.out.println("stopped called...");

    cluster.killTopology("hbase_topology");

    cluster.shutdown();
  }
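
HbaseSpout and HbaseBolt above are user-defined classes that are not shown here. As a hedged sketch only (the class name, field name, and behavior are assumptions, not the original implementation, and the pre-1.0 backtype.storm API is assumed), a spout in the same style might look like:

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import java.util.Map;

// Hypothetical stand-in for the HbaseSpout above: emits one row key per nextTuple() call.
public class RowKeySpout extends BaseRichSpout {
  private SpoutOutputCollector collector;
  private long sequence = 0;

  @Override
  public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
  }

  @Override
  public void nextTuple() {
    // Emit a synthetic row key; a real spout would read from a queue or a scan result.
    collector.emit(new Values("row-" + sequence++));
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("rowKey"));
  }
}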
Example #2
  public static void main(String[] args) {
    FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
    FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("gender", genderSpout);
    builder.setSpout("age", ageSpout);
    builder
        .setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
        .fieldsGrouping("gender", new Fields("id"))
        .fieldsGrouping("age", new Fields("id"));

    Config conf = new Config();
    conf.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("join-example", conf, builder.createTopology());

    for (int i = 0; i < 10; i++) {
      String gender;
      if (i % 2 == 0) {
        gender = "male";
      } else {
        gender = "female";
      }
      genderSpout.feed(new Values(i, gender));
    }

    for (int i = 9; i >= 0; i--) {
      ageSpout.feed(new Values(i, i + 20));
    }

    Utils.sleep(2000);
    cluster.shutdown();
  }
  public static void main(String[] args) throws Exception {

    if (args.length < 4) {
      System.err.println(
          "Usage: PrintSampleStream <consumer-key> <consumer-secret> <access-token> <access-token-secret> [keyword ...]");
      return;
    }

    String consumerKey = args[0];
    String consumerSecret = args[1];
    String accessToken = args[2];
    String accessTokenSecret = args[3];

    // optional keywords start at the 5th argument
    String[] keyWords = Arrays.copyOfRange(args, 4, args.length);

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(
        "twitter",
        new TwitterSampleSpout(
            consumerKey, consumerSecret, accessToken, accessTokenSecret, keyWords));
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping("twitter");

    Config conf = new Config();

    final FlinkLocalCluster cluster = FlinkLocalCluster.getLocalCluster();
    cluster.submitTopology("Print", conf, FlinkTopology.createTopology(builder));

    Utils.sleep(10 * 1000);

    cluster.shutdown();
  }
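
PrinterBolt is referenced by several snippets here but not shown. A minimal sketch consistent with how it is used above (a sink bolt that logs each tuple; this is an assumption in the style of storm-starter, not the original source):

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;

public class PrinterBolt extends BaseBasicBolt {
  @Override
  public void execute(Tuple tuple, BasicOutputCollector collector) {
    System.out.println(tuple); // print the tuple; nothing is re-emitted
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // no output fields: this bolt is a sink
  }
}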
  public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    List<String> hosts = new ArrayList<String>();
    hosts.add("127.0.0.1");

    SpoutConfig spoutConfig =
        new SpoutConfig(
            KafkaConfig.StaticHosts.fromHostString(hosts, 1), "test", "/kafkastorm", "discovery");

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("kafkaSpout", new KafkaSpout(spoutConfig), 1);
    builder
        .setBolt("flightInformationParserBolt", new FlightInformationParserBolt(), 1)
        .shuffleGrouping("kafkaSpout");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);
      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("flightInformationTest", conf, builder.createTopology());
      //            Utils.sleep(10000);
      //            cluster.killTopology("flightInformationTest");
      //            cluster.shutdown();
    }
  }
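
FlightInformationParserBolt is user-defined. A hedged skeleton of what it might look like (the output field name and the placeholder "parsing" step are assumptions):

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public class FlightInformationParserBolt extends BaseBasicBolt {
  @Override
  public void execute(Tuple tuple, BasicOutputCollector collector) {
    String raw = tuple.getString(0);        // raw message from the KafkaSpout
    collector.emit(new Values(raw.trim())); // placeholder for real parsing logic
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("flightInformation"));
  }
}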
Example #5
  @Test
  public void testEPL() {
    // should say fieldsTypes, maybe with object/component prefix
    Map<String, Object> eventTypes = new HashMap<>();
    eventTypes.put(LITERAL_SYMBOL, String.class);
    eventTypes.put(LITERAL_PRICE, Integer.class);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(LITERAL_QUOTES, new RandomSentenceSpout());
    builder
        .setBolt(
            LITERAL_ESPER,
            (new EsperBolt())
                .addEventTypes(eventTypes)
                .addOutputTypes(
                    Collections.singletonMap(
                        LITERAL_RETURN_OBJ, Arrays.asList(LITERAL_AVG, LITERAL_PRICE)))
                .addStatements(
                    Collections.singleton(
                        "insert into Result "
                            + "select avg(price) as avg, price from "
                            + "quotes_default(symbol='A').win:length(2) "
                            + "having avg(price) > 60.0")))
        .shuffleGrouping(LITERAL_QUOTES);
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping(LITERAL_ESPER, LITERAL_RETURN_OBJ);

    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
    assertEquals(Double.valueOf(75.0), resultEPL.get(100));
    assertEquals(Double.valueOf(75.0), resultEPL.get(50));
  }
  @Ignore
  @Test
  public void testSiddhiSpout()
      throws AlreadyAliveException, InvalidTopologyException, InterruptedException {
    eventsReceived = false;
    ExecutionPlanConfiguration executionPlanConfiguration = new ExecutionPlanConfiguration();
    StreamDefinition siddhiStreamDef =
        new StreamDefinition().name("wordStream").attribute("word", Attribute.Type.STRING);
    ConsumingQueuedEventSource eventSource =
        new ConsumingQueuedEventSource(siddhiStreamDef, executionPlanConfiguration);
    SiddhiSpout siddhiSpout = new SiddhiSpout(siddhiStreamDef, eventSource);
    siddhiSpout.setUseDefaultAsStreamName(false);

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("siddhi-spout", siddhiSpout);
    // builder.setBolt("count", wordCount, 12).fieldsGrouping("siddhi-spout", new Fields("word"));
    builder
        .setBolt("count", new WordCount(), 8)
        .fieldsGrouping("siddhi-spout", "wordStream", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(false);

    conf.setMaxTaskParallelism(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", conf, builder.createTopology());
    eventSource.consumeEvents(new Object[][] {{"GOOG"}, {"WSO2"}, {"FB"}});
    Thread.sleep(10000);
    Assert.assertTrue("No events received.", eventsReceived);
    Assert.assertTrue("Event count is zero", eventCount > 0);
    cluster.shutdown();
  }
Example #7
  public static void main(String[] args) {
    try {
      // Instantiate the TopologyBuilder.
      TopologyBuilder topologyBuilder = new TopologyBuilder();
      // Set the spout and assign its parallelism; the parallelism controls how many threads this component gets in the cluster.
      topologyBuilder.setSpout("SimpleSpout", new SimpleSpout(), 1);
      // Set the processing bolt and its parallelism; shuffle grouping distributes the spout's tuples randomly across its instances.
      topologyBuilder.setBolt("SimpleBolt", new SimpleBolt(), 3).shuffleGrouping("SimpleSpout");
      Config config = new Config();
      config.setDebug(true);
      if (args != null && args.length > 0) {
        config.setNumWorkers(1);
        StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
      } else {
        // Startup code for local-mode execution.
        config.setMaxTaskParallelism(1);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("simple", config, topologyBuilder.createTopology());

        Utils.sleep(5000);
        cluster.shutdown();
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
Example #8
  public static void main(String[] args) throws Exception {
    String topic = "wordcount";
    String zkRoot = "/kafka-storm";
    String spoutId = "KafkaSpout";

    // Kafka integration: set the broker and spout configuration
    BrokerHosts brokerHosts = new ZkHosts("hadoop04:2181,hadoop05:2181,hadoop06:2181");
    SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new MessageSpout());

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(spoutId, new KafkaSpout(spoutConfig));
    builder.setBolt("word-spilter", new WordSpliter(), 4).shuffleGrouping(spoutId);
    builder
        .setBolt("writer", new WriterBolt(), 4)
        .fieldsGrouping("word-spilter", new Fields("word"));

    Config conf = new Config();
    conf.setNumWorkers(4);
    conf.setNumAckers(0);
    conf.setDebug(true);

    // LocalCluster submits the topology to a local in-process simulator, which is convenient for development and debugging
    //        LocalCluster cluster = new LocalCluster();
    //        cluster.submitTopology("WordCount",conf,builder.createTopology());

    StormSubmitter.submitTopology("WordCount", conf, builder.createTopology());
  }
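
WordSpliter and WriterBolt are user classes. As a sketch of the splitter stage matching the wiring above (the whitespace split is an assumed behavior; the "word" field name is taken from the fieldsGrouping):

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public class WordSpliter extends BaseBasicBolt {
  @Override
  public void execute(Tuple tuple, BasicOutputCollector collector) {
    // Split the incoming message on whitespace and emit one tuple per word.
    for (String word : tuple.getString(0).split("\\s+")) {
      if (!word.isEmpty()) {
        collector.emit(new Values(word));
      }
    }
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word"));
  }
}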
  // Entry point for the topology
  public static void main(String[] args) throws Exception {
    // Read and set configuration
    Properties properties = new Properties();
    // Load from the file given as the second argument, or else from the bundled EventHubs.properties
    if (args.length > 1) {
      properties.load(new FileReader(args[1]));
    } else {
      properties.load(
          EventHubWriter.class.getClassLoader().getResourceAsStream("EventHubs.properties"));
    }
    // Configure the bolt for Event Hub
    String policyName = properties.getProperty("eventhubs.writerpolicyname");
    String policyKey = properties.getProperty("eventhubs.writerpolicykey");
    String namespaceName = properties.getProperty("eventhubs.namespace");
    String entityPath = properties.getProperty("eventhubs.entitypath");

    EventHubBoltConfig boltConfig =
        new EventHubBoltConfig(
            policyName, policyKey, namespaceName, "servicebus.windows.net", entityPath);

    // Used to build the topology
    TopologyBuilder builder = new TopologyBuilder();
    // Add the spout, with a name of 'spout'
    // and parallelism hint of 5 executors
    builder.setSpout("spout", new DeviceSpout(), 5);

    builder.setBolt("eventhubbolt", new EventHubBolt(spoutConfig), 8).shuffleGrouping("spout");

    // new configuration
    Config conf = new Config();
    conf.setDebug(true);

    // If there are arguments, we are running on a cluster
    if (args != null && args.length > 0) {
      // set the number of worker processes
      conf.setNumWorkers(3);
      // submit the topology
      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    }
    // Otherwise, we are running locally
    else {
      // Cap the maximum number of executors that can be spawned
      // for a component to 3
      conf.setMaxTaskParallelism(3);
      // LocalCluster is used to run locally
      LocalCluster cluster = new LocalCluster();
      // submit the topology
      cluster.submitTopology("writer", conf, builder.createTopology());
      // sleep
      Thread.sleep(10000);
      // shut down the cluster
      cluster.shutdown();
    }
  }
  public static void SetRemoteTopology(String streamName)
      throws AlreadyAliveException, InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();

    Map<String, Object> conf = new HashMap<String, Object>();

    SetBuilder(builder, conf);

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");

    StormSubmitter.submitTopology(streamName, conf, builder.createTopology());
  }
Example #11
  public static void main(String[] args) throws Exception {
    final TopologyBuilder builder = new TopologyBuilder();
    final KestrelThriftSpout spout =
        new KestrelThriftSpout("localhost", 2229, "test", new StringScheme());
    builder.setSpout("spout", spout).setDebug(true);
    builder.setBolt("bolt", new FailEveryOther()).shuffleGrouping("spout");

    final LocalCluster cluster = new LocalCluster();
    final Config conf = new Config();
    cluster.submitTopology("test", conf, builder.createTopology());

    Thread.sleep(600000);
  }
  private void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    configureKafkaSpout(builder);
    // configureLogTruckEventBolt(builder);
    configureHDFSBolt(builder);

    configureHBaseBolt(builder);

    Config conf = new Config();
    conf.setDebug(true);

    StormSubmitter.submitTopology("truck-event-processor", conf, builder.createTopology());
  }
 /**
  * A topology that produces random sentences using {@link RandomSentenceSpout} and publishes the
  * sentences to the Kafka "test" topic using a KafkaBolt.
  *
  * @return the storm topology
  */
 public StormTopology buildProducerTopology() {
   TopologyBuilder builder = new TopologyBuilder();
   builder.setSpout("spout", new RandomSentenceSpout(), 2);
   /*
    * The output field of the RandomSentenceSpout ("word") is provided as the boltMessageField so
    * that it gets written out as the message in the Kafka topic.
    */
   KafkaBolt bolt =
       new KafkaBolt()
           .withTopicSelector(new DefaultTopicSelector("test"))
           .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("key", "word"));
   builder.setBolt("forwardToKafka", bolt, 1).shuffleGrouping("spout");
   return builder.createTopology();
 }
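
A hedged usage sketch for buildProducerTopology(), assuming the storm-kafka 0.9.x KafkaBolt, which reads producer settings from the KafkaBolt.KAFKA_BROKER_PROPERTIES config entry; the broker address and topology name here are assumptions:

// Fragment: would live in a main() alongside the class that defines buildProducerTopology().
Properties props = new Properties();
props.put("metadata.broker.list", "localhost:9092");             // assumed broker address
props.put("serializer.class", "kafka.serializer.StringEncoder"); // write messages as strings

Config conf = new Config();
conf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);

LocalCluster cluster = new LocalCluster();
cluster.submitTopology("kafka-producer", conf, buildProducerTopology());
Utils.sleep(10000);
cluster.shutdown();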
 public SynchronizedStormDataSource(
     ComponentProperties cp,
     List<String> allCompNames,
     ArrayList<Type> tupleTypes,
     int hierarchyPosition,
     int parallelism,
     int keyIndex,
     boolean isPartitioner,
     TopologyBuilder builder,
     TopologyKiller killer,
     Config conf,
     int numOfTuplesThreshold) {
   super(cp, allCompNames, hierarchyPosition, isPartitioner, conf);
   _numOfTuplesThreshold = numOfTuplesThreshold;
   _keyIndex = keyIndex;
   _name = cp.getName();
   _aggBatchOutputMillis = cp.getBatchOutputMillis();
   _operatorChain = cp.getChainOperator();
   _schema = tupleTypes;
   _frequentSet = new HashSet<Integer>();
   if (getHierarchyPosition() == FINAL_COMPONENT && (!MyUtilities.isAckEveryTuple(conf)))
     killer.registerComponent(this, parallelism);
   builder.setSpout(getID(), this, parallelism);
   if (MyUtilities.isAckEveryTuple(conf)) killer.registerComponent(this, parallelism);
 }
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("PlayStream1", new FootballDataSpout(), 1);
    builder.setBolt("AvgRunPlay", configureSiddhiBolt1(), 1).shuffleGrouping("PlayStream1");
    builder.setBolt("FastRunPlay", configureSiddhiBolt2(), 1).shuffleGrouping("AvgRunPlay");
    builder.setBolt("LeafEcho", new EchoBolt(), 1).shuffleGrouping("FastRunPlay");

    Config conf = new Config();
    // conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);
      cluster.shutdown();
    }
  }
  public static void main(String[] args) throws InterruptedException {

    // Topology definition
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(
        Constants.objectLocationGenerator,
        new ObjectLocationGenerator(),
        Constants.dataSpoutParallelism);
    builder.setSpout(
        Constants.queryGenerator, new RangeQueryGenerator(), Constants.querySpoutParallelism);
    builder
        .setBolt(
            Constants.rangeFilterBolt, new NonIncrementalRangeFilter(), Constants.boltParallelism)
        .customGrouping(
            Constants.queryGenerator,
            new QueryStaticGridCustomGrouping(
                Constants.numberOfBolts,
                Constants.xMaxRange,
                Constants.yMaxRange,
                Constants.xCellsNum,
                Constants.yCellsNum))
        .customGrouping(
            Constants.objectLocationGenerator,
            new DataStaticGridCustomGrouping(
                Constants.numberOfBolts,
                Constants.xMaxRange,
                Constants.yMaxRange,
                Constants.xCellsNum,
                Constants.yCellsNum));

    // Configuration
    Config conf = new Config();

    conf.setDebug(false);
    // Topology run
    // conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(
        "OptimizedNonIncrementalRange-Queries_topology", conf, builder.createTopology());
    //		while (true)
    //			Thread.sleep(1000);

  }
  public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TwitterStreamSpout(), 5);
    builder
        .setBolt("windowingTwitterStream", new WindowingTwitterStream(300), 1)
        .shuffleGrouping("spout");

    builder
        .setBolt(
            "PrepareUserSimilarityCalculationBolt", new PrepareUserSimilarityCalculationBolt(), 1)
        .shuffleGrouping("windowingTwitterStream");
    builder
        .setBolt("UserSimilarityCalculationBolt", new UserSimilarityCalculationBolt(), 1)
        .shuffleGrouping("PrepareUserSimilarityCalculationBolt");

    Config conf = new Config();
    conf.setDebug(false);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);
      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
      conf.setMaxTaskParallelism(3);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());
      // Thread.sleep(10000);
      // cluster.shutdown();
    }
  }
  public static void main(String[] args) {
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("aa", new DataSourceSpout());
    topologyBuilder.setBolt("bb", new SumBolt(), 3).shuffleGrouping("aa");

    /* Create a local Storm cluster (local mode, for development):
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("topology-name", new Config(), topologyBuilder.createTopology());*/

    try {
      StormSubmitter.submitTopology(
          "topology-name", new Config(), topologyBuilder.createTopology());
    } catch (AlreadyAliveException e) {
      e.printStackTrace();
    } catch (InvalidTopologyException e) {
      e.printStackTrace();
    }
  }
  public StormTopology build() {
    final TopologyBuilder builder = new TopologyBuilder();

    for (Map.Entry<String, IRichSpout> spoutEntry : spoutMap.entrySet()) {
      builder.setSpout(spoutEntry.getKey(), spoutEntry.getValue());
    }
    for (Map.Entry<String, IRichBolt> boltEntry : boltMap.entrySet()) {
      InputDeclarer declarer = builder.setBolt(boltEntry.getKey(), boltEntry.getValue());
      List<Connection> connectionsForTarget = connections.get(boltEntry.getKey());
      if (connectionsForTarget != null) {
        for (Connection connection : connectionsForTarget) {
          declarer =
              declarer.shuffleGrouping(
                  connection.getSourceComponent(), connection.getSourceStream());
        }
      }
    }
    return builder.createTopology();
  }
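
A usage sketch for the builder above: spoutMap, boltMap, and connections are the enclosing class's fields that build() reads. The wiring shown, and a Connection(sourceComponent, sourceStream) constructor, are assumptions:

// Fragment: populate the maps, then materialize the topology.
spoutMap.put("lines", new RandomSentenceSpout());
boltMap.put("printer", new PrinterBolt());
connections.put(
    "printer",
    Collections.singletonList(new Connection("lines", Utils.DEFAULT_STREAM_ID)));
StormTopology topology = build();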
Example #20
 @SuppressWarnings("unchecked")
 private <T> Map<String, T> getPrivateField(String field) {
   try {
     Field f = builder.getClass().getDeclaredField(field);
     f.setAccessible(true);
     return copyObject((Map<String, T>) f.get(builder));
   } catch (NoSuchFieldException | IllegalAccessException e) {
     throw new RuntimeException("Couldn't get " + field + " from TopologyBuilder", e);
   }
 }
Example #21
  public void buildEvaluatePart(
      IStateFactory cassandra,
      InstanceStreamSource source,
      TopologyBuilder builder,
      MoaConfig config) {
    int num_workers = config.getNumWorkers();
    int ensemble_size = config.getEnsembleSize();
    int num_classifier_executors = config.getNumClassifierExecutors();
    int num_combiners = config.getNumCombiners();
    int num_aggregators = config.getNumAggregators();
    EvaluateSpout evaluate_spout = new EvaluateSpout(source, cassandra, 100);
    builder.setSpout("prediction_stream", evaluate_spout);

    builder
        .setBolt(
            "shared_storage",
            new SharedStorageBolt<Classifier>(cassandra, "evaluate_classifier"),
            num_workers)
        .customGrouping("prediction_stream", EvaluateSpout.NOTIFICATION_STREAM, new AllGrouping());

    builder
        .setBolt(
            "p_deserialize",
            new TopologyBroadcastBolt("evaluate", LearnSpout.LEARN_STREAM_FIELDS),
            num_workers)
        .shuffleGrouping("prediction_stream");

    builder
        .setBolt(
            "evaluate_local_grouping",
            new WorkerBroadcastBolt("evaluate", LearnSpout.LEARN_STREAM_FIELDS),
            num_workers)
        .customGrouping("p_deserialize", "evaluate", new AllGrouping());

    builder
        .setBolt(
            "evaluate_classifier",
            new EvaluateClassifierBolt(cassandra),
            Math.max(num_classifier_executors, num_workers))
        .customGrouping("evaluate_local_grouping", "evaluate", new AllLocalGrouping())
        .setNumTasks(ensemble_size);

    builder
        .setBolt(
            "combine_result",
            new CombinerBolt("evaluate_classifier"),
            Math.max(num_workers, num_combiners))
        .customGrouping("evaluate_classifier", new LocalGrouping(new IdBasedGrouping()))
        .setNumTasks(Math.max(num_workers, num_combiners));

    builder
        .setBolt(
            "prediction_result",
            new CombinerBolt(ensemble_size),
            Math.max(num_workers, num_combiners))
        .customGrouping("combine_result", new IdBasedGrouping())
        .setNumTasks(Math.max(num_workers, num_aggregators));
  }
  protected StormTopology buildTopology(EventHubSpout eventHubSpout) {
    TopologyBuilder topologyBuilder = new TopologyBuilder();

    // one thread, one task (task runs in thread)
    topologyBuilder.setSpout("EventHubsSpout", eventHubSpout, 1).setNumTasks(1);
    // topologyBuilder.setSpout("EventHubsSpout", eventHubSpout, spoutConfig.getPartitionCount())
    //        .setNumTasks(spoutConfig.getPartitionCount());
    // topologyBuilder.setBolt("PartialCountBolt", new PartialCountBolt(),
    // spoutConfig.getPartitionCount())
    //
    // .localOrShuffleGrouping("EventHubsSpout").setNumTasks(spoutConfig.getPartitionCount());
    // topologyBuilder.setBolt("GlobalCountBolt", new GlobalCountBolt(),
    // 1).globalGrouping("PartialCountBolt")
    //        .setNumTasks(1);
    topologyBuilder
        .setBolt("HBaseBolt", new HBaseBolt(), 1)
        .localOrShuffleGrouping("EventHubsSpout")
        .setNumTasks(1);
    return topologyBuilder.createTopology();
  }
  public static void main(String[] args) throws InterruptedException {

    // Topology definition
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReader());
    builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    builder
        .setBolt("word-counter", new WordCounter(), 2)
        .fieldsGrouping("word-normalizer", new Fields("word"));

    // Configuration
    Config conf = new Config();
    conf.put("wordsFile", args[0]);
    conf.setDebug(false);
    // Topology run
    conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("Getting-Started-Toplogie", conf, builder.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();
  }
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(2);
      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("test", conf, builder.createTopology());
      Utils.sleep(10000);
      cluster.killTopology("test");
      cluster.shutdown();
    }
  }
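
ExclamationLoggingBolt comes from the storm-starter examples and is not shown here. A hedged sketch of an exclamation bolt with the same shape (appending "!!!" to each word; the logging side is omitted, and the class name is a stand-in):

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public class ExclamationBolt extends BaseBasicBolt {
  @Override
  public void execute(Tuple tuple, BasicOutputCollector collector) {
    // Append "!!!" to the incoming word and pass it downstream.
    collector.emit(new Values(tuple.getString(0) + "!!!"));
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word"));
  }
}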
Example #25
  private FlinkTopology(TopologyBuilder builder) {
    this.builder = builder;
    this.stormTopology = builder.createTopology();
    // extract the spouts and bolts
    this.spouts = getPrivateField("_spouts");
    this.bolts = getPrivateField("_bolts");

    this.env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Kick off the translation immediately
    translateTopology();
  }
  @Test
  public void testStorm() throws Exception {
    SentenceSpout spout = new SentenceSpout();
    SplitSentenceBolt splitSentenceBolt = new SplitSentenceBolt();
    WordCountBolt countBolt = new WordCountBolt();
    ReportBolt reportBolt = new ReportBolt();

    TopologyBuilder builder = new TopologyBuilder();
    // Register the sentence spout under a unique ID
    builder.setSpout(SENTENCE_SPOUT_ID, spout);
    // Then register the SplitSentenceBolt, which subscribes to the stream emitted by the SentenceSpout.
    // Passing the SentenceSpout's ID to shuffleGrouping() establishes that subscription;
    // shuffleGrouping() tells Storm to distribute the SentenceSpout's tuples randomly and evenly across the SplitSentenceBolt instances.
    builder.setBolt(SPLIT_BOLT_ID, splitSentenceBolt).shuffleGrouping(SENTENCE_SPOUT_ID);
    // Route tuples carrying particular field values to particular bolt instances:
    // fieldsGrouping() on the BoltDeclarer guarantees that all tuples with the same "word" value reach the same WordCountBolt instance.
    builder.setBolt(COUNT_BOLT_ID, countBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
    // Route every tuple emitted by WordCountBolt to the single ReportBolt instance, using globalGrouping
    builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);

    Config config = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());

    Thread.sleep(10000);
    cluster.killTopology(TOPOLOGY_NAME);
    cluster.shutdown();
  }
Example #27
  public static void main(String[] args)
      throws InterruptedException, AlreadyAliveException, InvalidTopologyException {
    Config config = new Config();
    TopologyBuilder builder = new TopologyBuilder();

    // Set the spout and assign its parallelism; the parallelism controls how many threads this component gets in the cluster.
    builder.setSpout("readlog", new ReadLogSpout(), 1);

    // Create the monitor filtering bolt
    builder.setBolt("monitor", new MonitorBolt("MonitorBolt.xml"), 3).shuffleGrouping("readlog");

    // Create the MySQL storage bolt
    builder.setBolt("mysql", new MysqlBolt("MysqlBolt.xml"), 3).shuffleGrouping("monitor");

    builder.setBolt("print", new PrintBolt(), 3).shuffleGrouping("monitor");

    config.setDebug(false);

    if (args != null && args.length > 0) {
      config.setNumWorkers(1);
      StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    } else {
      // Startup code for local-mode execution.
      config.setMaxTaskParallelism(1);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("simple", config, builder.createTopology());
    }
  }
  public static void main(String[] args) {

    Config conf = new Config();
    conf.setDebug(true);
    // SessionFactory sessionFactory = HibernateSessionFactory.createSession("mySql");

    if (args != null && args.length > 0) {
      TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(null, null);
      builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
      conf.setNumWorkers(1);
      try {
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
      } catch (AlreadyAliveException e) {
        e.printStackTrace();
      } catch (InvalidTopologyException e) {
        e.printStackTrace();
      }
    } else {
      LocalDRPC drpc = new LocalDRPC();
      TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(drpc, null);
      builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
      conf.setMaxTaskParallelism(3);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("mongooseTopologyLocal", conf, builder.createTopology());

      for (String request : new String[] {"{\"studentId\":\"1\",\"greScore\":380.0}"}) {
        System.out.println("Result for: " + request + "\": " + drpc.execute("mongoose", request));
      }
      cluster.shutdown();
      drpc.shutdown();
    }
  }
  public static void main(String[] args) {

    Config config = new Config();
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("RandomSentence", new RandomSentenceSpout(), 2);
    builder
        .setBolt("WordNormalizer", new WordNormalizerBolt(), 2)
        .shuffleGrouping("RandomSentence");
    builder
        .setBolt("WordCount", new WordCountBolt(), 2)
        .fieldsGrouping("WordNormalizer", new Fields("word"));
    builder.setBolt("Print", new PrintBolt(), 1).shuffleGrouping("WordCount");

    config.setDebug(false);

    // Decide between cluster submission and local-mode execution based on whether command-line arguments were given
    if (args != null && args.length > 0) {
      try {
        config.setNumWorkers(1);
        StormSubmitter.submitTopology(args[0], config, builder.createTopology());
      } catch (Exception e) {
        e.printStackTrace();
      }
    } else {
      config.setMaxTaskParallelism(1);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("wordcount", config, builder.createTopology());
    }
  }
  public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      /* Set the number of slots this topology claims on the Storm cluster; one slot corresponds to one
         worker process on a supervisor node. If you request more slots than your physical nodes have
         workers, the submission may appear to succeed but the topology will not run: for example, if the
         cluster has 2 free workers and the code asks for 4, the topology submits but does not execute
         until killing other topologies frees enough slots, at which point it resumes running normally.
      */
      conf.setNumWorkers(3);

      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
      conf.setMaxTaskParallelism(3);
      // Run in local mode
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
  }