Example no. 1
  @Test
  public void testEPL() {
    // should say fieldsTypes, maybe with object/component prefix
    Map<String, Object> eventTypes = new HashMap<>();
    eventTypes.put(LITERAL_SYMBOL, String.class);
    eventTypes.put(LITERAL_PRICE, Integer.class);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(LITERAL_QUOTES, new RandomSentenceSpout());
    builder
        .setBolt(
            LITERAL_ESPER,
            (new EsperBolt())
                .addEventTypes(eventTypes)
                .addOutputTypes(
                    Collections.singletonMap(
                        LITERAL_RETURN_OBJ, Arrays.asList(LITERAL_AVG, LITERAL_PRICE)))
                .addStatements(
                    Collections.singleton(
                        "insert into Result "
                            + "select avg(price) as avg, price from "
                            + "quotes_default(symbol='A').win:length(2) "
                            + "having avg(price) > 60.0")))
        .shuffleGrouping(LITERAL_QUOTES);
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping(LITERAL_ESPER, LITERAL_RETURN_OBJ);

    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
    assertEquals(Double.valueOf(75.0), resultEPL.get(100));
    assertEquals(Double.valueOf(75.0), resultEPL.get(50));
  }
Example no. 2
  public static void main(String[] args) {
    try {
      // Instantiate the TopologyBuilder class.
      TopologyBuilder topologyBuilder = new TopologyBuilder();
      // Set up the spout and assign its parallelism; this value controls how many threads the component gets in the cluster.
      topologyBuilder.setSpout("SimpleSpout", new SimpleSpout(), 1);
      // Set up the processing bolt and assign its parallelism; shuffle grouping makes it receive tuples from the spout at random.
      topologyBuilder.setBolt("SimpleBolt", new SimpleBolt(), 3).shuffleGrouping("SimpleSpout");
      Config config = new Config();
      config.setDebug(true);
      if (args != null && args.length > 0) {
        config.setNumWorkers(1);
        StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
      } else {
        // Startup code for running in local mode.
        config.setMaxTaskParallelism(1);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("simple", config, topologyBuilder.createTopology());

        Utils.sleep(5000);
        cluster.shutdown();
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
Example no. 3
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("PlayStream1", new FootballDataSpout(), 1);
    builder.setBolt("AvgRunPlay", configureSiddhiBolt1(), 1).shuffleGrouping("PlayStream1");
    builder.setBolt("FastRunPlay", configureSiddhiBolt2(), 1).shuffleGrouping("AvgRunPlay");
    builder.setBolt("LeafEcho", new EchoBolt(), 1).shuffleGrouping("FastRunPlay");

    Config conf = new Config();
    // conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);
      cluster.shutdown();
    }
  }
Example no. 4
  public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(1000000);

      cluster.shutdown();
    }
  }
Example no. 5
 public static void main(String[] args) throws Exception {
   Config conf = new Config();
   LocalCluster cluster = new LocalCluster();
   cluster.submitTopology("cdc", conf, buildTopology());
   Thread.sleep(200000);
   cluster.shutdown();
 }
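The buildTopology() helper is not shown above. A minimal sketch of what such a method returns; the spout and bolt used here are placeholders, not the real CDC components:
  private static StormTopology buildTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    // Placeholder components; the actual CDC spout/bolt are not part of this example.
    builder.setSpout("cdc-spout", new RandomSentenceSpout(), 1);
    builder.setBolt("cdc-bolt", new PrinterBolt(), 1).shuffleGrouping("cdc-spout");
    return builder.createTopology();
  }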
Example no. 6
  /**
   * @param args command-line arguments (unused)
   * @throws InterruptedException if the sleep is interrupted
   */
  public static void main(String[] args) throws InterruptedException {
    // Add transactional spout
    MemoryTransactionalSpout spout =
        new MemoryTransactionalSpout(values, new Fields("shortid", "url", "user", "date"), 3);

    TransactionalTopologyBuilder builder =
        new TransactionalTopologyBuilder("shorturl-count", "spout", spout, 2);

    // Build the TupleTableConfig
    TupleTableConfig ttConfig = new TupleTableConfig("shorturl", "shortid");
    ttConfig.setBatch(false);
    ttConfig.addColumn("data", "clicks");
    ttConfig.addColumn("daily", "date");

    builder
        .setBolt("hbase-counters", new HBaseCountersBatchBolt(ttConfig), 2)
        .fieldsGrouping("spout", new Fields("shortid"));

    LocalCluster cluster = new LocalCluster();

    Config stormConfig = new Config();
    stormConfig.setDebug(true);
    stormConfig.setMaxSpoutPending(3);

    cluster.submitTopology("hbase-example", stormConfig, builder.buildTopology());

    Thread.sleep(10000);
    cluster.shutdown();
  }
Example no. 7
  public static void main(String[] args) {

    Config conf = new Config();
    conf.setDebug(true);
    // SessionFactory sessionFactory =
    // HibernateSessionFactory.createSession("mySql");

    if (args != null && args.length > 0) {
      TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(null, null);
      builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
      conf.setNumWorkers(1);
      try {
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
      } catch (AlreadyAliveException | InvalidTopologyException e) {
        e.printStackTrace();
      }
    } else {
      LocalDRPC drpc = new LocalDRPC();
      TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(drpc, null);
      builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
      conf.setMaxTaskParallelism(3);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("mongooseTopologyLocal", conf, builder.createTopology());

      for (String request : new String[] {"{\"studentId\":\"1\",\"greScore\":380.0}"}) {
        System.out.println("Result for: " + request + "\": " + drpc.execute("mongoose", request));
      }
      cluster.shutdown();
      drpc.shutdown();
    }
  }
Example no. 8
  @Ignore
  @Test
  public void testSiddhiSpout()
      throws AlreadyAliveException, InvalidTopologyException, InterruptedException {
    eventsReceived = false;
    ExecutionPlanConfiguration executionPlanConfiguration = new ExecutionPlanConfiguration();
    StreamDefinition siddhiStreamDef =
        new StreamDefinition().name("wordStream").attribute("word", Attribute.Type.STRING);
    ConsumingQueuedEventSource eventSource =
        new ConsumingQueuedEventSource(siddhiStreamDef, executionPlanConfiguration);
    SiddhiSpout siddhiSpout = new SiddhiSpout(siddhiStreamDef, eventSource);
    siddhiSpout.setUseDefaultAsStreamName(false);

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("siddhi-spout", siddhiSpout);
    // builder.setBolt("count", wordCount, 12).fieldsGrouping("siddhi-spout", new Fields("word"));
    builder
        .setBolt("count", new WordCount(), 8)
        .fieldsGrouping("siddhi-spout", "wordStream", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(false);

    conf.setMaxTaskParallelism(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", conf, builder.createTopology());
    eventSource.consumeEvents(new Object[][] {{"GOOG"}, {"WSO2"}, {"FB"}});
    Thread.sleep(10000);
    Assert.assertTrue("No events received.", eventsReceived);
    Assert.assertTrue("Event count is zero", eventCount > 0);
    cluster.shutdown();
  }
Example no. 9
  public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    List<String> zks = new ArrayList<String>();
    zks.add("127.0.0.1");

    List<String> cFs = new ArrayList<String>();
    cFs.add("personal");
    cFs.add("company");

    builder.setSpout("hbase_spout", new HbaseSpout(), 2);
    builder
        .setBolt("hbase_bolt", new HbaseBolt("user", cFs, zks, 2181), 2)
        .shuffleGrouping("hbase_spout");

    Config config = new Config();
    config.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("hbase_topology", config, builder.createTopology());

    try {
      Thread.sleep(20000);
    } catch (InterruptedException e) {
      System.out.println("Thread interrupted:" + e);
    }

    System.out.println("stopped called...");

    cluster.killTopology("hbase_topology");

    cluster.shutdown();
  }
Example no. 10
  public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      /* Set the number of slots this topology claims from the Storm cluster; each slot corresponds
         to one worker process on a supervisor node. If you request more slots than your physical
         nodes have workers, the topology may still submit successfully but will not actually run:
         for example, if the cluster has only 2 free workers and the code asks for 4, the topology
         is accepted but sits idle, and it starts running only once killing other topologies frees
         enough slots.
      */
      conf.setNumWorkers(3);

      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
      conf.setMaxTaskParallelism(3);
      // Run in local mode
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
  }
Example no. 11
  public static void main(String[] args) {
    FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
    FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("gender", genderSpout);
    builder.setSpout("age", ageSpout);
    builder
        .setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
        .fieldsGrouping("gender", new Fields("id"))
        .fieldsGrouping("age", new Fields("id"));

    Config conf = new Config();
    conf.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("join-example", conf, builder.createTopology());

    for (int i = 0; i < 10; i++) {
      String gender;
      if (i % 2 == 0) {
        gender = "male";
      } else {
        gender = "female";
      }
      genderSpout.feed(new Values(i, gender));
    }

    for (int i = 9; i >= 0; i--) {
      ageSpout.feed(new Values(i, i + 20));
    }

    Utils.sleep(2000);
    cluster.shutdown();
  }
Example no. 12
  @Test
  public void testStorm() throws Exception {
    SentenceSpout spout = new SentenceSpout();
    SplitSentenceBolt splitSentenceBolt = new SplitSentenceBolt();
    WordCountBolt countBolt = new WordCountBolt();
    ReportBolt reportBolt = new ReportBolt();

    TopologyBuilder builder = new TopologyBuilder();
    // Register the sentence spout and give it a unique ID
    builder.setSpout(SENTENCE_SPOUT_ID, spout);
    // Then register a SplitSentenceBolt, which subscribes to the stream emitted by SentenceSpout.
    // Passing the SentenceSpout's unique ID to shuffleGrouping() establishes that subscription:
    // shuffleGrouping() tells Storm to distribute SentenceSpout's tuples randomly and evenly across the SplitSentenceBolt instances.
    builder.setBolt(SPLIT_BOLT_ID, splitSentenceBolt).shuffleGrouping(SENTENCE_SPOUT_ID);
    // Route tuples carrying particular values to particular bolt instances:
    // here BoltDeclarer's fieldsGrouping() guarantees that all tuples with the same "word" value go to the same WordCountBolt instance.
    builder.setBolt(COUNT_BOLT_ID, countBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
    // Route the entire WordCountBolt output stream to the single ReportBolt, using globalGrouping
    builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);

    Config config = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());

    Thread.sleep(10000);
    cluster.killTopology(TOPOLOGY_NAME);
    cluster.shutdown();
  }
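The SentenceSpout used by this test is not shown. A minimal sketch of such a spout, with an illustrative sentence list:
  public static class SentenceSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;
    private final String[] sentences = {"the cow jumped over the moon", "an apple a day keeps the doctor away"};
    private int index = 0;

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
      this.collector = collector;
    }

    @Override
    public void nextTuple() {
      // Emit sentences in a round-robin loop, lightly throttled.
      collector.emit(new Values(sentences[index]));
      index = (index + 1) % sentences.length;
      Utils.sleep(100);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
      declarer.declare(new Fields("sentence"));
    }
  }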
Example no. 13
  @Test
  public void testTridentTopology() throws Exception {

    Session session = cassandraCQLUnit.session;
    String[] stationIds = {"station-1", "station-2", "station-3"};
    for (String stationId : stationIds) {
      session.execute(
          "INSERT INTO weather.station(id, name) VALUES(?, ?)",
          stationId,
          "Foo-Station-" + new Random().nextInt());
    }

    ResultSet rows = cassandraCQLUnit.session.execute("SELECT * FROM weather.station");
    for (Row row : rows) {
      System.out.println("####### row = " + row);
    }

    WeatherBatchSpout weatherBatchSpout =
        new WeatherBatchSpout(
            new Fields("weather_station_id", "temperature", "event_time"), 3, stationIds);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("cassandra-trident-stream", weatherBatchSpout);

    CassandraStateFactory insertValuesStateFactory = getInsertTemperatureStateFactory();

    CassandraStateFactory selectWeatherStationStateFactory = getSelectWeatherStationStateFactory();

    TridentState selectState = topology.newStaticState(selectWeatherStationStateFactory);
    stream =
        stream.stateQuery(
            selectState,
            new Fields("weather_station_id"),
            new CassandraQuery(),
            new Fields("name"));
    stream = stream.each(new Fields("name"), new PrintFunction(), new Fields("name_x"));

    stream.partitionPersist(
        insertValuesStateFactory,
        new Fields("weather_station_id", "name", "event_time", "temperature"),
        new CassandraStateUpdater(),
        new Fields());

    StormTopology stormTopology = topology.build();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", getConfig(), stormTopology);
    Thread.sleep(30 * 1000);

    rows = cassandraCQLUnit.session.execute("SELECT * FROM weather.temperature");
    Assert.assertTrue(rows.iterator().hasNext()); // basic sanity check

    cluster.killTopology("wordCounter");
    cluster.shutdown();
  }
Example no. 14
  // Entry point for the topology
  public static void main(String[] args) throws Exception {
    // Read and set configuration
    Properties properties = new Properties();
    // Load from the file given as the second argument, or else from the bundled EventHubs.properties
    if (args.length > 1) {
      properties.load(new FileReader(args[1]));
    } else {
      properties.load(
          EventHubWriter.class.getClassLoader().getResourceAsStream("EventHubs.properties"));
    }
    // Configure the bolt for Event Hub
    String policyName = properties.getProperty("eventhubs.writerpolicyname");
    String policyKey = properties.getProperty("eventhubs.writerpolicykey");
    String namespaceName = properties.getProperty("eventhubs.namespace");
    String entityPath = properties.getProperty("eventhubs.entitypath");

    EventHubBoltConfig boltConfig =
        new EventHubBoltConfig(
            policyName, policyKey, namespaceName, "servicebus.windows.net", entityPath);

    // Used to build the topology
    TopologyBuilder builder = new TopologyBuilder();
    // Add the spout, with a name of 'spout'
    // and parallelism hint of 5 executors
    builder.setSpout("spout", new DeviceSpout(), 5);

    builder.setBolt("eventhubbolt", new EventHubBolt(spoutConfig), 8).shuffleGrouping("spout");

    // new configuration
    Config conf = new Config();
    conf.setDebug(true);

    // If there are arguments, we are running on a cluster
    if (args != null && args.length > 0) {
      // set the number of worker processes for the topology
      conf.setNumWorkers(3);
      // submit the topology
      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    }
    // Otherwise, we are running locally
    else {
      // Cap the maximum number of executors that can be spawned
      // for a component to 3
      conf.setMaxTaskParallelism(3);
      // LocalCluster is used to run locally
      LocalCluster cluster = new LocalCluster();
      // submit the topology
      cluster.submitTopology("writer", conf, builder.createTopology());
      // sleep
      Thread.sleep(10000);
      // shut down the cluster
      cluster.shutdown();
    }
  }
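For reference, the EventHubs.properties file loaded above must define the four keys the code reads; a skeleton with the values elided:
  eventhubs.writerpolicyname=<SAS policy name>
  eventhubs.writerpolicykey=<SAS policy key>
  eventhubs.namespace=<service bus namespace>
  eventhubs.entitypath=<event hub name>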
Example no. 15
  public static void main(String[] args) throws Exception {

    if (args.length < 3)
      throw new RuntimeException("requires at least 3 args: toponame, host, streamName [workerCnt]");

    String topoName = args[0];
    String xaphost = args[1];
    String streamName = args[2];
    int workerCnt = 4;
    if (args.length > 3) workerCnt = Integer.parseInt(args[3]);

    log.info(String.format("executing wordcount with %s %s %s", topoName, xaphost, streamName));

    XAPConfig config = new XAPConfig();
    config.setBatchSize(1000);
    config.setStreamName(streamName);
    config.setXapHost(xaphost);
    config.setFields("sentence");
    config.setCollectStats(true);

    Config conf = new Config();
    // conf.setDebug(true);

    XAPTridentSpout spout = new XAPTridentSpout(config);

    TridentTopology topology = new TridentTopology();
    TridentState wordCounts =
        topology
            .newStream("spout1", spout)
            .each(new Fields("sentence"), new SplitLarge(6), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(
                XAPState2.nonTransactional(
                    String.format("jini://*/*/streamspace?locators=%s", xaphost), true),
                new Count(),
                new Fields("count"));

    if (args != null && args.length > 0) {
      conf.setNumWorkers(workerCnt);
      StormSubmitter.submitTopology(topoName, conf, topology.build());
    } else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, topology.build());

      Thread.sleep(10000);

      cluster.shutdown();
    }
  }
Example no. 16
  /**
   * Program entry point.<br>
   *
   * <ul>
   *   <li>Argument arg[0]: path to the YAML file containing the configuration values
   *   <li>Argument arg[1]: Storm launch mode (true: LocalMode, false: DistributeMode)
   * </ul>
   *
   * @param args launch arguments
   * @throws Exception if initialization fails
   */
  public static void main(String[] args) throws Exception {
    // Check that enough program arguments were supplied
    if (args.length < 2) {
      System.out.println(
          "Usage: java acromusashi.stream.example.topology.DecisionTestTopology ConfigPath isExecuteLocal(true|false)");
      return;
    }

    // Generate the Storm config object from the path passed as a launch argument
    Config conf = StormConfigGenerator.loadStormConfig(args[0]);

    // Read the run mode from the program arguments (local or distributed environment)
    boolean isLocal = Boolean.valueOf(args[1]);

    TopologyBuilder builder = new TopologyBuilder();

    // Get setting from StormConfig Object
    int wordSpoutPara = StormConfigUtil.getIntValue(conf, "WordSpout.Parallelism", 2);
    int judgeBoltPara = StormConfigUtil.getIntValue(conf, "JudgeBolt.Parallelism", 2);
    int shortWordBoltPara = StormConfigUtil.getIntValue(conf, "ShortWord.Parallelism", 2);
    int longWordBoltPara = StormConfigUtil.getIntValue(conf, "LongWord.Parallelism", 2);

    builder.setSpout("WordSpout", new TestWordSpout(), wordSpoutPara);
    builder
        .setBolt("JudgeBolt", new JudgeBolt(), judgeBoltPara)
        .fieldsGrouping("WordSpout", new Fields("word"));

    // Subscribe to the ShortWord stream
    builder
        .setBolt("ShortWord", new ShortWordBolt(), shortWordBoltPara)
        .fieldsGrouping("JudgeBolt", "ShortWord", new Fields("word"));

    // Subscribe to the LongWord stream
    builder
        .setBolt("LongWord", new LongWordBolt(), longWordBoltPara)
        .fieldsGrouping("JudgeBolt", "LongWord", new Fields("word"));

    if (isLocal) {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("DecisionTest", conf, builder.createTopology());
      Utils.sleep(10000000);
      cluster.killTopology("DecisionTest");
      cluster.shutdown();
    } else {
      StormSubmitter.submitTopology("DecisionTest", conf, builder.createTopology());
    }
  }
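The YAML file passed as args[0] supplies the parallelism settings read through StormConfigUtil; a minimal configuration matching the keys above (using the same defaults as the code) would be:
  WordSpout.Parallelism: 2
  JudgeBolt.Parallelism: 2
  ShortWord.Parallelism: 2
  LongWord.Parallelism: 2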
Example no. 17
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    // Login
    builder.setBolt("mlogin_verify_bolt", new MLoginVeriBolt(), 10).shuffleGrouping("mlogin_spout");
    builder
        .setBolt("mlogin_calc_bolt", new MLoginCalcBolt(), 10)
        .shuffleGrouping("mlogin_verify_bolt");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      if (args.length < 2) {
        System.out.println("NOTICE: please provide the game config file path (param 2)!");
        System.exit(-999);
      }
      String gamecfg_path = args[1];
      conf.put("gamecfg_path", gamecfg_path);

      conf.put("isOnline", true);
      String topicLogin = "******";
      String zkRoot = "/home/ztgame/storm/zkroot";
      String spoutIdLogin = "******";
      BrokerHosts brokerHosts =
          new ZkHosts("172.29.201.208:2181,172.29.201.207:2181,172.29.201.205:2181");

      SpoutConfig spoutConfLogin = new SpoutConfig(brokerHosts, topicLogin, zkRoot, spoutIdLogin);
      spoutConfLogin.scheme = new SchemeAsMultiScheme(new StringScheme());
      builder.setSpout("mlogin_spout", new KafkaSpout(spoutConfLogin), 1);

      conf.setNumWorkers(2);
      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
      conf.put("isOnline", false);
      builder.setSpout("mlogin_spout", new SampleMLoginSpout(), 1);

      conf.setMaxTaskParallelism(2);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());
      Thread.sleep(200000);
      cluster.shutdown();
    }
  }
Example no. 18
  public static void main(String[] args) throws Exception {
    LocalDRPC drpc = new LocalDRPC();

    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();

    cluster.submitTopology("reach", conf, buildTopology(drpc));

    Thread.sleep(2000);

    System.out.println("REACH: " + drpc.execute("reach", "aaa"));
    System.out.println("REACH: " + drpc.execute("reach", "foo.com/blog/1"));
    System.out.println("REACH: " + drpc.execute("reach", "engineering.twitter.com/blog/5"));

    cluster.shutdown();
    drpc.shutdown();
  }
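The buildTopology(drpc) helper is not shown here. A sketch modeled on storm-starter's ReachTopology, whose bolts (GetTweeters, GetFollowers, PartialUniquer, CountAggregator) are assumed:
  private static StormTopology buildTopology(LocalDRPC drpc) {
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
    builder.addBolt(new GetTweeters(), 4);                      // url -> tweeter
    builder.addBolt(new GetFollowers(), 12).shuffleGrouping();  // tweeter -> follower
    builder.addBolt(new PartialUniquer(), 6).fieldsGrouping(new Fields("id", "follower"));
    builder.addBolt(new CountAggregator(), 3).fieldsGrouping(new Fields("id"));
    return builder.createLocalTopology(drpc);
  }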
Example no. 19
  public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RandomSentenceSpout(), 5);

    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);

    List<String> componentList = new ArrayList<String>(Arrays.asList("spout", "split", "count"));
    conf.put("components", componentList);

    Map<String, List<String>> streamMap = new HashMap<String, List<String>>();
    List<String> splitstream = new ArrayList<String>(Arrays.asList("spout"));
    List<String> countstream = new ArrayList<String>(Arrays.asList("split"));
    streamMap.put("split", splitstream);
    streamMap.put("count", countstream);
    conf.put("streams", streamMap);

    conf.put("traffic.improvement", 5);

    conf.put("alfa", "0");
    conf.put("beta", "1");
    conf.put("gamma", "1");

    if (args != null && args.length > 0) {
      conf.setNumWorkers(3);

      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
      WorkerMonitor.getInstance();
    } else {
      conf.setMaxTaskParallelism(3);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("word-count", conf, builder.createTopology());

      Thread.sleep(10000);

      cluster.shutdown();
    }
  }
Example no. 20
  public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout spout =
        new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder builder =
        new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
    builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
    builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

    LocalCluster cluster = new LocalCluster();

    Config config = new Config();
    config.setDebug(true);
    config.setMaxSpoutPending(3);

    cluster.submitTopology("global-count-topology", config, builder.buildTopology());

    Thread.sleep(3000);
    cluster.shutdown();
  }
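BatchCount and UpdateGlobalCount follow storm-starter's TransactionalGlobalCount example. For context, a sketch of the partial-count batch bolt along those lines: it counts the tuples in each batch and emits the total once the batch finishes:
  public static class BatchCount extends BaseBatchBolt {
    Object id;
    BatchOutputCollector collector;
    int count = 0;

    @Override
    public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
      this.collector = collector;
      this.id = id;
    }

    @Override
    public void execute(Tuple tuple) {
      count++;
    }

    @Override
    public void finishBatch() {
      // Emit one (batch id, count) pair per batch.
      collector.emit(new Values(id, count));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
      declarer.declare(new Fields("id", "count"));
    }
  }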
Example no. 21
  public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    Config config = new Config();
    config.setDebug(true);

    /*
       ----------------------TODO-----------------------
       Task: wire up the topology

       NOTE: make sure when connecting components together, using the functions setBolt(name,…) and setSpout(name,…),
       you use the following names for each component:

       FileReaderSpout -> "spout"
       SplitSentenceBolt -> "split"
       WordCountBolt -> "count"
       NormalizerBolt -> "normalize"
       TopNFinderBolt -> "top-n"

       ------------------------------------------------- */
    builder.setSpout("spout", new FileReaderSpout(args[0]), 1);

    builder.setBolt("split", new SplitSentenceBolt(), 8).shuffleGrouping("spout");
    builder
        .setBolt("normalize", new NormalizerBolt(), 12)
        .fieldsGrouping("split", new Fields("word"));
    builder
        .setBolt("count", new WordCountBolt(), 12)
        .fieldsGrouping("normalize", new Fields("word"));
    builder.setBolt("top-n", new TopNFinderBolt(N), 1).fieldsGrouping("count", new Fields("word"));

    config.setMaxTaskParallelism(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", config, builder.createTopology());

    // wait for 2 minutes and then kill the job
    Thread.sleep(2 * 60 * 1000);

    cluster.shutdown();
  }
Example no. 22
  public static void main(String[] args) throws Exception {
    ParameterTool pt = ParameterTool.fromArgs(args);

    int par = pt.getInt("para");

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
    int i = 0;
    for (; i < pt.getInt("repartitions", 1) - 1; i++) {
      System.out.println("adding source" + i + " --> source" + (i + 1));
      builder
          .setBolt("source" + (i + 1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism"))
          .fieldsGrouping("source" + i, new Fields("id"));
    }
    System.out.println("adding final source" + i + " --> sink");

    builder
        .setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism"))
        .fieldsGrouping("source" + i, new Fields("id"));

    Config conf = new Config();
    conf.setDebug(false);
    // System.exit(1);

    if (!pt.has("local")) {
      conf.setNumWorkers(par);

      StormSubmitter.submitTopologyWithProgressBar(
          "throughput-" + pt.get("name", "no_name"), conf, builder.createTopology());
    } else {
      conf.setMaxTaskParallelism(par);

      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("throughput", conf, builder.createTopology());

      Thread.sleep(300000);

      cluster.shutdown();
    }
  }
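ParameterTool here is a --key value argument parser (as in Flink's utility of the same name), so a local run of this benchmark could be launched with arguments along these lines (values illustrative):
  --local --name test --para 4 --sourceParallelism 2 --sinkParallelism 2 --repartitions 2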
Example no. 23
  /**
   * To run this topology ensure you have a kafka broker running. Create a topic test with command
   * line, kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partition 1
   * --topic test
   */
  public static void main(String[] args) throws Exception {

    String zkUrl = "localhost:2181"; // the defaults.
    String brokerUrl = "localhost:9092";

    if (args.length > 2 || (args.length == 1 && args[0].matches("^(-h|--help)$"))) {
      System.out.println("Usage: TridentKafkaWordCount [kafka zookeeper url] [kafka broker url]");
      System.out.println("   E.g TridentKafkaWordCount [" + zkUrl + "]" + " [" + brokerUrl + "]");
      System.exit(1);
    } else if (args.length == 1) {
      zkUrl = args[0];
    } else if (args.length == 2) {
      zkUrl = args[0];
      brokerUrl = args[1];
    }

    System.out.println("Using Kafka zookeeper url: " + zkUrl + " broker url: " + brokerUrl);

    TridentKafkaWordCount wordCount = new TridentKafkaWordCount(zkUrl, brokerUrl);

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();

    // submit the consumer topology.
    cluster.submitTopology(
        "wordCounter", wordCount.getConsumerConfig(), wordCount.buildConsumerTopology(drpc));

    // submit the producer topology.
    cluster.submitTopology(
        "kafkaBolt", wordCount.getProducerConfig(), wordCount.buildProducerTopology());

    // keep querying the word counts for a minute.
    for (int i = 0; i < 60; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "the and apple snow jumped"));
      Thread.sleep(1000);
    }

    cluster.killTopology("kafkaBolt");
    cluster.killTopology("wordCounter");
    cluster.shutdown();
  }
Example no. 24
  public static void main(String[] args) throws InterruptedException {

    // Topology definition
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReader());
    builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    builder
        .setBolt("word-counter", new WordCounter(), 2)
        .fieldsGrouping("word-normalizer", new Fields("word"));

    // Configuration
    Config conf = new Config();
    conf.put("wordsFile", args[0]);
    conf.setDebug(false);
    // Topology run
    conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("Getting-Started-Toplogie", conf, builder.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();
  }
Example no. 25
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(2);
      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("test", conf, builder.createTopology());
      Utils.sleep(10000);
      cluster.killTopology("test");
      cluster.shutdown();
    }
  }
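ExclamationLoggingBolt is not shown above. A minimal sketch of such a bolt: append "!!!" to the incoming word, log it, and re-emit:
  public static class ExclamationLoggingBolt extends BaseRichBolt {
    private static final Logger LOG = LoggerFactory.getLogger(ExclamationLoggingBolt.class);
    private OutputCollector collector;

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
      this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
      String exclaimed = tuple.getString(0) + "!!!";
      LOG.info("emitting: {}", exclaimed);
      collector.emit(tuple, new Values(exclaimed));  // anchored emit
      collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
      declarer.declare(new Fields("word"));
    }
  }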
Example no. 26
  @Test
  public void testSODA() {
    // should say fieldsTypes, maybe with object/component prefix
    Map<String, Object> eventTypes = new HashMap<>();
    eventTypes.put(LITERAL_SYMBOL, String.class);
    eventTypes.put(LITERAL_PRICE, Integer.class);

    EPStatementObjectModel model = new EPStatementObjectModel();
    model.setInsertInto(InsertIntoClause.create(LITERAL_RETURN_OBJ));
    model.setSelectClause(
        SelectClause.create().add(Expressions.avg(LITERAL_PRICE), LITERAL_AVG).add(LITERAL_PRICE));
    Filter filter = Filter.create("quotes_default", Expressions.eq(LITERAL_SYMBOL, "A"));
    model.setFromClause(
        FromClause.create(
            FilterStream.create(filter).addView("win", "length", Expressions.constant(2))));
    model.setHavingClause(
        Expressions.gt(Expressions.avg(LITERAL_PRICE), Expressions.constant(60.0)));

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(LITERAL_QUOTES, new RandomSentenceSpout());
    builder
        .setBolt(
            LITERAL_ESPER,
            (new EsperBolt())
                .addEventTypes(eventTypes)
                .addOutputTypes(
                    Collections.singletonMap(
                        LITERAL_RETURN_OBJ, Arrays.asList(LITERAL_AVG, LITERAL_PRICE)))
                .addObjectStatemens(Collections.singleton(model)))
        .shuffleGrouping(LITERAL_QUOTES);
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping(LITERAL_ESPER, LITERAL_RETURN_OBJ);

    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
    assertEquals(Double.valueOf(75.0), resultSODA.get(100));
    assertEquals(Double.valueOf(75.0), resultSODA.get(50));
  }
Example no. 27
  public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setDebug(false);

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("internet-radio-play-stats", config, TopologyBuilder.buildLocal(drpc));

    // Utils.sleep(ONE_MINUTE);

    String result = drpc.execute("count-request-by-tag", "Classic Rock,Punk,Post Punk");
    System.out.println("RESULTS");
    System.out.println(
        "==========================================================================");
    System.out.println(result);
    System.out.println(
        "==========================================================================");

    cluster.killTopology("internet-radio-play-stats");
    cluster.shutdown();
    drpc.shutdown();
  }
Example no. 28
  protected int submit(String name, Config conf, TopologyBuilder builder) {

    // register Metadata for serialization with FieldsSerializer
    Config.registerSerialization(conf, Metadata.class);

    if (isLocal) {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology(name, conf, builder.createTopology());
      if (ttl != -1) {
        Utils.sleep(ttl * 1000);
        cluster.shutdown();
      }
    } else {
      try {
        StormSubmitter.submitTopology(name, conf, builder.createTopology());
      } catch (Exception e) {
        e.printStackTrace();
        return -1;
      }
    }
    return 0;
  }
Example no. 29
  public static void main(String[] args) throws Exception {
    Config config = new Config();

    String host = TEST_REDIS_HOST;
    int port = TEST_REDIS_PORT;

    if (args.length >= 2) {
      host = args[0];
      port = Integer.parseInt(args[1]);
    }

    JedisPoolConfig poolConfig = new JedisPoolConfig.Builder().setHost(host).setPort(port).build();

    WordSpout spout = new WordSpout();
    WordCounter bolt = new WordCounter();
    StoreCountRedisBolt redisBolt = new StoreCountRedisBolt(poolConfig);

    // wordSpout ==> countBolt ==> RedisBolt
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(COUNT_BOLT, bolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(REDIS_BOLT, redisBolt, 1).fieldsGrouping(COUNT_BOLT, new Fields("word"));

    if (args.length == 2) {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("test", config, builder.createTopology());
      Thread.sleep(30000);
      cluster.killTopology("test");
      cluster.shutdown();
      System.exit(0);
    } else if (args.length == 3) {
      StormSubmitter.submitTopology(args[2], config, builder.createTopology());
    } else {
      System.out.println("Usage: PersistentWordCount <redis host> <redis port> (topology name)");
    }
  }
Example no. 30
  public static void main(String[] args) throws Exception {

    LinearDRPCTopologyBuilder builder = construct();

    Config conf = new Config();
    conf.setNumWorkers(6);
    if (args.length != 0) {

      try {
        Map yamlConf = LoadConf.LoadYaml(args[0]);
        if (yamlConf != null) {
          conf.putAll(yamlConf);
        }
      } catch (Exception e) {
        System.out.println("Input " + args[0] + " isn't one yaml ");
      }

      StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, builder.createRemoteTopology());
    } else {

      conf.setMaxTaskParallelism(3);
      LocalDRPC drpc = new LocalDRPC();
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology(TOPOLOGY_NAME, conf, builder.createLocalTopology(drpc));

      JStormUtils.sleepMs(50000);

      String[] urlsToTry =
          new String[] {"foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com"};
      for (String url : urlsToTry) {
        System.out.println("Reach of " + url + ": " + drpc.execute(TOPOLOGY_NAME, url));
      }

      cluster.shutdown();
      drpc.shutdown();
    }
  }