public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    List<String> zks = new ArrayList<String>();
    zks.add("127.0.0.1");

    List<String> cFs = new ArrayList<String>();
    cFs.add("personal");
    cFs.add("company");

    builder.setSpout("hbase_spout", new HbaseSpout(), 2);
    builder
        .setBolt("hbase_bolt", new HbaseBolt("user", cFs, zks, 2181), 2)
        .shuffleGrouping("hbase_spout");

    Config config = new Config();
    config.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("hbase_topology", config, builder.createTopology());

    try {
      Thread.sleep(20000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
      System.out.println("Thread interrupted: " + e);
    }

    System.out.println("stopped called...");

    cluster.killTopology("hbase_topology");

    cluster.shutdown();
  }
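
A minimal sketch of what a spout like HbaseSpout typically implements, assuming Storm's standard BaseRichSpout lifecycle (the class name, emitted field, and row-key value below are illustrative assumptions, not the actual implementation; older Storm releases import from backtype.storm instead of org.apache.storm and use a raw Map in open()):

import java.util.Map;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

public class SketchSpout extends BaseRichSpout {
  private SpoutOutputCollector collector;

  @Override
  public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector; // keep the collector for use in nextTuple()
  }

  @Override
  public void nextTuple() {
    collector.emit(new Values("row-00001")); // emit one tuple per invocation (hypothetical row key)
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("rowKey")); // the field name downstream bolts subscribe to
  }
}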
  @Test
  public void testStorm() throws Exception {
    SentenceSpout spout = new SentenceSpout();
    SplitSentenceBolt splitSentenceBolt = new SplitSentenceBolt();
    WordCountBolt countBolt = new WordCountBolt();
    ReportBolt reportBolt = new ReportBolt();

    TopologyBuilder builder = new TopologyBuilder();
    // Register the sentence spout and give it a unique ID
    builder.setSpout(SENTENCE_SPOUT_ID, spout);
    // Register a SplitSentenceBolt that subscribes to the stream emitted by SentenceSpout.
    // Passing SentenceSpout's unique ID to shuffleGrouping() establishes that subscription.
    // shuffleGrouping() tells Storm to distribute the tuples emitted by SentenceSpout randomly
    // and evenly across the SplitSentenceBolt instances.
    builder.setBolt(SPLIT_BOLT_ID, splitSentenceBolt).shuffleGrouping(SENTENCE_SPOUT_ID);
    // Route tuples carrying particular values to specific bolt instances:
    // BoltDeclarer's fieldsGrouping() guarantees that all tuples with the same "word" field value
    // are routed to the same WordCountBolt instance.
    builder.setBolt(COUNT_BOLT_ID, countBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
    // Route every tuple emitted by WordCountBolt to the single ReportBolt, using globalGrouping
    builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);

    Config config = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());

    Thread.sleep(10000);
    cluster.killTopology(TOPOLOGY_NAME);
    cluster.shutdown();
  }
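
The fieldsGrouping(SPLIT_BOLT_ID, new Fields("word")) call above only works because the upstream bolt declares a "word" output field. A minimal sketch of how SplitSentenceBolt plausibly does that (the "sentence" input field and the whitespace split are assumptions; the real class may differ):

import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

public class SplitSentenceBoltSketch extends BaseRichBolt {
  private OutputCollector collector;

  @Override
  public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
  }

  @Override
  public void execute(Tuple tuple) {
    // assumed input field name; split each sentence into words
    for (String word : tuple.getStringByField("sentence").split("\\s+")) {
      collector.emit(new Values(word));
    }
    collector.ack(tuple); // acknowledge so the spout can retire the tuple
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word")); // the field fieldsGrouping() keys on
  }
}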
Example #3
  @Test
  public void testTridentTopology() throws Exception {

    Session session = cassandraCQLUnit.session;
    String[] stationIds = {"station-1", "station-2", "station-3"};
    for (String stationId : stationIds) {
      session.execute(
          "INSERT INTO weather.station(id, name) VALUES(?, ?)",
          stationId,
          "Foo-Station-" + new Random().nextInt());
    }

    ResultSet rows = cassandraCQLUnit.session.execute("SELECT * FROM weather.station");
    for (Row row : rows) {
      System.out.println("####### row = " + row);
    }

    WeatherBatchSpout weatherBatchSpout =
        new WeatherBatchSpout(
            new Fields("weather_station_id", "temperature", "event_time"), 3, stationIds);

    TridentTopology topology = new TridentTopology();
    Stream stream = topology.newStream("cassandra-trident-stream", weatherBatchSpout);

    CassandraStateFactory insertValuesStateFactory = getInsertTemperatureStateFactory();

    CassandraStateFactory selectWeatherStationStateFactory = getSelectWeatherStationStateFactory();

    TridentState selectState = topology.newStaticState(selectWeatherStationStateFactory);
    stream =
        stream.stateQuery(
            selectState,
            new Fields("weather_station_id"),
            new CassandraQuery(),
            new Fields("name"));
    stream = stream.each(new Fields("name"), new PrintFunction(), new Fields("name_x"));

    stream.partitionPersist(
        insertValuesStateFactory,
        new Fields("weather_station_id", "name", "event_time", "temperature"),
        new CassandraStateUpdater(),
        new Fields());

    StormTopology stormTopology = topology.build();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", getConfig(), stormTopology);
    Thread.sleep(30 * 1000);

    rows = cassandraCQLUnit.session.execute("SELECT * FROM weather.temperature");
    Assert.assertTrue(rows.iterator().hasNext()); // basic sanity check

    cluster.killTopology("wordCounter");
    cluster.shutdown();
  }
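
A plausible sketch of the PrintFunction used in the stream above, following Trident's BaseFunction contract; emitting the station name unchanged into the declared "name_x" output field is an assumption about the real class:

import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

public class PrintFunctionSketch extends BaseFunction {
  @Override
  public void execute(TridentTuple tuple, TridentCollector collector) {
    System.out.println("station name = " + tuple.getStringByField("name"));
    // emit exactly one value to populate the declared output field ("name_x" above)
    collector.emit(new Values(tuple.getStringByField("name")));
  }
}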
Example #4
  /**
   * To run this topology, ensure you have a Kafka broker running and create a "test" topic from
   * the command line: kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1
   * --partitions 1 --topic test
   */
  public static void main(String[] args) throws Exception {

    String zkUrl = "localhost:2181"; // the defaults.
    String brokerUrl = "localhost:9092";

    // String.matches() anchors the whole string, so explicit ^/$ anchors are unnecessary
    if (args.length > 2 || (args.length == 1 && args[0].matches("-h|--help"))) {
      System.out.println("Usage: TridentKafkaWordCount [kafka zookeeper url] [kafka broker url]");
      System.out.println("   e.g. TridentKafkaWordCount [" + zkUrl + "] [" + brokerUrl + "]");
      System.exit(1);
    } else if (args.length == 1) {
      zkUrl = args[0];
    } else if (args.length == 2) {
      zkUrl = args[0];
      brokerUrl = args[1];
    }

    System.out.println("Using Kafka zookeeper url: " + zkUrl + " broker url: " + brokerUrl);

    TridentKafkaWordCount wordCount = new TridentKafkaWordCount(zkUrl, brokerUrl);

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();

    // submit the consumer topology.
    cluster.submitTopology(
        "wordCounter", wordCount.getConsumerConfig(), wordCount.buildConsumerTopology(drpc));

    // submit the producer topology.
    cluster.submitTopology(
        "kafkaBolt", wordCount.getProducerConfig(), wordCount.buildProducerTopology());

    // keep querying the word counts for a minute.
    for (int i = 0; i < 60; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "the and apple snow jumped"));
      Thread.sleep(1000);
    }

    cluster.killTopology("kafkaBolt");
    cluster.killTopology("wordCounter");
    cluster.shutdown();
  }
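
A sketch of what buildConsumerTopology(drpc) plausibly wires up, following the standard Trident word-count-with-DRPC pattern; the kafkaSpout variable and the Split function are assumptions, and the "words" DRPC function name matches the drpc.execute("words", ...) call above:

import org.apache.storm.trident.TridentState;
import org.apache.storm.trident.TridentTopology;
import org.apache.storm.trident.operation.builtin.Count;
import org.apache.storm.trident.operation.builtin.MapGet;
import org.apache.storm.trident.testing.MemoryMapState;
import org.apache.storm.tuple.Fields;

// inside a hypothetical buildConsumerTopology(LocalDRPC drpc)
TridentTopology topology = new TridentTopology();
TridentState wordCounts =
    topology.newStream("kafka-stream", kafkaSpout) // e.g. an opaque Trident Kafka spout
        .each(new Fields("str"), new Split(), new Fields("word"))
        .groupBy(new Fields("word"))
        .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));
// queries against the in-memory counts are served through the DRPC stream
topology.newDRPCStream("words", drpc)
    .each(new Fields("args"), new Split(), new Fields("word"))
    .groupBy(new Fields("word"))
    .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"));
return topology.build();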
  /**
   * Program entry point.<br>
   *
   * <ul>
   *   <li>args[0]: path to the YAML file containing the configuration values
   *   <li>args[1]: Storm launch mode (true: LocalMode, false: DistributedMode)
   * </ul>
   *
   * @param args launch arguments
   * @throws Exception on initialization failure
   */
  public static void main(String[] args) throws Exception {
    // Check that the required program arguments were supplied
    if (args.length < 2) {
      System.out.println(
          "Usage: java acromusashi.stream.example.topology.DecisionTestTopology ConfigPath isExecuteLocal(true|false)");
      return;
    }

    // Generate the Storm configuration object from the path given as a launch argument
    Config conf = StormConfigGenerator.loadStormConfig(args[0]);

    // Read the launch mode (local or distributed) from the program arguments
    boolean isLocal = Boolean.parseBoolean(args[1]);

    TopologyBuilder builder = new TopologyBuilder();

    // Get setting from StormConfig Object
    int wordSpoutPara = StormConfigUtil.getIntValue(conf, "WordSpout.Parallelism", 2);
    int judgeBoltPara = StormConfigUtil.getIntValue(conf, "JudgeBolt.Parallelism", 2);
    int shortWordBoltPara = StormConfigUtil.getIntValue(conf, "ShortWord.Parallelism", 2);
    int longWordBoltPara = StormConfigUtil.getIntValue(conf, "LongWord.Parallelism", 2);

    builder.setSpout("WordSpout", new TestWordSpout(), wordSpoutPara);
    builder
        .setBolt("JudgeBolt", new JudgeBolt(), judgeBoltPara)
        .fieldsGrouping("WordSpout", new Fields("word"));

    // Subscribe to the "ShortWord" stream emitted by JudgeBolt
    builder
        .setBolt("ShortWord", new ShortWordBolt(), shortWordBoltPara)
        .fieldsGrouping("JudgeBolt", "ShortWord", new Fields("word"));

    // Subscribe to the "LongWord" stream emitted by JudgeBolt
    builder
        .setBolt("LongWord", new LongWordBolt(), longWordBoltPara)
        .fieldsGrouping("JudgeBolt", "LongWord", new Fields("word"));

    if (isLocal) {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("DecisionTest", conf, builder.createTopology());
      Utils.sleep(10000000);
      cluster.killTopology("DecisionTest");
      cluster.shutdown();
    } else {
      StormSubmitter.submitTopology("DecisionTest", conf, builder.createTopology());
    }
  }
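
The two fieldsGrouping("JudgeBolt", "ShortWord"/"LongWord", ...) subscriptions above imply that JudgeBolt declares and emits on two named streams. A minimal sketch of how such a bolt might do that (the 5-character threshold and field names are assumptions):

import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

public class JudgeBoltSketch extends BaseRichBolt {
  private OutputCollector collector;

  @Override
  public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
  }

  @Override
  public void execute(Tuple tuple) {
    String word = tuple.getStringByField("word");
    // route to one of the two named streams (length threshold is an assumption)
    String streamId = word.length() <= 5 ? "ShortWord" : "LongWord";
    collector.emit(streamId, tuple, new Values(word)); // anchor to the input tuple
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // one named stream per branch; downstream bolts subscribe by stream id
    declarer.declareStream("ShortWord", new Fields("word"));
    declarer.declareStream("LongWord", new Fields("word"));
  }
}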
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationLoggingBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationLoggingBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
      conf.setNumWorkers(2);
      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("test", conf, builder.createTopology());
      Utils.sleep(10000);
      cluster.killTopology("test");
      cluster.shutdown();
    }
  }
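
A minimal sketch of what an ExclamationLoggingBolt could look like, assuming it appends exclamation marks and logs each emission (the real class may differ in its logging and field names):

import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ExclamationLoggingBoltSketch extends BaseRichBolt {
  private static final Logger LOG = LoggerFactory.getLogger(ExclamationLoggingBoltSketch.class);
  private OutputCollector collector;

  @Override
  public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
  }

  @Override
  public void execute(Tuple tuple) {
    String result = tuple.getString(0) + "!!!";
    LOG.info("emitting: {}", result); // the logging distinguishes this from a plain exclamation bolt
    collector.emit(tuple, new Values(result));
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word"));
  }
}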
Example #7
  public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setDebug(false);

    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("internet-radio-play-stats", config, TopologyBuilder.buildLocal(drpc));

    // optionally let the topology accumulate play stats before querying:
    // Utils.sleep(ONE_MINUTE);

    String result = drpc.execute("count-request-by-tag", "Classic Rock,Punk,Post Punk");
    System.out.println("RESULTS");
    System.out.println(
        "==========================================================================");
    System.out.println(result);
    System.out.println(
        "==========================================================================");

    cluster.killTopology("internet-radio-play-stats");
    cluster.shutdown();
    drpc.shutdown();
  }
  public static void main(String[] args) throws Exception {
    Config config = new Config();

    String host = TEST_REDIS_HOST;
    int port = TEST_REDIS_PORT;

    if (args.length >= 2) {
      host = args[0];
      port = Integer.parseInt(args[1]);
    }

    JedisPoolConfig poolConfig = new JedisPoolConfig.Builder().setHost(host).setPort(port).build();

    WordSpout spout = new WordSpout();
    WordCounter bolt = new WordCounter();
    StoreCountRedisBolt redisBolt = new StoreCountRedisBolt(poolConfig);

    // wordSpout ==> countBolt ==> RedisBolt
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(COUNT_BOLT, bolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(REDIS_BOLT, redisBolt, 1).fieldsGrouping(COUNT_BOLT, new Fields("word"));

    if (args.length == 2) {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("test", config, builder.createTopology());
      Thread.sleep(30000);
      cluster.killTopology("test");
      cluster.shutdown();
      System.exit(0);
    } else if (args.length == 3) {
      StormSubmitter.submitTopology(args[2], config, builder.createTopology());
    } else {
      System.out.println("Usage: PersistentWordCount <redis host> <redis port> (topology name)");
    }
  }
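
A sketch of how a bolt like StoreCountRedisBolt might persist counts, using the plain Jedis client rather than the storm-redis helper classes (the hash key "wordCounts", the hardcoded host/port, and the tuple field names are assumptions):

import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;

public class StoreCountRedisBoltSketch extends BaseRichBolt {
  private transient JedisPool pool;
  private OutputCollector collector;

  @Override
  public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.pool = new JedisPool("127.0.0.1", 6379); // in the real bolt this would come from the pool config
  }

  @Override
  public void execute(Tuple tuple) {
    String word = tuple.getStringByField("word");
    long count = tuple.getLongByField("count");
    try (Jedis jedis = pool.getResource()) {
      jedis.hset("wordCounts", word, String.valueOf(count)); // store the latest count per word
    }
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // terminal bolt: nothing to declare
  }
}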
  public static void main(String[] args) throws Exception {
    TopologyBuilder topologyBuilder = new TopologyBuilder();

    topologyBuilder.setSpout(
        "READ_THREE_LINES_FROM_RANDOM_NUMBER_TEXT_FILE",
        new ReadNRandomLinesFromTextFile("randomNumbers.txt", 3));
    topologyBuilder
        .setBolt(
            "PROCESS_THE_THREE_SELECTED_LINES", new ProcessThe3LinesToCreateTheInitialClusters())
        .shuffleGrouping("READ_THREE_LINES_FROM_RANDOM_NUMBER_TEXT_FILE");
    topologyBuilder
        .setBolt(
            "CREATE_THE_INITIAL_FUZZY_CENTROIDS_OF_RANDOM_NUMBERS",
            new CreateTheInitialFuzzyCentroidsOfRandomNumbers())
        .shuffleGrouping("PROCESS_THE_THREE_SELECTED_LINES");

    topologyBuilder.setSpout(
        "READ_AND_INDEX_READ_LINES", new ReadAndIndexLinesRead("randomNumbers.txt"));
    topologyBuilder
        .setBolt("PROCESS_TEXT_FILE_WITH_RANDOM_NUMBERS", new ProcessTextFileWithRandomNumbers())
        .shuffleGrouping("READ_AND_INDEX_READ_LINES");

    topologyBuilder
        .setBolt("EMIT_DATA_FOR_FUZZY_CLUSTERING", new EmitRandomNumbersForFuzzyClustering())
        .fieldsGrouping("PROCESS_TEXT_FILE_WITH_RANDOM_NUMBERS", new Fields("INDEX"));
    topologyBuilder
        .setBolt("FUZZY_CLUSTERING", new FuzzyClustering())
        .fieldsGrouping("EMIT_DATA_FOR_FUZZY_CLUSTERING", new Fields("INDEX"));

    topologyBuilder
        .setBolt(
            "COLLECT_ALL_AUTHORS_THAT_HAVE_FUZZY_CLUSTER_INDEX_EQUALS_0",
            new AuthorsThatHaveClusteredIndexEquals0())
        .fieldsGrouping("FUZZY_CLUSTERING", new Fields("INDEX"));
    topologyBuilder
        .setBolt(
            "WRITE_ALL_AUTHORS_THAT_HAVE_CLUSTER_INDEX_EQUALS_0_TO_TEXT_FILE",
            new FileWriter("random_numbers_for_fuzzy_centroid_equals_0.txt"))
        .shuffleGrouping("COLLECT_ALL_AUTHORS_THAT_HAVE_FUZZY_CLUSTER_INDEX_EQUALS_0");

    topologyBuilder
        .setBolt(
            "COLLECT_ALL_AUTHORS_THAT_HAVE_FUZZY_CLUSTER_INDEX_EQUALS_1",
            new AuthorsThatHaveClusteredIndexEquals1())
        .fieldsGrouping("FUZZY_CLUSTERING", new Fields("INDEX"));
    topologyBuilder
        .setBolt(
            "WRITE_ALL_AUTHORS_THAT_HAVE_CLUSTER_INDEX_EQUALS_1_TO_TEXT_FILE",
            new FileWriter("random_numbers_for_fuzzy_centroid_equals_1.txt"))
        .shuffleGrouping("COLLECT_ALL_AUTHORS_THAT_HAVE_FUZZY_CLUSTER_INDEX_EQUALS_1");

    topologyBuilder
        .setBolt(
            "COLLECT_ALL_AUTHORS_THAT_HAVE_FUZZY_CLUSTER_INDEX_EQUALS_2",
            new AuthorsThatHaveClusteredIndexEquals2())
        .fieldsGrouping("FUZZY_CLUSTERING", new Fields("INDEX"));
    topologyBuilder
        .setBolt(
            "WRITE_ALL_AUTHORS_THAT_HAVE_CLUSTER_INDEX_EQUALS_2_TO_TEXT_FILE",
            new FileWriter("random_numbers_for_fuzzy_centroid_equals_2.txt"))
        .shuffleGrouping("COLLECT_ALL_AUTHORS_THAT_HAVE_FUZZY_CLUSTER_INDEX_EQUALS_2");

    Config config = new Config();
    if (args != null && args.length > 0) {
      config.setNumWorkers(10);
      config.setNumAckers(5);
      config.setMaxSpoutPending(100);
      StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
    } else {
      LocalCluster localCluster = new LocalCluster();
      localCluster.submitTopology("Test", config, topologyBuilder.createTopology());
      Utils.sleep(1 * 60 * 1000);
      localCluster.killTopology("Test");
      localCluster.shutdown();
    }
  }
  public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    // ----------type1
    /*
    builder.setSpout("word", new MyTestWordSpout(), 1);
    builder.setSpout("test_signal", new SignalSpout(), 1);
    builder.setBolt("exclaim1", new ExclamationBolt(), 6).shuffleGrouping("word");
    // "signals" is the streamId declared by SignalSpout
    builder.setBolt("exclaim2", new ExclamationBolt(), 8)
        .fieldsGrouping("exclaim1", new Fields("word"))
        .allGrouping("test_signal", "signals");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(5);
    */

    // -----------type2
    Config conf = new Config();
    // topic
    conf.put("meta.topic", "test_hotel_count");
    conf.setDebug(true);

    MetaClientConfig mcc = new MetaClientConfig();
    ZKConfig zkConfig = new ZKConfig();
    zkConfig.zkConnect = "10.10.100.1:12181";
    mcc.setZkConfig(zkConfig);

    ConsumerConfig cc = new ConsumerConfig();
    cc.setGroup("test_gsj_000652");

    Scheme scheme = new StringScheme();
    // spout
    MetaSpout ms = new MetaSpout(mcc, cc, scheme);
    SignalSpout ss = new SignalSpout();

    // bolt
    ExclamationBolt1 bolt1 = new ExclamationBolt1();
    ExclamationBolt2 bolt2 = new ExclamationBolt2();

    // builder
    builder.setSpout("metaq_test", ms, 1);
    builder.setSpout("test_signal", ss, 1);
    builder.setBolt("bolt1", bolt1, 6).shuffleGrouping("metaq_test");
    // "signals" is the streamId declared by SignalSpout
    builder
        .setBolt("bolt2", bolt2, 1)
        .fieldsGrouping("bolt1", new Fields("cat"))
        .allGrouping("test_signal", "signals");

    if (args != null && args.length > 0) {
      // Number of worker processes for the topology. When the worker count is > 1, bolt2
      // instances in some workers never receive the signal messages, while one particular worker
      // does receive and process them normally. Does a given bolt run in only one worker? Needs
      // further verification.
      conf.setNumWorkers(1);
      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("test", conf, builder.createTopology());
      Utils.sleep(10000000);
      cluster.killTopology("test");
      cluster.shutdown();
    }
  }
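
The allGrouping("test_signal", "signals") subscription above broadcasts every tuple on the "signals" stream to all bolt2 tasks. A minimal sketch of a signal spout that declares and emits on such a named stream (the signal payload and emission interval are assumptions):

import java.util.Map;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

public class SignalSpoutSketch extends BaseRichSpout {
  private SpoutOutputCollector collector;

  @Override
  public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
  }

  @Override
  public void nextTuple() {
    Utils.sleep(60_000); // emit one signal per minute (interval is an assumption)
    collector.emit("signals", new Values("flush")); // emit on the named "signals" stream
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declareStream("signals", new Fields("action"));
  }
}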
Example #11
  public static void main(String[] args) throws Exception {
    // create the topology
    TopologyBuilder builder = new TopologyBuilder();

    /*
     * In order to create the spout, you need to get twitter credentials
     * If you need to use Twitter firehose/Tweet stream for your idea,
     * create a set of credentials by following the instructions at
     *
     * https://dev.twitter.com/discussions/631
     *
     */

    // now create the tweet spout with the credentials
    // (placeholders below: substitute your own keys, and never commit real credentials)
    TweetSpout tweetSpout =
        new TweetSpout(
            "YOUR_CONSUMER_KEY",
            "YOUR_CONSUMER_SECRET",
            "YOUR_ACCESS_TOKEN",
            "YOUR_ACCESS_TOKEN_SECRET");

    // attach the tweet spout to the topology - parallelism of 1
    builder.setSpout("tweet-spout", tweetSpout, 1);

    // attach the parse tweet bolt using shuffle grouping
    // builder.setBolt("parse-tweet-bolt", new ParseTweetBolt(), 10).shuffleGrouping("tweet-spout");

    // ************* replace with URLBolt emitting text using shuffle grouping
    builder.setBolt("python-URL-bolt", new URLBolt(), 10).shuffleGrouping("tweet-spout");

    // ************* replace Java ParseTweetBolt with Java/Python SplitSentence
    builder
        .setBolt("python-split-sentence", new SplitSentence(), 10)
        .shuffleGrouping("python-URL-bolt");

    // attach the count bolt using fields grouping - parallelism of 15
    // builder.setBolt("count-bolt", new CountBolt(), 15).fieldsGrouping("parse-tweet-bolt", new
    // Fields("tweet-word"));

    // ************* replace Java "parse-tweet-bolt" with Java/Python "python-split-sentence"
    builder
        .setBolt("count-bolt", new CountBolt(), 15)
        .fieldsGrouping("python-split-sentence", new Fields("word"));

    // attach the report bolt using global grouping - parallelism of 1
    builder.setBolt("report-bolt", new ReportBolt(), 1).globalGrouping("count-bolt");

    // create the default config object
    Config conf = new Config();

    // set the config in debugging mode
    conf.setDebug(true);

    if (args != null && args.length > 0) {

      // run it in a live cluster

      // set the number of workers for running all spout and bolt tasks
      conf.setNumWorkers(3);

      // create the topology and submit with config
      StormSubmitter.submitTopology(args[0], conf, builder.createTopology());

    } else {

      // run it in a simulated local cluster

      // set the number of threads to run - similar to setting number of workers in live cluster
      conf.setMaxTaskParallelism(3);

      // create the local cluster instance
      LocalCluster cluster = new LocalCluster();

      // submit the topology to the local cluster
      cluster.submitTopology("tweet-word-count", conf, builder.createTopology());

      // let the topology run for 30,000 seconds (1000 * 30000 ms). note that topologies never
      // terminate on their own!
      Utils.sleep(1000 * 30000);

      // now kill the topology
      cluster.killTopology("tweet-word-count");

      // we are done, so shutdown the local cluster
      cluster.shutdown();
    }
  }
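
The "python-split-sentence" bolt above suggests SplitSentence is a multilang (Python) bolt. A sketch of the standard way such a bolt is declared on the Java side, following Storm's ShellBolt pattern (the script name is an assumption; the Python script itself must ship in the topology jar's resources/ directory):

import java.util.Map;
import org.apache.storm.task.ShellBolt;
import org.apache.storm.topology.IRichBolt;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;

public class SplitSentenceSketch extends ShellBolt implements IRichBolt {
  public SplitSentenceSketch() {
    super("python", "splitsentence.py"); // launch the Python process that does the actual splitting
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word")); // the field the count bolt's fieldsGrouping keys on
  }

  @Override
  public Map<String, Object> getComponentConfiguration() {
    return null; // no per-component config needed
  }
}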