public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("PlayStream1", new FootballDataSpout(), 1); builder.setBolt("AvgRunPlay", configureSiddhiBolt1(), 1).shuffleGrouping("PlayStream1"); builder.setBolt("FastRunPlay", configureSiddhiBolt2(), 1).shuffleGrouping("AvgRunPlay"); builder.setBolt("LeafEcho", new EchoBolt(), 1).shuffleGrouping("FastRunPlay"); Config conf = new Config(); // conf.setDebug(true); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopology(args[0], conf, builder.createTopology()); } else { conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); Thread.sleep(10000); cluster.shutdown(); } }
public static void main(String[] args) throws Exception {
  String topic = "wordcount";
  String zkRoot = "/kafka-storm";
  String spoutId = "KafkaSpout";
  // Kafka integration: configure the broker hosts and the spout
  BrokerHosts brokerHosts = new ZkHosts("hadoop04:2181,hadoop05:2181,hadoop06:2181");
  SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
  spoutConfig.scheme = new SchemeAsMultiScheme(new MessageSpout());
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(spoutId, new KafkaSpout(spoutConfig));
  builder.setBolt("word-spilter", new WordSpliter(), 4).shuffleGrouping(spoutId);
  builder
      .setBolt("writer", new WriterBolt(), 4)
      .fieldsGrouping("word-spilter", new Fields("word"));
  Config conf = new Config();
  conf.setNumWorkers(4);
  conf.setNumAckers(0);
  conf.setDebug(true);
  // LocalCluster submits the topology to an in-process simulated cluster, handy for development and debugging
  // LocalCluster cluster = new LocalCluster();
  // cluster.submitTopology("WordCount", conf, builder.createTopology());
  StormSubmitter.submitTopology("WordCount", conf, builder.createTopology());
}
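For reference, the MessageSpout instance wired into spoutConfig.scheme above must implement Storm's Scheme interface so the KafkaSpout can turn raw Kafka messages into tuples. A minimal sketch of such a scheme, assuming the classic backtype.storm package names (org.apache.storm in Storm 1.x), where deserialize receives a byte[]; the class name and output field are illustrative:

import java.io.UnsupportedEncodingException;
import java.util.List;
import backtype.storm.spout.Scheme;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

// Hypothetical scheme: decodes each Kafka message payload into a one-field tuple.
public class MessageScheme implements Scheme {

  @Override
  public List<Object> deserialize(byte[] ser) {
    try {
      // Interpret the raw message bytes as UTF-8 text.
      return new Values(new String(ser, "UTF-8"));
    } catch (UnsupportedEncodingException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public Fields getOutputFields() {
    // The field name downstream bolts will read.
    return new Fields("line");
  }
}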
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new TwitterStreamSpout(), 5); builder .setBolt("windowingTwitterStream", new WindowingTwitterStream(300), 1) .shuffleGrouping("spout"); builder .setBolt( "PrepareUserSimilarityCalculationBolt", new PrepareUserSimilarityCalculationBolt(), 1) .shuffleGrouping("windowingTwitterStream"); builder .setBolt("UserSimilarityCalculationBolt", new UserSimilarityCalculationBolt(), 1) .shuffleGrouping("PrepareUserSimilarityCalculationBolt"); Config conf = new Config(); conf.setDebug(false); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopology(args[0], conf, builder.createTopology()); } else { conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); // Thread.sleep(10000); // cluster.shutdown(); } }
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("spout", new RandomSentenceSpout(), 5);
  builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
  builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
  Config conf = new Config();
  conf.setDebug(true);
  if (args != null && args.length > 0) {
    /* Sets the number of worker slots this topology will claim on the Storm cluster;
    one slot corresponds to one worker process on a supervisor node.
    If you request more slots than the workers your physical nodes provide, the submission may not take effect.
    Suppose the cluster already runs some topologies and only 2 worker slots remain: if your code asks for 4,
    the topology can still be submitted, but you will find it never actually runs.
    Once you kill some topologies and free up slots, this topology resumes running normally.
    */
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    conf.setMaxTaskParallelism(3);
    // Run in local mode
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", conf, builder.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();
  }
}
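As a companion to the slot caveat in the comment above, the number of free worker slots can be checked before submitting by asking Nimbus for a cluster summary. A sketch assuming the Storm 0.9.x Thrift client API (backtype.storm packages; the helper name is illustrative):

import java.util.Map;
import backtype.storm.generated.ClusterSummary;
import backtype.storm.generated.SupervisorSummary;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;

// Counts the worker slots currently free across all supervisors.
public static int freeSlots() throws Exception {
  Map conf = Utils.readStormConfig();
  NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
  ClusterSummary summary = nimbus.getClient().getClusterInfo();
  int free = 0;
  for (SupervisorSummary supervisor : summary.get_supervisors()) {
    free += supervisor.get_num_workers() - supervisor.get_num_used_workers();
  }
  return free;
}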
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new RandomSentenceSpout(), 5); builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word")); Config conf = new Config(); conf.setDebug(true); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology()); } else { conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); Thread.sleep(1000000); cluster.shutdown(); } }
public static void main(String[] args) {
  Config config = new Config();
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("RandomSentence", new RandomSentenceSpout(), 2);
  builder
      .setBolt("WordNormalizer", new WordNormalizerBolt(), 2)
      .shuffleGrouping("RandomSentence");
  builder
      .setBolt("WordCount", new WordCountBolt(), 2)
      .fieldsGrouping("WordNormalizer", new Fields("word"));
  builder.setBolt("Print", new PrintBolt(), 1).shuffleGrouping("WordCount");
  config.setDebug(false);
  // Use the presence of command-line arguments to choose between cluster submission and local-mode execution
  if (args != null && args.length > 0) {
    try {
      config.setNumWorkers(1);
      StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    } catch (Exception e) {
      e.printStackTrace();
    }
  } else {
    config.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordcount", config, builder.createTopology());
  }
}
@Test
public void testStorm() throws Exception {
  SentenceSpout spout = new SentenceSpout();
  SplitSentenceBolt splitSentenceBolt = new SplitSentenceBolt();
  WordCountBolt countBolt = new WordCountBolt();
  ReportBolt reportBolt = new ReportBolt();
  TopologyBuilder builder = new TopologyBuilder();
  // Register the sentence spout under a unique ID
  builder.setSpout(SENTENCE_SPOUT_ID, spout);
  // Then register a SplitSentenceBolt, which subscribes to the stream emitted by SentenceSpout;
  // passing SentenceSpout's unique ID to shuffleGrouping() establishes that subscription.
  // shuffleGrouping() tells Storm to distribute SentenceSpout's tuples randomly and evenly
  // across the SplitSentenceBolt instances.
  builder.setBolt(SPLIT_BOLT_ID, splitSentenceBolt).shuffleGrouping(SENTENCE_SPOUT_ID);
  // Route tuples carrying particular values to specific bolt instances:
  // BoltDeclarer's fieldsGrouping() guarantees that all tuples with the same "word" field value
  // are routed to the same WordCountBolt instance.
  builder.setBolt(COUNT_BOLT_ID, countBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
  // Route the entire stream emitted by WordCountBolt to the single ReportBolt, using globalGrouping
  builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);
  Config config = new Config();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
  Thread.sleep(10000);
  cluster.killTopology(TOPOLOGY_NAME);
  cluster.shutdown();
}
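The fieldsGrouping above only pays off because each WordCountBolt instance can keep its own in-memory tally for the words routed to it. A minimal sketch of what such a counting bolt typically looks like (illustrative, assuming the classic backtype.storm API; not necessarily the WordCountBolt this test uses):

import java.util.HashMap;
import java.util.Map;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

// With fieldsGrouping on "word", every tuple for a given word reaches the same
// instance, so a plain per-instance HashMap holds a consistent count.
public class WordCountBolt extends BaseRichBolt {
  private OutputCollector collector;
  private Map<String, Long> counts;

  @Override
  public void prepare(Map config, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.counts = new HashMap<String, Long>();
  }

  @Override
  public void execute(Tuple tuple) {
    String word = tuple.getStringByField("word");
    Long count = counts.get(word);
    count = (count == null) ? 1L : count + 1;
    counts.put(word, count);
    collector.emit(new Values(word, count));
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word", "count"));
  }
}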
@Test public void testEPL() { // should say fieldsTypes, maybe with object/component prefix Map<String, Object> eventTypes = new HashMap<>(); eventTypes.put(LITERAL_SYMBOL, String.class); eventTypes.put(LITERAL_PRICE, Integer.class); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout(LITERAL_QUOTES, new RandomSentenceSpout()); builder .setBolt( LITERAL_ESPER, (new EsperBolt()) .addEventTypes(eventTypes) .addOutputTypes( Collections.singletonMap( LITERAL_RETURN_OBJ, Arrays.asList(LITERAL_AVG, LITERAL_PRICE))) .addStatements( Collections.singleton( "insert into Result " + "select avg(price) as avg, price from " + "quotes_default(symbol='A').win:length(2) " + "having avg(price) > 60.0"))) .shuffleGrouping(LITERAL_QUOTES); builder.setBolt("print", new PrinterBolt()).shuffleGrouping(LITERAL_ESPER, LITERAL_RETURN_OBJ); Config conf = new Config(); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("test", conf, builder.createTopology()); Utils.sleep(10000); cluster.shutdown(); assertEquals(resultEPL.get(100), new Double(75.0)); assertEquals(resultEPL.get(50), new Double(75.0)); }
public static void main(String[] args) {
  Config conf = new Config();
  conf.setDebug(true);
  // SessionFactory sessionFactory =
  //     HibernateSessionFactory.createSession("mySql");
  if (args != null && args.length > 0) {
    TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(null, null);
    builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
    conf.setNumWorkers(1);
    try {
      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } catch (AlreadyAliveException | InvalidTopologyException e) {
      e.printStackTrace();
    }
  } else {
    LocalDRPC drpc = new LocalDRPC();
    TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(drpc, null);
    builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
    conf.setMaxTaskParallelism(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mongooseTopologyLocal", conf, builder.createTopology());
    for (String request : new String[] {"{\"studentId\":\"1\",\"greScore\":380.0}"}) {
      System.out.println("Result for \"" + request + "\": " + drpc.execute("mongoose", request));
    }
    cluster.shutdown();
    drpc.shutdown();
  }
}
public static void main(String[] args)
    throws InterruptedException, AlreadyAliveException, InvalidTopologyException {
  Config config = new Config();
  TopologyBuilder builder = new TopologyBuilder();
  // Set up the spout and assign its parallelism, which controls the number of threads this component gets on the cluster.
  builder.setSpout("readlog", new ReadLogSpout(), 1);
  // Create the monitor bolt that filters the log stream
  builder.setBolt("monitor", new MonitorBolt("MonitorBolt.xml"), 3).shuffleGrouping("readlog");
  // Create the MySQL persistence bolt
  builder.setBolt("mysql", new MysqlBolt("MysqlBolt.xml"), 3).shuffleGrouping("monitor");
  builder.setBolt("print", new PrintBolt(), 3).shuffleGrouping("monitor");
  config.setDebug(false);
  if (args != null && args.length > 0) {
    config.setNumWorkers(1);
    StormSubmitter.submitTopology(args[0], config, builder.createTopology());
  } else {
    // Startup code for running in local mode.
    config.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("simple", config, builder.createTopology());
  }
}
public static void main(String[] args) { FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender")); FeederSpout ageSpout = new FeederSpout(new Fields("id", "age")); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("gender", genderSpout); builder.setSpout("age", ageSpout); builder .setBolt("join", new SingleJoinBolt(new Fields("gender", "age"))) .fieldsGrouping("gender", new Fields("id")) .fieldsGrouping("age", new Fields("id")); Config conf = new Config(); conf.setDebug(true); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("join-example", conf, builder.createTopology()); for (int i = 0; i < 10; i++) { String gender; if (i % 2 == 0) { gender = "male"; } else { gender = "female"; } genderSpout.feed(new Values(i, gender)); } for (int i = 9; i >= 0; i--) { ageSpout.feed(new Values(i, i + 20)); } Utils.sleep(2000); cluster.shutdown(); }
public static void main(String[] args) {
  TopologyBuilder builder = new TopologyBuilder();
  List<String> zks = new ArrayList<String>();
  zks.add("127.0.0.1");
  List<String> cFs = new ArrayList<String>();
  cFs.add("personal");
  cFs.add("company");
  builder.setSpout("hbase_spout", new HbaseSpout(), 2);
  builder
      .setBolt("hbase_bolt", new HbaseBolt("user", cFs, zks, 2181), 2)
      .shuffleGrouping("hbase_spout");
  Config config = new Config();
  config.setDebug(true);
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("hbase_topology", config, builder.createTopology());
  try {
    Thread.sleep(20000);
  } catch (InterruptedException e) {
    System.out.println("Thread interrupted: " + e);
  }
  System.out.println("Stopping topology...");
  cluster.killTopology("hbase_topology");
  cluster.shutdown();
}
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException { List<String> hosts = new ArrayList<String>(); hosts.add("127.0.0.1"); SpoutConfig spoutConfig = new SpoutConfig( KafkaConfig.StaticHosts.fromHostString(hosts, 1), "test", "/kafkastorm", "discovery"); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("kafkaSpout", new KafkaSpout(spoutConfig), 1); builder .setBolt("flightInformationParserBolt", new FlightInformationParserBolt(), 1) .shuffleGrouping("kafkaSpout"); Config conf = new Config(); conf.setDebug(true); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopology(args[0], conf, builder.createTopology()); } else { LocalCluster cluster = new LocalCluster(); cluster.submitTopology("flightInformationTest", conf, builder.createTopology()); // Utils.sleep(10000); // cluster.killTopology("flightInformationTest"); // cluster.shutdown(); } }
public static void main(String[] args) {
  try {
    // Instantiate the TopologyBuilder.
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    // Set up the spout and assign its parallelism, which controls the number of threads this component gets on the cluster.
    topologyBuilder.setSpout("SimpleSpout", new SimpleSpout(), 1);
    // Set up the processing bolt and assign its parallelism; shuffle grouping makes it receive the spout's tuples at random.
    topologyBuilder.setBolt("SimpleBolt", new SimpleBolt(), 3).shuffleGrouping("SimpleSpout");
    Config config = new Config();
    config.setDebug(true);
    if (args != null && args.length > 0) {
      config.setNumWorkers(1);
      StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
    } else {
      // Startup code for running in local mode.
      config.setMaxTaskParallelism(1);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("simple", config, topologyBuilder.createTopology());
      Utils.sleep(5000);
      cluster.shutdown();
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
@Ignore @Test public void testSiddhiSpout() throws AlreadyAliveException, InvalidTopologyException, InterruptedException { eventsReceived = false; ExecutionPlanConfiguration executionPlanConfiguration = new ExecutionPlanConfiguration(); StreamDefinition siddhiStreamDef = new StreamDefinition().name("wordStream").attribute("word", Attribute.Type.STRING); ConsumingQueuedEventSource eventSource = new ConsumingQueuedEventSource(siddhiStreamDef, executionPlanConfiguration); SiddhiSpout siddhiSpout = new SiddhiSpout(siddhiStreamDef, eventSource); siddhiSpout.setUseDefaultAsStreamName(false); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("siddhi-spout", siddhiSpout); // builder.setBolt("count", wordCount, 12).fieldsGrouping("siddhi-spout", new Fields("word")); builder .setBolt("count", new WordCount(), 8) .fieldsGrouping("siddhi-spout", "wordStream", new Fields("word")); Config conf = new Config(); conf.setDebug(false); conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); eventSource.consumeEvents(new Object[][] {{"GOOG"}, {"WSO2"}, {"FB"}}); Thread.sleep(10000); Assert.assertTrue("No events received.", eventsReceived); Assert.assertTrue("Event count is zero", eventCount > 0); cluster.shutdown(); }
public static void main(String[] args) throws Exception { if (args.length < 4) { System.err.println( "Usage: PrintSampleStream <consumer-key> <consumer-secret> <access-token> <access-token-secret>"); return; } String consumerKey = args[0]; String consumerSecret = args[1]; String accessToken = args[2]; String accessTokenSecret = args[3]; // keywords start with the 5th parameter String[] keyWords = Arrays.copyOfRange(args, 4, args.length); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout( "twitter", new TwitterSampleSpout( consumerKey, consumerSecret, accessToken, accessTokenSecret, keyWords)); builder.setBolt("print", new PrinterBolt()).shuffleGrouping("twitter"); Config conf = new Config(); final FlinkLocalCluster cluster = FlinkLocalCluster.getLocalCluster(); cluster.submitTopology("Print", conf, FlinkTopology.createTopology(builder)); Utils.sleep(10 * 1000); cluster.shutdown(); }
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  // Login stream
  builder.setBolt("mlogin_verify_bolt", new MLoginVeriBolt(), 10).shuffleGrouping("mlogin_spout");
  builder
      .setBolt("mlogin_calc_bolt", new MLoginCalcBolt(), 10)
      .shuffleGrouping("mlogin_verify_bolt");
  Config conf = new Config();
  conf.setDebug(true);
  if (args != null && args.length > 0) {
    String gamecfg_path = "";
    try {
      gamecfg_path = args[1];
    } catch (ArrayIndexOutOfBoundsException e) {
      System.out.println("NOTICE: please supply the game config file path (param 2)!");
      e.printStackTrace();
      System.exit(-999);
    }
    conf.put("gamecfg_path", gamecfg_path);
    conf.put("isOnline", true);
    String topicLogin = "******";
    String zkRoot = "/home/ztgame/storm/zkroot";
    String spoutIdLogin = "******";
    BrokerHosts brokerHosts =
        new ZkHosts("172.29.201.208:2181,172.29.201.207:2181,172.29.201.205:2181");
    SpoutConfig spoutConfLogin = new SpoutConfig(brokerHosts, topicLogin, zkRoot, spoutIdLogin);
    spoutConfLogin.scheme = new SchemeAsMultiScheme(new StringScheme());
    builder.setSpout("mlogin_spout", new KafkaSpout(spoutConfLogin), 1);
    conf.setNumWorkers(2);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    conf.put("isOnline", false);
    builder.setSpout("mlogin_spout", new SampleMLoginSpout(), 1);
    conf.setMaxTaskParallelism(2);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", conf, builder.createTopology());
    Thread.sleep(200000);
    cluster.shutdown();
  }
}
/**
 * Program entry point<br>
 *
 * <ul>
 *   <li>Launch argument: arg[0] path to the YAML file holding the configuration values
 *   <li>Launch argument: arg[1] Storm launch mode (true: LocalMode, false: DistributeMode)
 * </ul>
 *
 * @param args launch arguments
 * @throws Exception if initialization fails
 */
public static void main(String[] args) throws Exception {
  // Check that enough program arguments were supplied
  if (args.length < 2) {
    System.out.println(
        "Usage: java acromusashi.stream.example.topology.DecisionTestTopology ConfigPath isExecuteLocal(true|false)");
    return;
  }
  // Build the Storm config object from the path passed as a launch argument
  Config conf = StormConfigGenerator.loadStormConfig(args[0]);
  // Read the run mode (local or distributed) from the program arguments
  boolean isLocal = Boolean.valueOf(args[1]);
  TopologyBuilder builder = new TopologyBuilder();
  // Get settings from the StormConfig object
  int wordSpoutPara = StormConfigUtil.getIntValue(conf, "WordSpout.Parallelism", 2);
  int judgeBoltPara = StormConfigUtil.getIntValue(conf, "JudgeBolt.Parallelism", 2);
  int shortWordBoltPara = StormConfigUtil.getIntValue(conf, "ShortWord.Parallelism", 2);
  int longWordBoltPara = StormConfigUtil.getIntValue(conf, "LongWord.Parallelism", 2);
  builder.setSpout("WordSpout", new TestWordSpout(), wordSpoutPara);
  builder
      .setBolt("JudgeBolt", new JudgeBolt(), judgeBoltPara)
      .fieldsGrouping("WordSpout", new Fields("word"));
  // Subscribe to the ShortWord stream
  builder
      .setBolt("ShortWord", new ShortWordBolt(), shortWordBoltPara)
      .fieldsGrouping("JudgeBolt", "ShortWord", new Fields("word"));
  // Subscribe to the LongWord stream
  builder
      .setBolt("LongWord", new LongWordBolt(), longWordBoltPara)
      .fieldsGrouping("JudgeBolt", "LongWord", new Fields("word"));
  if (isLocal) {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("DecisionTest", conf, builder.createTopology());
    Utils.sleep(10000000);
    cluster.killTopology("DecisionTest");
    cluster.shutdown();
  } else {
    StormSubmitter.submitTopology("DecisionTest", conf, builder.createTopology());
  }
}
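The two fieldsGrouping("JudgeBolt", ...) calls above subscribe to named streams, so JudgeBolt must declare both streams and pick one per tuple. A sketch of that pattern against the standard bolt API (illustrative; the length threshold is an assumption, and this is not necessarily the project's actual JudgeBolt):

import java.util.Map;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

// Declares two named output streams and routes each word by its length.
public class JudgeBoltSketch extends BaseRichBolt {
  private OutputCollector collector;

  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
  }

  @Override
  public void execute(Tuple tuple) {
    String word = tuple.getStringByField("word");
    // Assumed rule: words of five characters or fewer count as "short".
    String streamId = (word.length() <= 5) ? "ShortWord" : "LongWord";
    collector.emit(streamId, tuple, new Values(word));
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declareStream("ShortWord", new Fields("word"));
    declarer.declareStream("LongWord", new Fields("word"));
  }
}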
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("sourceSpout", new SourceSpout());
  builder.setBolt("formatBolt", new FormatBolt()).shuffleGrouping("sourceSpout");
  builder
      .setBolt("slideWindowBolt", new SlideWindowBolt(30))
      .fieldsGrouping("formatBolt", new Fields("ip"));
  builder
      .setBolt("dbBolt", new DBBolt("access_ip_num"))
      .fieldsGrouping("slideWindowBolt", new Fields("ip"));
  Config config = new Config();
  // config.setNumWorkers(4);
  config.setMaxSpoutPending(1000);
  StormSubmitter.submitTopology(args[0], config, builder.createTopology());
}
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new RandomSentenceSpout(), 5); builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word")); Config conf = new Config(); conf.setDebug(true); List<String> componentList = new ArrayList<String>(Arrays.asList("spout", "split", "count")); conf.put("components", componentList); Map<String, List<String>> streamMap = new HashMap<String, List<String>>(); List<String> splitstream = new ArrayList<String>(Arrays.asList("spout")); List<String> countstream = new ArrayList<String>(Arrays.asList("split")); streamMap.put("split", splitstream); streamMap.put("count", countstream); conf.put("streams", streamMap); conf.put("traffic.improvement", 5); conf.put("alfa", "0"); conf.put("beta", "1"); conf.put("gamma", "1"); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology()); WorkerMonitor.getInstance(); } else { conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); Thread.sleep(10000); cluster.shutdown(); } }
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); Config config = new Config(); config.setDebug(true); /* ----------------------TODO----------------------- Task: wire up the topology NOTE:make sure when connecting components together, using the functions setBolt(name,…) and setSpout(name,…), you use the following names for each component: FileReaderSpout -> "spout" SplitSentenceBolt -> "split" WordCountBolt -> "count" NormalizerBolt -> "normalize" TopNFinderBolt -> "top-n" ------------------------------------------------- */ builder.setSpout("spout", new FileReaderSpout(args[0]), 1); builder.setBolt("split", new SplitSentenceBolt(), 8).shuffleGrouping("spout"); builder .setBolt("normalize", new NormalizerBolt(), 12) .fieldsGrouping("split", new Fields("word")); builder .setBolt("count", new WordCountBolt(), 12) .fieldsGrouping("normalize", new Fields("word")); builder.setBolt("top-n", new TopNFinderBolt(N), 1).fieldsGrouping("count", new Fields("word")); config.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", config, builder.createTopology()); // wait for 2 minutes and then kill the job Thread.sleep(2 * 60 * 1000); cluster.shutdown(); }
// Entry point for the topology
public static void main(String[] args) throws Exception {
  // Read and set configuration
  Properties properties = new Properties();
  // From the command line? Or from the bundled config file?
  if (args.length > 1) {
    properties.load(new FileReader(args[1]));
  } else {
    properties.load(
        EventHubWriter.class.getClassLoader().getResourceAsStream("EventHubs.properties"));
  }
  // Configure the bolt for Event Hub
  String policyName = properties.getProperty("eventhubs.writerpolicyname");
  String policyKey = properties.getProperty("eventhubs.writerpolicykey");
  String namespaceName = properties.getProperty("eventhubs.namespace");
  String entityPath = properties.getProperty("eventhubs.entitypath");
  EventHubBoltConfig boltConfig =
      new EventHubBoltConfig(
          policyName, policyKey, namespaceName, "servicebus.windows.net", entityPath);
  // Used to build the topology
  TopologyBuilder builder = new TopologyBuilder();
  // Add the spout, with a name of 'spout'
  // and parallelism hint of 5 executors
  builder.setSpout("spout", new DeviceSpout(), 5);
  builder.setBolt("eventhubbolt", new EventHubBolt(boltConfig), 8).shuffleGrouping("spout");
  // New configuration
  Config conf = new Config();
  conf.setDebug(true);
  // If there are arguments, we are running on a cluster
  if (args != null && args.length > 0) {
    // Run with three worker processes
    conf.setNumWorkers(3);
    // Submit the topology
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  }
  // Otherwise, we are running locally
  else {
    // Cap the maximum number of executors that can be spawned
    // for a component to 3
    conf.setMaxTaskParallelism(3);
    // LocalCluster is used to run locally
    LocalCluster cluster = new LocalCluster();
    // Submit the topology
    cluster.submitTopology("writer", conf, builder.createTopology());
    // Sleep
    Thread.sleep(10000);
    // Shut down the cluster
    cluster.shutdown();
  }
}
public static void main(String[] args) throws Exception { ParameterTool pt = ParameterTool.fromArgs(args); int par = pt.getInt("para"); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism")); int i = 0; for (; i < pt.getInt("repartitions", 1) - 1; i++) { System.out.println("adding source" + i + " --> source" + (i + 1)); builder .setBolt("source" + (i + 1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism")) .fieldsGrouping("source" + i, new Fields("id")); } System.out.println("adding final source" + i + " --> sink"); builder .setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")) .fieldsGrouping("source" + i, new Fields("id")); Config conf = new Config(); conf.setDebug(false); // System.exit(1); if (!pt.has("local")) { conf.setNumWorkers(par); StormSubmitter.submitTopologyWithProgressBar( "throughput-" + pt.get("name", "no_name"), conf, builder.createTopology()); } else { conf.setMaxTaskParallelism(par); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("throughput", conf, builder.createTopology()); Thread.sleep(300000); cluster.shutdown(); } }
public static void main(String[] args) throws InterruptedException { // Topology definition TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("word-reader", new WordReader()); builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader"); builder .setBolt("word-counter", new WordCounter(), 2) .fieldsGrouping("word-normalizer", new Fields("word")); // Configuration Config conf = new Config(); conf.put("wordsFile", args[0]); conf.setDebug(false); // Topology run conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("Getting-Started-Toplogie", conf, builder.createTopology()); Thread.sleep(10000); cluster.shutdown(); }
@Test public void testSODA() { // should say fieldsTypes, maybe with object/component prefix Map<String, Object> eventTypes = new HashMap<>(); eventTypes.put(LITERAL_SYMBOL, String.class); eventTypes.put(LITERAL_PRICE, Integer.class); EPStatementObjectModel model = new EPStatementObjectModel(); model.setInsertInto(InsertIntoClause.create(LITERAL_RETURN_OBJ)); model.setSelectClause( SelectClause.create().add(Expressions.avg(LITERAL_PRICE), LITERAL_AVG).add(LITERAL_PRICE)); Filter filter = Filter.create("quotes_default", Expressions.eq(LITERAL_SYMBOL, "A")); model.setFromClause( FromClause.create( FilterStream.create(filter).addView("win", "length", Expressions.constant(2)))); model.setHavingClause( Expressions.gt(Expressions.avg(LITERAL_PRICE), Expressions.constant(60.0))); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout(LITERAL_QUOTES, new RandomSentenceSpout()); builder .setBolt( LITERAL_ESPER, (new EsperBolt()) .addEventTypes(eventTypes) .addOutputTypes( Collections.singletonMap( LITERAL_RETURN_OBJ, Arrays.asList(LITERAL_AVG, LITERAL_PRICE))) .addObjectStatemens(Collections.singleton(model))) .shuffleGrouping(LITERAL_QUOTES); builder.setBolt("print", new PrinterBolt()).shuffleGrouping(LITERAL_ESPER, LITERAL_RETURN_OBJ); Config conf = new Config(); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("test", conf, builder.createTopology()); Utils.sleep(10000); cluster.shutdown(); assertEquals(resultSODA.get(100), new Double(75.0)); assertEquals(resultSODA.get(50), new Double(75.0)); }
public static void main(String[] args) throws Exception { final TopologyBuilder builder = new TopologyBuilder(); final KestrelThriftSpout spout = new KestrelThriftSpout("localhost", 2229, "test", new StringScheme()); builder.setSpout("spout", spout).setDebug(true); builder.setBolt("bolt", new FailEveryOther()).shuffleGrouping("spout"); final LocalCluster cluster = new LocalCluster(); final Config conf = new Config(); cluster.submitTopology("test", conf, builder.createTopology()); Thread.sleep(600000); }
public void buildEvaluatePart( IStateFactory cassandra, InstanceStreamSource source, TopologyBuilder builder, MoaConfig config) { int num_workers = config.getNumWorkers(); int ensemble_size = config.getEnsembleSize(); int num_classifier_executors = config.getNumClassifierExecutors(); int num_combiners = config.getNumCombiners(); int num_aggregators = config.getNumAggregators(); EvaluateSpout evaluate_spout = new EvaluateSpout(source, cassandra, 100); builder.setSpout("prediction_stream", evaluate_spout); builder .setBolt( "shared_storage", new SharedStorageBolt<Classifier>(cassandra, "evaluate_classifier"), num_workers) .customGrouping("prediction_stream", EvaluateSpout.NOTIFICATION_STREAM, new AllGrouping()); builder .setBolt( "p_deserialize", new TopologyBroadcastBolt("evaluate", LearnSpout.LEARN_STREAM_FIELDS), num_workers) .shuffleGrouping("prediction_stream"); builder .setBolt( "evaluate_local_grouping", new WorkerBroadcastBolt("evaluate", LearnSpout.LEARN_STREAM_FIELDS), num_workers) .customGrouping("p_deserialize", "evaluate", new AllGrouping()); builder .setBolt( "evaluate_classifier", new EvaluateClassifierBolt(cassandra), Math.max(num_classifier_executors, num_workers)) .customGrouping("evaluate_local_grouping", "evaluate", new AllLocalGrouping()) .setNumTasks(ensemble_size); builder .setBolt( "combine_result", new CombinerBolt("evaluate_classifier"), Math.max(num_workers, num_combiners)) .customGrouping("evaluate_classifier", new LocalGrouping(new IdBasedGrouping())) .setNumTasks(Math.max(num_workers, num_combiners)); builder .setBolt( "prediction_result", new CombinerBolt(ensemble_size), Math.max(num_workers, num_combiners)) .customGrouping("combine_result", new IdBasedGrouping()) .setNumTasks(Math.max(num_workers, num_aggregators)); }
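The AllGrouping used above is a custom grouping rather than one of Storm's built-ins. A broadcast-style grouping can be sketched against the CustomStreamGrouping interface like this (an illustration under the backtype.storm 0.9.x API, not the project's own implementation):

import java.util.List;
import backtype.storm.generated.GlobalStreamId;
import backtype.storm.grouping.CustomStreamGrouping;
import backtype.storm.task.WorkerTopologyContext;

// Replicates every tuple to all subscribed tasks, i.e. a broadcast grouping.
public class AllGrouping implements CustomStreamGrouping {
  private List<Integer> targetTasks;

  @Override
  public void prepare(
      WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    this.targetTasks = targetTasks;
  }

  @Override
  public List<Integer> chooseTasks(int taskId, List<Object> values) {
    // Every target task receives the tuple.
    return targetTasks;
  }
}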
public void extendTopology(TopologyBuilder builder) { BoltDeclarer declarer = builder.setBolt(_masterId, new CoordinatedBolt(_masterBolt.bolt), _masterBolt.parallelism); for (InputDeclaration decl : _masterBolt.declarations) { decl.declare(declarer); } for (Map conf : _masterBolt.componentConfs) { declarer.addConfigurations(conf); } for (String id : _bolts.keySet()) { Component component = _bolts.get(id); Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>(); for (String c : componentBoltSubscriptions(component)) { SourceArgs source; if (c.equals(_masterId)) { source = SourceArgs.single(); } else { source = SourceArgs.all(); } coordinatedArgs.put(c, source); } BoltDeclarer input = builder.setBolt( id, new CoordinatedBolt(component.bolt, coordinatedArgs, null), component.parallelism); for (Map conf : component.componentConfs) { input.addConfigurations(conf); } for (String c : componentBoltSubscriptions(component)) { input.directGrouping(c, Constants.COORDINATED_STREAM_ID); } for (InputDeclaration d : component.declarations) { d.declare(input); } } }
/**
 * A topology that produces random sentences using {@link RandomSentenceSpout} and publishes them
 * to the Kafka "test" topic using a KafkaBolt.
 *
 * @return the storm topology
 */
public StormTopology buildProducerTopology() {
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("spout", new RandomSentenceSpout(), 2);
  /*
   * The output field of the RandomSentenceSpout ("word") is provided as the boltMessageField so
   * that this gets written out as the message in the kafka topic.
   */
  KafkaBolt bolt =
      new KafkaBolt()
          .withTopicSelector(new DefaultTopicSelector("test"))
          .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper("key", "word"));
  builder.setBolt("forwardToKafka", bolt, 1).shuffleGrouping("spout");
  return builder.createTopology();
}
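A possible local harness for this producer topology; it assumes the storm-kafka KafkaBolt reads its producer settings from the topology config under KafkaBolt.KAFKA_BROKER_PROPERTIES, that the method lives in the same class as buildProducerTopology(), and that the broker address and sleep time are placeholders:

import java.util.Properties;
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.utils.Utils;
import storm.kafka.bolt.KafkaBolt;

// Hypothetical helper: submits the producer topology to an in-process cluster.
public void runProducerLocally() throws Exception {
  Config conf = new Config();
  // 0.8-era Kafka producer keys that the bolt picks up from the topology config.
  Properties props = new Properties();
  props.put("metadata.broker.list", "localhost:9092"); // placeholder broker
  props.put("serializer.class", "kafka.serializer.StringEncoder");
  conf.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props);
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("kafka-producer", conf, buildProducerTopology());
  Utils.sleep(10000);
  cluster.shutdown();
}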
private void wireTopology() throws InterruptedException, FileNotFoundException { String spoutId_TweetSparklineSpout = "TweetSparklineSpout"; String boltId_AggregateSparklineData = "BoltAggregateSparklineData"; String boltId_BoltSaveSparklineJSONToMySQL = "BoltSaveSparklineJSONToMySQL"; // TopologyBuilder builder = new TopologyBuilder(); boolean isDebug = true; // no database inserts ApplicationConfigurationFile configFile = read(propertiesFile); builder.setSpout(spoutId_TweetSparklineSpout, new SpoutTweetSparkline(configFile, isDebug), 1); builder .setBolt( boltId_AggregateSparklineData, new BoltAggregateSparklineData(configFile, isDebug), 1) .shuffleGrouping(spoutId_TweetSparklineSpout); builder .setBolt( boltId_BoltSaveSparklineJSONToMySQL, new BoltSaveSparklineJSONToMySQL(configFile, isDebug), 1) .shuffleGrouping(boltId_AggregateSparklineData); }