public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("PlayStream1", new FootballDataSpout(), 1);
  builder.setBolt("AvgRunPlay", configureSiddhiBolt1(), 1).shuffleGrouping("PlayStream1");
  builder.setBolt("FastRunPlay", configureSiddhiBolt2(), 1).shuffleGrouping("AvgRunPlay");
  builder.setBolt("LeafEcho", new EchoBolt(), 1).shuffleGrouping("FastRunPlay");

  Config conf = new Config();
  // conf.setDebug(true);
  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    conf.setMaxTaskParallelism(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("football-play-analysis", conf, builder.createTopology());
    Thread.sleep(10000);
    cluster.shutdown();
  }
}
@Test
public void testEPL() {
  // should say fieldsTypes, maybe with object/component prefix
  Map<String, Object> eventTypes = new HashMap<>();
  eventTypes.put(LITERAL_SYMBOL, String.class);
  eventTypes.put(LITERAL_PRICE, Integer.class);

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(LITERAL_QUOTES, new RandomSentenceSpout());
  builder
      .setBolt(
          LITERAL_ESPER,
          (new EsperBolt())
              .addEventTypes(eventTypes)
              .addOutputTypes(
                  Collections.singletonMap(
                      LITERAL_RETURN_OBJ, Arrays.asList(LITERAL_AVG, LITERAL_PRICE)))
              .addStatements(
                  Collections.singleton(
                      "insert into Result "
                          + "select avg(price) as avg, price from "
                          + "quotes_default(symbol='A').win:length(2) "
                          + "having avg(price) > 60.0")))
      .shuffleGrouping(LITERAL_QUOTES);
  builder.setBolt("print", new PrinterBolt()).shuffleGrouping(LITERAL_ESPER, LITERAL_RETURN_OBJ);

  Config conf = new Config();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("test", conf, builder.createTopology());
  Utils.sleep(10000);
  cluster.shutdown();

  // JUnit expects the expected value first; avoid the deprecated Double constructor.
  assertEquals(Double.valueOf(75.0), resultEPL.get(100));
  assertEquals(Double.valueOf(75.0), resultEPL.get(50));
}
/**
 * @param args
 * @throws InterruptedException
 */
public static void main(String[] args) throws InterruptedException {
  // Add a transactional spout
  MemoryTransactionalSpout spout =
      new MemoryTransactionalSpout(values, new Fields("shortid", "url", "user", "date"), 3);
  TransactionalTopologyBuilder builder =
      new TransactionalTopologyBuilder("shorturl-count", "spout", spout, 2);

  // Build the TupleTableConfig
  TupleTableConfig ttConfig = new TupleTableConfig("shorturl", "shortid");
  ttConfig.setBatch(false);
  ttConfig.addColumn("data", "clicks");
  ttConfig.addColumn("daily", "date");

  builder
      .setBolt("hbase-counters", new HBaseCountersBatchBolt(ttConfig), 2)
      .fieldsGrouping("spout", new Fields("shortid"));

  LocalCluster cluster = new LocalCluster();
  Config stormConfig = new Config();
  stormConfig.setDebug(true);
  stormConfig.setMaxSpoutPending(3);
  cluster.submitTopology("hbase-example", stormConfig, builder.buildTopology());
  Thread.sleep(10000);
  cluster.shutdown();
}
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("cdc", conf, buildTopology());
  Thread.sleep(200000);
  cluster.shutdown();
}
@Ignore
@Test
public void testSiddhiSpout()
    throws AlreadyAliveException, InvalidTopologyException, InterruptedException {
  eventsReceived = false;
  ExecutionPlanConfiguration executionPlanConfiguration = new ExecutionPlanConfiguration();
  StreamDefinition siddhiStreamDef =
      new StreamDefinition().name("wordStream").attribute("word", Attribute.Type.STRING);
  ConsumingQueuedEventSource eventSource =
      new ConsumingQueuedEventSource(siddhiStreamDef, executionPlanConfiguration);
  SiddhiSpout siddhiSpout = new SiddhiSpout(siddhiStreamDef, eventSource);
  siddhiSpout.setUseDefaultAsStreamName(false);

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("siddhi-spout", siddhiSpout);
  // builder.setBolt("count", wordCount, 12).fieldsGrouping("siddhi-spout", new Fields("word"));
  builder
      .setBolt("count", new WordCount(), 8)
      .fieldsGrouping("siddhi-spout", "wordStream", new Fields("word"));

  Config conf = new Config();
  conf.setDebug(false);
  conf.setMaxTaskParallelism(3);

  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("word-count", conf, builder.createTopology());
  eventSource.consumeEvents(new Object[][] {{"GOOG"}, {"WSO2"}, {"FB"}});
  Thread.sleep(10000);
  Assert.assertTrue("No events received.", eventsReceived);
  Assert.assertTrue("Event count is zero", eventCount > 0);
  cluster.shutdown();
}
public static void main(String[] args) {
  Config config = new Config();
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("RandomSentence", new RandomSentenceSpout(), 2);
  builder
      .setBolt("WordNormalizer", new WordNormalizerBolt(), 2)
      .shuffleGrouping("RandomSentence");
  builder
      .setBolt("WordCount", new WordCountBolt(), 2)
      .fieldsGrouping("WordNormalizer", new Fields("word"));
  builder.setBolt("Print", new PrintBolt(), 1).shuffleGrouping("WordCount");
  config.setDebug(false);
  // Use the presence of command-line arguments to choose between cluster
  // submission and local-mode execution.
  if (args != null && args.length > 0) {
    try {
      config.setNumWorkers(1);
      StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    } catch (Exception e) {
      e.printStackTrace();
    }
  } else {
    config.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordcount", config, builder.createTopology());
  }
}
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new RandomSentenceSpout(), 5); builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word")); Config conf = new Config(); conf.setDebug(true); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology()); } else { conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); Thread.sleep(1000000); cluster.shutdown(); } }
public static void main(String[] args) {
  Config conf = new Config();
  conf.setDebug(true);
  // SessionFactory sessionFactory = HibernateSessionFactory.createSession("mySql");
  if (args != null && args.length > 0) {
    TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(null, null);
    builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
    conf.setNumWorkers(1);
    try {
      StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } catch (AlreadyAliveException | InvalidTopologyException e) {
      e.printStackTrace();
    }
  } else {
    LocalDRPC drpc = new LocalDRPC();
    TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(drpc, null);
    builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
    conf.setMaxTaskParallelism(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mongooseTopologyLocal", conf, builder.createTopology());
    for (String request : new String[] {"{\"studentId\":\"1\",\"greScore\":380.0}"}) {
      System.out.println(
          "Result for: \"" + request + "\": " + drpc.execute("mongoose", request));
    }
    cluster.shutdown();
    drpc.shutdown();
  }
}
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  List<String> hosts = new ArrayList<String>();
  hosts.add("127.0.0.1");
  SpoutConfig spoutConfig =
      new SpoutConfig(
          KafkaConfig.StaticHosts.fromHostString(hosts, 1), "test", "/kafkastorm", "discovery");

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("kafkaSpout", new KafkaSpout(spoutConfig), 1);
  builder
      .setBolt("flightInformationParserBolt", new FlightInformationParserBolt(), 1)
      .shuffleGrouping("kafkaSpout");

  Config conf = new Config();
  conf.setDebug(true);
  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("flightInformationTest", conf, builder.createTopology());
    // Utils.sleep(10000);
    // cluster.killTopology("flightInformationTest");
    // cluster.shutdown();
  }
}
public static void main(String[] args) {
  FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
  FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("gender", genderSpout);
  builder.setSpout("age", ageSpout);
  builder
      .setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
      .fieldsGrouping("gender", new Fields("id"))
      .fieldsGrouping("age", new Fields("id"));

  Config conf = new Config();
  conf.setDebug(true);

  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("join-example", conf, builder.createTopology());

  for (int i = 0; i < 10; i++) {
    String gender;
    if (i % 2 == 0) {
      gender = "male";
    } else {
      gender = "female";
    }
    genderSpout.feed(new Values(i, gender));
  }
  for (int i = 9; i >= 0; i--) {
    ageSpout.feed(new Values(i, i + 20));
  }

  Utils.sleep(2000);
  cluster.shutdown();
}
public static void main(String[] args) {
  try {
    // Instantiate the TopologyBuilder.
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    // Register the spout and set its parallelism, which controls the number of
    // executor threads for this component on the cluster.
    topologyBuilder.setSpout("SimpleSpout", new SimpleSpout(), 1);
    // Register the processing bolt with its parallelism, subscribing to the
    // spout with shuffle grouping.
    topologyBuilder.setBolt("SimpleBolt", new SimpleBolt(), 3).shuffleGrouping("SimpleSpout");

    Config config = new Config();
    config.setDebug(true);
    if (args != null && args.length > 0) {
      config.setNumWorkers(1);
      StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
    } else {
      // Startup code for local-mode execution.
      config.setMaxTaskParallelism(1);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("simple", config, topologyBuilder.createTopology());
      Utils.sleep(5000);
      cluster.shutdown();
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
public static void main(String[] args)
    throws InterruptedException, AlreadyAliveException, InvalidTopologyException {
  Config config = new Config();
  TopologyBuilder builder = new TopologyBuilder();
  // Register the spout and set its parallelism, which controls the number of
  // executor threads for this component on the cluster.
  builder.setSpout("readlog", new ReadLogSpout(), 1);
  // Create the monitor bolt that filters log events.
  builder.setBolt("monitor", new MonitorBolt("MonitorBolt.xml"), 3).shuffleGrouping("readlog");
  // Create the MySQL persistence bolt.
  builder.setBolt("mysql", new MysqlBolt("MysqlBolt.xml"), 3).shuffleGrouping("monitor");
  builder.setBolt("print", new PrintBolt(), 3).shuffleGrouping("monitor");
  config.setDebug(false);
  if (args != null && args.length > 0) {
    config.setNumWorkers(1);
    StormSubmitter.submitTopology(args[0], config, builder.createTopology());
  } else {
    // Startup code for local-mode execution.
    config.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("simple", config, builder.createTopology());
  }
}
public static void main(String[] args) {
  TopologyBuilder builder = new TopologyBuilder();
  List<String> zks = new ArrayList<String>();
  zks.add("127.0.0.1");

  List<String> cFs = new ArrayList<String>();
  cFs.add("personal");
  cFs.add("company");

  builder.setSpout("hbase_spout", new HbaseSpout(), 2);
  builder
      .setBolt("hbase_bolt", new HbaseBolt("user", cFs, zks, 2181), 2)
      .shuffleGrouping("hbase_spout");

  Config config = new Config();
  config.setDebug(true);

  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("hbase_topology", config, builder.createTopology());
  try {
    Thread.sleep(20000);
  } catch (InterruptedException e) {
    System.out.println("Thread interrupted: " + e);
  }
  System.out.println("Stopping topology...");
  cluster.killTopology("hbase_topology");
  cluster.shutdown();
}
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new TwitterStreamSpout(), 5); builder .setBolt("windowingTwitterStream", new WindowingTwitterStream(300), 1) .shuffleGrouping("spout"); builder .setBolt( "PrepareUserSimilarityCalculationBolt", new PrepareUserSimilarityCalculationBolt(), 1) .shuffleGrouping("windowingTwitterStream"); builder .setBolt("UserSimilarityCalculationBolt", new UserSimilarityCalculationBolt(), 1) .shuffleGrouping("PrepareUserSimilarityCalculationBolt"); Config conf = new Config(); conf.setDebug(false); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopology(args[0], conf, builder.createTopology()); } else { conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); // Thread.sleep(10000); // cluster.shutdown(); } }
@Test
public void testStorm() throws Exception {
  SentenceSpout spout = new SentenceSpout();
  SplitSentenceBolt splitSentenceBolt = new SplitSentenceBolt();
  WordCountBolt countBolt = new WordCountBolt();
  ReportBolt reportBolt = new ReportBolt();

  TopologyBuilder builder = new TopologyBuilder();
  // Register the sentence spout under a unique id.
  builder.setSpout(SENTENCE_SPOUT_ID, spout);
  // Register the SplitSentenceBolt, which subscribes to the SentenceSpout's stream.
  // Passing the spout's unique id to shuffleGrouping() establishes that subscription;
  // shuffleGrouping() tells Storm to distribute the spout's tuples evenly at random
  // across the SplitSentenceBolt instances.
  builder.setBolt(SPLIT_BOLT_ID, splitSentenceBolt).shuffleGrouping(SENTENCE_SPOUT_ID);
  // Route tuples carrying particular values to particular bolt instances:
  // BoltDeclarer.fieldsGrouping() guarantees that all tuples with the same "word"
  // value are routed to the same WordCountBolt instance.
  builder.setBolt(COUNT_BOLT_ID, countBolt).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
  // Route every tuple emitted by WordCountBolt to the single ReportBolt via globalGrouping.
  builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);

  Config config = new Config();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
  Thread.sleep(10000);
  cluster.killTopology(TOPOLOGY_NAME);
  cluster.shutdown();
}
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new RandomSentenceSpout(), 5); builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word")); Config conf = new Config(); conf.setDebug(true); if (args != null && args.length > 0) { /*设置该topology在storm集群中要抢占的资源slot数,一个slot对应这supervisor节点上的以个worker进程 如果你分配的spot数超过了你的物理节点所拥有的worker数目的话,有可能提交不成功,加入你的集群上面已经有了 一些topology而现在还剩下2个worker资源,如果你在代码里分配4个给你的topology的话,那么这个topology可以提交 但是提交以后你会发现并没有运行。 而当你kill掉一些topology后释放了一些slot后你的这个topology就会恢复正常运行。 */ conf.setNumWorkers(3); StormSubmitter.submitTopology(args[0], conf, builder.createTopology()); } else { conf.setMaxTaskParallelism(3); // 指定为本地模式运行 LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); Thread.sleep(10000); cluster.shutdown(); } }
public static void main(String[] args) throws IOException, ConnectionException {
  Logger.getRootLogger().setLevel(Level.WARN);

  // *** start the storm cluster
  LocalCluster cluster = new LocalCluster();

  // *** start kafka
  LocalKafkaBroker broker = new LocalKafkaBroker(0, 9090, 4, "localhost:2000");
  ReplayConfig replay =
      new ReplayConfig().staticHosts(broker.getHostPortStrings(), broker.getNumPartitions());

  // *** build a topology
  // KarmaConfig karmaConfig = new KarmaConfigImpl("a", replay, new InMemoryReducerState());
  KarmaConfig karmaConfig = new KarmaConfigImpl("a", replay, new CassandraReducerState("demo"));
  StormTopology topology = buildTopology(karmaConfig);

  // *** submit the topology to storm
  Config config = new Config();
  config.setMaxSpoutPending(50);
  cluster.submitTopology("bankdemo", config, topology);

  // *** send some events
  Producer<Long, Message> kafkaProducer = broker.buildSyncProducer();
  JsonProducer mb = new JsonProducer(kafkaProducer).printSendsToConsole(true);
  sendBankingDemoMessages(mb);
  Utils.sleep(100000);
  kafkaProducer.close();
}
public static void main(String[] args) {
  Config config = new Config();
  config.setDebug(true);
  config.registerMetricsConsumer(LoggingMetricsConsumer.class, 1);

  LocalCluster localCluster = new LocalCluster();
  localCluster.submitTopology(
      "flash-sale-recommender", config, FlashSaleTopologyBuilder.build());
}
@Test
public void testTridentTopology() throws Exception {
  Session session = cassandraCQLUnit.session;
  String[] stationIds = {"station-1", "station-2", "station-3"};
  for (int i = 1; i < 4; i++) {
    session.execute(
        "INSERT INTO weather.station(id, name) VALUES(?, ?)",
        stationIds[i - 1],
        "Foo-Station-" + new Random().nextInt());
  }

  ResultSet rows = cassandraCQLUnit.session.execute("SELECT * FROM weather.station");
  for (Row row : rows) {
    System.out.println("####### row = " + row);
  }

  WeatherBatchSpout weatherBatchSpout =
      new WeatherBatchSpout(
          new Fields("weather_station_id", "temperature", "event_time"), 3, stationIds);

  TridentTopology topology = new TridentTopology();
  Stream stream = topology.newStream("cassandra-trident-stream", weatherBatchSpout);

  CassandraStateFactory insertValuesStateFactory = getInsertTemperatureStateFactory();
  CassandraStateFactory selectWeatherStationStateFactory = getSelectWeatherStationStateFactory();

  TridentState selectState = topology.newStaticState(selectWeatherStationStateFactory);
  stream =
      stream.stateQuery(
          selectState,
          new Fields("weather_station_id"),
          new CassandraQuery(),
          new Fields("name"));
  stream = stream.each(new Fields("name"), new PrintFunction(), new Fields("name_x"));
  stream.partitionPersist(
      insertValuesStateFactory,
      new Fields("weather_station_id", "name", "event_time", "temperature"),
      new CassandraStateUpdater(),
      new Fields());

  StormTopology stormTopology = topology.build();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("wordCounter", getConfig(), stormTopology);
  Thread.sleep(30 * 1000);

  rows = cassandraCQLUnit.session.execute("SELECT * FROM weather.temperature");
  Assert.assertTrue(rows.iterator().hasNext()); // basic sanity check

  cluster.killTopology("wordCounter");
  cluster.shutdown();
}
// Entry point for the topology
public static void main(String[] args) throws Exception {
  // Read and set configuration
  Properties properties = new Properties();
  // From a command-line argument, or from the bundled config file?
  if (args.length > 1) {
    properties.load(new FileReader(args[1]));
  } else {
    properties.load(
        EventHubWriter.class.getClassLoader().getResourceAsStream("EventHubs.properties"));
  }

  // Configure the bolt for Event Hub
  String policyName = properties.getProperty("eventhubs.writerpolicyname");
  String policyKey = properties.getProperty("eventhubs.writerpolicykey");
  String namespaceName = properties.getProperty("eventhubs.namespace");
  String entityPath = properties.getProperty("eventhubs.entitypath");
  EventHubBoltConfig boltConfig =
      new EventHubBoltConfig(
          policyName, policyKey, namespaceName, "servicebus.windows.net", entityPath);

  // Used to build the topology
  TopologyBuilder builder = new TopologyBuilder();
  // Add the spout, with a name of 'spout' and a parallelism hint of 5 executors
  builder.setSpout("spout", new DeviceSpout(), 5);
  builder.setBolt("eventhubbolt", new EventHubBolt(boltConfig), 8).shuffleGrouping("spout");

  // New configuration
  Config conf = new Config();
  conf.setDebug(true);

  // If there are arguments, we are running on a cluster
  if (args != null && args.length > 0) {
    // Set the number of worker processes
    conf.setNumWorkers(3);
    // Submit the topology
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  }
  // Otherwise, we are running locally
  else {
    // Cap the maximum number of executors that can be spawned for a component to 3
    conf.setMaxTaskParallelism(3);
    // LocalCluster is used to run locally
    LocalCluster cluster = new LocalCluster();
    // Submit the topology
    cluster.submitTopology("writer", conf, builder.createTopology());
    // Sleep, then shut down the cluster
    Thread.sleep(10000);
    cluster.shutdown();
  }
}
public static void main(String[] args) throws Exception {
  final TopologyBuilder builder = new TopologyBuilder();
  final KestrelThriftSpout spout =
      new KestrelThriftSpout("localhost", 2229, "test", new StringScheme());
  builder.setSpout("spout", spout).setDebug(true);
  builder.setBolt("bolt", new FailEveryOther()).shuffleGrouping("spout");

  final LocalCluster cluster = new LocalCluster();
  final Config conf = new Config();
  cluster.submitTopology("test", conf, builder.createTopology());
  Thread.sleep(600000);
}
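// FailEveryOther is not shown above. A plausible minimal sketch, assuming the
// name means what it says: ack every other tuple and fail the rest, so the
// Kestrel spout's replay behavior can be observed. The real class may differ.
// Assumes the usual Storm imports (BaseRichBolt, OutputCollector, Tuple,
// TopologyContext, OutputFieldsDeclarer) plus java.util.Map.
public static class FailEveryOther extends BaseRichBolt {
  private OutputCollector collector;
  private long seen = 0;

  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
  }

  @Override
  public void execute(Tuple tuple) {
    // Alternate between acking and failing; failed tuples get re-enqueued by Kestrel.
    if (++seen % 2 == 0) {
      collector.ack(tuple);
    } else {
      collector.fail(tuple);
    }
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // No output fields: this bolt only acks or fails its input.
  }
}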
public static void main(String[] args) throws Exception {
  if (args.length < 3) {
    throw new RuntimeException("requires 3 args: toponame, host, streamName [workerCnt]");
  }

  String topoName = args[0];
  String xaphost = args[1];
  String streamName = args[2];
  int workerCnt = 4;
  if (args.length > 3) {
    workerCnt = Integer.parseInt(args[3]);
  }

  log.info(String.format("executing wordcount with %s %s %s", topoName, xaphost, streamName));

  XAPConfig config = new XAPConfig();
  config.setBatchSize(1000);
  config.setStreamName(streamName);
  config.setXapHost(xaphost);
  config.setFields("sentence");
  config.setCollectStats(true);

  Config conf = new Config();
  // conf.setDebug(true);

  XAPTridentSpout spout = new XAPTridentSpout(config);
  TridentTopology topology = new TridentTopology();
  TridentState wordCounts =
      topology
          .newStream("spout1", spout)
          .each(new Fields("sentence"), new SplitLarge(6), new Fields("word"))
          .groupBy(new Fields("word"))
          .persistentAggregate(
              XAPState2.nonTransactional(
                  String.format("jini://*/*/streamspace?locators=%s", xaphost), true),
              new Count(),
              new Fields("count"));

  // Note: the argument check above guarantees args.length >= 3, so this branch
  // always submits to the cluster; the local-mode fallback below is unreachable.
  if (args != null && args.length > 0) {
    conf.setNumWorkers(workerCnt);
    StormSubmitter.submitTopology(topoName, conf, topology.build());
  } else {
    conf.setMaxTaskParallelism(3);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("word-count", conf, topology.build());
    Thread.sleep(10000);
    cluster.shutdown();
  }
}
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  // Login processing
  builder.setBolt("mlogin_verify_bolt", new MLoginVeriBolt(), 10).shuffleGrouping("mlogin_spout");
  builder
      .setBolt("mlogin_calc_bolt", new MLoginCalcBolt(), 10)
      .shuffleGrouping("mlogin_verify_bolt");

  Config conf = new Config();
  conf.setDebug(true);

  if (args != null && args.length > 0) {
    String gamecfg_path = "";
    try {
      gamecfg_path = args[1];
    } catch (ArrayIndexOutOfBoundsException e) {
      System.out.println("NOTICE: please supply the game config file path (param 2)!");
      e.printStackTrace();
      System.exit(-999);
    }
    conf.put("gamecfg_path", gamecfg_path);
    conf.put("isOnline", true);

    String topicLogin = "******";
    String zkRoot = "/home/ztgame/storm/zkroot";
    String spoutIdLogin = "******";
    BrokerHosts brokerHosts =
        new ZkHosts("172.29.201.208:2181,172.29.201.207:2181,172.29.201.205:2181");
    SpoutConfig spoutConfLogin = new SpoutConfig(brokerHosts, topicLogin, zkRoot, spoutIdLogin);
    spoutConfLogin.scheme = new SchemeAsMultiScheme(new StringScheme());
    builder.setSpout("mlogin_spout", new KafkaSpout(spoutConfLogin), 1);

    conf.setNumWorkers(2);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    conf.put("isOnline", false);
    builder.setSpout("mlogin_spout", new SampleMLoginSpout(), 1);
    conf.setMaxTaskParallelism(2);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mlogin-local", conf, builder.createTopology());
    Thread.sleep(200000);
    cluster.shutdown();
  }
}
/**
 * Program entry point.<br>
 *
 * <ul>
 *   <li>arg[0]: path to the YAML file holding the configuration values
 *   <li>arg[1]: Storm launch mode (true: local mode, false: distributed mode)
 * </ul>
 *
 * @param args launch arguments
 * @throws Exception if initialization fails
 */
public static void main(String[] args) throws Exception {
  // Check for missing program arguments
  if (args.length < 2) {
    System.out.println(
        "Usage: java acromusashi.stream.example.topology.DecisionTestTopology ConfigPath isExecuteLocal(true|false)");
    return;
  }

  // Create the Storm config object from the path given as a launch argument
  Config conf = StormConfigGenerator.loadStormConfig(args[0]);

  // Read the run mode (local or distributed) from the program arguments
  boolean isLocal = Boolean.valueOf(args[1]);

  TopologyBuilder builder = new TopologyBuilder();

  // Get settings from the StormConfig object
  int wordSpoutPara = StormConfigUtil.getIntValue(conf, "WordSpout.Parallelism", 2);
  int judgeBoltPara = StormConfigUtil.getIntValue(conf, "JudgeBolt.Parallelism", 2);
  int shortWordBoltPara = StormConfigUtil.getIntValue(conf, "ShortWord.Parallelism", 2);
  int longWordBoltPara = StormConfigUtil.getIntValue(conf, "LongWord.Parallelism", 2);

  builder.setSpout("WordSpout", new TestWordSpout(), wordSpoutPara);
  builder
      .setBolt("JudgeBolt", new JudgeBolt(), judgeBoltPara)
      .fieldsGrouping("WordSpout", new Fields("word"));

  // Subscribe to the ShortWord stream
  builder
      .setBolt("ShortWord", new ShortWordBolt(), shortWordBoltPara)
      .fieldsGrouping("JudgeBolt", "ShortWord", new Fields("word"));

  // Subscribe to the LongWord stream
  builder
      .setBolt("LongWord", new LongWordBolt(), longWordBoltPara)
      .fieldsGrouping("JudgeBolt", "LongWord", new Fields("word"));

  if (isLocal) {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("DecisionTest", conf, builder.createTopology());
    Utils.sleep(10000000);
    cluster.killTopology("DecisionTest");
    cluster.shutdown();
  } else {
    StormSubmitter.submitTopology("DecisionTest", conf, builder.createTopology());
  }
}
public static void main(String[] args) throws Exception {
  LocalDRPC drpc = new LocalDRPC();
  Config conf = new Config();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("reach", conf, buildTopology(drpc));
  Thread.sleep(2000);

  System.out.println("REACH: " + drpc.execute("reach", "aaa"));
  System.out.println("REACH: " + drpc.execute("reach", "foo.com/blog/1"));
  System.out.println("REACH: " + drpc.execute("reach", "engineering.twitter.com/blog/5"));

  cluster.shutdown();
  drpc.shutdown();
}
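// buildTopology(drpc) is not shown here. A sketch of the shape it takes in the
// storm-starter ReachTopology, which this snippet appears to mirror; the bolt
// classes (GetTweeters, GetFollowers, PartialUniquer, CountAggregator) are
// assumptions borrowed from that example, not necessarily this codebase.
private static StormTopology buildTopology(LocalDRPC drpc) {
  LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("reach");
  builder.addBolt(new GetTweeters(), 4); // url -> users who tweeted it
  builder.addBolt(new GetFollowers(), 12).shuffleGrouping(); // tweeter -> followers
  builder.addBolt(new PartialUniquer(), 6).fieldsGrouping(new Fields("id", "follower"));
  builder.addBolt(new CountAggregator(), 3).fieldsGrouping(new Fields("id"));
  // With a LocalDRPC instance, build the local variant of the DRPC topology.
  return builder.createLocalTopology(drpc);
}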
public static void main(String[] args) throws InterruptedException {
  // Topology definition
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(
      Constants.objectLocationGenerator,
      new ObjectLocationGenerator(),
      Constants.dataSpoutParallelism);
  builder.setSpout(
      Constants.queryGenerator, new RangeQueryGenerator(), Constants.querySpoutParallelism);
  builder
      .setBolt(
          Constants.rangeFilterBolt, new NonIncrementalRangeFilter(), Constants.boltParallelism)
      .customGrouping(
          Constants.queryGenerator,
          new QueryStaticGridCustomGrouping(
              Constants.numberOfBolts,
              Constants.xMaxRange,
              Constants.yMaxRange,
              Constants.xCellsNum,
              Constants.yCellsNum))
      .customGrouping(
          Constants.objectLocationGenerator,
          new DataStaticGridCustomGrouping(
              Constants.numberOfBolts,
              Constants.xMaxRange,
              Constants.yMaxRange,
              Constants.xCellsNum,
              Constants.yCellsNum));

  // Configuration
  Config conf = new Config();
  conf.setDebug(false);

  // Topology run
  // conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology(
      "OptimizedNonIncrementalRange-Queries_topology", conf, builder.createTopology());
  // while (true)
  //   Thread.sleep(1000);
}
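// The static-grid groupings above implement Storm's CustomStreamGrouping. A
// minimal self-contained sketch of that contract, routing each tuple by its
// (x, y) location to the task owning the matching grid cell. The tuple field
// positions and the cell-to-task assignment are assumptions; the real classes
// may differ. Assumes Storm's CustomStreamGrouping, WorkerTopologyContext, and
// GlobalStreamId, plus java.util.Collections and java.util.List.
public class StaticGridGroupingSketch implements CustomStreamGrouping {
  private final double xMax, yMax;
  private final int xCells, yCells;
  private List<Integer> targetTasks;

  public StaticGridGroupingSketch(double xMax, double yMax, int xCells, int yCells) {
    this.xMax = xMax;
    this.yMax = yMax;
    this.xCells = xCells;
    this.yCells = yCells;
  }

  @Override
  public void prepare(
      WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    this.targetTasks = targetTasks;
  }

  @Override
  public List<Integer> chooseTasks(int taskId, List<Object> values) {
    // Map the tuple's (x, y) onto a grid cell, then pick the task that owns
    // that cell (cells are spread over the tasks round-robin).
    double x = ((Number) values.get(0)).doubleValue();
    double y = ((Number) values.get(1)).doubleValue();
    int col = Math.min(xCells - 1, (int) (x / xMax * xCells));
    int row = Math.min(yCells - 1, (int) (y / yMax * yCells));
    int cell = row * xCells + col;
    return Collections.singletonList(targetTasks.get(cell % targetTasks.size()));
  }
}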
public static void main(String[] args) throws Exception { TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("spout", new RandomSentenceSpout(), 5); builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout"); builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word")); Config conf = new Config(); conf.setDebug(true); List<String> componentList = new ArrayList<String>(Arrays.asList("spout", "split", "count")); conf.put("components", componentList); Map<String, List<String>> streamMap = new HashMap<String, List<String>>(); List<String> splitstream = new ArrayList<String>(Arrays.asList("spout")); List<String> countstream = new ArrayList<String>(Arrays.asList("split")); streamMap.put("split", splitstream); streamMap.put("count", countstream); conf.put("streams", streamMap); conf.put("traffic.improvement", 5); conf.put("alfa", "0"); conf.put("beta", "1"); conf.put("gamma", "1"); if (args != null && args.length > 0) { conf.setNumWorkers(3); StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology()); WorkerMonitor.getInstance(); } else { conf.setMaxTaskParallelism(3); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("word-count", conf, builder.createTopology()); Thread.sleep(10000); cluster.shutdown(); } }
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  Config config = new Config();
  config.setDebug(true);

  /* ----------------------TODO-----------------------
  Task: wire up the topology

  NOTE: make sure when connecting components together, using the functions
  setBolt(name,…) and setSpout(name,…), you use the following names for each
  component:

  FileReaderSpout   -> "spout"
  SplitSentenceBolt -> "split"
  WordCountBolt     -> "count"
  NormalizerBolt    -> "normalize"
  TopNFinderBolt    -> "top-n"
  -------------------------------------------------- */
  builder.setSpout("spout", new FileReaderSpout(args[0]), 1);
  builder.setBolt("split", new SplitSentenceBolt(), 8).shuffleGrouping("spout");
  builder
      .setBolt("normalize", new NormalizerBolt(), 12)
      .fieldsGrouping("split", new Fields("word"));
  builder
      .setBolt("count", new WordCountBolt(), 12)
      .fieldsGrouping("normalize", new Fields("word"));
  builder.setBolt("top-n", new TopNFinderBolt(N), 1).fieldsGrouping("count", new Fields("word"));

  config.setMaxTaskParallelism(3);
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("word-count", config, builder.createTopology());

  // Wait for 2 minutes, then kill the job
  Thread.sleep(2 * 60 * 1000);
  cluster.shutdown();
}
/**
 * Submit a non-transactional Trident topology.
 *
 * @param args command-line arguments
 */
private static void simpleTopology(String[] args) {
  Config config = new Config();
  config.setMaxSpoutPending(20);
  if (args[0].equals("local")) {
    // Local test submission
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("Count", config, buildSimpleTopology());
  } else {
    // Submit to the cluster
    config.setNumWorkers(3);
    try {
      StormSubmitter.submitTopology("trident_simple_topology", config, buildSimpleTopology());
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
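// buildSimpleTopology() is not shown. A minimal sketch of a non-transactional
// Trident topology such a method could return, following the standard Trident
// word-count example; FixedBatchSpout, the field names, and the in-memory state
// are assumptions rather than this project's actual implementation. Assumes
// the storm.trident imports (TridentTopology, FixedBatchSpout, BaseFunction,
// TridentTuple, TridentCollector, MemoryMapState, Count) plus Fields/Values.
private static StormTopology buildSimpleTopology() {
  FixedBatchSpout spout =
      new FixedBatchSpout(
          new Fields("sentence"),
          3,
          new Values("the cow jumped over the moon"),
          new Values("four score and seven years ago"));
  spout.setCycle(true);

  TridentTopology topology = new TridentTopology();
  topology
      .newStream("spout1", spout)
      .each(new Fields("sentence"), new Split(), new Fields("word"))
      .groupBy(new Fields("word"))
      // Non-transactional in-memory state, matching this method's javadoc.
      .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));
  return topology.build();
}

// Tokenizer used above: splits each sentence into one tuple per word.
public static class Split extends BaseFunction {
  @Override
  public void execute(TridentTuple tuple, TridentCollector collector) {
    for (String word : tuple.getString(0).split(" ")) {
      collector.emit(new Values(word));
    }
  }
}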
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout =
      new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder =
      new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  LocalCluster cluster = new LocalCluster();
  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);
  cluster.submitTopology("global-count-topology", config, builder.buildTopology());
  Thread.sleep(3000);
  cluster.shutdown();
}
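// BatchCount and UpdateGlobalCount are the helpers from Storm's
// TransactionalGlobalCount example, which this snippet follows. A sketch of
// BatchCount as it appears there (reproduced from the standard example, not
// verified against this codebase): it counts the tuples of one batch and emits
// a partial count keyed by the batch's transaction id when the batch completes.
public static class BatchCount extends BaseBatchBolt {
  Object id;
  BatchOutputCollector collector;
  int count = 0;

  @Override
  public void prepare(
      Map conf, TopologyContext context, BatchOutputCollector collector, Object id) {
    this.collector = collector;
    this.id = id; // transaction/batch id
  }

  @Override
  public void execute(Tuple tuple) {
    count++;
  }

  @Override
  public void finishBatch() {
    // Emit the partial count for this batch; the "sum" bolt aggregates these globally.
    collector.emit(new Values(id, count));
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("id", "count"));
  }
}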