public static void main(String[] args) {

    Config conf = new Config();
    conf.setDebug(true);
    // SessionFactory sessionFactory =
    // HibernateSessionFactory.createSession("mySql");

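    // With a topology name argument, submit to a remote cluster; otherwise run
    // locally against an in-process DRPC server.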
    if (args != null && args.length > 0) {
      TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(null, null);
      builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
      conf.setNumWorkers(1);
      try {
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
      } catch (AlreadyAliveException | InvalidTopologyException e) {
        e.printStackTrace();
      }
    } else {
      LocalDRPC drpc = new LocalDRPC();
      TopologyBuilder builder = MongooseTopologyCreator.CreateTopologyBuilder(drpc, null);
      builder.setBolt("result", new ReturnResults(), 1).shuffleGrouping("rulebolt");
      conf.setMaxTaskParallelism(3);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology("mongooseTopologyLocal", conf, builder.createTopology());

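      // Issue a sample DRPC request and print the result computed by the topology.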
      for (String request : new String[] {"{\"studentId\":\"1\",\"greScore\":380.0}"}) {
        System.out.println("Result for: " + request + "\": " + drpc.execute("mongoose", request));
      }
      cluster.shutdown();
      drpc.shutdown();
    }
  }
  public static void main(String[] args) {
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("aa", new DataSourceSpout());
    topologyBuilder.setBolt("bb", new SumBolt(), 3).shuffleGrouping("aa");

    /*// Create a local Storm cluster
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("topology-name", new Config(), topologyBuilder.createTopology());*/

    try {
      StormSubmitter.submitTopology(
          "topology-name", new Config(), topologyBuilder.createTopology());
    } catch (AlreadyAliveException | InvalidTopologyException e) {
      e.printStackTrace();
    }
  }
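  /**
   * Thrift-style ordering: an unset msg sorts before a set one, then set values
   * are compared by their contents.
   */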
  public int compareTo(AlreadyAliveException other) {
    if (!getClass().equals(other.getClass())) {
      return getClass().getName().compareTo(other.getClass().getName());
    }

    // The parameter is already strongly typed, so the generated cast is unnecessary.
    int lastComparison = Boolean.valueOf(is_set_msg()).compareTo(other.is_set_msg());
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (is_set_msg()) {
      lastComparison = org.apache.thrift7.TBaseHelper.compareTo(this.msg, other.msg);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
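  /**
   * Thrift-style equality: two exceptions are equal when msg is unset in both,
   * or set to equal values in both.
   */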
  public boolean equals(AlreadyAliveException that) {
    if (that == null) return false;

    boolean this_present_msg = this.is_set_msg();
    boolean that_present_msg = that.is_set_msg();
    if (this_present_msg || that_present_msg) {
      if (!(this_present_msg && that_present_msg)) return false;
      if (!this.msg.equals(that.msg)) return false;
    }

    return true;
  }
  public static void main(String[] args) throws Exception {

    final int TOP_N = 3;

    TopologyBuilder builder = new TopologyBuilder();

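    // Pipeline: word spout -> rolling counts -> per-task partial rankings -> global merge of the top N.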
    builder.setSpout("word", new TestWordSpout(), 5);

    builder.setBolt("count", new RollingCountObjects(60, 10), 4).fieldsGrouping("word", new Fields("word"));
    builder.setBolt("rank", new RankObjects(TOP_N), 4).fieldsGrouping("count", new Fields("obj"));
    builder.setBolt("merge", new MergeObjects(TOP_N)).globalGrouping("rank");

    Config conf = new Config();
    conf.setNumWorkers(10);
    conf.setMaxSpoutPending(5000);
    try {
      // submitTopology is static; no StormSubmitter instance is needed.
      StormSubmitter.submitTopology("rolling-demo", conf, builder.createTopology());
    } catch (AlreadyAliveException | InvalidTopologyException e) {
      e.printStackTrace();
    }
  }
  /** Performs a deep copy on <i>other</i>. */
  public AlreadyAliveException(AlreadyAliveException other) {
    if (other.is_set_msg()) {
      this.msg = other.msg;
    }
  }
  public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();

    // Use comma as the field delimiter
    RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(",");

    // Synchronize data buffer with the filesystem every 1000 tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);

    // Rotate data files when they reach five MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);

    // Use default, Storm-generated file names
    FileNameFormat deviceLogFileNameFormat =
        new DefaultFileNameFormat().withPath(Constants.hivePath);
    HdfsBolt deviceLogHdfsBolt =
        new HdfsBolt()
            .withFsUrl(Constants.nameNode)
            .withFileNameFormat(deviceLogFileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);

    Config conf = new Config();
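    // Both Kafka spouts discover brokers through the same ZooKeeper connection.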
    BrokerHosts hosts = new ZkHosts(Constants.zkConnString);

    SpoutConfig deviceKafkaSpoutConfig =
        new SpoutConfig(
            hosts,
            Constants.deviceTopicName,
            "/" + Constants.deviceTopicName,
            UUID.randomUUID().toString());
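    // Note: a fresh random consumer id on every submission means offsets stored in
    // ZooKeeper are not reused; each run starts from startOffsetTime below.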
    deviceKafkaSpoutConfig.scheme = new SchemeAsMultiScheme(new DeviceEventJSONScheme());
    deviceKafkaSpoutConfig.useStartOffsetTimeIfOffsetOutOfRange = true;
    deviceKafkaSpoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    KafkaSpout deviceKafkaSpout = new KafkaSpout(deviceKafkaSpoutConfig);

    SpoutConfig technicianKafkaSpoutConfig =
        new SpoutConfig(
            hosts,
            Constants.technicianTopicName,
            "/" + Constants.technicianTopicName,
            UUID.randomUUID().toString());
    technicianKafkaSpoutConfig.scheme = new SchemeAsMultiScheme(new TechnicianEventJSONScheme());
    technicianKafkaSpoutConfig.useStartOffsetTimeIfOffsetOutOfRange = true;
    technicianKafkaSpoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
    KafkaSpout technicianKafkaSpout = new KafkaSpout(technicianKafkaSpoutConfig);

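    // Map technician tuples to HBase: row key = TechnicianId, status and location
    // columns in family "cf".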
    SimpleHBaseMapper technicianLocationMapper =
        new SimpleHBaseMapper()
            .withRowKeyField("TechnicianId")
            .withColumnFields(new Fields("TechnicianStatus", "Latitude", "Longitude"))
            .withColumnFamily("cf");

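    // HBase client settings, handed to the (currently disabled) HBaseBolt via the "hbase.conf" key.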
    Map<String, Object> hbConf = new HashMap<String, Object>();
    hbConf.put("hbase.rootdir", Constants.nameNode + "/apps/hbase/data/");
    hbConf.put("hbase.zookeeper.quorum", Constants.zkHost);
    hbConf.put("hbase.zookeeper.property.clientPort", Constants.zkPort);
    hbConf.put("zookeeper.znode.parent", "/hbase-unsecure");
    conf.put("hbase.conf", hbConf);

    // HBaseBolt hbasePersistTechnicianLocation = new HBaseBolt("TechnicianEvents",
    // technicianLocationMapper).withConfigKey("hbase.conf");

    // builder.setSpout("DeviceSpout", new DeviceSpout());
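    // Device pipeline: Kafka -> enrich -> publish status / detect incidents,
    // then fan out to printing, HDFS logging, alerting, and technician routing.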
    builder.setSpout("DeviceKafkaSpout", deviceKafkaSpout);
    builder
        .setBolt("EnrichDeviceStatus", new EnrichDeviceStatus(), 1)
        .shuffleGrouping("DeviceKafkaSpout");
    builder
        .setBolt("PublishDeviceStatus", new PublishDeviceStatus(), 1)
        .shuffleGrouping("EnrichDeviceStatus");
    builder
        .setBolt("DetectIncident", new IncidentDetector(), 1)
        .shuffleGrouping("EnrichDeviceStatus");
    builder
        .setBolt("PrintDeviceStatus", new PrintDeviceStatus(), 1)
        .shuffleGrouping("DetectIncident", "DeviceStatusStream");
    builder
        .setBolt("PrintDeviceAlert", new PrintDeviceAlert(), 1)
        .shuffleGrouping("DetectIncident", "DeviceAlertStream");
    builder
        .setBolt("DeviceLogHDFSBolt", deviceLogHdfsBolt, 1)
        .shuffleGrouping("DetectIncident", "DeviceStatusLogStream");
    builder.setBolt("PublishAlert", new PublishAlert(), 1).shuffleGrouping("PrintDeviceAlert");
    builder
        .setBolt("RecommendTechnician", new RecommendTechnician(), 1)
        .shuffleGrouping("PrintDeviceAlert");
    builder
        .setBolt("RouteTechnician", new RouteTechnician(), 1)
        .shuffleGrouping("RecommendTechnician");
    // builder.setBolt("persist", printerHdfsBolt).shuffleGrouping("print");

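    // Technician pipeline: Kafka -> persist location -> publish location.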
    builder.setSpout("TechnicianKafkaSpout", technicianKafkaSpout);
    // builder.setBolt("PersistTechnicianLocation",
    // hbasePersistTechnicianLocation).shuffleGrouping("TechnicianKafkaSpout");
    builder
        .setBolt("PersistTechnicianLocation", new PersistTechnicianLocation(), 1)
        .shuffleGrouping("TechnicianKafkaSpout");
    builder
        .setBolt("PublishTechnicianLocation", new PublishTechnicianLocation(), 1)
        .shuffleGrouping("PersistTechnicianLocation");

    // LocalCluster cluster = new LocalCluster();
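    // Keep the footprint small: one worker, task parallelism capped at one, and
    // at most 5000 pending spout tuples.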
    conf.setNumWorkers(1);
    conf.setMaxSpoutPending(5000);
    conf.setMaxTaskParallelism(1);
    // cluster.submitTopology("DeviceMonitor", conf, builder.createTopology());

    try {
      StormSubmitter.submitTopology("DeviceMonitor", conf, builder.createTopology());
    } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
      e.printStackTrace();
    }
  }