Example 1
  @Override
  public void execute(Tuple tuple) {
    LOG.info(
        "TopologyKiller: Received EOF message from componentId: "
            + tuple.getSourceComponent()
            + ", taskId: "
            + tuple.getSourceTask());
    _numberRegisteredTasks--;
    LOG.info("TopologyKiller: " + _numberRegisteredTasks + " remaining");
    if (_numberRegisteredTasks == 0) {
      LOG.info("TopologyKiller: Received EOF from all the registered tasks. Killing cluster...");
      // This fires either when all the spouts have finished emitting and their
      //  tuples are acked, or when all the tasks of the last components have
      //  sent the EOF signal.
      // Instruct all the components for which printOut is set to dump their results.
      _collector.emit(
          SystemParameters.DUMP_RESULTS_STREAM, new Values(SystemParameters.DUMP_RESULTS));

      long timeout = SystemParameters.LOCAL_SLEEP_BEFORE_KILL_MILLIS;
      if (SystemParameters.getBoolean(_conf, "DIP_DISTRIBUTED")) {
        // write out statistics (the same ones shown in the Storm UI web interface)
        StormWrapper.writeStats(_conf);
        timeout = SystemParameters.CLUSTER_SLEEP_BEFORE_KILL_MILLIS;
      }
      if (SystemParameters.getBoolean(_conf, "DIP_KILL_AT_THE_END")) {
        /*  Give the components enough time to dump their results.
         *  We could not use Storm's ack mechanism for dumping results,
         *    since our final result might reside on a Spout (StormDataSource),
         *    and spouts cannot send acks to another spout (the TopologyKiller
         *    spout); they use an EOF boolean to indicate when they are done.
         */
        Utils.sleep(timeout);
        StormWrapper.killExecution(_conf);
      }
    }
  }
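
The countdown above is essentially a latch: the bolt decrements a counter for every EOF tuple it receives and triggers cleanup when the counter reaches zero. Below is a minimal, self-contained sketch of the same pattern outside Storm, using java.util.concurrent.CountDownLatch; the task count and worker threads are illustrative, not part of the project.

import java.util.concurrent.CountDownLatch;

// Sketch of the "wait for N EOF signals, then clean up" pattern.
public class EofLatchDemo {
  public static void main(String[] args) throws InterruptedException {
    int registeredTasks = 3; // illustrative count
    CountDownLatch eofLatch = new CountDownLatch(registeredTasks);

    for (int task = 0; task < registeredTasks; task++) {
      final int taskId = task;
      new Thread(() -> {
        // each "task" does its work, then reports EOF
        System.out.println("taskId " + taskId + ": EOF");
        eofLatch.countDown(); // analogous to _numberRegisteredTasks--
      }).start();
    }

    // corresponds to the _numberRegisteredTasks == 0 branch above
    eofLatch.await();
    System.out.println("Received EOF from all registered tasks. Cleaning up...");
  }
}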
Example 2
  // String[] sizes: {"1G", "2G", "4G", ...}
  public Map createConfig(String parserConfPath) {
    Map map = SystemParameters.fileToMap(parserConfPath);

    if (!SystemParameters.getBoolean(map, "DIP_ACK_EVERY_TUPLE")) {
      // we do not ack each tuple as it is sent,
      //  so no node needs to be dedicated to acking
      CLUSTER_ACKERS = 0;
      LOCAL_ACKERS = 0;
    }

    if (SystemParameters.getBoolean(map, "DIP_DISTRIBUTED")) {
      // default value is already set, but for scheduling we might need to change that
      // SystemParameters.putInMap(map, "DIP_NUM_WORKERS", CLUSTER_WORKERS);
      SystemParameters.putInMap(map, "DIP_NUM_ACKERS", CLUSTER_ACKERS);
    } else {
      SystemParameters.putInMap(map, "DIP_NUM_ACKERS", LOCAL_ACKERS);
    }

    String dbSize = SystemParameters.getString(map, "DIP_DB_SIZE") + "G";
    String dataRoot = SystemParameters.getString(map, "DIP_DATA_ROOT");
    String dataPath = dataRoot + "/" + dbSize + "/";

    SystemParameters.putInMap(map, "DIP_DATA_PATH", dataPath);

    return map;
  }
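
For reference, the path assembly at the end of createConfig: with the hypothetical values DIP_DB_SIZE=10 and DIP_DATA_ROOT=/data/tpch, DIP_DATA_PATH becomes /data/tpch/10G/. A standalone sketch of the same derivation, with a plain HashMap standing in for the real config map:

import java.util.HashMap;
import java.util.Map;

// Sketch of the DIP_DATA_PATH derivation; the values are illustrative.
public class DataPathDemo {
  public static void main(String[] args) {
    Map<String, String> map = new HashMap<>();
    map.put("DIP_DB_SIZE", "10");
    map.put("DIP_DATA_ROOT", "/data/tpch");

    String dbSize = map.get("DIP_DB_SIZE") + "G"; // "10G"
    String dataPath = map.get("DIP_DATA_ROOT") + "/" + dbSize + "/";

    map.put("DIP_DATA_PATH", dataPath);
    System.out.println(dataPath); // /data/tpch/10G/
  }
}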
Example 3
  private Optimizer pickOptimizer(Map map) {
    String optStr = SystemParameters.getString(map, "DIP_OPTIMIZER_TYPE");
    System.out.println("Selected optimizer: " + optStr);
    if ("INDEX_SIMPLE".equalsIgnoreCase(optStr)) {
      // the simple optimizer produces left-deep ("lefty") plans
      return new IndexSimpleOptimizer(map);
    } else if ("INDEX_RULE_BUSHY".equalsIgnoreCase(optStr)) {
      return new IndexRuleOptimizer(map);
    } else if ("NAME_MANUAL_PAR_LEFTY".equalsIgnoreCase(optStr)) {
      return new NameManualParOptimizer(map);
    } else if ("NAME_MANUAL_COST_LEFTY".equalsIgnoreCase(optStr)) {
      return new NameManualOptimizer(map);
    } else if ("NAME_RULE_LEFTY".equalsIgnoreCase(optStr)) {
      return new NameRuleOptimizer(map);
    } else if ("NAME_COST_LEFTY".equalsIgnoreCase(optStr)) {
      return new NameCostOptimizer(map);
    }
    throw new RuntimeException("Unknown optimizer: " + optStr);
  }
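
The if/else chain dispatches on the DIP_OPTIMIZER_TYPE string. Below is an alternative sketch using a lookup table of constructor references; the Optimizer interface and the two registered factories are simplified stand-ins, not the project's real classes, and uppercasing the key mirrors the case-insensitive matching above.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Sketch: a registry-based alternative to the if/else dispatch.
public class OptimizerRegistryDemo {

  interface Optimizer {}

  static class IndexSimpleOptimizer implements Optimizer {
    IndexSimpleOptimizer(Map<String, String> conf) {}
  }

  static class NameCostOptimizer implements Optimizer {
    NameCostOptimizer(Map<String, String> conf) {}
  }

  static final Map<String, Function<Map<String, String>, Optimizer>> FACTORIES =
      new HashMap<>();

  static {
    FACTORIES.put("INDEX_SIMPLE", IndexSimpleOptimizer::new);
    FACTORIES.put("NAME_COST_LEFTY", NameCostOptimizer::new);
    // one entry per optimizer type
  }

  static Optimizer pickOptimizer(Map<String, String> conf, String optStr) {
    Function<Map<String, String>, Optimizer> factory =
        FACTORIES.get(optStr.toUpperCase());
    if (factory == null) {
      throw new RuntimeException("Unknown optimizer: " + optStr);
    }
    return factory.apply(conf);
  }

  public static void main(String[] args) {
    Optimizer opt = pickOptimizer(new HashMap<>(), "index_simple");
    System.out.println("Selected: " + opt.getClass().getSimpleName());
  }
}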