/** * Configuration parameters for an interval are: * * <ul> * <li><code>[@left] (double)</code> Left extremum * <li><code>[@right] (double)</code> Right extremum * <li><code>[@closure] (String)</code> Interval closure. Supported values are "closed-closed", * "closed-open", "open-closed" and "open-open". Default value is "closed-closed". * </ul> */ public void configure(Configuration configuration) { // Get left extremum double left = configuration.getDouble("[@left]"); // Set left extremum setLeft(left); // Get right extremum double right = configuration.getDouble("[@right]"); // Set right extremum setRight(right); // Get closure string String closureString = configuration.getString("[@closure]", "closed-closed"); // Convert closureString Closure closure; if (closureString.equals("closed-closed")) { closure = Closure.ClosedClosed; } else if (closureString.equals("open-open")) { closure = Closure.OpenOpen; } else if (closureString.equals("closed-open")) { closure = Closure.ClosedOpen; } else if (closureString.equals("open-closed")) { closure = Closure.OpenClosed; } else { throw new ConfigurationRuntimeException("Illegal value for interval closure"); } // Set closure setClosure(closure); }
/**
 * Reads the optional {@code Max} parameter from the configuration. When the key is
 * absent the current field value is kept and a configuration warning is emitted.
 */
@Override
public void configure(Configuration conf) {
  super.configure(conf);
  if (conf.containsKey("Max")) {
    this.max = conf.getDouble("Max");
  } else {
    // Bug fix: the warning object was previously created but warn() was never
    // called, so the fallback-to-default was silent (compare the sibling
    // configure() methods in this file, which do call warn()).
    ConfWarning w = new ConfWarning("Max", this.max);
    w.warn();
  }
}
/** * Reads GMPE logic tree data and returns a {@link GmpeLogicTreeData} containing {@link LogicTree} * object(s) defining epistemic uncertainties on GMPES. */ public GmpeLogicTreeData createGmpeLogicTreeData() throws IOException { // read GMPE params from config file. Distinguish between reading from // file or kvs. Then read and instantiate logic tree objects using a // GmpeLogicTreeData object. String component = config.getString(ConfigItems.COMPONENT.name()); String intensityMeasureType = config.getString(ConfigItems.INTENSITY_MEASURE_TYPE.name()); Double period = config.getDouble(ConfigItems.PERIOD.name()); Double damping = config.getDouble(ConfigItems.DAMPING.name()); String gmpeTruncationType = config.getString(ConfigItems.GMPE_TRUNCATION_TYPE.name()); Double truncationLevel = config.getDouble(ConfigItems.TRUNCATION_LEVEL.name()); String standardDeviationType = config.getString(ConfigItems.STANDARD_DEVIATION_TYPE.name()); Double referenceVs30Value = config.getDouble(ConfigItems.REFERENCE_VS30_VALUE.name()); // instantiate eventually GmpeLogicTreeData gmpeLogicTree = null; if (hasPath == true) { String relativePath = getRelativePath(ConfigItems.GMPE_LOGIC_TREE_FILE.name()); gmpeLogicTree = new GmpeLogicTreeData(relativePath); gmpeLogicTree.parse_tree( component, intensityMeasureType, period, damping, gmpeTruncationType, truncationLevel, standardDeviationType, referenceVs30Value); } else { String gmpeSha = config.getString(ConfigItems.GMPE_LOGIC_TREE_FILE.name()); gmpeLogicTree = new GmpeLogicTreeData(kvs, gmpeSha); gmpeLogicTree.parse_tree( component, intensityMeasureType, period, damping, gmpeTruncationType, truncationLevel, standardDeviationType, referenceVs30Value); } return gmpeLogicTree; }
/**
 * Reads the optional {@code MaxNumRestarts} and {@code IncrPopFactor} parameters.
 * For each key that is absent, the current field value is kept and a warning
 * (qualified with this class's simple name) is emitted.
 */
@Override
public void configure(Configuration conf) {
  String warningPrefix = this.getClass().getSimpleName();

  if (conf.containsKey("MaxNumRestarts")) {
    this.max_num_restarts = conf.getInt("MaxNumRestarts");
  } else {
    new ConfWarning(warningPrefix + ".MaxNumRestarts", this.max_num_restarts).warn();
  }

  if (conf.containsKey("IncrPopFactor")) {
    this.incr_factor = conf.getDouble("IncrPopFactor");
  } else {
    new ConfWarning(warningPrefix + ".IncrPopFactor", this.incr_factor).warn();
  }
}
/**
 * Retrieves weight for the axis
 *
 * @return the weight if exists, the default value otherwise
 */
protected double computeAxisWeight() {
  // Looks up the configured weight under the key held in 'axisWeight', falling
  // back to 'defaultAxisWeight' when the key is absent.
  // NOTE(review): presumably 'axisWeight' is a configuration key string and
  // 'defaultAxisWeight' a primitive double; the explicit Double.valueOf boxing
  // selects the Double-typed default overload — confirm against the declaring
  // class before simplifying.
  return configuration.getDouble(axisWeight, Double.valueOf(defaultAxisWeight));
}
/**
 * Initializes the spamd connection settings from the given configuration,
 * applying defaults (localhost:783, rejection threshold 0.0) for missing keys.
 *
 * @param config source of the spamd settings
 * @throws ConfigurationException if the configuration cannot be read
 */
@Override
public void init(Configuration config) throws ConfigurationException {
  String host = config.getString("spamdHost", "localhost");
  int port = config.getInt("spamdPort", 783);
  double rejectionHits = config.getDouble("spamdRejectionHits", 0.0);

  setSpamdHost(host);
  setSpamdPort(port);
  setSpamdRejectionHits(rejectionHits);
}
/**
 * Set the GEM1ERF params given the parameters defined in the calculator
 * configuration (read from this object's {@code config} field).
 *
 * @param erf : erf for which parameters have to be set
 */
public void setGEM1ERFParams(GEM1ERF erf) {
  // set minimum magnitude
  /*
   * xxr: TODO: !!!type safety!!! apache's Configuration interface handles
   * a similar problem this way: Instead of defining one single method
   * like public void setParameter(String key, Object value) {...} there
   * is one method per type defined: setString(), setDouble(), setInt(),
   * ...
   */
  erf.setParameter(GEM1ERF.MIN_MAG_NAME, config.getDouble(ConfigItems.MINIMUM_MAGNITUDE.name()));
  // set time span
  TimeSpan timeSpan = new TimeSpan(TimeSpan.NONE, TimeSpan.YEARS);
  timeSpan.setDuration(config.getDouble(ConfigItems.INVESTIGATION_TIME.name()));
  erf.setTimeSpan(timeSpan);

  // ---- params for area sources ----
  // set inclusion of area sources in the calculation
  erf.setParameter(
      GEM1ERF.INCLUDE_AREA_SRC_PARAM_NAME,
      config.getBoolean(ConfigItems.INCLUDE_AREA_SOURCES.name()));
  // set rupture type ("area source rupture model /
  // area_source_rupture_model / AreaSourceRuptureModel)
  erf.setParameter(
      GEM1ERF.AREA_SRC_RUP_TYPE_NAME, config.getString(ConfigItems.TREAT_AREA_SOURCE_AS.name()));
  // set area discretization
  erf.setParameter(
      GEM1ERF.AREA_SRC_DISCR_PARAM_NAME,
      config.getDouble(ConfigItems.AREA_SOURCE_DISCRETIZATION.name()));
  // set mag-scaling relationship
  erf.setParameter(
      GEM1ERF.AREA_SRC_MAG_SCALING_REL_PARAM_NAME,
      config.getString(ConfigItems.AREA_SOURCE_MAGNITUDE_SCALING_RELATIONSHIP.name()));

  // ---- params for grid sources ----
  // inclusion of grid sources in the calculation
  erf.setParameter(
      GEM1ERF.INCLUDE_GRIDDED_SEIS_PARAM_NAME,
      config.getBoolean(ConfigItems.INCLUDE_GRID_SOURCES.name()));
  // rupture model
  erf.setParameter(
      GEM1ERF.GRIDDED_SEIS_RUP_TYPE_NAME,
      config.getString(ConfigItems.TREAT_GRID_SOURCE_AS.name()));
  // mag-scaling relationship
  // NOTE(review): this gridded-seismicity parameter reads the
  // AREA_SOURCE_MAGNITUDE_SCALING_RELATIONSHIP config item — it looks like a
  // copy-paste from the area-source section above. Confirm whether a
  // grid-specific ConfigItems entry was intended.
  erf.setParameter(
      GEM1ERF.GRIDDED_SEIS_MAG_SCALING_REL_PARAM_NAME,
      config.getString(ConfigItems.AREA_SOURCE_MAGNITUDE_SCALING_RELATIONSHIP.name()));

  // ---- params for fault sources ----
  // inclusion of fault sources in the calculation
  erf.setParameter(
      GEM1ERF.INCLUDE_FAULT_SOURCES_PARAM_NAME,
      config.getBoolean(ConfigItems.INCLUDE_FAULT_SOURCE.name()));
  // rupture offset
  erf.setParameter(
      GEM1ERF.FAULT_RUP_OFFSET_PARAM_NAME,
      config.getDouble(ConfigItems.FAULT_RUPTURE_OFFSET.name()));
  // surface discretization
  erf.setParameter(
      GEM1ERF.FAULT_DISCR_PARAM_NAME,
      config.getDouble(ConfigItems.FAULT_SURFACE_DISCRETIZATION.name()));
  // mag-scaling relationship
  erf.setParameter(
      GEM1ERF.FAULT_MAG_SCALING_REL_PARAM_NAME,
      config.getString(ConfigItems.FAULT_MAGNITUDE_SCALING_RELATIONSHIP.name()));
  // mag-scaling sigma
  erf.setParameter(
      GEM1ERF.FAULT_SCALING_SIGMA_PARAM_NAME,
      config.getDouble(ConfigItems.FAULT_MAGNITUDE_SCALING_SIGMA.name()));
  // rupture aspect ratio
  erf.setParameter(
      GEM1ERF.FAULT_RUP_ASPECT_RATIO_PARAM_NAME,
      config.getDouble(ConfigItems.RUPTURE_ASPECT_RATIO.name()));
  // rupture floating type
  erf.setParameter(
      GEM1ERF.FAULT_FLOATER_TYPE_PARAM_NAME,
      config.getString(ConfigItems.RUPTURE_FLOATING_TYPE.name()));

  // ---- params for subduction faults ----
  // inclusion of subduction fault sources in the calculation
  erf.setParameter(
      GEM1ERF.INCLUDE_SUBDUCTION_SOURCES_PARAM_NAME,
      config.getBoolean(ConfigItems.INCLUDE_SUBDUCTION_FAULT_SOURCE.name()));
  // rupture offset
  erf.setParameter(
      GEM1ERF.SUB_RUP_OFFSET_PARAM_NAME,
      config.getDouble(ConfigItems.SUBDUCTION_FAULT_RUPTURE_OFFSET.name()));
  // surface discretization
  erf.setParameter(
      GEM1ERF.SUB_DISCR_PARAM_NAME,
      config.getDouble(ConfigItems.SUBDUCTION_FAULT_SURFACE_DISCRETIZATION.name()));
  // mag-scaling relationship
  erf.setParameter(
      GEM1ERF.SUB_MAG_SCALING_REL_PARAM_NAME,
      config.getString(ConfigItems.SUBDUCTION_FAULT_MAGNITUDE_SCALING_RELATIONSHIP.name()));
  // mag-scaling sigma
  erf.setParameter(
      GEM1ERF.SUB_SCALING_SIGMA_PARAM_NAME,
      config.getDouble(ConfigItems.SUBDUCTION_FAULT_MAGNITUDE_SCALING_SIGMA.name()));
  // rupture aspect ratio
  erf.setParameter(
      GEM1ERF.SUB_RUP_ASPECT_RATIO_PARAM_NAME,
      config.getDouble(ConfigItems.SUBDUCTION_RUPTURE_ASPECT_RATIO.name()));
  // rupture floating type
  erf.setParameter(
      GEM1ERF.SUB_FLOATER_TYPE_PARAM_NAME,
      config.getString(ConfigItems.SUBDUCTION_RUPTURE_FLOATING_TYPE.name()));

  // rebuild the forecast so the parameter changes take effect
  erf.updateForecast();
} // setGEM1ERFParams()
/**
 * Generates one source model (an array list of GEMSourceData objects) by randomly
 * sampling the source model logic tree: the first branching level selects the base
 * source model file, and each subsequent level applies an uncertainty rule to every
 * source.
 *
 * @param lt : source model logic tree
 * @param seed : seed number for the random number generator
 * @return the sampled list of sources
 * @throws IllegalArgumentException if the first branching level carries no source
 *     model file, or if a sampled branch defines no rule
 */
public List<GEMSourceData> sampleSourceModelLogicTree(
    LogicTree<ArrayList<GEMSourceData>> lt, long seed) {
  List<GEMSourceData> srcList = null;
  Random rn = new Random(seed);
  // sample first branching level to get the starting source model
  // (sampleBranchingLevel appears to return a 1-based branch number, hence the -1)
  int branchNumber = lt.sampleBranchingLevel(0, rn);
  LogicTreeBranch branch = lt.getBranchingLevel(0).getBranch(branchNumber - 1);
  if (branch.getNameInputFile() != null) {
    String sourceName = null;
    if (hasPath) { // job from file
      sourceName = configFilesPath() + branch.getNameInputFile();
    } else { // job from kvs
      sourceName = FilenameUtils.concat(config.getString("BASE_PATH"), branch.getNameInputFile());
    }
    SourceModelReader sourceModelReader =
        new SourceModelReader(sourceName, config.getDouble(ConfigItems.WIDTH_OF_MFD_BIN.name()));
    // load sources
    srcList = sourceModelReader.read();
  } else {
    String msg =
        "The first branching level of the ERF logic tree does"
            + " not contain a source model!!\n"
            + "Please correct your input!\n Execution stopped!";
    logger.info(msg);
    throw new IllegalArgumentException(msg);
  }
  // loop over sources
  // for each source, loop over remaining branching levels and apply
  // uncertainties
  int numBranchingLevels = lt.getBranchingLevelsList().size();
  int sourceIndex = 0;
  for (GEMSourceData src : srcList) {
    for (int i = 1; i < numBranchingLevels; i++) {
      // sample the current branching level
      branchNumber = lt.sampleBranchingLevel(i, rn);
      // get the sampled branch
      branch = lt.getBranchingLevel(i).getBranch(branchNumber - 1);
      if (branch.getRule() != null) {
        // at the moment we apply rules to all source
        // typologies. In the future we may want
        // to apply some filter (i.e. apply rule to this source
        // type only...)
        //
        // NOTE(review): 'src' is never rebound after srcList.set(...), so a rule
        // sampled at level i is applied to the ORIGINAL source and its result
        // overwrites the result of level i-1. Confirm whether uncertainties are
        // meant to compound across branching levels.
        // NOTE(review): the instanceof checks below are not mutually exclusive
        // (no else-if); if any of these source types extend one another, more
        // than one applyRule* could fire for the same source — verify the type
        // hierarchy.
        // if area source
        if (src instanceof GEMAreaSourceData) {
          // replace the old source with the new source
          // accordingly to the rule
          srcList.set(
              sourceIndex, applyRuleToAreaSource((GEMAreaSourceData) src, branch.getRule()));
        }
        // if point source
        if (src instanceof GEMPointSourceData) {
          // replace the old source with the new source
          // accordingly to the rule
          srcList.set(
              sourceIndex, applyRuleToPointSource((GEMPointSourceData) src, branch.getRule()));
        }
        // if fault source
        if (src instanceof GEMFaultSourceData) {
          // replace the old source with the new source
          // accordingly to the rule
          srcList.set(
              sourceIndex, applyRuleToFaultSource((GEMFaultSourceData) src, branch.getRule()));
        }
        // if subduction source
        if (src instanceof GEMSubductionFaultSourceData) {
          // replace the old source with the new source
          // accordingly to the rule
          srcList.set(
              sourceIndex,
              applyRuleToSubductionFaultSource(
                  (GEMSubductionFaultSourceData) src, branch.getRule()));
        }
      } else {
        // rule is not defined:
        String msg =
            "No rule is defined at branching level: "
                + i
                + "\n"
                + "Please correct your input!\n"
                + "Execution stopped!";
        logger.info(msg);
        throw new IllegalArgumentException(msg);
      } // end if no rule is defined
    } // end loop over branching levels
    sourceIndex = sourceIndex + 1;
  } // end loop over sources
  return srcList;
}
/**
 * Entry point for the multi-user experiment driver: parses command-line options,
 * loads configuration, builds a fixed schedule of sub-experiments in which user1's
 * demand ramps from 0% to 25% to 50% of the cluster and back down, optionally runs
 * a warmup phase, and launches the tasks against the scheduler.
 *
 * @param args command-line arguments; "-c <file>" names a properties file, "--help"
 *     prints usage and exits
 */
public void run(String[] args) {
  try {
    OptionParser parser = new OptionParser();
    parser.accepts("c", "configuration file").withRequiredArg().ofType(String.class);
    parser.accepts("help", "print help statement");
    OptionSet options = parser.parse(args);
    if (options.has("help")) {
      parser.printHelpOn(System.out);
      System.exit(-1);
    }
    // Logger configuration: log to the console
    BasicConfigurator.configure();
    LOG.setLevel(Level.DEBUG);
    // Empty configuration by default; replaced by the file contents when -c is given.
    Configuration conf = new PropertiesConfiguration();
    if (options.has("c")) {
      String configFile = (String) options.valueOf("c");
      conf = new PropertiesConfiguration(configFile);
    }
    double warmup_lambda =
        conf.getDouble("warmup_job_arrival_rate_s", DEFAULT_WARMUP_JOB_ARRIVAL_RATE_S);
    int warmup_duration_s = conf.getInt("warmup_s", DEFAULT_WARMUP_S);
    int post_warmup_s = conf.getInt("post_warmup_s", DEFAULT_POST_WARMUP_S);
    // We use this to represent the the rate to fully load the cluster. This is a hack.
    double lambda = conf.getDouble("job_arrival_rate_s", DEFAULT_JOB_ARRIVAL_RATE_S);
    int experiment_duration_s = conf.getInt("experiment_s", DEFAULT_EXPERIMENT_S);
    LOG.debug(
        "Using arrival rate of "
            + lambda
            + " tasks per second and running experiment for "
            + experiment_duration_s
            + " seconds.");
    int tasksPerJob = conf.getInt("tasks_per_job", DEFAULT_TASKS_PER_JOB);
    int numPreferredNodes = conf.getInt("num_preferred_nodes", DEFAULT_NUM_PREFERRED_NODES);
    LOG.debug("Using " + numPreferredNodes + " preferred nodes for each task.");
    int benchmarkIterations = conf.getInt("benchmark.iterations", DEFAULT_BENCHMARK_ITERATIONS);
    int benchmarkId = conf.getInt("benchmark.id", DEFAULT_TASK_BENCHMARK);
    List<String> backends = new ArrayList<String>();
    if (numPreferredNodes > 0) {
      /* Attempt to parse the list of slaves, which we'll need to (randomly) select preferred
       * nodes.
       */
      // NOTE(review): LOG.fatal does not terminate the program, so execution
      // continues past these two checks even when the backend list is missing or
      // too small — presumably an exit/throw was intended; confirm.
      if (!conf.containsKey(BACKENDS)) {
        LOG.fatal(
            "Missing configuration backend list, which is needed to randomly select "
                + "preferred nodes (num_preferred_nodes set to "
                + numPreferredNodes
                + ")");
      }
      for (String node : conf.getStringArray(BACKENDS)) {
        backends.add(node);
      }
      if (backends.size() < numPreferredNodes) {
        LOG.fatal("Number of backends smaller than number of preferred nodes!");
      }
    }
    List<SubExperiment> experiments = new ArrayList<SubExperiment>();
    double fullyUtilizedArrivalRate = lambda;
    // For the first twenty seconds, the first user submits at a rate to fully utilize the
    // cluster.
    List<UserInfo> onlyUser0 = new ArrayList<UserInfo>();
    onlyUser0.add(new UserInfo("user0", 1, 0));
    experiments.add(new SubExperiment(onlyUser0, 20, fullyUtilizedArrivalRate));
    // For the next 10 seconds, user1 increases her rate to 25% of the cluster.
    List<UserInfo> user1QuarterDemand = new ArrayList<UserInfo>();
    user1QuarterDemand.add(new UserInfo("user0", 4, 0));
    user1QuarterDemand.add(new UserInfo("user1", 5, 0));
    experiments.add(new SubExperiment(user1QuarterDemand, 10, 1.25 * fullyUtilizedArrivalRate));
    // For the next 10 seconds, user 1 increases her rate to 50% of the cluster (using exactly
    // her share, but no more).
    List<UserInfo> user1HalfDemand = new ArrayList<UserInfo>();
    user1HalfDemand.add(new UserInfo("user0", 2, 0));
    user1HalfDemand.add(new UserInfo("user1", 3, 0));
    experiments.add(new SubExperiment(user1HalfDemand, 10, 1.5 * fullyUtilizedArrivalRate));
    // Next user 1 goes back down to 25%.
    experiments.add(new SubExperiment(user1QuarterDemand, 10, 1.25 * fullyUtilizedArrivalRate));
    // Finally user 1 goes back to 0.
    experiments.add(new SubExperiment(onlyUser0, 20, fullyUtilizedArrivalRate));
    SparrowFrontendClient client = new SparrowFrontendClient();
    int schedulerPort = conf.getInt("scheduler_port", SchedulerThrift.DEFAULT_SCHEDULER_THRIFT_PORT);
    client.initialize(new InetSocketAddress("localhost", schedulerPort), APPLICATION_ID, this);
    if (warmup_duration_s > 0) {
      // A single dedicated "warmupUser" runs for warmup_duration_s at warmup_lambda,
      // then we pause post_warmup_s seconds to let scheduler queues drain.
      List<SubExperiment> warmupExperiment = new ArrayList<SubExperiment>();
      List<UserInfo> warmupUsers = new ArrayList<UserInfo>();
      warmupUsers.add(new UserInfo("warmupUser", 1, 0));
      warmupExperiment.add(new SubExperiment(warmupUsers, warmup_duration_s, warmup_lambda));
      LOG.debug(
          "Warming up for "
              + warmup_duration_s
              + " seconds at arrival rate of "
              + warmup_lambda
              + " jobs per second");
      launchTasks(
          warmupExperiment,
          tasksPerJob,
          numPreferredNodes,
          benchmarkIterations,
          benchmarkId,
          backends,
          client);
      LOG.debug(
          "Waiting for queues to drain after warmup (waiting " + post_warmup_s + " seconds)");
      Thread.sleep(post_warmup_s * 1000);
    }
    LOG.debug("Launching experiment for " + experiment_duration_s + " seconds");
    launchTasks(
        experiments,
        tasksPerJob,
        numPreferredNodes,
        benchmarkIterations,
        benchmarkId,
        backends,
        client);
  } catch (Exception e) {
    // Catch-all boundary: any failure (config, I/O, interruption) is logged and
    // the method returns normally.
    LOG.error("Fatal exception", e);
  }
}