/**
 * Process a cloudlet (job) return event.
 *
 * @param ev a SimEvent object
 * @pre ev != $null
 * @post $none
 */
@Override
protected void processCloudletReturn(SimEvent ev) {
    Cloudlet cloudlet = (Cloudlet) ev.getData();
    Job job = (Job) cloudlet;
    /** Generate a failure if the failure rate is not zero. */
    FailureGenerator.generate(job);

    getCloudletReceivedList().add(cloudlet);
    getCloudletSubmittedList().remove(cloudlet);

    CondorVM vm = (CondorVM) getVmsCreatedList().get(cloudlet.getVmId());
    // so that this resource is released
    vm.setState(WorkflowSimTags.VM_STATUS_IDLE);

    double delay = 0.0;
    if (Parameters.getOverheadParams().getPostDelay() != null) {
        delay = Parameters.getOverheadParams().getPostDelay(job);
    }
    schedule(this.workflowEngineId, delay, CloudSimTags.CLOUDLET_RETURN, cloudlet);

    cloudletsSubmitted--;
    // Do not update immediately; defer the update event so that several
    // returned jobs can be handled in one pass.
    schedule(this.getId(), 0.0, WorkflowSimTags.CLOUDLET_UPDATE);
}
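/*
 * A minimal sketch (not from the original source) of how the post delay read
 * above could be configured so that getPostDelay(job) returns a nonzero value.
 * It assumes the post-delay argument takes the same per-level
 * Map<Integer, Double> shape as the clustering-delay map used in the example
 * below; verify against the OverheadParameters constructor of your
 * WorkflowSim version before relying on it.
 */
private static OverheadParameters overheadWithPostDelay() {
    Map<Integer, Double> postDelay = new HashMap<>();
    for (int level = 0; level < 11; level++) {
        postDelay.put(level, 2.0); // hypothetical: 2.0 s post-script overhead per level
    }
    // Assumed slot order: (wedInterval, wedDelay, queueDelay, postDelay, clustDelay, bandwidth)
    return new OverheadParameters(0, null, null, postDelay, null, 0);
}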
/**
 * Creates the main() to run this example. This example has only one datacenter
 * and one storage.
 */
public static void main(String[] args) {
    try {
        // First step: Initialize the WorkflowSim package.

        /*
         * Note that the actual number of VMs created may be smaller than vmNum
         * if the data center or its hosts lack sufficient resources.
         */
        int vmNum = 20; // number of vms

        /** Change this to the physical path in your working environment. */
        String daxPath = "/Users/chenweiwei/Work/WorkflowSim-1.0/config/dax/Montage_100.xml";
        File daxFile = new File(daxPath);
        if (!daxFile.exists()) {
            Log.printLine(
                "Warning: Please replace daxPath with the physical path in your working environment!");
            return;
        }

        /*
         * Use default Fault Tolerant Parameters.
         */
        Parameters.FTCMonitor ftc_monitor = Parameters.FTCMonitor.MONITOR_NONE;
        Parameters.FTCFailure ftc_failure = Parameters.FTCFailure.FAILURE_NONE;
        Parameters.FTCluteringAlgorithm ftc_method = null;

        /*
         * Since we are using the MINMIN scheduling algorithm, the planning
         * algorithm should be INVALID so that the planner does not override
         * the result of the scheduler.
         */
        Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
        Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
        ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

        /*
         * A clustering delay must be provided. If you don't need one, set the
         * delay of every level to zero, but the map must not be null.
         */
        Map<Integer, Double> clusteringDelay = new HashMap<>();
        /** Montage has at most 11 horizontal levels. */
        int maxLevel = 11;
        for (int level = 0; level < maxLevel; level++) {
            clusteringDelay.put(level, 1.0); // the clustering delay of each level is 1.0 second
        }
        // Add clustering delay to the overhead parameters
        OverheadParameters op = new OverheadParameters(0, null, null, null, clusteringDelay, 0);

        /** Horizontal Clustering */
        ClusteringParameters.ClusteringMethod method =
            ClusteringParameters.ClusteringMethod.HORIZONTAL;
        /*
         * You can only specify clusters.num or clusters.size.
         * clusters.num is the number of clustered jobs per horizontal level;
         * clusters.size is the number of tasks per clustered job;
         * clusters.num * clusters.size = the number of tasks per horizontal level.
         * Here we specify clusters.num = 20, which means we have 20 jobs per level.
         */
        ClusteringParameters cp = new ClusteringParameters(20, 0, method, null);

        /** Initialize static parameters. */
        Parameters.init(
            ftc_method, ftc_monitor, ftc_failure, null, vmNum, daxPath, null, null,
            op, cp, sch_method, pln_method, null, 0);
        ReplicaCatalog.init(file_system);

        FailureMonitor.init();
        FailureGenerator.init();

        // before creating any entities.
        int num_user = 1; // number of grid users
        Calendar calendar = Calendar.getInstance();
        boolean trace_flag = false; // mean trace events

        // Initialize the CloudSim library
        CloudSim.init(num_user, calendar, trace_flag);

        DatacenterExtended datacenter0 = createDatacenter("Datacenter_0");

        /** Create a WorkflowPlanner with one scheduler. */
        WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
        /** Create a WorkflowEngine. */
        WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
        /*
         * Create a list of VMs. The userId of a VM is the id of the scheduler
         * that controls it.
         */
        List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

        /** Submit this list of VMs to this WorkflowEngine. */
        wfEngine.submitVmList(vmlist0, 0);

        /** Bind the data centers with the scheduler. */
        wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

        CloudSim.startSimulation();
        List<Job> outputList0 = wfEngine.getJobsReceivedList();
        CloudSim.stopSimulation();
        printJobList(outputList0);
    } catch (Exception e) {
        Log.printLine("The simulation has been terminated due to an unexpected error");
    }
}
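/*
 * A sketch of the createVM helper the example calls, in the spirit of the
 * stock WorkflowSim examples. The resource figures (MIPS, RAM, bandwidth,
 * image size) are illustrative defaults, not values mandated by the library.
 */
protected static List<CondorVM> createVM(int userId, int vms) {
    LinkedList<CondorVM> list = new LinkedList<>();

    // VM parameters
    long size = 10000; // image size (MB)
    int ram = 512; // vm memory (MB)
    int mips = 1000;
    long bw = 1000;
    int pesNumber = 1; // number of cpus
    String vmm = "Xen"; // VMM name

    for (int i = 0; i < vms; i++) {
        // Space-shared scheduling: one cloudlet per PE at a time
        CondorVM vm = new CondorVM(i, userId, mips, pesNumber, ram, bw, size, vmm,
            new CloudletSchedulerSpaceShared());
        list.add(vm);
    }
    return list;
}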
/**
 * Creates the main() to run this example. This example has only one datacenter
 * and one storage.
 */
public static void main(String[] args) {
    try {
        // First step: Initialize the WorkflowSim package.

        /*
         * Note that the actual number of VMs created may be smaller than vmNum
         * if the data center or its hosts lack sufficient resources.
         */
        int vmNum = 20; // number of vms

        /** Change this to the physical path in your working environment. */
        String daxPath = "/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";
        File daxFile = new File(daxPath);
        if (!daxFile.exists()) {
            Log.printLine(
                "Warning: Please replace daxPath with the physical path in your working environment!");
            return;
        }

        /*
         * Fault Tolerant Parameters
         */
        /*
         * MONITOR_JOB classifies failures based on the level of jobs;
         * MONITOR_VM classifies failures based on the vm id;
         * MONITOR_ALL does not do any classification;
         * MONITOR_NONE does not record any failure.
         */
        FailureParameters.FTCMonitor ftc_monitor = FailureParameters.FTCMonitor.MONITOR_JOB;
        /** Similar to FTCMonitor, FTCFailure controls how failures are generated. */
        FailureParameters.FTCFailure ftc_failure = FailureParameters.FTCFailure.FAILURE_JOB;
        /** In this example, we use Dynamic Reclustering as the fault-tolerant clustering method. */
        FailureParameters.FTCluteringAlgorithm ftc_method =
            FailureParameters.FTCluteringAlgorithm.FTCLUSTERING_DR;

        /** Task failure generators, per vm and per level. */
        int maxLevel = 11; // most workflows we use have at most 11 levels
        DistributionGenerator[][] failureGenerators = new DistributionGenerator[vmNum][maxLevel];
        for (int level = 0; level < maxLevel; level++) {
            /*
             * For simplicity, every vm at every level shares the same Weibull
             * failure distribution. The failure model does not have to be
             * identical across levels.
             */
            DistributionGenerator generator =
                new DistributionGenerator(
                    DistributionGenerator.DistributionFamily.WEIBULL, 100, 1.0, 30, 300, 0.78);
            for (int vmId = 0; vmId < vmNum; vmId++) {
                failureGenerators[vmId][level] = generator;
            }
        }

        /*
         * Since we are using the MINMIN scheduling algorithm, the planning
         * algorithm should be INVALID so that the planner does not override
         * the result of the scheduler.
         */
        Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
        Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
        ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

        /** No overheads */
        OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

        /** No clustering */
        ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
        ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

        /** Initialize static parameters. */
        FailureParameters.init(ftc_method, ftc_monitor, ftc_failure, failureGenerators);
        Parameters.init(vmNum, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);
        ReplicaCatalog.init(file_system);

        FailureMonitor.init();
        FailureGenerator.init();

        // before creating any entities.
        int num_user = 1; // number of grid users
        Calendar calendar = Calendar.getInstance();
        boolean trace_flag = false; // mean trace events

        // Initialize the CloudSim library
        CloudSim.init(num_user, calendar, trace_flag);

        WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

        /** Create a WorkflowPlanner with one scheduler. */
        WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
        /** Create a WorkflowEngine. */
        WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
        /*
         * Create a list of VMs. The userId of a VM is the id of the scheduler
         * that controls it.
         */
        List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

        /** Submit this list of VMs to this WorkflowEngine. */
        wfEngine.submitVmList(vmlist0, 0);

        /** Bind the data centers with the scheduler. */
        wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

        CloudSim.startSimulation();
        List<Job> outputList0 = wfEngine.getJobsReceivedList();
        CloudSim.stopSimulation();
        printJobList(outputList0);
    } catch (Exception e) {
        Log.printLine("The simulation has been terminated due to an unexpected error");
    }
}
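/*
 * A sketch of the printJobList helper used above, in the spirit of the stock
 * WorkflowSim examples; the exact columns in your copy may differ. It is
 * useful here because, with failures enabled, some jobs come back with the
 * FAILED status. Requires java.text.DecimalFormat.
 */
protected static void printJobList(List<Job> list) {
    String indent = "    ";
    Log.printLine();
    Log.printLine("========== OUTPUT ==========");
    Log.printLine("Job ID" + indent + "STATUS" + indent + "Data center ID" + indent
        + "VM ID" + indent + "Time" + indent + "Start Time" + indent + "Finish Time"
        + indent + "Depth");
    DecimalFormat dft = new DecimalFormat("###.##");
    for (Job job : list) {
        Log.print(indent + job.getCloudletId() + indent + indent);
        if (job.getCloudletStatus() == Cloudlet.SUCCESS) {
            Log.print("SUCCESS");
        } else if (job.getCloudletStatus() == Cloudlet.FAILED) {
            Log.print("FAILED");
        }
        Log.printLine(indent + indent + job.getResourceId() + indent + indent
            + job.getVmId() + indent + indent + dft.format(job.getActualCPUTime())
            + indent + indent + dft.format(job.getExecStartTime()) + indent + indent
            + dft.format(job.getFinishTime()) + indent + indent + job.getDepth());
    }
}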