/**
   * Process a cloudlet (job) return event.
   *
   * @param ev a SimEvent object
   * @pre ev != $null
   * @post $none
   */
  @Override
  protected void processCloudletReturn(SimEvent ev) {
    Cloudlet cloudlet = (Cloudlet) ev.getData();
    Job job = (Job) cloudlet;

    // Generate a failure if the failure rate is not zero.
    FailureGenerator.generate(job);

    getCloudletReceivedList().add(cloudlet);
    getCloudletSubmittedList().remove(cloudlet);

    // Mark the VM idle so the resource is released for the next scheduling round.
    // Note: the created-VM list is indexed by VM id, which assumes ids are
    // assigned contiguously from 0.
    CondorVM vm = (CondorVM) getVmsCreatedList().get(cloudlet.getVmId());
    vm.setState(WorkflowSimTags.VM_STATUS_IDLE);

    double delay = 0.0;
    if (Parameters.getOverheadParams().getPostDelay() != null) {
      delay = Parameters.getOverheadParams().getPostDelay(job);
    }
    schedule(this.workflowEngineId, delay, CloudSimTags.CLOUDLET_RETURN, cloudlet);

    cloudletsSubmitted--;
    // Trigger a scheduling update immediately. Ideally this would be batched
    // (e.g. wait ~1 s) until several jobs have returned.
    schedule(this.getId(), 0.0, WorkflowSimTags.CLOUDLET_UPDATE);
  }
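  /*
   * For orientation: a minimal sketch of how the two tags used above could be
   * routed in this entity's event loop. This dispatcher is an illustrative
   * assumption, not the shipped code.
   */
  protected void dispatchSketch(SimEvent ev) {
    switch (ev.getTag()) {
      case CloudSimTags.CLOUDLET_RETURN:
        // A cloudlet has come back from the datacenter.
        processCloudletReturn(ev);
        break;
      case WorkflowSimTags.CLOUDLET_UPDATE:
        // Re-run the scheduling algorithm over the waiting cloudlets.
        processCloudletUpdate(ev);
        break;
      default:
        break;
    }
  }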
  /**
   * Update the scheduling: submit ready cloudlets (jobs) to VMs.
   *
   * @param ev a SimEvent object
   */
  protected void processCloudletUpdate(SimEvent ev) {

    BaseSchedulingAlgorithm scheduler = getScheduler(Parameters.getSchedulingAlgorithm());
    scheduler.setCloudletList(getCloudletList());
    scheduler.setVmList(getVmsCreatedList());

    try {
      scheduler.run();
    } catch (Exception e) {
      Log.printLine("Error in configuring scheduler_method");
      e.printStackTrace();
    }

    List<Cloudlet> scheduledList = scheduler.getScheduledList();
    for (Cloudlet cloudlet : scheduledList) {
      int vmId = cloudlet.getVmId();
      double delay = 0.0;
      if (Parameters.getOverheadParams().getQueueDelay() != null) {
        delay = Parameters.getOverheadParams().getQueueDelay(cloudlet);
      }
      schedule(getVmsToDatacentersMap().get(vmId), delay, CloudSimTags.CLOUDLET_SUBMIT, cloudlet);
    }
    getCloudletList().removeAll(scheduledList);
    getCloudletSubmittedList().addAll(scheduledList);
    cloudletsSubmitted += scheduledList.size();
  }
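  /*
   * The scheduler contract exercised above, shown as a toy algorithm (an
   * illustrative sketch, not one of the shipped schedulers): the caller binds the
   * waiting cloudlets and the created VMs, invokes run(), and reads back the
   * scheduled subset via getScheduledList(). First-fit onto an idle VM is assumed
   * here purely for demonstration.
   */
  static class FirstFitSketch extends BaseSchedulingAlgorithm {
    @Override
    public void run() {
      for (Object cloudletObj : getCloudletList()) {
        Cloudlet cloudlet = (Cloudlet) cloudletObj;
        for (Object vmObj : getVmList()) {
          CondorVM vm = (CondorVM) vmObj;
          if (vm.getState() == WorkflowSimTags.VM_STATUS_IDLE) {
            // Claim the VM and mark the cloudlet as scheduled.
            vm.setState(WorkflowSimTags.VM_STATUS_BUSY);
            cloudlet.setVmId(vm.getId());
            getScheduledList().add(cloudlet);
            break;
          }
        }
      }
    }
  }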
  /**
   * Initialize a WorkflowParser.
   *
   * @param userId the user id. Currently only single-user mode has been tested.
   */
  public WorkflowParser(int userId) {
    this.userId = userId;
    this.mName2Task = new HashMap<String, Task>();
    this.daxPath = Parameters.getDaxPath();
    this.daxPaths = Parameters.getDAXPaths();
    this.jobIdStartsFrom = 1;

    setTaskList(new ArrayList<Task>());
  }
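  /*
   * Typical usage (an illustrative sketch; parse() is assumed to be the public
   * entry point that drives parseXmlFile below):
   */
  static List<Task> parseSketch(int userId) {
    WorkflowParser parser = new WorkflowParser(userId);
    parser.parse(); // reads the DAX file(s) configured in Parameters
    return parser.getTaskList();
  }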
  /** The main function: randomly assign each task to a VM. */
  @Override
  public void run() {

    for (Iterator it = getTaskList().iterator(); it.hasNext(); ) {
      Task task = (Task) it.next();
      // Task length in (rough) seconds; note the truncating integer division.
      double duration = task.getCloudletLength() / 1000;

      // Parent and child lists are not consulted; the assignment below is purely random.

      int vmNum = getVmList().size();
      // Randomly choose a VM, seeded with the task duration so the choice is
      // reproducible for a given task length.
      Random random = new Random((long) duration);
      int vmId = random.nextInt(vmNum);

      CondorVM vm = (CondorVM) getVmList().get(vmId);
      task.setVmId(vm.getId());
    }
  }
  /** Parse a DAX file with JDOM. */
  private void parseXmlFile(String path) {

    try {

      SAXBuilder builder = new SAXBuilder();
      // parse using builder to get DOM representation of the XML file
      Document dom = builder.build(new File(path));
      Element root = dom.getRootElement();
      List list = root.getChildren();
      for (Iterator it = list.iterator(); it.hasNext(); ) {
        Element node = (Element) it.next();
        if (node.getName().equalsIgnoreCase("job")) {

          long length = 0;
          String nodeName = node.getAttributeValue("id");
          String nodeType = node.getAttributeValue("name");

          /*
           * Capture the runtime (in seconds). If the attribute is missing, fall
           * back to 0.1 s; otherwise CloudSim would ignore a zero-length task.
           * See BUG/#11. The length is the runtime scaled by 1000 and floored
           * at 100.
           */
          double runtime = 0.1;
          if (node.getAttributeValue("runtime") != null) {
            runtime = Double.parseDouble(node.getAttributeValue("runtime"));
          } else {
            Log.printLine("Cannot find runtime for " + nodeName + ", set it to the default 0.1");
          }
          runtime *= 1000;
          if (runtime < 100) {
            runtime = 100;
          }
          length = (long) runtime;
          // Multiply by the runtime scale (1.0 by default).
          length *= Parameters.getRuntimeScale();

          List fileList = node.getChildren();

          List mFileList = new ArrayList<org.cloudbus.cloudsim.File>();

          for (Iterator itf = fileList.iterator(); itf.hasNext(); ) {
            Element file = (Element) itf.next();
            if (file.getName().equalsIgnoreCase("uses")) {
              String fileName = file.getAttributeValue("name"); // DAX version 3.3
              if (fileName == null) {
                fileName = file.getAttributeValue("file"); // DAX version 3.0
              }
              if (fileName == null) {
                Log.printLine("Error in parsing xml: missing file name");
                continue; // skip this malformed <uses> element
              }

              String inout = file.getAttributeValue("link");
              double size = 0.0;

              String fileSize = file.getAttributeValue("size");
              if (fileSize != null) {
                size = Double.parseDouble(fileSize);
              } else {
                Log.printLine("File Size not found for " + fileName);
              }

              // Work around a CloudSim bug: a size of 0 causes problems; 1 is fine.
              if (size == 0) {
                size++;
              }
              // Set the file type: 1 is input, 2 is output.
              int type = 0;
              if ("input".equals(inout)) {
                type = Parameters.FileType.INPUT.value;
              } else if ("output".equals(inout)) {
                type = Parameters.FileType.OUTPUT.value;
              } else {
                Log.printLine("Parsing Error: unknown link type " + inout);
              }
              org.cloudbus.cloudsim.File tFile;
              if (size < 0) {
                // A negative size is assumed to be a parser error; flip the sign.
                size = -size;
                Log.printLine("Size is negative; assuming it is a parser error");
              }
              /*
               * An output file is always created fresh; an input file is reused
               * from the ReplicaCatalog if it is already registered (e.g., it is
               * an earlier task's output).
               */
              if (type == Parameters.FileType.OUTPUT.value) {
                tFile = new org.cloudbus.cloudsim.File(fileName, (int) size);
              } else if (ReplicaCatalog.containsFile(fileName)) {
                tFile = ReplicaCatalog.getFile(fileName);
              } else {
                tFile = new org.cloudbus.cloudsim.File(fileName, (int) size);
                ReplicaCatalog.setFile(fileName, tFile);
              }

              tFile.setType(type);
              mFileList.add(tFile);
            }
          }
          Task task;
          // In case of multiple workflow submissions, keep jobIdStartsFrom consistent.
          synchronized (this) {
            task = new Task(this.jobIdStartsFrom, length);
            this.jobIdStartsFrom++;
          }
          task.setType(nodeType);

          task.setUserId(userId);
          mName2Task.put(nodeName, task);

          for (Iterator itm = mFileList.iterator(); itm.hasNext(); ) {
            org.cloudbus.cloudsim.File file = (org.cloudbus.cloudsim.File) itm.next();
            task.addRequiredFile(file.getName());
          }

          task.setFileList(mFileList);
          this.getTaskList().add(task);

        } else if (node.getName().equalsIgnoreCase("child")) {
          // Add dependency info.
          List pList = node.getChildren();
          String childName = node.getAttributeValue("ref");
          if (mName2Task.containsKey(childName)) {

            Task childTask = (Task) mName2Task.get(childName);

            for (Iterator itc = pList.iterator(); itc.hasNext(); ) {
              Element parent = (Element) itc.next();
              String parentName = parent.getAttributeValue("ref");
              if (mName2Task.containsKey(parentName)) {
                Task parentTask = (Task) mName2Task.get(parentName);
                parentTask.addChild(childTask);
                childTask.addParent(parentTask);
              }
            }
          }
        }
      }
      // If a task has no parent, it is a root task.
      ArrayList<Task> roots = new ArrayList<>();
      for (Iterator it = mName2Task.values().iterator(); it.hasNext(); ) {
        Task task = (Task) it.next();
        task.setDepth(0);
        if (task.getParentList().isEmpty()) {
          roots.add(task);
        }
      }

      // Assign depths from top to bottom.
      for (Iterator it = roots.iterator(); it.hasNext(); ) {
        Task task = (Task) it.next();
        setDepth(task, 1);
      }
      // Clear the map to save memory; parsing a workflow can consume a lot of it.
      this.mName2Task.clear();

    } catch (JDOMException jde) {
      Log.printLine("JDOM Exception; please make sure your DAX file is valid");

    } catch (IOException ioe) {
      Log.printLine("IO Exception; please make sure dax.path is correctly set in your config file");

    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("Parsing Exception");
    }
  }
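  /*
   * For reference, a minimal DAX fragment (abbreviated, Pegasus DAX 3.x style)
   * showing the elements and attributes the parser above reads:
   *
   *   <adag>
   *     <job id="ID00000" name="mProjectPP" runtime="13.59">
   *       <uses name="region.hdr" link="input" size="304"/>
   *       <uses name="projected.fits" link="output" size="4222080"/>
   *     </job>
   *     <child ref="ID00001">
   *       <parent ref="ID00000"/>
   *     </child>
   *   </adag>
   */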
  /** Creates main() to run this example. This example has only one datacenter and one storage. */
  public static void main(String[] args) {

    try {
      // First step: Initialize the WorkflowSim package.

      /*
       * Note that the exact number of VMs created may be smaller than vmNum:
       * if the data center or the host does not have sufficient resources,
       * fewer VMs will be created. Take care.
       */
      int vmNum = 20; // number of VMs
      // Change this to the physical DAX path in your working environment.
      String daxPath = "/Users/chenweiwei/Work/WorkflowSim-1.0/config/dax/Montage_100.xml";
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      /*
       * Use default Fault Tolerant Parameters
       */
      Parameters.FTCMonitor ftc_monitor = Parameters.FTCMonitor.MONITOR_NONE;
      Parameters.FTCFailure ftc_failure = Parameters.FTCFailure.FAILURE_NONE;
      Parameters.FTCluteringAlgorithm ftc_method = null;

      /*
       * Since we are using the MINMIN scheduling algorithm, the planning
       * algorithm should be INVALID so that the planner does not override the
       * scheduler's result.
       */
      Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

      /*
       * A clustering delay must be provided. If you do not need one, set every
       * level's delay to zero, but not to null.
       */
      Map<Integer, Double> clusteringDelay = new HashMap<>();
      // Montage has at most 11 horizontal levels.
      int maxLevel = 11;
      for (int level = 0; level < maxLevel; level++) {
        // The clustering delay assigned to each level is 1.0 second.
        clusteringDelay.put(level, 1.0);
      }
      // Add the clustering delay to the overhead parameters.
      OverheadParameters op = new OverheadParameters(0, null, null, null, clusteringDelay, 0);

      // Horizontal clustering.
      ClusteringParameters.ClusteringMethod method =
          ClusteringParameters.ClusteringMethod.HORIZONTAL;
      /*
       * Specify only one of clusters.num and clusters.size: clusters.num is the
       * number of clustered jobs per horizontal level, clusters.size is the
       * number of tasks per clustered job, and clusters.num * clusters.size is
       * the number of tasks per horizontal level. Here we set clusters.num = 20,
       * i.e. 20 jobs per level.
       */
      ClusteringParameters cp = new ClusteringParameters(20, 0, method, null);

      /** Initialize static parameters */
      Parameters.init(
          ftc_method,
          ftc_monitor,
          ftc_failure,
          null,
          vmNum,
          daxPath,
          null,
          null,
          op,
          cp,
          sch_method,
          pln_method,
          null,
          0);
      ReplicaCatalog.init(file_system);

      FailureMonitor.init();
      FailureGenerator.init();

      // before creating any entities.
      int num_user = 1; // number of grid users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // whether to trace events

      // Initialize the CloudSim library
      CloudSim.init(num_user, calendar, trace_flag);

      DatacenterExtended datacenter0 = createDatacenter("Datacenter_0");

      /** Create a WorkflowPlanner with one scheduler. */
      WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
      /** Create a WorkflowEngine. */
      WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
      /**
       * Create a list of VMs. The userId of a VM is basically the id of the
       * scheduler that controls it.
       */
      List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

      /** Submits this list of vms to this WorkflowEngine. */
      wfEngine.submitVmList(vmlist0, 0);

      /** Binds the data centers with the scheduler. */
      wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

      CloudSim.startSimulation();

      List<Job> outputList0 = wfEngine.getJobsReceivedList();

      CloudSim.stopSimulation();

      printJobList(outputList0);

    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
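  /*
   * createDatacenter(...) and createVM(...) are used above but not listed here.
   * A minimal sketch of what the VM helper usually looks like in the WorkflowSim
   * examples; the resource values below are assumptions for illustration:
   */
  private static List<CondorVM> createVMSketch(int userId, int vms) {
    List<CondorVM> list = new LinkedList<>();
    long size = 10000; // image size (MB)
    int ram = 512; // VM memory (MB)
    int mips = 1000;
    long bw = 1000;
    int pesNumber = 1; // number of CPUs
    String vmm = "Xen"; // VMM name
    for (int i = 0; i < vms; i++) {
      list.add(new CondorVM(i, userId, mips, pesNumber, ram, bw, size, vmm,
          new CloudletSchedulerSpaceShared()));
    }
    return list;
  }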
  @Override
  public void run() {

    if (clusterNum > 0) {
      // Start with one singleton TaskSet per task; clustering merges them later.
      for (Iterator it = getTaskList().iterator(); it.hasNext(); ) {
        Task task = (Task) it.next();
        TaskSet set = new TaskSet();
        set.addTask(task);
        mTask2TaskSet.put(task, set);
      }
    }

    remove();
    updateTaskSetDependencies();

    printMetrics();
    String code = Parameters.getClusteringParameters().getCode();
    Map<Integer, ArrayList<TaskSet>> map = getCurrentTaskSetAtLevels();
    if (code != null) {
      for (char c : code.toCharArray()) {

        switch (c) {
          case 'v':

            // verticalClustering();
            VerticalBalancing v = new VerticalBalancing(map, this.mTask2TaskSet, this.clusterNum);
            v.run();
            break;
          case 'c':
            // childAwareHorizontalClustering();
            ChildAwareHorizontalClustering ch =
                new ChildAwareHorizontalClustering(map, this.mTask2TaskSet, this.clusterNum);
            ch.run();
            updateTaskSetDependencies();
            break;
          case 'r':
            // horizontalRuntimeBalancing();
            HorizontalRuntimeBalancing r =
                new HorizontalRuntimeBalancing(map, this.mTask2TaskSet, this.clusterNum);
            r.run();
            updateTaskSetDependencies();
            break;
          case 'i':
            HorizontalImpactBalancing i =
                new HorizontalImpactBalancing(map, this.mTask2TaskSet, this.clusterNum);
            i.run();
            break;
          case 'd':
            HorizontalDistanceBalancing d =
                new HorizontalDistanceBalancing(map, this.mTask2TaskSet, this.clusterNum);
            d.run();
            break;
          case 'h':
            HorizontalRandomClustering h =
                new HorizontalRandomClustering(map, this.mTask2TaskSet, this.clusterNum);
            h.run();
            break;
          default:
            break;
        }
      }
      printMetrics();
    }

    printOut();

    Collection sets = mTask2TaskSet.values();
    for (Iterator it = sets.iterator(); it.hasNext(); ) {
      TaskSet set = (TaskSet) it.next();
      if (!set.hasChecked) {
        set.hasChecked = true;
        addTasks2Job(set.getTaskList());
      }
    }
    // Reset the checked flags so later passes start from a clean state.
    cleanTaskSetChecked();

    updateDependencies();
    addClustDelay();

    recover();
  }
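  /*
   * Illustration of the clustering code consumed above (an assumption based on
   * getCode(); the last ClusteringParameters argument appears to carry it). Each
   * character selects one pass, so a code of "cr" would run
   * ChildAwareHorizontalClustering first and HorizontalRuntimeBalancing second:
   *
   *   ClusteringParameters cp = new ClusteringParameters(
   *       20, 0, ClusteringParameters.ClusteringMethod.HORIZONTAL, "cr");
   */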
  /** Creates main() to run this example. This example has only one datacenter and one storage. */
  public static void main(String[] args) {

    try {
      // First step: Initialize the WorkflowSim package.
      /*
       * Note that the exact number of VMs created may be smaller than vmNum:
       * if the data center or the host does not have sufficient resources,
       * fewer VMs will be created. Take care.
       */
      int vmNum = 20; // number of VMs
      // Change this to the physical DAX path in your working environment.
      String daxPath =
          "/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      /*
       * Fault-tolerance parameters.
       */
      /*
       * MONITOR_JOB classifies failures by the level of jobs; MONITOR_VM
       * classifies failures by VM id; MONITOR_ALL does no classification;
       * MONITOR_NONE does not record any failure.
       */
      FailureParameters.FTCMonitor ftc_monitor = FailureParameters.FTCMonitor.MONITOR_JOB;
      /** Similar to FTCMonitor, FTCFailure controls how failures are generated. */
      FailureParameters.FTCFailure ftc_failure = FailureParameters.FTCFailure.FAILURE_JOB;
      /** In this example, we have horizontal clustering and we use Dynamic Reclustering. */
      FailureParameters.FTCluteringAlgorithm ftc_method =
          FailureParameters.FTCluteringAlgorithm.FTCLUSTERING_DR;
      /** Task failure distribution for each level. */
      int maxLevel = 11; // most workflows we use have at most 11 levels

      DistributionGenerator[][] failureGenerators = new DistributionGenerator[vmNum][maxLevel];

      for (int level = 0; level < maxLevel; level++) {
        /*
         * For simplicity, give each level the same failure behavior, roughly a
         * 0.1 task failure rate (10% of submitted tasks fail). The failure rate
         * does not have to be the same at each level.
         */
        DistributionGenerator generator =
            new DistributionGenerator(
                DistributionGenerator.DistributionFamily.WEIBULL, 100, 1.0, 30, 300, 0.78);
        for (int vmId = 0; vmId < vmNum; vmId++) {
          failureGenerators[vmId][level] = generator;
        }
      }
      /**
       * Since we are using MINMIN scheduling algorithm, the planning algorithm should be INVALID
       * such that the planner would not override the result of the scheduler
       */
      Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

      /** No overheads */
      OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

      /** No Clustering */
      ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
      ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

      /** Initialize static parameters */
      FailureParameters.init(ftc_method, ftc_monitor, ftc_failure, failureGenerators);
      Parameters.init(vmNum, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);
      ReplicaCatalog.init(file_system);

      FailureMonitor.init();
      FailureGenerator.init();

      // before creating any entities.
      int num_user = 1; // number of grid users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // whether to trace events

      // Initialize the CloudSim library
      CloudSim.init(num_user, calendar, trace_flag);

      WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

      /** Create a WorkflowPlanner with one scheduler. */
      WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
      /** Create a WorkflowEngine. */
      WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
      /**
       * Create a list of VMs. The userId of a VM is basically the id of the
       * scheduler that controls it.
       */
      List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

      /** Submits this list of vms to this WorkflowEngine. */
      wfEngine.submitVmList(vmlist0, 0);

      /** Binds the data centers with the scheduler. */
      wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

      CloudSim.startSimulation();
      List<Job> outputList0 = wfEngine.getJobsReceivedList();
      CloudSim.stopSimulation();
      printJobList(outputList0);
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
  /**
   * The main function: a list-scheduling pass that places each ready task on the
   * VM with the earliest estimated finish time.
   */
  @Override
  public void run() {

    double[][] bandwidths = Parameters.getBandwidths();
    int vmNum = getVmList().size();
    int taskNum = getTaskList().size();
    double[] availableTime = new double[vmNum];
    // cloudlet id starts from 1
    double[][] earliestStartTime = new double[taskNum + 1][vmNum];
    double[][] earliestFinishTime = new double[taskNum + 1][vmNum];
    int[] allocation = new int[taskNum + 1];

    List<Task> taskList = new ArrayList<Task>(getTaskList());
    List<Task> readyList = new ArrayList<Task>();
    while (!taskList.isEmpty()) {
      readyList.clear();
      for (Task task : taskList) {
        boolean ready = true;
        for (Task parent : task.getParentList()) {
          if (taskList.contains(parent)) {
            ready = false;
            break;
          }
        }
        if (ready) {
          readyList.add(task);
        }
      }
      taskList.removeAll(readyList);
      // schedule readylist
      for (Task task : readyList) {
        long[] fileSizes = new long[task.getParentList().size()];
        int parentIndex = 0;
        for (Task parent : task.getParentList()) {
          long fileSize = 0;
          // Sum the sizes of this task's input files (type 1) that the parent
          // produces as outputs (type 2): the data transferred between the two tasks.
          for (Iterator fileIter = task.getFileList().iterator(); fileIter.hasNext(); ) {
            File file = (File) fileIter.next();
            if (file.getType() == 1) {
              for (Iterator fileIter2 = parent.getFileList().iterator(); fileIter2.hasNext(); ) {
                File file2 = (File) fileIter2.next();
                if (file2.getType() == 2 && file2.getName().equals(file.getName())) {
                  fileSize += file.getSize();
                }
              }
            }
          }
          fileSizes[parentIndex] = fileSize;
          parentIndex++;
        }

        double minTime = Double.MAX_VALUE;
        int minTimeIndex = 0;

        for (int vmIndex = 0; vmIndex < getVmList().size(); vmIndex++) {
          Vm vm = (Vm) getVmList().get(vmIndex);
          // Assumes VM ids coincide with list indices (ids assigned from 0).
          double startTime = availableTime[vm.getId()];
          parentIndex = 0;
          for (Task parent : task.getParentList()) {
            int allocatedVmId = allocation[parent.getCloudletId()];
            double actualFinishTime = earliestFinishTime[parent.getCloudletId()][allocatedVmId];
            double communicationTime =
                fileSizes[parentIndex] / bandwidths[allocatedVmId][vm.getId()];

            if (actualFinishTime + communicationTime > startTime) {
              startTime = actualFinishTime + communicationTime;
            }
            parentIndex++;
          }
          earliestStartTime[task.getCloudletId()][vm.getId()] = startTime;
          double runtime = task.getCloudletLength() / vm.getMips();
          earliestFinishTime[task.getCloudletId()][vm.getId()] = runtime + startTime;

          if (runtime + startTime < minTime) {
            minTime = runtime + startTime;
            minTimeIndex = vmIndex;
          }
        }

        // Record the allocation; strictly redundant, since task.getVmId() also holds it.
        allocation[task.getCloudletId()] = minTimeIndex;
        task.setVmId(minTimeIndex);
        availableTime[minTimeIndex] = minTime;
      }
    }
  }
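  /*
   * For reference, the recurrence implemented above (standard list scheduling
   * with earliest-finish-time placement, as in HEFT):
   *
   *   EST(t, v) = max( avail[v],
   *                    max over parents p of EFT(p, vm(p)) + data(p, t) / bw(vm(p), v) )
   *   EFT(t, v) = EST(t, v) + length(t) / mips(v)
   *
   * Each ready task is placed on the VM minimizing EFT, and avail[v] is advanced
   * to that finish time.
   */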
  public static void main(String[] args) {
    try {
      Log.disable();
      // First step: Initialize the WorkflowSim package.
      /** Should change this based on real physical path */
      String daxPath = "E:\\PhD\\ComplexCloudSim\\config\\dax\\Montage_1000.xml";
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;
      OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);
      ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
      ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

      // For each scheduling algorithm (FCFS, RR, MinMin, MaxMin), run numRuns times.
      for (int sche = 0; sche < 4; sche++) {
        Parameters.SchedulingAlgorithm sch_method;
        switch (sche) {
          case 0:
            sch_method = Parameters.SchedulingAlgorithm.FCFS;
            break;
          case 1:
            sch_method = Parameters.SchedulingAlgorithm.ROUNDROBIN;
            break;
          case 2:
            sch_method = Parameters.SchedulingAlgorithm.MINMIN;
            break;
          case 3:
            sch_method = Parameters.SchedulingAlgorithm.MAXMIN;
            break;
          default:
            sch_method = Parameters.SchedulingAlgorithm.FCFS;
        }
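        /*
         * An equivalent, more compact mapping (illustrative alternative to the
         * switch above):
         *
         *   Parameters.SchedulingAlgorithm[] methods = {
         *     Parameters.SchedulingAlgorithm.FCFS,
         *     Parameters.SchedulingAlgorithm.ROUNDROBIN,
         *     Parameters.SchedulingAlgorithm.MINMIN,
         *     Parameters.SchedulingAlgorithm.MAXMIN,
         *   };
         *   Parameters.SchedulingAlgorithm sch_method = methods[sche];
         */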
        for (int runs = 0; runs < numRuns; runs++) {
          Parameters.init(numVMs, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);
          ReplicaCatalog.init(file_system);

          // before creating any entities.
          int num_user = 1; // number of grid users
          Calendar calendar = Calendar.getInstance();
          boolean trace_flag = false; // whether to trace events

          // Initialize the CloudSim library
          CloudSim.init(num_user, calendar, trace_flag);

          ComplexDatacenter datacenter0 = createDatacenter("Datacenter_0");

          /** Create a WorkflowPlanner with one scheduler. */
          WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
          /** Create a WorkflowEngine. */
          WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
          /**
           * Create a list of VMs. The userId of a VM is basically the id of the
           * scheduler that controls it.
           */
          List<ComplexVM> vmlist0 = createVM(wfEngine.getSchedulerId(0));

          /** Submits this list of vms to this WorkflowEngine. */
          wfEngine.submitVmList(vmlist0, 0);

          /** Binds the data centers with the scheduler. */
          wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);
          CloudSim.startSimulation();
          List<Job> outputList0 = wfEngine.getJobsReceivedList();
          CloudSim.stopSimulation();
          switch (sche) {
            case 0:
              FCFSResult[runs] = wfEngine.getWorkflowFinishTime();
              break;
            case 1:
              RoundRobinResult[runs] = wfEngine.getWorkflowFinishTime();
              break;
            case 2:
              MinMinResult[runs] = wfEngine.getWorkflowFinishTime();
              break;
            case 3:
              MaxMinResult[runs] = wfEngine.getWorkflowFinishTime();
              break;
            default:
              FCFSResult[runs] = wfEngine.getWorkflowFinishTime();
              break;
          }
        }
        Log.enable();
        Log.printLine(
            "------ "
                + numVMs
                + " VMs "
                + numRuns
                + " Runs with Damage Ratio "
                + damageRatio
                + "------");
        Log.printLine(">> FCFS");
        printResult(FCFSResult);
        Log.printLine(">> RoundRobin");
        printResult(RoundRobinResult);
        Log.printLine(">> MinMin");
        printResult(MinMinResult);
        Log.printLine(">> MaxMin");
        printResult(MaxMinResult);
      }
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
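  /*
   * printResult(...) is called above but not shown here. A minimal sketch of the
   * kind of summary such a helper might print (an assumption, not the original):
   */
  private static void printResultSketch(double[] results) {
    double sum = 0.0;
    for (double r : results) {
      sum += r;
    }
    double mean = sum / results.length;
    double sq = 0.0;
    for (double r : results) {
      sq += (r - mean) * (r - mean);
    }
    Log.printLine(String.format(
        "mean makespan %.2f, stddev %.2f", mean, Math.sqrt(sq / results.length)));
  }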