@Override
  protected void processVmCreate(SimEvent ev) {
    // Ack payload layout: [0] = datacenter id, [1] = VM id, [2] = result flag.
    int[] data = (int[]) ev.getData();
    int result = data[2];

    if (result != CloudSimTags.TRUE) {
      int datacenterId = data[0];
      int vmId = data[1];
      System.out.println(
          CloudSim.clock()
              + ": "
              + getName()
              + ": Creation of VM #"
              + vmId
              + " failed in Datacenter #"
              + datacenterId);
      // Fatal failure path: exit with a non-zero status. The original
      // System.exit(0) reported success to the calling shell despite aborting.
      System.exit(1);
    }
    // Creation succeeded: let the base broker record the VM as created.
    super.processVmCreate(ev);
  }
  public static void main(String args[]) {
    Log.printLine("Starting CloudSimNonChipAwareBestFit...");

    try {
      // Initialize the CloudSim package — must happen before any entity exists.
      int numUsers = 1; // number of cloud users
      Calendar cal = Calendar.getInstance();
      boolean traceEvents = false; // mean trace events
      CloudSim.init(numUsers, cal, traceEvents);

      // Create the datacenter (the resource provider) and its manager;
      // at least one datacenter is required to run a CloudSim simulation.
      CloudSimNonChipAwareBestFit example = new CloudSimNonChipAwareBestFit();
      PAMDatacenter dc = example.createDatacenter("Datacenter_0");
      example.createDCManager(dc);

      // Broker that drives the configured workload pattern against the datacenter.
      @SuppressWarnings("unused")
      PAMDatacenterBrokerExtended pamDBE =
          new PAMDatacenterBrokerExtended("PAMDatacenterBrokerExtended", "workloadpattern_101");

      // Run the simulation to completion, then report the accumulated debts.
      CloudSim.startSimulation();
      CloudSim.stopSimulation();
      dc.printDebts();

      Log.printLine("CloudSimNonChipAwareBestFit finished!");
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("Unwanted errors happen");
    }
  }
  /*
   * (non-Javadoc)
   * @see org.cloudbus.cloudsim.VmAllocationPolicy#allocateHostForVm(org.cloudbus.cloudsim.Vm,
   * org.cloudbus.cloudsim.Host)
   */
  @Override
  public boolean allocateHostForVm(Vm vm, Host host) {
    // No candidate host at all: nothing to allocate.
    if (host == null) {
      Log.formatLine("%.2f: No suitable host found for VM #" + vm.getId() + "\n", CloudSim.clock());
      return false;
    }

    boolean created = host.vmCreate(vm);
    if (!created) {
      // The host rejected the VM (e.g. insufficient capacity).
      Log.formatLine(
          "%.2f: Creation of VM #" + vm.getId() + " on the host #" + host.getId() + " failed\n",
          CloudSim.clock());
      return false;
    }

    // VM was successfully created on the host: record the placement and
    // update the host cache pain information in CacheMatrix.HOST_PAIN_LIST.
    getVmTable().put(vm.getUid(), host);
    CacheMatrix.update_host_pain_add_vm(vm, host);

    Log.formatLine(
        "%.2f: VM #" + vm.getId() + " has been allocated to the host #" + host.getId(),
        CloudSim.clock());
    return true;
  }
 /**
  * Gets the datacenter list with datacenter ids.
  *
  * @return the datacenters registered with this entity, resolved from their simulation ids
  */
 public List<StorageDatacenter> getDatacenterList() {
   // Fetch the id list once instead of once per loop iteration, and presize
   // the result list to avoid growth reallocations.
   List<Integer> datacenterIds = this.getDatacenterIdsList();
   datacenterList = new ArrayList<StorageDatacenter>(datacenterIds.size());
   for (int datacenterId : datacenterIds) {
     // Each id maps to a simulation entity; cast is safe by construction of the id list.
     datacenterList.add((StorageDatacenter) CloudSim.getEntity(datacenterId));
   }
   return datacenterList;
 }
 /**
  * Process a request for the characteristics of a PowerDatacenter.
  *
  * @param ev a SimEvent object
  * @pre ev != $null
  * @post $none
  */
 @Override
 protected void processResourceCharacteristicsRequest(SimEvent ev) {
   // Reset the cached characteristics; each datacenter repopulates this map
   // when it answers the RESOURCE_CHARACTERISTICS request below.
   setDatacenterCharacteristicsList(new HashMap<>());

   List<Integer> datacenterIds = getDatacenterIdsList();
   Log.printLine(
       CloudSim.clock()
           + ": "
           + getName()
           + ": Cloud Resource List received with "
           + datacenterIds.size()
           + " resource(s)");

   // Query every known datacenter for its characteristics.
   for (Integer datacenterId : datacenterIds) {
     sendNow(datacenterId, CloudSimTags.RESOURCE_CHARACTERISTICS, getId());
   }
 }
  /** Start this entity (WorkflowScheduler): registers it with the system GIS. */
  @Override
  public void startEntity() {
    Log.printLine(getName() + " is starting...");
    // This resource should register to a regional GIS. However, since no
    // regional CIS name is specified, register to the system GIS (the
    // default CloudInformationService) entity. The original code set
    // gisID = -1 and immediately tested for -1, which was always true,
    // so the dead branch has been folded away:
    // int gisID = CloudSim.getEntityId(regionalCisName);
    int gisID = CloudSim.getCloudInfoServiceEntityId();

    // send the registration to GIS
    sendNow(gisID, CloudSimTags.REGISTER_RESOURCE, getId());
  }
 /**
  * Updates the processing of cloudlets running on this VM.
  *
  * @param currentTime current simulation time
  * @param mipsShare array with MIPS share of each Pe available to the scheduler
  * @return time predicted completion time of the earliest finishing cloudlet, or 0 if there is no
  *     next events
  * @pre currentTime >= 0
  * @post $none
  */
 @Override
 public double updateVmProcessing(final double currentTime, final List<Double> mipsShare) {
   final double time = super.updateVmProcessing(currentTime, mipsShare);

   // Sample CPU/RAM/BW utilization at the scheduler's previous tick.
   final double sampleTime = getCloudletScheduler().getPreviousTime();
   final double cpuUtilization = getTotalUtilizationOfCpu(sampleTime);
   final double ramUtilization = getCloudletScheduler().getTotalUtilizationOfRam(sampleTime);
   final double bwUtilization = getCloudletScheduler().getTotalUtilizationOfBw(sampleTime);

   // Record history only on a scheduling-interval boundary (offset by 0.1),
   // and skip the all-zero sample taken at simulation start.
   final boolean onIntervalBoundary =
       currentTime > getPreviousTime() && (currentTime - 0.1) % getSchedulingInterval() == 0;
   if (onIntervalBoundary && (CloudSim.clock() != 0 || cpuUtilization != 0)) {
     addUtilizationHistoryValue(cpuUtilization);
     addRamUtilizationHistoryValue(ramUtilization);
     addBwUtilizationHistoryValue(bwUtilization);
   }
   return time;
 }
  /** Creates main() to run this example This example has only one datacenter and one storage */
  public static void main(String[] args) {

    try {
      // First step: Initialize the WorkflowSim package.

      /**
       * However, the exact number of vms may not necessarily be vmNum If the data center or the
       * host doesn't have sufficient resources the exact vmNum would be smaller than that. Take
       * care.
       */
      int vmNum = 20; // number of vms;
      /** Should change this based on real physical path */
      String daxPath = "/Users/chenweiwei/Work/WorkflowSim-1.0/config/dax/Montage_100.xml";
      // NOTE(review): the original also tested daxPath == null here, but daxPath
      // is assigned a non-null literal above, so that branch was dead and is removed.
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      /*
       * Use default Fault Tolerant Parameters
       */
      Parameters.FTCMonitor ftc_monitor = Parameters.FTCMonitor.MONITOR_NONE;
      Parameters.FTCFailure ftc_failure = Parameters.FTCFailure.FAILURE_NONE;
      Parameters.FTCluteringAlgorithm ftc_method = null;

      /**
       * Since we are using MINMIN scheduling algorithm, the planning algorithm should be INVALID
       * such that the planner would not override the result of the scheduler
       */
      Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

      /**
       * clustering delay must be added, if you don't need it, you can set all the clustering delay
       * to be zero, but not null
       */
      Map<Integer, Double> clusteringDelay = new HashMap<>(); // diamond replaces raw HashMap
      /** Montage has at most 11 horizontal levels */
      int maxLevel = 11;
      for (int level = 0; level < maxLevel; level++) {
        clusteringDelay.put(
            level, 1.0); // the clustering delay specified to each level is 1.0 seconds
      }
      // Add clustering delay to the overhead parameters
      OverheadParameters op = new OverheadParameters(0, null, null, null, clusteringDelay, 0);

      /** Horizontal Clustering */
      ClusteringParameters.ClusteringMethod method =
          ClusteringParameters.ClusteringMethod.HORIZONTAL;
      /**
       * You can only specify clusters.num or clusters.size clusters.num is the number of clustered
       * jobs per horizontal level clusters.size is the number of tasks per clustered job
       * clusters.num * clusters.size = the number of tasks per horizontal level In this case, we
       * specify the clusters.num = 20, which means we have 20 jobs per level
       */
      ClusteringParameters cp = new ClusteringParameters(20, 0, method, null);

      /** Initialize static parameters */
      Parameters.init(
          ftc_method,
          ftc_monitor,
          ftc_failure,
          null,
          vmNum,
          daxPath,
          null,
          null,
          op,
          cp,
          sch_method,
          pln_method,
          null,
          0);
      ReplicaCatalog.init(file_system);

      FailureMonitor.init();
      FailureGenerator.init();

      // Initialize the CloudSim library before creating any entities.
      int num_user = 1; // number of grid users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // mean trace events
      CloudSim.init(num_user, calendar, trace_flag);

      DatacenterExtended datacenter0 = createDatacenter("Datacenter_0");

      /** Create a WorkflowPlanner with one schedulers. */
      WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
      /** Create a WorkflowEngine. */
      WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
      /**
       * Create a list of VMs.The userId of a vm is basically the id of the scheduler that controls
       * this vm.
       */
      List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

      /** Submits this list of vms to this WorkflowEngine. */
      wfEngine.submitVmList(vmlist0, 0);

      /** Binds the data centers with the scheduler. */
      wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

      CloudSim.startSimulation();

      List<Job> outputList0 = wfEngine.getJobsReceivedList();

      CloudSim.stopSimulation();

      printJobList(outputList0);

    } catch (Exception e) {
      // Surface the root cause instead of silently swallowing it.
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
  /**
   * Resumes execution of a paused cloudlet.
   *
   * <p>Note: despite the historical wording below, this method returns a {@code double}: the
   * expected finish time when the cloudlet is moved to the exec list, and 0.0 when it is queued
   * instead or is not found in the paused list.
   *
   * @param cloudletId ID of the cloudlet being resumed
   * @return expected finish time of the resumed cloudlet, or 0.0 if it was queued or not found
   * @pre $none
   * @post $none
   */
  @Override
  public double cloudletResume(int cloudletId) {
    boolean found = false;
    int position = 0;

    // look for the cloudlet in the paused list, remembering its index
    for (ResCloudlet rcl : getCloudletPausedList()) {
      if (rcl.getCloudletId() == cloudletId) {
        found = true;
        break;
      }
      position++;
    }

    if (found) {
      ResCloudlet rcl = getCloudletPausedList().remove(position);

      // it can go to the exec list only if enough PEs are currently free
      if ((currentCpus - usedPes) >= rcl.getNumberOfPes()) {
        rcl.setCloudletStatus(Cloudlet.INEXEC);
        // Assign placeholder PE ids 0..n-1 on machine 0.
        for (int i = 0; i < rcl.getNumberOfPes(); i++) {
          rcl.setMachineAndPeId(0, i);
        }

        // Scale the remaining length by the PE count before re-submission;
        // presumably the scheduler divides this back out per PE — TODO confirm.
        long size = rcl.getRemainingCloudletLength();
        size *= rcl.getNumberOfPes();
        rcl.getCloudlet().setCloudletLength(size);

        getCloudletExecList().add(rcl);
        usedPes += rcl.getNumberOfPes();

        // calculate the expected time for cloudlet completion:
        // total capacity and count of PEs with a non-zero MIPS share
        double capacity = 0.0;
        int cpus = 0;
        for (Double mips : getCurrentMipsShare()) {
          capacity += mips;
          if (mips > 0) {
            cpus++;
          }
        }
        // Refresh the cached CPU count, then use average per-PE capacity.
        currentCpus = cpus;
        capacity /= cpus;

        long remainingLength = rcl.getRemainingCloudletLength();
        double estimatedFinishTime =
            CloudSim.clock() + (remainingLength / (capacity * rcl.getNumberOfPes()));

        return estimatedFinishTime;
      } else { // no enough free PEs: go to the waiting queue
        rcl.setCloudletStatus(Cloudlet.QUEUED);

        // Same PE-count scaling as the exec path (see note above).
        long size = rcl.getRemainingCloudletLength();
        size *= rcl.getNumberOfPes();
        rcl.getCloudlet().setCloudletLength(size);

        getCloudletWaitingList().add(rcl);
        return 0.0;
      }
    }

    // not found in the paused list: either it is in the queue, executing, or does not exist
    return 0.0;
  }
// Beispiel #10 (scraper artifact: example separator from the original source listing)
  /** Creates main() to run this example: four datacenters, five VMs, and eight cloudlets. */
  public static void main(String[] args) {

    Log.printLine("Starting CloudSimExample2...");

    try {
      // First step: Initialize the CloudSim package. It should be called
      // before creating any entities.
      int num_user = 8; // number of cloud users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // mean trace events

      // Initialize the CloudSim library
      CloudSim.init(num_user, calendar, trace_flag);

      // Second step: Create Datacenters
      // Datacenters are the resource providers in CloudSim. We need at least one of them to run a
      // CloudSim simulation
      Datacenter datacenter0 = createDatacenter("Datacenter_0");
      Datacenter datacenter1 = createDatacenter("Datacenter_1");
      Datacenter datacenter2 = createDatacenter("Datacenter_2");
      Datacenter datacenter3 = createDatacenter("Datacenter_3");

      // Third step: Create Broker
      DatacenterBroker broker = createBroker();
      int brokerId = broker.getId();

      // Fourth step: Create five virtual machines (vm1..vm5 below)
      vmlist = new ArrayList<Vm>();

      // VM description (values reused and overwritten between VM constructions)
      int vmid = 0;
      int mips = 250;
      double costpersec = 20.0;
      long size = 10000; // image size (MB)
      int ram = 512; // vm memory (MB)
      long bw = 1000;
      int pesNumber = 1; // number of cpus
      String vmm = "Xen"; // VMM name

      // create the VMs
      Vm vm1 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 28.0;
      mips = 450;
      ram = 768;
      bw = 512;
      vmid++;
      Vm vm2 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 25.0;
      mips = 580;
      ram = 256;
      bw = 768;
      vmid++;
      Vm vm3 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 30.0;
      mips = 300;
      ram = 768;
      bw = 512;
      vmid++;
      Vm vm4 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 22.0;
      mips = 280;
      ram = 1000;
      bw = 128;
      vmid++;
      Vm vm5 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      vmid++;
      // NOTE(review): the values below are assigned but only consumed by the
      // commented-out vm6..vm16 constructions that follow.
      costpersec = 50.0;
      mips = 220;
      ram = 128;
      bw = 128;

      //                        Vm vm6 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw,
      // size, vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	costpersec=22.0;
      //                        mips =280;
      //                        ram = 2048;
      //                        bw = 64;
      //                        Vm vm7 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw,
      // size, vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm8 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm9 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm10 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm11 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm12 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm13 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm14 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm15 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());
      //                        vmid++;
      //	            	Vm vm16 = new Vm(vmid, brokerId, mips,costpersec, pesNumber, ram, bw, size,
      // vmm, new CloudletSchedulerTimeShared());

      // add the VMs to the vmList
      vmlist.add(vm1);
      vmlist.add(vm2);
      vmlist.add(vm3);
      vmlist.add(vm4);
      vmlist.add(vm5);

      //                        vmlist.add(vm6);
      //                        vmlist.add(vm7);
      //                        vmlist.add(vm9);
      //                        vmlist.add(vm10);
      //                        vmlist.add(vm11);
      //                        vmlist.add(vm12);
      //                        vmlist.add(vm13);
      //                        vmlist.add(vm14);
      //                        vmlist.add(vm15);
      //                        vmlist.add(vm16);

      // submit vm list to the broker
      broker.submitVmList(vmlist);

      // Fifth step: Create eight Cloudlets (cloudlet1..cloudlet8 below)
      cloudletList = new ArrayList<Cloudlet>();

      // Cloudlet properties
      int id = 0;
      pesNumber = 1;
      long length = 2500;
      long fileSize = 300;
      long outputSize = 450;
      UtilizationModel utilizationModel = new UtilizationModelFull();
      // NOTE(review): wt/wm are extra leading arguments of this project's extended
      // Cloudlet constructor — presumably time/cost weights; confirm against its declaration.
      double wt = 1.0;
      double wm = 0;

      Cloudlet cloudlet1 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet1.setUserId(brokerId);

      id++;
      pesNumber = 1;
      length = 2800;
      fileSize = 600;
      outputSize = 600;
      Cloudlet cloudlet2 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet2.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 1250;
      fileSize = 800;
      outputSize = 800;
      Cloudlet cloudlet3 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet3.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 2480;
      fileSize = 300;
      outputSize = 300;
      Cloudlet cloudlet4 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet4.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 7000;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet5 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet5.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 1500;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet6 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet6.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 800;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet7 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet7.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 7500;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet8 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet8.setUserId(brokerId);

      // add the cloudlets to the list
      cloudletList.add(cloudlet1);
      cloudletList.add(cloudlet2);
      cloudletList.add(cloudlet3);
      cloudletList.add(cloudlet4);
      cloudletList.add(cloudlet5);

      cloudletList.add(cloudlet6);
      cloudletList.add(cloudlet7);
      cloudletList.add(cloudlet8);

      // submit cloudlet list to the broker
      broker.submitCloudletList(cloudletList);

      // bind the cloudlets to the vms. This way, the broker
      // will submit the bound cloudlets only to the specific VM

      // broker.bindCloudletToVm(cloudlet1.getCloudletId(),vm1.getId());
      // broker.bindCloudletToVm(cloudlet2.getCloudletId(),vm2.getId());

      // Sixth step: Starts the simulation
      CloudSim.startSimulation();

      // Final step: Print results when simulation is over
      List<Cloudlet> newList = broker.getCloudletReceivedList();

      CloudSim.stopSimulation();

      printCloudletList(newList);

      // Print the debt of each user to each datacenter
      // datacenter0.printDebts();

      Log.printLine("CloudSimExample2 finished!");
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
  private void runSimulationAndPrintResults() {
    // Run the simulation to completion, then render the finished cloudlets as a table.
    simulation.start();

    List<Cloudlet> finishedCloudlets = broker.getCloudletsFinishedList();
    new CloudletsTableBuilderHelper(finishedCloudlets).build();
  }
  /**
   * Process the ack received due to a request for VM creation.
   *
   * @param ev a SimEvent object
   * @pre ev != null
   * @post $none
   */
  @Override
  protected void processVmCreate(SimEvent ev) {
    // Ack payload layout: [0] = datacenter id, [1] = VM id, [2] = result flag.
    int[] data = (int[]) ev.getData();
    int datacenterId = data[0];
    int vmId = data[1];
    int result = data[2];

    if (result == CloudSimTags.TRUE) {
      getVmsToDatacentersMap().put(vmId, datacenterId);
      /** Fix a bug of cloudsim Don't add a null to getVmsCreatedList() June 15, 2013 */
      // Look the VM up once instead of three separate VmList.getById calls.
      Vm vm = VmList.getById(getVmList(), vmId);
      if (vm != null) {
        getVmsCreatedList().add(vm);
        Log.printLine(
            CloudSim.clock()
                + ": "
                + getName()
                + ": VM #"
                + vmId
                + " has been created in Datacenter #"
                + datacenterId
                + ", Host #"
                + vm.getHost().getId());
      }
    } else {
      Log.printLine(
          CloudSim.clock()
              + ": "
              + getName()
              + ": Creation of VM #"
              + vmId
              + " failed in Datacenter #"
              + datacenterId);
    }

    incrementVmsAcks();

    // all the requested VMs have been created
    if (getVmsCreatedList().size() == getVmList().size() - getVmsDestroyed()) {
      submitCloudlets();
    } else {
      // all the acks received, but some VMs were not created
      if (getVmsRequested() == getVmsAcks()) {
        // find id of the next datacenter that has not been tried
        for (int nextDatacenterId : getDatacenterIdsList()) {
          if (!getDatacenterRequestedIdsList().contains(nextDatacenterId)) {
            createVmsInDatacenter(nextDatacenterId);
            return;
          }
        }

        // all datacenters already queried
        if (getVmsCreatedList().size() > 0) { // if some vm were created
          submitCloudlets();
        } else { // no vms created. abort
          Log.printLine(
              CloudSim.clock()
                  + ": "
                  + getName()
                  + ": none of the required VMs could be created. Aborting");
          finishExecution();
        }
      }
    }
  }
  /** Creates main() to run this example This example has only one datacenter and one storage */
  public static void main(String[] args) {

    try {
      // First step: Initialize the WorkflowSim package.
      /**
       * However, the exact number of vms may not necessarily be vmNum If the data center or the
       * host doesn't have sufficient resources the exact vmNum would be smaller than that. Take
       * care.
       */
      int vmNum = 20; // number of vms;
      /** Should change this based on real physical path */
      String daxPath =
          "/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      /*
       *  Fault Tolerant Parameters
       */
      /**
       * MONITOR_JOB classifies failures based on the level of jobs; MONITOR_VM classifies failures
       * based on the vm id; MOINTOR_ALL does not do any classification; MONITOR_NONE does not
       * record any failiure.
       */
      FailureParameters.FTCMonitor ftc_monitor = FailureParameters.FTCMonitor.MONITOR_JOB;
      /** Similar to FTCMonitor, FTCFailure controls the way how we generate failures. */
      FailureParameters.FTCFailure ftc_failure = FailureParameters.FTCFailure.FAILURE_JOB;
      /** In this example, we have horizontal clustering and we use Dynamic Reclustering. */
      FailureParameters.FTCluteringAlgorithm ftc_method =
          FailureParameters.FTCluteringAlgorithm.FTCLUSTERING_DR;
      /** Task failure rate for each level */
      int maxLevel = 11; // most workflows we use has a maximum of 11 levels

      DistributionGenerator[][] failureGenerators = new DistributionGenerator[vmNum][maxLevel];

      for (int level = 0; level < maxLevel; level++) {
        /*
         * For simplicity, set the task failure rate of each level to be 0.1. Which means 10%
         * of submitted tasks will fail. It doesn't have to be the same task
         * failure rate at each level.
         */
        // NOTE(review): every VM at this level shares the SAME generator instance;
        // give each VM its own generator if independent failure streams are needed.
        DistributionGenerator generator =
            new DistributionGenerator(
                DistributionGenerator.DistributionFamily.WEIBULL, 100, 1.0, 30, 300, 0.78);
        for (int vmId = 0; vmId < vmNum; vmId++) {
          failureGenerators[vmId][level] = generator;
        }
      }
      /**
       * Since we are using MINMIN scheduling algorithm, the planning algorithm should be INVALID
       * such that the planner would not override the result of the scheduler
       */
      Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

      /** No overheads */
      OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

      /** No Clustering */
      ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
      ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

      /** Initialize static parameters */
      FailureParameters.init(ftc_method, ftc_monitor, ftc_failure, failureGenerators);
      Parameters.init(vmNum, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);
      ReplicaCatalog.init(file_system);

      FailureMonitor.init();
      FailureGenerator.init();

      // Initialize the CloudSim library before creating any entities.
      int num_user = 1; // number of grid users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // mean trace events
      CloudSim.init(num_user, calendar, trace_flag);

      WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

      /** Create a WorkflowPlanner with one schedulers. */
      WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
      /** Create a WorkflowEngine. */
      WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
      /**
       * Create a list of VMs.The userId of a vm is basically the id of the scheduler that controls
       * this vm.
       */
      List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

      /** Submits this list of vms to this WorkflowEngine. */
      wfEngine.submitVmList(vmlist0, 0);

      /** Binds the data centers with the scheduler. */
      wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

      CloudSim.startSimulation();
      List<Job> outputList0 = wfEngine.getJobsReceivedList();
      CloudSim.stopSimulation();
      printJobList(outputList0);
    } catch (Exception e) {
      // Surface the root cause instead of silently swallowing it.
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
  /**
   * Entry point: benchmarks four scheduling algorithms (FCFS, RoundRobin, MinMin,
   * MaxMin) by running each one {@code numRuns} times over the same Montage DAX
   * workflow, then prints a summary of the recorded workflow finish times.
   *
   * <p>Fixes over the previous version: the summary was printed inside the
   * per-algorithm loop, so (a) it reported the not-yet-populated result arrays of
   * algorithms that had not run, (b) it was emitted four times, and (c) the
   * {@code Log.enable()} call left logging on for every subsequent simulation run,
   * defeating the initial {@code Log.disable()}. The catch block also discarded
   * the exception, making failures undiagnosable.
   *
   * @param args unused command-line arguments
   */
  public static void main(String[] args) {
    try {
      Log.disable();
      // Path to the DAX workflow description; must point at a real file on this
      // machine, otherwise the run is aborted with a warning.
      String daxPath = "E:\\PhD\\ComplexCloudSim\\config\\dax\\Montage_1000.xml";
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      // No planning stage, shared file system, zero overheads, no task clustering.
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;
      OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);
      ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
      ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

      // For each scheduling algorithm (FCFS, RR, MinMin, MaxMin), run numRuns times.
      for (int sche = 0; sche < 4; sche++) {
        Parameters.SchedulingAlgorithm sch_method = schedulingAlgorithmFor(sche);
        for (int runs = 0; runs < numRuns; runs++) {
          Parameters.init(numVMs, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);
          ReplicaCatalog.init(file_system);

          // CloudSim must be (re)initialized before creating any entities.
          int num_user = 1; // number of grid users
          Calendar calendar = Calendar.getInstance();
          boolean trace_flag = false; // mean trace events
          CloudSim.init(num_user, calendar, trace_flag);

          ComplexDatacenter datacenter0 = createDatacenter("Datacenter_0");

          // One planner driving a single scheduler/engine.
          WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
          WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();

          // The userId of a VM is the id of the scheduler that controls it.
          List<ComplexVM> vmlist0 = createVM(wfEngine.getSchedulerId(0));
          wfEngine.submitVmList(vmlist0, 0);

          // Bind the datacenter to the scheduler and run one simulation.
          wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);
          CloudSim.startSimulation();
          CloudSim.stopSimulation();

          // Record this run's finish time in the array for the current algorithm.
          double finishTime = wfEngine.getWorkflowFinishTime();
          switch (sche) {
            case 1:
              RoundRobinResult[runs] = finishTime;
              break;
            case 2:
              MinMinResult[runs] = finishTime;
              break;
            case 3:
              MaxMinResult[runs] = finishTime;
              break;
            case 0:
            default:
              FCFSResult[runs] = finishTime;
              break;
          }
        }
      }

      // Report only after every algorithm has produced its results.
      Log.enable();
      Log.printLine(
          "------ "
              + numVMs
              + " VMs "
              + numRuns
              + " Runs with Damage Ratio "
              + damageRatio
              + "------");
      Log.printLine(">> FCFS");
      printResult(FCFSResult);
      Log.printLine(">> RoundRobin");
      printResult(RoundRobinResult);
      Log.printLine(">> MinMin");
      printResult(MinMinResult);
      Log.printLine(">> MaxMin");
      printResult(MaxMinResult);
    } catch (Exception e) {
      // Preserve the stack trace; silently discarding it hid every failure cause.
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }

  /** Maps the benchmark loop index to the scheduling algorithm under test (default FCFS). */
  private static Parameters.SchedulingAlgorithm schedulingAlgorithmFor(int sche) {
    switch (sche) {
      case 1:
        return Parameters.SchedulingAlgorithm.ROUNDROBIN;
      case 2:
        return Parameters.SchedulingAlgorithm.MINMIN;
      case 3:
        return Parameters.SchedulingAlgorithm.MAXMIN;
      case 0:
      default:
        return Parameters.SchedulingAlgorithm.FCFS;
    }
  }
  /** Creates main() to run this example. */
  public static void main(String[] args) {

    Log.printLine("Starting NetworkExample3...");

    try {
      // The CloudSim library must be initialized before any entity is created.
      int numUsers = 2; // two independent cloud users
      CloudSim.init(numUsers, Calendar.getInstance(), false);

      // Two datacenters act as the resource providers for this simulation.
      Datacenter dc0 = createDatacenter("Datacenter_0");
      Datacenter dc1 = createDatacenter("Datacenter_1");

      // One broker per user; brokers mediate between users and providers.
      DatacenterBroker broker1 = createBroker(1);
      DatacenterBroker broker2 = createBroker(2);
      int userId1 = broker1.getId();
      int userId2 = broker2.getId();

      // One VM per user, both with identical hardware characteristics.
      vmlist1 = new ArrayList<Vm>();
      vmlist2 = new ArrayList<Vm>();

      int vmId = 0;
      long imageSize = 10000; // image size (MB)
      int mips = 250;
      int ram = 512; // vm memory (MB)
      long bandwidth = 1000;
      int cpus = 1; // number of cpus
      String vmm = "Xen"; // VMM name

      Vm vm1 =
          new Vm(
              vmId,
              userId1,
              mips,
              cpus,
              ram,
              bandwidth,
              imageSize,
              vmm,
              new CloudletSchedulerTimeShared());
      Vm vm2 =
          new Vm(
              vmId,
              userId2,
              mips,
              cpus,
              ram,
              bandwidth,
              imageSize,
              vmm,
              new CloudletSchedulerTimeShared());

      vmlist1.add(vm1);
      vmlist2.add(vm2);
      broker1.submitVmList(vmlist1);
      broker2.submitVmList(vmlist2);

      // One cloudlet per user, both with identical workload characteristics.
      cloudletList1 = new ArrayList<Cloudlet>();
      cloudletList2 = new ArrayList<Cloudlet>();

      int cloudletId = 0;
      long length = 40000;
      long fileSize = 300;
      long outputSize = 300;
      UtilizationModel fullUse = new UtilizationModelFull();

      Cloudlet cloudlet1 =
          new Cloudlet(
              cloudletId, length, cpus, fileSize, outputSize, fullUse, fullUse, fullUse);
      cloudlet1.setUserId(userId1);
      Cloudlet cloudlet2 =
          new Cloudlet(
              cloudletId, length, cpus, fileSize, outputSize, fullUse, fullUse, fullUse);
      cloudlet2.setUserId(userId2);

      cloudletList1.add(cloudlet1);
      cloudletList2.add(cloudlet2);
      broker1.submitCloudletList(cloudletList1);
      broker2.submitCloudletList(cloudletList2);

      // Network: load the BRITE topology and map each CloudSim entity to a node.
      NetworkTopology.buildNetworkTopology("topology.brite");
      NetworkTopology.mapNode(dc0.getId(), 0); // Datacenter_0 -> BRITE node 0
      NetworkTopology.mapNode(dc1.getId(), 2); // Datacenter_1 -> BRITE node 2
      NetworkTopology.mapNode(broker1.getId(), 3); // broker 1 -> BRITE node 3
      NetworkTopology.mapNode(broker2.getId(), 4); // broker 2 -> BRITE node 4

      // Run the simulation and collect the completed cloudlets.
      CloudSim.startSimulation();
      List<Cloudlet> received1 = broker1.getCloudletReceivedList();
      List<Cloudlet> received2 = broker2.getCloudletReceivedList();
      CloudSim.stopSimulation();

      // Per-user execution results.
      Log.print("=============> User " + userId1 + "    ");
      printCloudletList(received1);
      Log.print("=============> User " + userId2 + "    ");
      printCloudletList(received2);

      // Debt owed by each user to each datacenter.
      dc0.printDebts();
      dc1.printDebts();

      Log.printLine("NetworkExample3 finished!");
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("Unwanted errors happen");
    }
  }