public static void main(String[] args) {
    Log.printLine("Starting CloudSimNonChipAwareBestFit...");

    try {
      // First step: Initialize the CloudSim package. It should be called
      // before creating any entities.
      int num_user = 1; // number of cloud users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // mean trace events

      // Initialize the CloudSim library
      CloudSim.init(num_user, calendar, trace_flag);

      // Second step: Create Datacenters
      // Datacenters are the resource providers in CloudSim. We need at
      // least one of them to run a CloudSim simulation
      CloudSimNonChipAwareBestFit csps = new CloudSimNonChipAwareBestFit();
      PAMDatacenter datacenter0 = csps.createDatacenter("Datacenter_0");
      csps.createDCManager(datacenter0);

      @SuppressWarnings("unused")
      PAMDatacenterBrokerExtended pamDBE =
          new PAMDatacenterBrokerExtended("PAMDatacenterBrokerExtended", "workloadpattern_101");

      CloudSim.startSimulation();
      CloudSim.stopSimulation();
      datacenter0.printDebts();

      Log.printLine("CloudSimNonChipAwareBestFit finished!");
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("Unwanted errors happen");
    }
  }
  /*
   * (non-Javadoc)
   * @see org.cloudbus.cloudsim.VmAllocationPolicy#allocateHostForVm(org.cloudbus.cloudsim.Vm,
   * org.cloudbus.cloudsim.Host)
   */
  @Override
  public boolean allocateHostForVm(Vm vm, Host host) {
    if (host == null) {
      Log.formatLine("%.2f: No suitable host found for VM #" + vm.getId() + "\n", CloudSim.clock());
      return false;
    }
    if (host.vmCreate(vm)) { // if vm has been successfully created in the host
      getVmTable().put(vm.getUid(), host);

      /*
       * update host cache pain information
       * in CacheMatrix.HOST_PAIN_LIST
       */
      CacheMatrix.update_host_pain_add_vm(vm, host);
      /*
       * end update host cache pain information
       */

      Log.formatLine(
          "%.2f: VM #" + vm.getId() + " has been allocated to the host #" + host.getId(),
          CloudSim.clock());
      return true;
    }
    Log.formatLine(
        "%.2f: Creation of VM #" + vm.getId() + " on the host #" + host.getId() + " failed\n",
        CloudSim.clock());
    return false;
  }
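A best-fit policy normally pairs allocateHostForVm with a selector that picks the suitable host leaving the least spare capacity. A minimal sketch of such a selector (a hypothetical helper, not part of the original class; it assumes CloudSim's Host.isSuitableForVm and Host.getAvailableMips):

  private Host findBestFitHost(Vm vm, List<Host> hostList) {
    Host bestFit = null;
    double bestLeftover = Double.MAX_VALUE;
    for (Host host : hostList) {
      if (!host.isSuitableForVm(vm)) {
        continue; // host lacks the PEs, RAM, bandwidth or storage this VM needs
      }
      double leftover = host.getAvailableMips() - vm.getMips();
      if (leftover >= 0 && leftover < bestLeftover) { // tighter fit wins
        bestLeftover = leftover;
        bestFit = host;
      }
    }
    return bestFit; // may be null; allocateHostForVm(vm, null) then logs and returns false
  }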
 protected static void printResult(double[] result) {
   Statistics statisticResult = new Statistics(result);
   Log.printLine("Mean :" + statisticResult.getMean());
   Log.printLine("Median : " + statisticResult.median());
   Log.printLine("Variance : " + statisticResult.getVariance());
   Log.printLine("Std : " + statisticResult.getStdDev());
 }
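A quick, hypothetical driver for printResult (the sample values are invented for illustration):

   double[] makespans = {12.4, 15.1, 13.8, 14.2, 16.0};
   printResult(makespans); // prints mean, median, variance and standard deviation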
  /**
   * Prints the metric history.
   *
   * @param hosts the hosts
   * @param vmAllocationPolicy the vm allocation policy
   */
  public static void printMetricHistory(
      List<? extends Host> hosts, PowerVmAllocationPolicyMigrationAbstract vmAllocationPolicy) {
    for (int i = 0; i < Math.min(10, hosts.size()); i++) { // print at most the first 10 hosts
      Host host = hosts.get(i);

      Log.printLine("Host #" + host.getId());
      Log.printLine("Time:");
      if (!vmAllocationPolicy.getTimeHistory().containsKey(host.getId())) {
        continue;
      }
      for (Double time : vmAllocationPolicy.getTimeHistory().get(host.getId())) {
        Log.format("%.2f, ", time);
      }
      Log.printLine();

      for (Double utilization : vmAllocationPolicy.getUtilizationHistory().get(host.getId())) {
        Log.format("%.2f, ", utilization);
      }
      Log.printLine();

      for (Double metric : vmAllocationPolicy.getMetricHistory().get(host.getId())) {
        Log.format("%.2f, ", metric);
      }
      Log.printLine();
    }
  }
  /**
   * Optimize allocation of the VMs according to current utilization.
   *
   * @param vmList the vm list
   * @return the migration map, a list of {@code Map<String, Object>} entries
   */
  @Override
  public List<Map<String, Object>> optimizeAllocation(List<? extends Vm> vmList) {

    /*
     * Calculate total pain
     */
    Log.printLine();
    double total_pain = 0.0;
    for (PowerHostUtilizationHistory host : this.<PowerHostUtilizationHistory>getHostList()) {
      double host_pain = 0.0;
      for (Vm vm_i : host.getVmList()) {
        for (Vm vm_j : host.getVmList()) {
          host_pain += CacheMatrix.get_pain(vm_i.getId(), vm_j.getId());
        }
      }
      Log.printLine("Pain of Host " + host.getId() + " : " + host_pain);
      total_pain += host_pain;
    }
    Log.printLine("==========================");
    Log.printLine("Total pain : " + total_pain);
    Log.printLine("==========================");
    Log.printLine();

    ExecutionTimeMeasurer.start("optimizeAllocationTotal");

    ExecutionTimeMeasurer.start("optimizeAllocationHostSelection");
    List<PowerHostUtilizationHistory> overUtilizedHosts = getOverUtilizedHosts();
    getExecutionTimeHistoryHostSelection()
        .add(ExecutionTimeMeasurer.end("optimizeAllocationHostSelection"));

    printOverUtilizedHosts(overUtilizedHosts);

    saveAllocation();

    ExecutionTimeMeasurer.start("optimizeAllocationVmSelection");
    List<? extends Vm> vmsToMigrate = getVmsToMigrateFromHosts(overUtilizedHosts);
    getExecutionTimeHistoryVmSelection()
        .add(ExecutionTimeMeasurer.end("optimizeAllocationVmSelection"));

    Log.printLine("Reallocation of VMs from the over-utilized hosts:");
    ExecutionTimeMeasurer.start("optimizeAllocationVmReallocation");
    List<Map<String, Object>> migrationMap =
        getNewVmPlacement(vmsToMigrate, new HashSet<Host>(overUtilizedHosts));
    getExecutionTimeHistoryVmReallocation()
        .add(ExecutionTimeMeasurer.end("optimizeAllocationVmReallocation"));
    Log.printLine();

    /*
     * Uncomment the next line to enable turning off underutilized hosts
     */
    // migrationMap.addAll(getMigrationMapFromUnderUtilizedHosts(overUtilizedHosts));

    restoreAllocation();

    getExecutionTimeHistoryTotal().add(ExecutionTimeMeasurer.end("optimizeAllocationTotal"));

    return migrationMap;
  }
 /**
  * Binds this scheduler to a datacenter
  *
  * @param datacenterId data center id
  */
 public void bindSchedulerDatacenter(int datacenterId) {
   if (datacenterId <= 0) {
     Log.printLine("Error in data center id");
     return;
   }
   this.datacenterIdsList.add(datacenterId);
 }
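A hypothetical call site, assuming a datacenter entity named datacenter0 was created earlier in the simulation setup:

   scheduler.bindSchedulerDatacenter(datacenter0.getId());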
 /** Store the data in the data center's nodes by simple first-fit, first-served placement. */
 public void bindReplicasToStorageNodesSimple_Net() {
   int replicaNum = replicaList.size();
   nodeList = this.getNodeList();
   int nodeNum = nodeList.size();
   int idx = 0;
   for (int i = 0; i < replicaNum; i++) {
     StorageNode node = nodeList.get(idx);
     // BlockReplica replica = replicaList.remove(i);
     BlockReplica replica = replicaList.get(i);
     double replicaSize = replica.getSize();
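      // the tiny epsilon guards the free-space comparison against floating-point round-off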
     if (node.getAvailableSpace() - replicaSize >= 0.000000000000000000000001
         && !node.contains(replica.getName())) {
       double time = node.addFile(replica);
       double accrossLatency = NetworkTopology.getDelay(getId(), node.getDatacenter().getId());
       time += accrossLatency;
        Log.printLine(
            "it takes "
                + time
                + " seconds to write replica #"
                + replica.getId()
                + " to datacenter "
                + node.getDatacenter().getName()
                + " node #"
                + node.getId());
       // node.setCapacity(node.getCapacity()-replicaSize);
       idx = (idx + 1) % nodeNum;
     } else {
       idx = (idx + 1) % nodeNum;
       i--; // this replica has not been placed on a node yet, so check whether the next node has space for it
     }
   }
 }
 /**
  * Store data in the data center's nodes according to TOPSIS (Technique for Order Preference by
  * Similarity to Ideal Solution).
  */
 @SuppressWarnings("unchecked")
 public void bindReplicasToStorageNodesTOPSIS() {
   int replicaNum = replicaList.size();
   int nodeSize = nodeList.size();
   int idx = 0;
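    // TOPSIS outline, for reference: build a decision matrix of node attributes,
    // normalize and weight it, find the positive and negative ideal solutions, and
    // rank nodes by their relative closeness to the ideal. buildTOPSIS is assumed
    // to fill TOPSIS.rankedTOPSISnodeIndex with node indices in that rank order.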
   for (int i = 0; i < replicaNum; i++) {
     //			TOPSIS();
     TOPSIS.buildTOPSIS(datacenterList, (List<StorageNode>) nodeList);
     BlockReplica rep = replicaList.get(i);
     double replicaSize = rep.getSize();
      //			StorageNode node = nodeList.get(this.rankedTOPSISnodeIndex[idx]);
      // (rankedTOPSISnodeIndex does not work well for small data sets)
     StorageNode node = nodeList.get(TOPSIS.rankedTOPSISnodeIndex[idx]);
     if (node.getAvailableSpace() - replicaSize > 0.000000000000000001
         && !node.contains(rep.getName())) {
        double time = node.addFile(rep); // if the node has already added this file,
        // the time includes only the network latency and transfer time
        Log.printLine(
            "it takes "
                + time
                + " seconds to write replica #"
                + rep.getId()
                + " to datacenter "
                + node.getDatacenter().getName()
                + " node #"
                + node.getId());
      } else { // rep was not added; retry this replica on the next ranked node
        i--;
     }
     idx = (idx + 1) % nodeSize;
   }
 }
  /**
   * Update a cloudlet (job)
   *
   * @param ev a simEvent object
   */
  protected void processCloudletUpdate(SimEvent ev) {

    BaseSchedulingAlgorithm scheduler = getScheduler(Parameters.getSchedulingAlgorithm());
    scheduler.setCloudletList(getCloudletList());
    scheduler.setVmList(getVmsCreatedList());

    try {
      scheduler.run();
    } catch (Exception e) {
      Log.printLine("Error in configuring scheduler_method");
      e.printStackTrace();
    }

    List<Cloudlet> scheduledList = scheduler.getScheduledList();
    for (Cloudlet cloudlet : scheduledList) {
      int vmId = cloudlet.getVmId();
      double delay = 0.0;
      if (Parameters.getOverheadParams().getQueueDelay() != null) {
        delay = Parameters.getOverheadParams().getQueueDelay(cloudlet);
      }
      schedule(getVmsToDatacentersMap().get(vmId), delay, CloudSimTags.CLOUDLET_SUBMIT, cloudlet);
    }
    getCloudletList().removeAll(scheduledList);
    getCloudletSubmittedList().addAll(scheduledList);
    cloudletsSubmitted += scheduledList.size();
  }
 /** Store data to StorageNodes, with each node selected at random. */
 public void bindReplicasToStorageNodeRand_Net() {
   int replicaNum = replicaList.size();
   int nodeSize = nodeList.size();
   for (int i = 0; i < replicaNum; i++) {
     BlockReplica replica = replicaList.get(i);
     StorageNode node =
         nodeList.get(java.util.concurrent.ThreadLocalRandom.current().nextInt(nodeSize));
     double replicaSize = replica.getSize();
     if (node.getAvailableSpace() - replicaSize >= 0.000000000000000000000001
         && !node.contains(replica.getName())) {
       double time = node.addFile(replica);
       double accrossLatency = NetworkTopology.getDelay(getId(), node.getDatacenter().getId());
       time += accrossLatency;
       /*Log.printLine("it take " + time
       + " seconds to write the replica #" + replica.getId()
       + " to be stored in datacenter "
       + node.getDatacenter().getName() + " node #"
       + node.getId());*/
       Log.printLine(
           "replica #"
               + replica.getId()
               + "    			"
               + node.getDatacenter().getName()
               + " node #"
               + node.getId()
               + " 			"
               + time);
      } else {
        i--; // retry this replica with another randomly chosen node
     }
   }
 }
  /**
   * Prints the Cloudlet objects.
   *
   * @param list list of Cloudlets
   */
  public static void printCloudletList(List<Cloudlet> list) {
    int size = list.size();
    Cloudlet cloudlet;

    String indent = "\t";
    Log.printLine();
    Log.printLine("========== OUTPUT ==========");
    Log.printLine(
        "Cloudlet ID"
            + indent
            + "STATUS"
            + indent
            + "Resource ID"
            + indent
            + "VM ID"
            + indent
            + "Time"
            + indent
            + "Start Time"
            + indent
            + "Finish Time");

    DecimalFormat dft = new DecimalFormat("###.##");
    for (int i = 0; i < size; i++) {
      cloudlet = list.get(i);
      Log.print(indent + cloudlet.getCloudletId());

      if (cloudlet.getCloudletStatus() == Cloudlet.SUCCESS) {
        Log.printLine(
            indent
                + "SUCCESS"
                + indent
                + indent
                + cloudlet.getResourceId()
                + indent
                + cloudlet.getVmId()
                + indent
                + dft.format(cloudlet.getActualCPUTime())
                + indent
                + dft.format(cloudlet.getExecStartTime())
                + indent
                + indent
                + dft.format(cloudlet.getFinishTime()));
      }
    }
  }
  /** Print out all the balancing metrics */
  public void printMetrics() {
    Map<Integer, ArrayList<TaskSet>> map = getCurrentTaskSetAtLevels();
    for (TaskSet set : mTask2TaskSet.values()) {
      set.setImpactFafctor(0.0);
    }

    int maxDepth = 0;
    for (Entry<Integer, ArrayList<TaskSet>> entry : map.entrySet()) {
      int depth = entry.getKey();
      if (depth > maxDepth) {
        maxDepth = depth;
      }
    }
    ArrayList<TaskSet> exits = map.get(maxDepth);
    double avg = 1.0 / exits.size();
    for (TaskSet set : exits) {
      // set.setImpactFafctor(avg);
      addImpact(set, avg);
    }

    for (Entry<Integer, ArrayList<TaskSet>> entry : map.entrySet()) {
      int depth = entry.getKey();
      ArrayList<TaskSet> list = entry.getValue();
      /** Horizontal Runtime Variance. */
      double hrv = new HorizontalRuntimeVariance().getMetric(list);
      /** Impact Factor Variance. */
      double ifv = new ImpactFactorVariance().getMetric(list);
      /** Pipeline Runtime Variance. */
      double prv = new PipelineRuntimeVariance().getMetric(list);
      /** Distance Variance. */
      double dv = new DistanceVariance().getMetric(list);
      Log.printLine(
          "HRV "
              + depth
              + " "
              + list.size()
              + " "
              + hrv
              + "\nIFV "
              + depth
              + " "
              + list.size()
              + " "
              + ifv
              + "\nPRV "
              + depth
              + " "
              + list.size()
              + " "
              + prv
              + "\nDV "
              + depth
              + " "
              + list.size()
              + " "
              + dv);
    }
  }
 /**
  * Read the given data from the storage node and log the time it cost
  *
  * @param replica the data that wants to read
  */
 public void readData(BlockReplica replica) {
   double time = 0.0;
   int nodeNum = nodeList.size();
   for (int i = 0; i < nodeNum; i++) {
     StorageNode node = nodeList.get(i);
     if (node.contains(replica)) {
       time += replica.getSize() / node.getMaxTransferRate(); // transfer time only; unlike the write paths, no network latency is added
       break;
     }
   }
   Log.printLine("read replica #" + replica.getId() + " cost: " + time + " secnods");
 }
  /** Print out the clustering information. */
  private void printOut() {
    for (TaskSet set : mTask2TaskSet.values()) {
      if (!set.hasChecked) {
        set.hasChecked = true;

        Log.printLine("Job");
        for (Task task : set.getTaskList()) {
          Log.printLine(
              "Task "
                  + task.getCloudletId()
                  + " "
                  + task.getImpact()
                  + " "
                  + task.getCloudletLength());
        }
      }
    }
    // reset the hasChecked flags so other methods can reuse them
    cleanTaskSetChecked();
  }
  /**
   * Store data to StorageNodes; the data center is chosen by the backward-cloud generator with
   * AHP, and the node within it by local TOPSIS.
   */
  public void bindReplicasToStorageNode_DcSelectAHP_Net() {
    int replicaNum = replicaList.size();
    //		int numDc = datacenterList.size();
    // pre-initializing rankedDCindex to zeros here would mislead the later selection of
    // the data-center index, so the ranking is left to AHP_BackwardCloud_Init
    /*rankedDCindex = new int[numDc];
    for(int i=0;i<numDc;i++){
    	rankedDCindex[i] = 0;
    }*/

    int dcIndex = 0, nodeId = 0;
    for (int i = 0; i < replicaNum; i++) {
      BlockReplica replica = replicaList.get(i);

      // use the AHP backward-cloud algorithm to produce a ranked list of data-center indices
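      // (AHP, the Analytic Hierarchy Process, weights data-center attributes via
      // pairwise comparisons; the backward-cloud generator is assumed to estimate
      // the attribute distributions feeding those comparisons, yielding rankedDCindex.)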
      AHP_BackwardCloud.AHP_BackwardCloud_Init(datacenterList);
      dcIndex = dcIndex % AHP_BackwardCloud.rankedDCindex.length;
      //			int localNodeId
      // =TOPSIS_Local(datacenterList.get(AHP_BackwardCloud.rankedDCindex[dcIndex]));
      TOPSIS_Local(datacenterList.get(AHP_BackwardCloud.rankedDCindex[dcIndex]));
      nodeId = nodeId % TOPSIS.rankedTOPSISnodeIndex.length;
      int localNodeId = TOPSIS.rankedTOPSISnodeIndex[nodeId];
      double replicaSize = replica.getSize();
      StorageDatacenter dc = datacenterList.get(AHP_BackwardCloud.rankedDCindex[dcIndex]);
      List<StorageNode> localNodeLst = (List<StorageNode>) dc.nodeList;
      StorageNode node = dc.NodeList.getById(localNodeLst, localNodeId);
      if (node.getAvailableSpace() - replicaSize >= 0.000000000000000000000001
          && !node.contains(replica.getName())) {
        double time = node.addFile(replica);
        double accrossLatency = NetworkTopology.getDelay(getId(), node.getDatacenter().getId());
        time += accrossLatency;
        /*Log.printLine("it take " + time
        + " seconds to write the replica #" + replica.getId()
        + " to be stored in datacenter "
        + node.getDatacenter().getName() + " node #"
        + node.getId());*/
        Log.printLine(
            "replica #"
                + replica.getId()
                + "    			"
                + node.getDatacenter().getName()
                + " node #"
                + node.getId()
                + " 			"
                + time);
      } else {
        i--; // retry this replica with the next ranked data center and node
      }
      dcIndex++;
      nodeId++;
    }
  }
 /**
  * Process a request for the characteristics of a PowerDatacenter.
  *
  * @param ev a SimEvent object
  * @pre ev != $null
  * @post $none
  */
 @Override
 protected void processResourceCharacteristicsRequest(SimEvent ev) {
   setDatacenterCharacteristicsList(new HashMap<>());
   Log.printLine(
       CloudSim.clock()
           + ": "
           + getName()
           + ": Cloud Resource List received with "
           + getDatacenterIdsList().size()
           + " resource(s)");
   for (Integer datacenterId : getDatacenterIdsList()) {
     sendNow(datacenterId, CloudSimTags.RESOURCE_CHARACTERISTICS, getId());
   }
 }
  /** Start this entity (WorkflowScheduler) */
  @Override
  public void startEntity() {
    Log.printLine(getName() + " is starting...");
    // this resource should register to regional GIS.
    // However, if not specified, then register to system GIS (the
    // default CloudInformationService) entity.
    // int gisID = CloudSim.getEntityId(regionalCisName);
    int gisID = -1;
    if (gisID == -1) {
      gisID = CloudSim.getCloudInfoServiceEntityId();
    }

    // send the registration to GIS
    sendNow(gisID, CloudSimTags.REGISTER_RESOURCE, getId());
  }
  /**
   * Gets the clustering delay for a particular job based on the depth(level)
   *
   * @param cl the job
   * @return the clustering delay
   * @pre $none
   * @post $none
   */
  public double getClustDelay(Cloudlet cl) {
    double delay = 0.0;
    if (this.CLUST_DELAY == null) {
      return delay;
    }
    if (cl != null) {
      Job job = (Job) cl;

      if (this.CLUST_DELAY.containsKey(job.getDepth())) {
        delay = this.CLUST_DELAY.get(job.getDepth()).getNextSample();
      } else if (this.CLUST_DELAY.containsKey(0)) {
        delay = this.CLUST_DELAY.get(0).getNextSample();
      } else {
        delay = 0.0;
      }

    } else {
      Log.printLine("Not yet supported");
    }
    return delay;
  }
  /**
   * Gets the postscript delay for a particular job based on the depth(level)
   *
   * @param job the job
   * @return the postscript delay
   * @pre $none
   * @post $none
   */
  public double getPostDelay(Job job) {
    double delay = 0.0;

    if (this.POST_DELAY == null) {
      return delay;
    }
    if (job != null) {

      if (this.POST_DELAY.containsKey(job.getDepth())) {
        delay = this.POST_DELAY.get(job.getDepth()).getNextSample();
      } else if (this.POST_DELAY.containsKey(0)) {
        // the default one
        delay = this.POST_DELAY.get(0).getNextSample();
      } else {
        delay = 0.0;
      }

    } else {
      Log.printLine("Not yet supported");
    }
    return delay;
  }
  /** Parse a DAX file with JDOM */
  private void parseXmlFile(String path) {

    try {

      SAXBuilder builder = new SAXBuilder();
      // parse using builder to get DOM representation of the XML file
      Document dom = builder.build(new File(path));
      Element root = dom.getRootElement();
      List list = root.getChildren();
      for (Iterator it = list.iterator(); it.hasNext(); ) {
        Element node = (Element) it.next();
        if (node.getName().equalsIgnoreCase("job")) {

          long length = 0;
          String nodeName = node.getAttributeValue("id");
          String nodeType = node.getAttributeValue("name");

          /**
           * Capture the runtime. If the attribute is missing, default to 0.1, since CloudSim
           * would ignore a task whose runtime is 0. BUG/#11
           */
          double runtime = 0.1;
          if (node.getAttributeValue("runtime") != null) {
            String nodeTime = node.getAttributeValue("runtime");
            runtime = 1000 * Double.parseDouble(nodeTime);
            if (runtime < 100) {
              runtime = 100;
            }
            length = (long) runtime;
          } else {
            Log.printLine("Cannot find runtime for " + nodeName + ",set it to be 0");
          }
          // multiply by the runtime scale, which defaults to 1.0
          length *= Parameters.getRuntimeScale();

          List fileList = node.getChildren();

          List<org.cloudbus.cloudsim.File> mFileList = new ArrayList<>();

          for (Iterator itf = fileList.iterator(); itf.hasNext(); ) {
            Element file = (Element) itf.next();
            if (file.getName().equalsIgnoreCase("uses")) {
              String fileName = file.getAttributeValue("name"); // DAX version 3.3
              if (fileName == null) {
                fileName = file.getAttributeValue("file"); // DAX version 3.0
              }
              if (fileName == null) {
                Log.print("Error in parsing xml");
              }

              String inout = file.getAttributeValue("link");
              double size = 0.0;

              String fileSize = file.getAttributeValue("size");
              if (fileSize != null) {
                size = Double.parseDouble(fileSize) /*/ 1024*/;
              } else {
                Log.printLine("File Size not found for " + fileName);
              }

              /** A CloudSim quirk: a file size of 0 causes problems, while 1 is OK. */
              if (size == 0) {
                size++;
              }
              /** Set the file type: 1 is input, 2 is output. */
              int type = 0;
              if (inout.equals("input")) {
                type = Parameters.FileType.INPUT.value;
              } else if (inout.equals("output")) {
                type = Parameters.FileType.OUTPUT.value;
              } else {
                Log.printLine("Parsing Error");
              }
              org.cloudbus.cloudsim.File tFile;
              /*
               * An input file may already be registered in the ReplicaCatalog (output files
               * are always created fresh)
               */
              if (size < 0) {
                size = -size; // a negative size is assumed to be a parsing error
                Log.printLine("Size is negative; assuming it is a parser error");
              }
              if (type == Parameters.FileType.OUTPUT.value) {
                /** It is good that CloudSim does tell whether a size is zero */
                tFile = new org.cloudbus.cloudsim.File(fileName, (int) size);
              } else if (ReplicaCatalog.containsFile(fileName)) {
                tFile = ReplicaCatalog.getFile(fileName);
              } else {

                tFile = new org.cloudbus.cloudsim.File(fileName, (int) size);
                ReplicaCatalog.setFile(fileName, tFile);
              }

              tFile.setType(type);
              mFileList.add(tFile);
            }
          }
          Task task;
          // In case of multiple workflow submission. Make sure the jobIdStartsFrom is consistent.
          synchronized (this) {
            task = new Task(this.jobIdStartsFrom, length);
            this.jobIdStartsFrom++;
          }
          task.setType(nodeType);

          task.setUserId(userId);
          mName2Task.put(nodeName, task);

          for (org.cloudbus.cloudsim.File file : mFileList) {
            task.addRequiredFile(file.getName());
          }

          task.setFileList(mFileList);
          this.getTaskList().add(task);

          /** Add dependencies info. */
        } else if (node.getName().equalsIgnoreCase("child")) {
          List pList = node.getChildren();
          String childName = node.getAttributeValue("ref");
          if (mName2Task.containsKey(childName)) {

            Task childTask = (Task) mName2Task.get(childName);

            for (Iterator itc = pList.iterator(); itc.hasNext(); ) {
              Element parent = (Element) itc.next();
              String parentName = parent.getAttributeValue("ref");
              if (mName2Task.containsKey(parentName)) {
                Task parentTask = (Task) mName2Task.get(parentName);
                parentTask.addChild(childTask);
                childTask.addParent(parentTask);
              }
            }
          }
        }
      }
      /** If a task has no parent, it is a root task. */
      ArrayList<Task> roots = new ArrayList<>();
      for (Iterator it = mName2Task.values().iterator(); it.hasNext(); ) {
        Task task = (Task) it.next();
        task.setDepth(0);
        if (task.getParentList().isEmpty()) {
          roots.add(task);
        }
      }

      /** Add depth from top to bottom. */
      for (Task task : roots) {
        setDepth(task, 1);
      }
      /** Clear the map to save memory; parsing a workflow may use a lot of it. */
      this.mName2Task.clear();

    } catch (JDOMException jde) {
      Log.printLine("JDOM Exception;Please make sure your dax file is valid");

    } catch (IOException ioe) {
      Log.printLine("IO Exception;Please make sure dax.path is correctly set in your config file");

    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("Parsing Exception");
    }
  }
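For reference, a minimal DAX fragment of the shape this parser expects (illustrative only; per the branches above, DAX 3.3 names the file with the "name" attribute while DAX 3.0 uses "file"):

  <adag>
    <job id="ID00000" name="mProjectPP" runtime="13.59">
      <uses file="region.hdr" link="input" size="304"/>
      <uses file="projected_1.fits" link="output" size="4222080"/>
    </job>
    <child ref="ID00001">
      <parent ref="ID00000"/>
    </child>
  </adag>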
  /** Creates main() to run this example. This example has only one datacenter and one storage. */
  public static void main(String[] args) {

    try {
      // First step: Initialize the WorkflowSim package.

      /**
       * Note that the number of VMs actually created may be smaller than vmNum if the data
       * center or its hosts lack sufficient resources. Take care.
       */
      int vmNum = 20; // number of VMs
      /** Change this to a real physical path in your environment */
      String daxPath = "/Users/chenweiwei/Work/WorkflowSim-1.0/config/dax/Montage_100.xml";
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      /*
       * Use default Fault Tolerant Parameters
       */
      Parameters.FTCMonitor ftc_monitor = Parameters.FTCMonitor.MONITOR_NONE;
      Parameters.FTCFailure ftc_failure = Parameters.FTCFailure.FAILURE_NONE;
      Parameters.FTCluteringAlgorithm ftc_method = null;

      /**
       * Since we are using MINMIN scheduling algorithm, the planning algorithm should be INVALID
       * such that the planner would not override the result of the scheduler
       */
      Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

      /**
       * A clustering delay must be provided; if you don't need one, set the delay at every
       * level to zero, but do not pass null
       */
      Map<Integer, Double> clusteringDelay = new HashMap<>();
      /** Montage has at most 11 horizontal levels */
      int maxLevel = 11;
      for (int level = 0; level < maxLevel; level++) {
        clusteringDelay.put(
            level, 1.0); // the clustering delay at every level is 1.0 second
      }
      // Add clustering delay to the overhead parameters
      OverheadParameters op = new OverheadParameters(0, null, null, null, clusteringDelay, 0);

      /** Horizontal Clustering */
      ClusteringParameters.ClusteringMethod method =
          ClusteringParameters.ClusteringMethod.HORIZONTAL;
      /**
       * You can specify only clusters.num or clusters.size. clusters.num is the number of
       * clustered jobs per horizontal level; clusters.size is the number of tasks per clustered
       * job; clusters.num * clusters.size = the number of tasks per horizontal level. Here we
       * specify clusters.num = 20, which means we have 20 jobs per level.
       */
      ClusteringParameters cp = new ClusteringParameters(20, 0, method, null);
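      // Worked example: a level with 100 tasks and clusters.num = 20 yields
      // 100 / 20 = 5 tasks per clustered job at that level.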

      /** Initialize static parameters */
      Parameters.init(
          ftc_method,
          ftc_monitor,
          ftc_failure,
          null,
          vmNum,
          daxPath,
          null,
          null,
          op,
          cp,
          sch_method,
          pln_method,
          null,
          0);
      ReplicaCatalog.init(file_system);

      FailureMonitor.init();
      FailureGenerator.init();

      // before creating any entities.
      int num_user = 1; // number of grid users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // mean trace events

      // Initialize the CloudSim library
      CloudSim.init(num_user, calendar, trace_flag);

      DatacenterExtended datacenter0 = createDatacenter("Datacenter_0");

      /** Create a WorkflowPlanner with one scheduler. */
      WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
      /** Create a WorkflowEngine. */
      WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
      /**
       * Create a list of VMs. The userId of a VM is basically the id of the scheduler that
       * controls it.
       */
      List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

      /** Submits this list of vms to this WorkflowEngine. */
      wfEngine.submitVmList(vmlist0, 0);

      /** Binds the data centers with the scheduler. */
      wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

      CloudSim.startSimulation();

      List<Job> outputList0 = wfEngine.getJobsReceivedList();

      CloudSim.stopSimulation();

      printJobList(outputList0);

    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
  /**
   * Prints the job objects
   *
   * @param list list of jobs
   */
  private static void printJobList(List<Job> list) {
    int size = list.size();
    Job job;

    String indent = "    ";
    Log.printLine();
    Log.printLine("========== OUTPUT ==========");
    Log.printLine(
        "Cloudlet ID"
            + indent
            + "STATUS"
            + indent
            + "Data center ID"
            + indent
            + "VM ID"
            + indent
            + indent
            + "Time"
            + indent
            + "Start Time"
            + indent
            + "Finish Time"
            + indent
            + "Depth");

    DecimalFormat dft = new DecimalFormat("###.##");
    for (int i = 0; i < size; i++) {
      job = list.get(i);
      Log.print(indent + job.getCloudletId() + indent + indent);

      if (job.getCloudletStatus() == Cloudlet.SUCCESS) {
        Log.print("SUCCESS");

        Log.printLine(
            indent
                + indent
                + job.getResourceId()
                + indent
                + indent
                + indent
                + job.getVmId()
                + indent
                + indent
                + indent
                + dft.format(job.getActualCPUTime())
                + indent
                + indent
                + dft.format(job.getExecStartTime())
                + indent
                + indent
                + indent
                + dft.format(job.getFinishTime())
                + indent
                + indent
                + indent
                + job.getDepth());
      } else if (job.getCloudletStatus() == Cloudlet.FAILED) {
        Log.print("FAILED");

        Log.printLine(
            indent
                + indent
                + job.getResourceId()
                + indent
                + indent
                + indent
                + job.getVmId()
                + indent
                + indent
                + indent
                + dft.format(job.getActualCPUTime())
                + indent
                + indent
                + dft.format(job.getExecStartTime())
                + indent
                + indent
                + indent
                + dft.format(job.getFinishTime())
                + indent
                + indent
                + indent
                + job.getDepth());
      }
    }
  }
  /**
   * Process the ack received due to a request for VM creation.
   *
   * @param ev a SimEvent object
   * @pre ev != null
   * @post $none
   */
  @Override
  protected void processVmCreate(SimEvent ev) {
    int[] data = (int[]) ev.getData();
    int datacenterId = data[0];
    int vmId = data[1];
    int result = data[2];

    if (result == CloudSimTags.TRUE) {
      getVmsToDatacentersMap().put(vmId, datacenterId);
      /** Fix a CloudSim bug: don't add a null to getVmsCreatedList(). June 15, 2013 */
      if (VmList.getById(getVmList(), vmId) != null) {
        getVmsCreatedList().add(VmList.getById(getVmList(), vmId));
        Log.printLine(
            CloudSim.clock()
                + ": "
                + getName()
                + ": VM #"
                + vmId
                + " has been created in Datacenter #"
                + datacenterId
                + ", Host #"
                + VmList.getById(getVmsCreatedList(), vmId).getHost().getId());
      }
    } else {
      Log.printLine(
          CloudSim.clock()
              + ": "
              + getName()
              + ": Creation of VM #"
              + vmId
              + " failed in Datacenter #"
              + datacenterId);
    }

    incrementVmsAcks();

    // all the requested VMs have been created
    if (getVmsCreatedList().size() == getVmList().size() - getVmsDestroyed()) {
      submitCloudlets();
    } else {
      // all the acks received, but some VMs were not created
      if (getVmsRequested() == getVmsAcks()) {
        // find id of the next datacenter that has not been tried
        for (int nextDatacenterId : getDatacenterIdsList()) {
          if (!getDatacenterRequestedIdsList().contains(nextDatacenterId)) {
            createVmsInDatacenter(nextDatacenterId);
            return;
          }
        }

        // all datacenters already queried
        if (getVmsCreatedList().size() > 0) { // if some VMs were created
          submitCloudlets();
        } else { // no vms created. abort
          Log.printLine(
              CloudSim.clock()
                  + ": "
                  + getName()
                  + ": none of the required VMs could be created. Aborting");
          finishExecution();
        }
      }
    }
  }
 /** Terminate this entity (WorkflowScheduler) */
 @Override
 public void shutdownEntity() {
   clearDatacenters();
   Log.printLine(getName() + " is shutting down...");
 }
 private void printLogMsg(String msg) {
   Log.print("RR_Allocator: " + msg + "\n");
 }
  /**
   * Prints the results.
   *
    * @param datacenter the datacenter
    * @param vms the VM list
    * @param lastClock the last clock
   * @param experimentName the experiment name
   * @param outputInCsv the output in csv
   * @param outputFolder the output folder
   */
  public static void printResults(
      PowerDatacenter datacenter,
      List<Vm> vms,
      double lastClock,
      String experimentName,
      boolean outputInCsv,
      String outputFolder) {
    Log.enable();
    List<Host> hosts = datacenter.getHostList();

    int numberOfHosts = hosts.size();
    int numberOfVms = vms.size();

    double totalSimulationTime = lastClock;
    double energy = datacenter.getPower() / (3600 * 1000);
    int numberOfMigrations = datacenter.getMigrationCount();

    Map<String, Double> slaMetrics = getSlaMetrics(vms);

    double slaOverall = slaMetrics.get("overall");
    double slaAverage = slaMetrics.get("average");
    double slaDegradationDueToMigration = slaMetrics.get("underallocated_migration");
    // double slaTimePerVmWithMigration = slaMetrics.get("sla_time_per_vm_with_migration");
    // double slaTimePerVmWithoutMigration =
    // slaMetrics.get("sla_time_per_vm_without_migration");
    // double slaTimePerHost = getSlaTimePerHost(hosts);
    double slaTimePerActiveHost = getSlaTimePerActiveHost(hosts);

    double sla = slaTimePerActiveHost * slaDegradationDueToMigration;

    List<Double> timeBeforeHostShutdown = getTimesBeforeHostShutdown(hosts);

    int numberOfHostShutdowns = timeBeforeHostShutdown.size();

    double meanTimeBeforeHostShutdown = Double.NaN;
    double stDevTimeBeforeHostShutdown = Double.NaN;
    if (!timeBeforeHostShutdown.isEmpty()) {
      meanTimeBeforeHostShutdown = MathUtil.mean(timeBeforeHostShutdown);
      stDevTimeBeforeHostShutdown = MathUtil.stDev(timeBeforeHostShutdown);
    }

    List<Double> timeBeforeVmMigration = getTimesBeforeVmMigration(vms);
    double meanTimeBeforeVmMigration = Double.NaN;
    double stDevTimeBeforeVmMigration = Double.NaN;
    if (!timeBeforeVmMigration.isEmpty()) {
      meanTimeBeforeVmMigration = MathUtil.mean(timeBeforeVmMigration);
      stDevTimeBeforeVmMigration = MathUtil.stDev(timeBeforeVmMigration);
    }

    if (outputInCsv) {
      File folder = new File(outputFolder);
      if (!folder.exists()) {
        folder.mkdir();
      }
      File folder1 = new File(outputFolder + "/stats");
      if (!folder1.exists()) {
        folder1.mkdir();
      }
      File folder2 = new File(outputFolder + "/time_before_host_shutdown");
      if (!folder2.exists()) {
        folder2.mkdir();
      }
      File folder3 = new File(outputFolder + "/time_before_vm_migration");
      if (!folder3.exists()) {
        folder3.mkdir();
      }
      File folder4 = new File(outputFolder + "/metrics");
      if (!folder4.exists()) {
        folder4.mkdir();
      }

      StringBuilder data = new StringBuilder();
      String delimeter = ",";

      data.append(experimentName + delimeter);
      data.append(parseExperimentName(experimentName));
      data.append(String.format("%d", numberOfHosts) + delimeter);
      data.append(String.format("%d", numberOfVms) + delimeter);
      data.append(String.format("%.2f", totalSimulationTime) + delimeter);
      data.append(String.format("%.5f", energy) + delimeter);
      data.append(String.format("%d", numberOfMigrations) + delimeter);
      data.append(String.format("%.10f", sla) + delimeter);
      data.append(String.format("%.10f", slaTimePerActiveHost) + delimeter);
      data.append(String.format("%.10f", slaDegradationDueToMigration) + delimeter);
      data.append(String.format("%.10f", slaOverall) + delimeter);
      data.append(String.format("%.10f", slaAverage) + delimeter);
      // data.append(String.format("%.5f", slaTimePerVmWithMigration) + delimeter);
      // data.append(String.format("%.5f", slaTimePerVmWithoutMigration) + delimeter);
      // data.append(String.format("%.5f", slaTimePerHost) + delimeter);
      data.append(String.format("%d", numberOfHostShutdowns) + delimeter);
      data.append(String.format("%.2f", meanTimeBeforeHostShutdown) + delimeter);
      data.append(String.format("%.2f", stDevTimeBeforeHostShutdown) + delimeter);
      data.append(String.format("%.2f", meanTimeBeforeVmMigration) + delimeter);
      data.append(String.format("%.2f", stDevTimeBeforeVmMigration) + delimeter);

      if (datacenter.getVmAllocationPolicy() instanceof PowerVmAllocationPolicyMigrationAbstract) {
        PowerVmAllocationPolicyMigrationAbstract vmAllocationPolicy =
            (PowerVmAllocationPolicyMigrationAbstract) datacenter.getVmAllocationPolicy();

        double executionTimeVmSelectionMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryVmSelection());
        double executionTimeVmSelectionStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryVmSelection());
        double executionTimeHostSelectionMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryHostSelection());
        double executionTimeHostSelectionStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryHostSelection());
        double executionTimeVmReallocationMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryVmReallocation());
        double executionTimeVmReallocationStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryVmReallocation());
        double executionTimeTotalMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryTotal());
        double executionTimeTotalStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryTotal());

        data.append(String.format("%.5f", executionTimeVmSelectionMean) + delimeter);
        data.append(String.format("%.5f", executionTimeVmSelectionStDev) + delimeter);
        data.append(String.format("%.5f", executionTimeHostSelectionMean) + delimeter);
        data.append(String.format("%.5f", executionTimeHostSelectionStDev) + delimeter);
        data.append(String.format("%.5f", executionTimeVmReallocationMean) + delimeter);
        data.append(String.format("%.5f", executionTimeVmReallocationStDev) + delimeter);
        data.append(String.format("%.5f", executionTimeTotalMean) + delimeter);
        data.append(String.format("%.5f", executionTimeTotalStDev) + delimeter);

        writeMetricHistory(
            hosts, vmAllocationPolicy, outputFolder + "/metrics/" + experimentName + "_metric");
      }

      data.append("\n");

      writeDataRow(data.toString(), outputFolder + "/stats/" + experimentName + "_stats.csv");
      writeDataColumn(
          timeBeforeHostShutdown,
          outputFolder
              + "/time_before_host_shutdown/"
              + experimentName
              + "_time_before_host_shutdown.csv");
      writeDataColumn(
          timeBeforeVmMigration,
          outputFolder
              + "/time_before_vm_migration/"
              + experimentName
              + "_time_before_vm_migration.csv");

    } else {
      Log.setDisabled(false);
      Log.printLine();
      Log.printLine(String.format("Experiment name: " + experimentName));
      Log.printLine(String.format("Number of hosts: " + numberOfHosts));
      Log.printLine(String.format("Number of VMs: " + numberOfVms));
      Log.printLine(String.format("Total simulation time: %.2f sec", totalSimulationTime));
      Log.printLine(String.format("Energy consumption: %.2f kWh", energy));
      Log.printLine(String.format("Number of VM migrations: %d", numberOfMigrations));
      Log.printLine(String.format("SLA: %.5f%%", sla * 100));
      Log.printLine(
          String.format(
              "SLA perf degradation due to migration: %.2f%%", slaDegradationDueToMigration * 100));
      Log.printLine(String.format("SLA time per active host: %.2f%%", slaTimePerActiveHost * 100));
      Log.printLine(String.format("Overall SLA violation: %.2f%%", slaOverall * 100));
      Log.printLine(String.format("Average SLA violation: %.2f%%", slaAverage * 100));
      // Log.printLine(String.format("SLA time per VM with migration: %.2f%%",
      // slaTimePerVmWithMigration * 100));
      // Log.printLine(String.format("SLA time per VM without migration: %.2f%%",
      // slaTimePerVmWithoutMigration * 100));
      // Log.printLine(String.format("SLA time per host: %.2f%%", slaTimePerHost * 100));
      Log.printLine(String.format("Number of host shutdowns: %d", numberOfHostShutdowns));
      Log.printLine(
          String.format("Mean time before a host shutdown: %.2f sec", meanTimeBeforeHostShutdown));
      Log.printLine(
          String.format(
              "StDev time before a host shutdown: %.2f sec", stDevTimeBeforeHostShutdown));
      Log.printLine(
          String.format("Mean time before a VM migration: %.2f sec", meanTimeBeforeVmMigration));
      Log.printLine(
          String.format("StDev time before a VM migration: %.2f sec", stDevTimeBeforeVmMigration));

      if (datacenter.getVmAllocationPolicy() instanceof PowerVmAllocationPolicyMigrationAbstract) {
        PowerVmAllocationPolicyMigrationAbstract vmAllocationPolicy =
            (PowerVmAllocationPolicyMigrationAbstract) datacenter.getVmAllocationPolicy();

        double executionTimeVmSelectionMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryVmSelection());
        double executionTimeVmSelectionStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryVmSelection());
        double executionTimeHostSelectionMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryHostSelection());
        double executionTimeHostSelectionStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryHostSelection());
        double executionTimeVmReallocationMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryVmReallocation());
        double executionTimeVmReallocationStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryVmReallocation());
        double executionTimeTotalMean =
            MathUtil.mean(vmAllocationPolicy.getExecutionTimeHistoryTotal());
        double executionTimeTotalStDev =
            MathUtil.stDev(vmAllocationPolicy.getExecutionTimeHistoryTotal());

        Log.printLine(
            String.format(
                "Execution time - VM selection mean: %.5f sec", executionTimeVmSelectionMean));
        Log.printLine(
            String.format(
                "Execution time - VM selection stDev: %.5f sec", executionTimeVmSelectionStDev));
        Log.printLine(
            String.format(
                "Execution time - host selection mean: %.5f sec", executionTimeHostSelectionMean));
        Log.printLine(
            String.format(
                "Execution time - host selection stDev: %.5f sec",
                executionTimeHostSelectionStDev));
        Log.printLine(
            String.format(
                "Execution time - VM reallocation mean: %.5f sec",
                executionTimeVmReallocationMean));
        Log.printLine(
            String.format(
                "Execution time - VM reallocation stDev: %.5f sec",
                executionTimeVmReallocationStDev));
        Log.printLine(
            String.format("Execution time - total mean: %.5f sec", executionTimeTotalMean));
        Log.printLine(
            String.format("Execution time - total stDev: %.5f sec", executionTimeTotalStDev));
      }
      Log.printLine();
    }

    Log.setDisabled(true);
  }
  @Override
  public List<Cloudlet> cloudletAssign(List<Cloudlet> cloudletList, List<Vm> vmList) {
    if (vmList != null && !vmList.isEmpty()) {
      List<Cloudlet> toAssignCloudletList = getToAssignCloudletList(cloudletList); // initialize the queue of cloudlets awaiting assignment
      if (toAssignCloudletList.size() < 1) { // no cloudlets waiting for assignment: return null
        return null;
        //				System.exit(0);
      }

      int m = vmList.size(); // number of VMs
      int n = toAssignCloudletList.size(); // number of cloudlets to assign
      int maxCloudletsWaitingLength = vQueueSize.getMaxLength(); // maximum length of a VM's waiting queue
      List<Map<String, Integer>> vmWaitingQueueSizeList =
          initVmWaitingQueueSizeList(); // initialize the list of VM waiting-queue lengths

      /*
       * Log.printLine("Queue size Print Before n=" + n); for (int i = 0;i
       * < m; i++) { Log.print(vmWaitingQueueSizeList.get(i).get("size") +
       * " "); } Log.printLine("\nvirQueueSize"); for (int i = 0; i < m;
       * i++) { Log.print(virQueueSize.get(i) + " "); }
       */

      int i;
      int numFreeVm = m; // number of VMs whose queues are not yet full
      List<Map<String, Integer>> tmpSizeList =
          updateTmpSizeList(-1, numFreeVm, vmWaitingQueueSizeList); // working copy of the queue-length list
      for (i = 0; i < n; i++) { // assign each cloudlet to a suitable VM
        int index = createAction(numFreeVm, tmpSizeList);
        int mSize = tmpSizeList.get(index).get("size");
        if (mSize >= maxCloudletsWaitingLength) { // if the selected queue is full, drop it and choose again

          if (numFreeVm > 1) { // if at least one non-full queue would remain, drop the full queue from the working copy
            tmpSizeList = updateTmpSizeList(index, numFreeVm--, tmpSizeList);
            //						System.out.println(numFreeVm);
            i--;
            continue;
          } else { // every VM's waiting queue is full
            Log.printLine("mSize=50 list(0):" + mSize);
            break;
          }

          //					// find the emptiest queue as the destination VM for the cloudlet
          //					for (int j = 0, tmp = maxCloudletsWaitingLength + 1; j < m; j++) {
          //						if (tmp > vmWaitingQueueSizeList.get(j).get("size")) {
          //							tmp = vmWaitingQueueSizeList.get(j).get("size");
          //							index = j;
          //						}
          //					}
          //					mSize = vmWaitingQueueSizeList.get(0).get("size");

          //					// get the emptiest queue's mSize without sorting
          //					mSize = vmWaitingQueueSizeList.get(index).get("size");
          //					if (mSize >= maxCloudletsWaitingLength) {
          //						Log.printLine("mSize=50 list(0):" + mSize);
          //						break;
          //					}

        }

        /*
         * Log.printLine("\nLOOP I:" + i); for (int j = 0; j < m; j++) {
         * Log.print(vmWaitingQueueSizeList.get(j).get("size") + " "); }
         */

        // System.out.println("one cloudlet successfully assigned to a VM");
        //				int id = vmWaitingQueueSizeList.get(index).get("id");
        int id = tmpSizeList.get(index).get("id"); // id of the selected VM

        if (vQueueSize.increment(id)) { // if the cloudlet fits on the selected VM, increment that VM's queue length
          tmpSizeList.get(index).put("size", ++mSize); // update the working copy of the queue-length list
          for (int j = 0; j < m; j++) { // update the master queue-length list
            if (vmWaitingQueueSizeList.get(j).get("id") == tmpSizeList.get(index).get("id")) {
              vmWaitingQueueSizeList.get(j).put("size", mSize);
              index = j;
              break;
            }
          }
          toAssignCloudletList.get(i).setVmId(id); // assign this cloudlet to the selected VM

          updateQList(index, m, vmList, vmWaitingQueueSizeList); // update the Q-value table
          /*
           * Log.printLine("Cloudlet#" +
           * toAssignCloudletList.get(i).getCloudletId() + " vmid" +
           * toAssignCloudletList.get(i).getVmId() + "VM#" + id +
           * " size:" + vQueueSize.getQueueSize().get(id)); /* if
           * (mSize == 50) Log.printLine("size==50 Vm#" + id +
           * " Cloudlet#" +
           * toAssignCloudletList.get(i).getCloudletId() + " itsVmid "
           * + toAssignCloudletList.get(i).getVmId());
           */

          // Log.printLine("Two Sizes:"
          // + vQueueSize.getQueueSize().get(id) + " "
          // + vmWaitingQueueSizeList.get(index).get("size"));
        } else { // the selected VM's waiting queue is already full
          Log.printLine(
              index
                  + "Index Assign Full Error!! Vm#"
                  + id
                  + " mSize:"
                  + mSize
                  + " vQueueSize:"
                  + vQueueSize.getQueueSize().get(id));
          System.exit(0);
        }
      }

      List<Cloudlet> assignedCloudletList =
          getAssignedCloudletList(i, toAssignCloudletList); // collect the cloudlets that were successfully assigned

      finishAssign(toAssignCloudletList); // finish this assignment round

      Log.printLine(
          "Assign Finished! Left:"
              + getGlobalCloudletWaitingQueue().size()
              + " Success:"
              + assignedCloudletList.size());

      return assignedCloudletList;

    } else { // no VMs are available
      Log.printLine("VmCloudletAssignerLearning No VM Error!!");
      return null;
    }
  }
  /** Creates main() to run this example. This example has only one datacenter and one storage. */
  public static void main(String[] args) {

    try {
      // First step: Initialize the WorkflowSim package.
      /**
       * Note that the number of VMs actually created may be smaller than vmNum if the data
       * center or its hosts lack sufficient resources. Take care.
       */
      int vmNum = 20; // number of VMs
      /** Change this to a real physical path in your environment */
      String daxPath =
          "/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";
      File daxFile = new File(daxPath);
      if (!daxFile.exists()) {
        Log.printLine(
            "Warning: Please replace daxPath with the physical path in your working environment!");
        return;
      }
      /*
       *  Fault Tolerant Parameters
       */
      /**
       * MONITOR_JOB classifies failures based on the level of jobs; MONITOR_VM classifies failures
       * based on the VM id; MONITOR_ALL does not do any classification; MONITOR_NONE does not
       * record any failure.
       */
      FailureParameters.FTCMonitor ftc_monitor = FailureParameters.FTCMonitor.MONITOR_JOB;
      /** Similar to FTCMonitor, FTCFailure controls how failures are generated. */
      FailureParameters.FTCFailure ftc_failure = FailureParameters.FTCFailure.FAILURE_JOB;
      /** In this example, we have horizontal clustering and we use Dynamic Reclustering. */
      FailureParameters.FTCluteringAlgorithm ftc_method =
          FailureParameters.FTCluteringAlgorithm.FTCLUSTERING_DR;
      /** Task failure rate for each level */
      int maxLevel = 11; // most workflows we use have at most 11 levels

      DistributionGenerator[][] failureGenerators = new DistributionGenerator[vmNum][maxLevel];

      for (int level = 0; level < maxLevel; level++) {
        /*
         * For simplicity, set the task failure rate at every level to 0.1, meaning 10% of
         * submitted tasks will fail. The rate does not have to be the same at each level.
         */
        DistributionGenerator generator =
            new DistributionGenerator(
                DistributionGenerator.DistributionFamily.WEIBULL, 100, 1.0, 30, 300, 0.78);
        for (int vmId = 0; vmId < vmNum; vmId++) {
          failureGenerators[vmId][level] = generator;
        }
      }
      /**
       * Since we are using MINMIN scheduling algorithm, the planning algorithm should be INVALID
       * such that the planner would not override the result of the scheduler
       */
      Parameters.SchedulingAlgorithm sch_method = Parameters.SchedulingAlgorithm.MINMIN;
      Parameters.PlanningAlgorithm pln_method = Parameters.PlanningAlgorithm.INVALID;
      ReplicaCatalog.FileSystem file_system = ReplicaCatalog.FileSystem.SHARED;

      /** No overheads */
      OverheadParameters op = new OverheadParameters(0, null, null, null, null, 0);

      /** No Clustering */
      ClusteringParameters.ClusteringMethod method = ClusteringParameters.ClusteringMethod.NONE;
      ClusteringParameters cp = new ClusteringParameters(0, 0, method, null);

      /** Initialize static parameters */
      FailureParameters.init(ftc_method, ftc_monitor, ftc_failure, failureGenerators);
      Parameters.init(vmNum, daxPath, null, null, op, cp, sch_method, pln_method, null, 0);
      ReplicaCatalog.init(file_system);

      FailureMonitor.init();
      FailureGenerator.init();

      // before creating any entities.
      int num_user = 1; // number of grid users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // mean trace events

      // Initialize the CloudSim library
      CloudSim.init(num_user, calendar, trace_flag);

      WorkflowDatacenter datacenter0 = createDatacenter("Datacenter_0");

      /** Create a WorkflowPlanner with one scheduler. */
      WorkflowPlanner wfPlanner = new WorkflowPlanner("planner_0", 1);
      /** Create a WorkflowEngine. */
      WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();
      /**
       * Create a list of VMs. The userId of a VM is simply the id of the scheduler that
       * controls this VM.
       */
      List<CondorVM> vmlist0 = createVM(wfEngine.getSchedulerId(0), Parameters.getVmNum());

      /** Submit this list of VMs to the WorkflowEngine. */
      wfEngine.submitVmList(vmlist0, 0);

      /** Bind the datacenter to the scheduler. */
      wfEngine.bindSchedulerDatacenter(datacenter0.getId(), 0);

      CloudSim.startSimulation();
      List<Job> outputList0 = wfEngine.getJobsReceivedList();
      CloudSim.stopSimulation();
      printJobList(outputList0);
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
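The printJobList helper called above is not shown in this snippet. A minimal
sketch follows; it assumes only the standard Cloudlet accessors that
WorkflowSim's Job inherits, and the column layout is illustrative rather than
the original's.

  // Sketch only: requires java.text.DecimalFormat, java.util.List,
  // org.cloudbus.cloudsim.Cloudlet, org.cloudbus.cloudsim.Log, org.workflowsim.Job
  protected static void printJobList(List<Job> list) {
    String indent = "    ";
    DecimalFormat dft = new DecimalFormat("###.##");
    Log.printLine();
    Log.printLine("========== OUTPUT ==========");
    Log.printLine("Job ID" + indent + "STATUS" + indent + "VM ID" + indent
        + "Time" + indent + "Start" + indent + "Finish");
    for (Job job : list) {
      if (job.getCloudletStatus() == Cloudlet.SUCCESS) {
        Log.printLine(job.getCloudletId() + indent + "SUCCESS" + indent
            + job.getVmId() + indent + dft.format(job.getActualCPUTime()) + indent
            + dft.format(job.getExecStartTime()) + indent + dft.format(job.getFinishTime()));
      } else {
        Log.printLine(job.getCloudletId() + indent + "FAILED");
      }
    }
  }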
Ejemplo n.º 29
  public static void main(String[] args) {
    for (int ca = 0; ca < SchedulingConstants.NUMBER_OF_CASE; ca++) {
      int num_all_cloudlets = SchedulingConstants.NUMBER_OF_CLOUDLETS + ca * 20;

      SchedulingHelper.initOutput(
          SchedulingConstants.our_log_file + num_all_cloudlets,
          SchedulingConstants.our_result_file + num_all_cloudlets,
          SchedulingConstants.our_result_temp_file + num_all_cloudlets);
      OutputStream result_output =
          SchedulingHelper.getOutputStream(SchedulingConstants.our_result_file + num_all_cloudlets);
      OutputStream mediate_result_output =
          SchedulingHelper.getOutputStream(
              SchedulingConstants.our_result_temp_file + num_all_cloudlets);
      OutputStream originOutput = Log.getOutput();

      AllocationMapping mapping = new AllocationMapping(num_all_cloudlets);

      List<Vm> vmList = new ArrayList<Vm>();
      List<Cloudlet> cloudletList = new ArrayList<Cloudlet>();
      List<PowerHost> hostList = new ArrayList<PowerHost>();
      SchedulingHelper.simulation(
          cloudletList,
          hostList,
          vmList,
          mapping,
          SchedulingConstants.our_initial_vmAllocationPolicy);
      SchedulingHelper.outputResultToResultFile("Initial", originOutput, result_output, mapping, 1);

      int iCnt = 1;
      SchedulingHost[] hosts = null;
      List<Cloudlet> tempCloudlets = null;
      while (iCnt <= 1) {
        Log.printLine(iCnt + "th Iteration:");
        SchedulingHelper.outputToResultFile(
            originOutput,
            mediate_result_output,
            "########################" + iCnt + "th Iteration:########################");
        hosts = new SchedulingHost[SchedulingConstants.NUMBER_OF_HOSTS];
        for (int i = 0; i < SchedulingConstants.NUMBER_OF_HOSTS; i++) {
          hosts[i] = new SchedulingHost(HostList.getById(hostList, i));
        }
        tempCloudlets = SchedulingHelper.getCopyOfCloudlets(cloudletList);
        SchedulingHelper.getOrderedCloudletOnSchedulingHost(mapping, hosts, tempCloudlets);
        // Visit the hosts in a random order and exchange load between consecutive
        // pairs (this assumes NUMBER_OF_HOSTS is even).
        int[] num = SchedulingHelper.getRandomPermitation(SchedulingConstants.NUMBER_OF_HOSTS);
        for (int i = 0; i < SchedulingConstants.NUMBER_OF_HOSTS; ) {
          new LocalExchangeProcessor(vmList, mapping)
              .doExchange(hosts[num[i++]], hosts[num[i++]], mediate_result_output, originOutput);
        }
        tempCloudlets = SchedulingHelper.getCopyOfCloudlets(cloudletList);
        SchedulingHelper.simulation(
            tempCloudlets,
            hostList,
            vmList,
            mapping,
            SchedulingConstants.our_normal_vmAllocationPolicy);
        SchedulingHelper.outputResultToResultFile(
            iCnt++ + "th iteration ", originOutput, result_output, mapping, 2);
      }
      tempCloudlets = SchedulingHelper.getCopyOfCloudlets(cloudletList);
      SchedulingHelper.simulation(
          tempCloudlets,
          hostList,
          vmList,
          mapping,
          SchedulingConstants.our_normal_vmAllocationPolicy);
      SchedulingHelper.outputResultToResultFile("Final", originOutput, result_output, mapping, 1);
    }
  }
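SchedulingHelper.getRandomPermitation (name kept exactly as called above) is not
listed either; a purely hypothetical reconstruction is a Fisher-Yates shuffle of
the host indices 0..n-1:

  // Hypothetical sketch of the helper used above; the real implementation is not shown.
  public static int[] getRandomPermitation(int n) {
    int[] perm = new int[n];
    for (int i = 0; i < n; i++) {
      perm[i] = i; // start from the identity permutation
    }
    java.util.Random rnd = new java.util.Random();
    for (int i = n - 1; i > 0; i--) {
      int j = rnd.nextInt(i + 1); // pick a random index in [0, i]
      int tmp = perm[i];
      perm[i] = perm[j];
      perm[j] = tmp;
    }
    return perm;
  }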
Ejemplo n.º 30
  /** Creates main() to run this example */
  public static void main(String[] args) {

    Log.printLine("Starting CloudSimExample2...");

    try {
      // First step: Initialize the CloudSim package. It should be called
      // before creating any entities.
      int num_user = 8; // number of cloud users
      Calendar calendar = Calendar.getInstance();
      boolean trace_flag = false; // whether to trace events

      // Initialize the CloudSim library
      CloudSim.init(num_user, calendar, trace_flag);

      // Second step: Create Datacenters
      // Datacenters are the resource providers in CloudSim. We need at least one of them to
      // run a CloudSim simulation
      Datacenter datacenter0 = createDatacenter("Datacenter_0");
      Datacenter datacenter1 = createDatacenter("Datacenter_1");
      Datacenter datacenter2 = createDatacenter("Datacenter_2");
      Datacenter datacenter3 = createDatacenter("Datacenter_3");

      // Third step: Create Broker
      DatacenterBroker broker = createBroker();
      int brokerId = broker.getId();

      // Fourth step: Create virtual machines
      vmlist = new ArrayList<Vm>();

      // VM description
      int vmid = 0;
      int mips = 250;
      double costpersec = 20.0;
      long size = 10000; // image size (MB)
      int ram = 512; // vm memory (MB)
      long bw = 1000;
      int pesNumber = 1; // number of cpus
      String vmm = "Xen"; // VMM name

      // create five VMs
      Vm vm1 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 28.0;
      mips = 450;
      ram = 768;
      bw = 512;
      vmid++;
      Vm vm2 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 25.0;
      mips = 580;
      ram = 256;
      bw = 768;
      vmid++;
      Vm vm3 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 30.0;
      mips = 300;
      ram = 768;
      bw = 512;
      vmid++;
      Vm vm4 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      costpersec = 22.0;
      mips = 280;
      ram = 1000;
      bw = 128;
      vmid++;
      Vm vm5 =
          new Vm(
              vmid,
              brokerId,
              mips,
              costpersec,
              pesNumber,
              ram,
              bw,
              size,
              vmm,
              new CloudletSchedulerTimeShared());
      vmid++;
      costpersec = 50.0;
      mips = 220;
      ram = 128;
      bw = 128;

      //      Vm vm6 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      costpersec = 22.0;
      //      mips = 280;
      //      ram = 2048;
      //      bw = 64;
      //      Vm vm7 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm8 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm9 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm10 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm11 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm12 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm13 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm14 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm15 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());
      //      vmid++;
      //      Vm vm16 = new Vm(vmid, brokerId, mips, costpersec, pesNumber, ram, bw, size, vmm,
      //          new CloudletSchedulerTimeShared());

      // add the VMs to the vmList
      vmlist.add(vm1);
      vmlist.add(vm2);
      vmlist.add(vm3);
      vmlist.add(vm4);
      vmlist.add(vm5);

      //                        vmlist.add(vm6);
      //                        vmlist.add(vm7);
      //                        vmlist.add(vm9);
      //                        vmlist.add(vm10);
      //                        vmlist.add(vm11);
      //                        vmlist.add(vm12);
      //                        vmlist.add(vm13);
      //                        vmlist.add(vm14);
      //                        vmlist.add(vm15);
      //                        vmlist.add(vm16);

      // submit vm list to the broker
      broker.submitVmList(vmlist);

      // Fifth step: Create eight Cloudlets
      cloudletList = new ArrayList<Cloudlet>();

      // Cloudlet properties
      int id = 0;
      pesNumber = 1;
      long length = 2500;
      long fileSize = 300;
      long outputSize = 450;
      UtilizationModel utilizationModel = new UtilizationModelFull();
      // wt and wm are extra weight parameters taken by this example's extended
      // Cloudlet constructor (the stock CloudSim Cloudlet does not accept them)
      double wt = 1.0;
      double wm = 0;

      Cloudlet cloudlet1 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet1.setUserId(brokerId);

      id++;
      pesNumber = 1;
      length = 2800;
      fileSize = 600;
      outputSize = 600;
      Cloudlet cloudlet2 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet2.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 1250;
      fileSize = 800;
      outputSize = 800;
      Cloudlet cloudlet3 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet3.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 2480;
      fileSize = 300;
      outputSize = 300;
      Cloudlet cloudlet4 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet4.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 7000;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet5 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet5.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 1500;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet6 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet6.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 800;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet7 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet7.setUserId(brokerId);
      id++;
      pesNumber = 1;
      length = 7500;
      fileSize = 500;
      outputSize = 500;
      Cloudlet cloudlet8 =
          new Cloudlet(
              wt,
              wm,
              id,
              length,
              pesNumber,
              fileSize,
              outputSize,
              utilizationModel,
              utilizationModel,
              utilizationModel);
      cloudlet8.setUserId(brokerId);

      // add the cloudlets to the list
      cloudletList.add(cloudlet1);
      cloudletList.add(cloudlet2);
      cloudletList.add(cloudlet3);
      cloudletList.add(cloudlet4);
      cloudletList.add(cloudlet5);

      cloudletList.add(cloudlet6);
      cloudletList.add(cloudlet7);
      cloudletList.add(cloudlet8);

      // submit cloudlet list to the broker
      broker.submitCloudletList(cloudletList);

      // bind the cloudlets to the vms. This way, the broker
      // will submit the bound cloudlets only to the specific VM

      // broker.bindCloudletToVm(cloudlet1.getCloudletId(),vm1.getId());
      // broker.bindCloudletToVm(cloudlet2.getCloudletId(),vm2.getId());

      // Sixth step: Starts the simulation
      CloudSim.startSimulation();

      // Final step: Print results when simulation is over
      List<Cloudlet> newList = broker.getCloudletReceivedList();

      CloudSim.stopSimulation();

      printCloudletList(newList);

      // Print the debt of each user to each datacenter
      // datacenter0.printDebts();

      Log.printLine("CloudSimExample2 finished!");
    } catch (Exception e) {
      e.printStackTrace();
      Log.printLine("The simulation has been terminated due to an unexpected error");
    }
  }
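The createBroker() helper is referenced but not listed. In the stock CloudSim
examples it is a thin wrapper around the DatacenterBroker constructor, roughly:

  private static DatacenterBroker createBroker() {
    DatacenterBroker broker = null;
    try {
      // The broker submits VMs and cloudlets on behalf of a user
      broker = new DatacenterBroker("Broker");
    } catch (Exception e) {
      e.printStackTrace();
    }
    return broker;
  }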