Example no. 1
  /**
   * Construct a list of the VM allocations on the given hosts, sorted into execution order
   *
   * @param hosts the hosts whose VM allocations should be collected
   * @return the VM allocations of all hosts, sorted by execution order
   */
  private ArrayList<VMAllocation> buildVmList(ArrayList<Host> hosts) {
    ArrayList<VMAllocation> vmList = new ArrayList<VMAllocation>();

    for (Host host : hosts) {
      vmList.addAll(host.getVMAllocations());
    }
    Collections.sort(vmList, new VmExecutionOrderComparator());

    return vmList;
  }
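
The sort above relies on VmExecutionOrderComparator, which is not shown in these examples. A minimal, self-contained sketch of such a comparator, using a hypothetical Allocation stand-in with an assumed 'order' field rather than the real VMAllocation API:

  import java.util.Comparator;

  // Hypothetical stand-in for VMAllocation; 'order' is an assumed execution-order value.
  class Allocation {
    final int order;

    Allocation(int order) {
      this.order = order;
    }
  }

  class ExecutionOrderComparator implements Comparator<Allocation> {
    @Override
    public int compare(Allocation a, Allocation b) {
      // ascending: a lower order value executes first
      return Integer.compare(a.order, b.order);
    }
  }

Sorting then mirrors the call above: Collections.sort(list, new ExecutionOrderComparator()).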
Example no. 2
  @Override
  public void execute() {

    double util = 0;
    double power = 0;
    double prevSlavWork;
    double prevWork;

    // store current work and SLA violated work values
    prevSlavWork = totalSlavWork;
    prevWork = totalWork;

    // reset total work values
    totalSlavWork = 0;
    totalWork = 0;

    totalPower = 0;

    for (Host host : dc.getHosts()) {

      // store host CPU utilization
      if (!hostUtil.containsKey(host)) {
        hostUtil.put(host, new DescriptiveStatistics());
      }

      hostUtil.get(host).addValue(host.getCpuManager().getCpuInUse());

      util += host.getCpuManager().getCpuInUse();

      // get VM SLA values
      for (VMAllocation vmAlloc : host.getVMAllocations()) {
        totalSlavWork += vmAlloc.getVm().getApplication().getTotalSLAViolatedWork();
        // NOTE: This ONLY works with single-tiered applications. For multi-tiered
        // applications, this will count incoming work multiple times!
        totalWork += vmAlloc.getVm().getApplication().getTotalIncomingWork();
      }

      // get power consumption
      power += host.getCurrentPowerConsumption();
      totalPower += host.getPowerConsumed();
    }

    dcUtil.addValue(util);

    dcPower.addValue(power);
    dcPowerEfficiency.addValue(util / power);
    double optimalPowerConsumption = calculateOptimalPowerConsumption(util);
    dcOptimalPower.addValue(optimalPowerConsumption);
    dcOptimalPowerEfficiency.addValue(util / optimalPowerConsumption);

    dcOptimalPowerRatio.addValue((util / optimalPowerConsumption) / (util / power));

    // record the fraction of the incoming work since the last interval that violated its SLA
    dcSla.addValue((totalSlavWork - prevSlavWork) / (totalWork - prevWork));
  }
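
Two notes on the code above. The helper calculateOptimalPowerConsumption is not shown; the sketch below is a plausible stand-in that assumes a simple linear power model packing the utilized CPU onto as few fully-loaded hosts as possible. The model and every constant in it are assumptions for illustration, not taken from the simulator.

  // Hypothetical linear power model; all constants are assumed for illustration.
  private static final double HOST_IDLE_POWER = 100.0; // watts drawn by an idle host (assumed)
  private static final double POWER_PER_CPU_UNIT = 0.05; // additional watts per CPU unit in use (assumed)
  private static final double HOST_CPU_CAPACITY = 2400 * 16; // CPU units per host (assumed)

  private double calculateOptimalPowerConsumption(double util) {
    // charge idle power only for the minimum number of hosts able to carry the load,
    // plus a linear cost for the CPU actually in use
    int hostsNeeded = (int) Math.ceil(util / HOST_CPU_CAPACITY);
    return hostsNeeded * HOST_IDLE_POWER + util * POWER_PER_CPU_UNIT;
  }

Also note that the final dcSla.addValue(...) call divides by (totalWork - prevWork); if no new work arrived during the interval this is 0.0 / 0.0, which evaluates to NaN in Java, so consumers of this statistic may want to guard that case.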
Example no. 3
  @Override
  public void setup(Simulation simulation) {

    DataCentre dc = new DataCentre(simulation);

    int aantalHosts = 4;

    int hostPrivCpu = 500; // Cynric - what is this based on?
    int hostPrivBW = 1280; // Cynric - same question?

    int aantalVMs = 8; // Cynric - what is this based on?
    int VMcores = 1; // Cynric - 16 cores per VM? Surely not; that's the host/node itself?
    int VMcpu = 2400; // Cynric - same question?
    int VMram = 512; // Cynric - same question?

    int VMbw = 12800 / (16 / VMcores); // Cynric - what is the difference with hostPrivBW?
    int VMstorage = 2048;
    double VMtime = 0.01; // Cynric - what is this based on?
    // double utilScale = 0.98;  // Cynric - again, what is this based on?

    // double[] cpuUtilization = {0.5, 0.9, 0.85}; // lower, upper, and target CPU utilization of the hosts
    // Cynric - again, what is this based on?

    // int relocPolicy = 30;   // how often (in minutes) the relocation policy is executed
    // int consolPolicy = 60;  // how often (in minutes) the consolidation policy is executed
    // Cynric - does not matter much for us

    // Add the DataCentre to the simulation
    simulation.addDatacentre(dc);

    // Create the HostPoolManager capability separately, as we need to reference it later to add
    // hosts
    HostPoolManager hostPool = new HostPoolManager();

    // Create a new AutonomicManager with this capability
    AutonomicManager dcAM = new AutonomicManager(simulation, hostPool);

    // Install the HostStatusPolicy and VmPlacementPolicy
    dcAM.installPolicy(new HostStatusPolicy(5));

    // choose which placement policy to use here
    dcAM.installPolicy(new DefaultVmPlacementPolicy());
    // dcAM.installPolicy(new VWallPlacementPolicy());

    // Create hosts
    Host.Builder hostBuilder =
        HostModels.VirtualWallHost(simulation)
            .privCpu(hostPrivCpu)
            .privBandwidth(hostPrivBW)
            .resourceManagerFactory(new DefaultResourceManagerFactory())
            .resourceSchedulerFactory(new DefaultResourceSchedulerFactory());

    // Instantiate the Hosts
    ArrayList<Host> hosts = new ArrayList<Host>();
    for (int i = 0; i < aantalHosts; ++i) {
      Host host = hostBuilder.build();

      // Create an AutonomicManager for the Host, with the HostManager capability (provides access
      // to the host being managed)
      AutonomicManager hostAM = new AutonomicManager(simulation, new HostManager(host));

      // Install a HostMonitoringPolicy, which sends status updates to the datacentre manager, set
      // to execute every 5 minutes
      // Cynric - what are these five minutes based on?
      hostAM.installPolicy(new HostMonitoringPolicy(dcAM), SimTime.minutes(5), 0);

      // Install a HostOperationsPolicy, which handles basic host operations
      hostAM.installPolicy(new HostOperationsPolicy());

      // Optionally, we can "install" the manager into the Host. This ensures that the manager
      // does not run when the host is not 'ON', and triggers hooks in the manager and policies
      // on power on and off.
      host.installAutonomicManager(hostAM);

      // Add the Host to the DataCentre
      dc.addHost(host);

      // Add the Host to the HostPoolManager capability of our datacentre AutonomicManager
      hostPool.addHost(host, hostAM);

      hosts.add(host);
    }

    // Create applications
    ArrayList<VmAllocationRequest> vmRequests = new ArrayList<VmAllocationRequest>();

    for (int i = 0; i < aantalVMs; ++i) {
      // We do not use a trace since we have no suitable one; we use a static workload.
      StaticWorkload workload = new StaticWorkload(simulation);
      workload.setWorkLevel(100); // otherwise there was no throughput?

      InteractiveApplication app =
          Applications.singleTaskInteractiveApplication(
              simulation, workload, VMcores, VMcpu, VMram, VMbw, VMstorage, VMtime);

      InteractiveServiceLevelAgreement sla =
          new InteractiveServiceLevelAgreement(app)
              .responseTime(1, 1); // SLA limit of 1 s on response time
      app.setSla(sla);

      // place applications
      vmRequests.addAll(app.createInitialVmRequests());
    }

    VmPlacementEvent vmPlacementEvent = new VmPlacementEvent(dcAM, vmRequests);
    simulation.sendEvent(vmPlacementEvent, 0);
  }
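
Host creation above uses a fluent builder (Host.Builder): the builder is configured once and build() is then called repeatedly inside the aantalHosts loop. A minimal, self-contained sketch of that pattern with hypothetical stand-in types (not the real DCSim API):

  class SimpleHost {
    final int privCpu;
    final int privBandwidth;

    private SimpleHost(Builder b) {
      this.privCpu = b.privCpu;
      this.privBandwidth = b.privBandwidth;
    }

    static class Builder {
      private int privCpu;
      private int privBandwidth;

      Builder privCpu(int cpu) {
        this.privCpu = cpu;
        return this; // returning 'this' enables fluent chaining
      }

      Builder privBandwidth(int bw) {
        this.privBandwidth = bw;
        return this;
      }

      SimpleHost build() {
        return new SimpleHost(this); // each call produces a fresh host from the same configuration
      }
    }
  }

Usage mirrors the setup above: configure one Builder with privCpu(500).privBandwidth(1280), then call build() once per host.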
Example no. 4
  /**
   * Execute the VMs in the simulation
   *
   * @param hosts the hosts whose VMs should be executed
   */
  public void execute(ArrayList<Host> hosts) {

    // retrieve ordered list of vm allocations
    ArrayList<VMAllocation> vmList = buildVmList(hosts);

    // calculate the resources each VM has available
    for (VMAllocation vmAllocation : vmList) {
      if (vmAllocation.getVm() != null) vmAllocation.getVm().prepareExecution();
    }

    // prepare hosts for VM execution
    for (Host host : hosts) {
      // if the Host is ON
      if (host.getState() == Host.HostState.ON) {
        // allow the Host's CPU Scheduler to prepare for scheduling
        host.getCpuScheduler().prepareScheduler();
        host.getCpuScheduler().beginScheduling();

        // prepare the privileged domain VM for execution
        host.getPrivDomainAllocation().getVm().prepareExecution();

        // instruct the Host's CPU Scheduler to run the privileged domain VM
        host.getCpuScheduler().schedulePrivDomain(host.getPrivDomainAllocation());
      }
    }

    // set of VMs that have completed execution
    HashSet<VMAllocation> completedVms = new HashSet<VMAllocation>();
    boolean notDone = true; // true while execution is not complete

    // execute VMs in rounds until complete. In each round, every VM has a chance to execute
    do {
      notDone = false; // start by assuming done

      // instruct Host CPU Schedulers to begin a round of scheduling
      for (Host host : hosts) {
        if (host.getState() == Host.HostState.ON) host.getCpuScheduler().beginRound();
      }

      // execute VMs
      for (VMAllocation vmAllocation : vmList) {
        // proceed only if the VM has not yet completed execution, the allocation
        // actually contains an instantiated VM, and the host is ON
        if (!completedVms.contains(vmAllocation)
            && vmAllocation.getVm() != null
            && vmAllocation.getHost().getState() == Host.HostState.ON) {
          // if the CPU scheduler has not indicated that it is COMPLETE (i.e. out of resources)
          if (vmAllocation.getHost().getCpuScheduler().getState()
              != CpuScheduler.CpuSchedulerState.COMPLETE) {
            // run the VM
            if (vmAllocation.getHost().getCpuScheduler().processVM(vmAllocation)) {
              // returned true = VM is not finished executing (still has work to complete)
              notDone = true; // not done yet
            } else {
              // returned false = VM is done (no more work to complete)
              completedVms.add(vmAllocation);
            }
          }
        }
      }

      // instruct Host CPU Schedulers that the round has completed
      for (Host host : hosts) {
        if (host.getState() == Host.HostState.ON) host.getCpuScheduler().endRound();
      }

    } while (notDone); // if not done, execute another round

    // instruct Host CPU Schedulers that scheduling is complete
    for (Host host : hosts) {
      if (host.getState() == Host.HostState.ON) {
        host.getCpuScheduler().endScheduling();

        host.getPrivDomainAllocation().getVm().completeExecution();
      }
    }

    // update the resourcesInUse for each VM
    for (VMAllocation vmAllocation : vmList) {
      if (vmAllocation.getVm() != null) {
        vmAllocation.getVm().completeExecution();
      }
    }
  }
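
The do/while loop above is an instance of a general round-based execution pattern: each round gives every unfinished unit one chance to run, and rounds repeat until no unit reports outstanding work. A self-contained sketch of just that pattern, with a hypothetical Task interface standing in for the VM and scheduler machinery:

  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  interface Task {
    boolean runOnce(); // returns true if the task still has work left
  }

  class RoundBasedExecutor {
    static void executeInRounds(List<Task> tasks) {
      Set<Task> completed = new HashSet<Task>();
      boolean notDone;
      do {
        notDone = false; // assume this round finishes everything
        for (Task t : tasks) {
          if (completed.contains(t)) {
            continue; // skip tasks that already finished
          }
          if (t.runOnce()) {
            notDone = true; // still has work: run another round
          } else {
            completed.add(t); // finished: exclude from future rounds
          }
        }
      } while (notDone);
    }
  }

Tracking finished tasks in a set, as the original does with completedVms, keeps each completed VM from being offered to the scheduler again in later rounds.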