protected HashMap<String, Object> buildConfigParams(HostVO host) {
    HashMap<String, Object> params = new HashMap<String, Object>(host.getDetails().size() + 5);
    params.putAll(host.getDetails());

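    // Identify where this host sits in the topology: zone, plus pod and cluster when available.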
    params.put("guid", host.getGuid());
    params.put("zone", Long.toString(host.getDataCenterId()));
    if (host.getPodId() != null) {
      params.put("pod", Long.toString(host.getPodId()));
    }
    if (host.getClusterId() != null) {
      params.put("cluster", Long.toString(host.getClusterId()));
      String guid = null;
      ClusterVO cluster = _clusterDao.findById(host.getClusterId());
      if (cluster.getGuid() == null) {
        guid = host.getDetail("pool");
      } else {
        guid = cluster.getGuid();
      }
      if (guid != null && !guid.isEmpty()) {
        params.put("pool", guid);
      }
    }

    params.put("ipaddress", host.getPrivateIpAddress());
    params.put("secondary.storage.vm", "false");
    params.put(
        "max.template.iso.size", _configDao.getValue(Config.MaxTemplateAndIsoSize.toString()));
    params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString()));
    return params;
  }
  @Override
  public void scheduleRestartForVmsOnHost(final HostVO host) {

    if (host.getType() != Host.Type.Routing) {
      return;
    }
    s_logger.warn("Scheduling restart for VMs on host " + host.getId());

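    // Collect the VMs assigned to the host and look up its zone for the alert below.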
    final List<VMInstanceVO> vms = _instanceDao.listByHostId(host.getId());
    final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());

    // collect the names of HA-enabled VMs for the alert email
    StringBuilder sb = null;
    if ((vms != null) && !vms.isEmpty()) {
      sb = new StringBuilder();
      sb.append("  Starting HA on the following VMs: ");
      for (VMInstanceVO vm : vms) {
        if (vm.isHaEnabled()) {
          sb.append(" " + vm.getName());
        }
      }
    }

    // send an email alert that the host is down, include VMs
    HostPodVO podVO = _podDao.findById(host.getPodId());
    String hostDesc =
        "name: "
            + host.getName()
            + " (id:"
            + host.getId()
            + "), availability zone: "
            + dcVO.getName()
            + ", pod: "
            + podVO.getName();

    _alertMgr.sendAlert(
        AlertManager.ALERT_TYPE_HOST,
        host.getDataCenterId(),
        host.getPodId(),
        "Host is down, " + hostDesc,
        "Host [" + hostDesc + "] is down." + ((sb != null) ? sb.toString() : ""));

    if (vms != null) {
      for (final VMInstanceVO vm : vms) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug("Notifying HA Mgr to investigate vm " + vm.getId() + "-" + vm.getName());
        }
        scheduleRestart(vm, true);
      }
    }
  }
  @Override
  public Status isAgentAlive(HostVO agent) {
    if (s_logger.isDebugEnabled()) {
      s_logger.debug("checking if agent (" + agent.getId() + ") is alive");
    }

    if (agent.getPodId() == null) {
      return null;
    }

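    // Ask other hosts in the same pod to ping the agent's private IP so we can distinguish
    // an unreachable host from a host that is up but whose agent is disconnected.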
    List<Long> otherHosts = findHostByPod(agent.getPodId(), agent.getId());

    for (Long hostId : otherHosts) {

      if (s_logger.isDebugEnabled()) {
        s_logger.debug(
            "sending ping from ("
                + hostId
                + ") to agent's host ip address ("
                + agent.getPrivateIpAddress()
                + ")");
      }
      Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress());
      if (hostState == null) {
        continue;
      }
      if (hostState == Status.Up) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug(
              "ping from ("
                  + hostId
                  + ") to agent's host ip address ("
                  + agent.getPrivateIpAddress()
                  + ") successful, returning that agent is disconnected");
        }
        return Status
            .Disconnected; // the computing host ip is ping-able, but the computing agent is down,
                           // report that the agent is disconnected
      } else if (hostState == Status.Down) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug("returning host state: " + hostState);
        }
        return hostState;
      }
    }

    // could not reach agent, could not reach agent's host, unclear what the problem is but it'll
    // require more investigation...
    if (s_logger.isDebugEnabled()) {
      s_logger.debug(
          "could not reach agent, could not reach agent's host, returning that we don't have enough information");
    }
    return null;
  }
  // TODO: add test for method
  @Override
  public final HostVO createHostVOForDirectConnectAgent(
      final HostVO host,
      final StartupCommand[] startup,
      final ServerResource resource,
      final Map<String, String> details,
      final List<String> hostTags) {
    StartupCommand firstCmd = startup[0];
    if (!(firstCmd instanceof StartupRoutingCommand)) {
      return null;
    }

    StartupRoutingCommand ssCmd = ((StartupRoutingCommand) firstCmd);
    if (ssCmd.getHypervisorType() != HypervisorType.Hyperv) {
      return null;
    }

    s_logger.info(
        "Host: "
            + host.getName()
            + " connected with hypervisor type: "
            + HypervisorType.Hyperv
            + ". Checking CIDR...");

    HostPodVO pod = _podDao.findById(host.getPodId());
    DataCenterVO dc = _dcDao.findById(host.getDataCenterId());

    _resourceMgr.checkCIDR(pod, dc, ssCmd.getPrivateIpAddress(), ssCmd.getPrivateNetmask());

    return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.Hyperv, details, hostTags);
  }
  // TODO: Get rid of this case once we've determined that the capacity listeners above have all the
  // changes
  // create capacity entries if none exist for this server
  private void createCapacityEntry(StartupCommand startup, HostVO server) {
    SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
    capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId());
    capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId());
    capacitySC.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());

    if (startup instanceof StartupRoutingCommand) {
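      // CPU capacity: look up the existing CPU capacity row for this host, or create one.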
      SearchCriteria<CapacityVO> capacityCPU = _capacityDao.createSearchCriteria();
      capacityCPU.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId());
      capacityCPU.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId());
      capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());
      capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU);
      List<CapacityVO> capacityVOCpus = _capacityDao.search(capacityCPU, null);
      Float cpuovercommitratio =
          Float.parseFloat(
              _clusterDetailsDao
                  .findDetail(server.getClusterId(), "cpuOvercommitRatio")
                  .getValue());
      Float memoryOvercommitRatio =
          Float.parseFloat(
              _clusterDetailsDao
                  .findDetail(server.getClusterId(), "memoryOvercommitRatio")
                  .getValue());

      if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) {
        CapacityVO CapacityVOCpu = capacityVOCpus.get(0);
        long newTotalCpu =
            (long)
                (server.getCpus().longValue() * server.getSpeed().longValue() * cpuovercommitratio);
        if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu)
            || ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity())
                <= newTotalCpu)) {
          CapacityVOCpu.setTotalCapacity(newTotalCpu);
        } else if ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()
                > newTotalCpu)
            && (CapacityVOCpu.getUsedCapacity() < newTotalCpu)) {
          CapacityVOCpu.setReservedCapacity(0);
          CapacityVOCpu.setTotalCapacity(newTotalCpu);
        } else {
          s_logger.debug(
              "Unexpected new total CPU capacity "
                  + newTotalCpu
                  + "; existing used/reserved/total is "
                  + CapacityVOCpu.getUsedCapacity()
                  + "/"
                  + CapacityVOCpu.getReservedCapacity()
                  + "/"
                  + CapacityVOCpu.getTotalCapacity());
        }
        _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu);
      } else {
        CapacityVO capacity =
            new CapacityVO(
                server.getId(),
                server.getDataCenterId(),
                server.getPodId(),
                server.getClusterId(),
                0L,
                (long) (server.getCpus().longValue() * server.getSpeed().longValue()),
                CapacityVO.CAPACITY_TYPE_CPU);
        _capacityDao.persist(capacity);
      }

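      // Memory capacity: same lookup-then-update-or-create pattern as CPU above.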
      SearchCriteria<CapacityVO> capacityMem = _capacityDao.createSearchCriteria();
      capacityMem.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId());
      capacityMem.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId());
      capacityMem.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());
      capacityMem.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_MEMORY);
      List<CapacityVO> capacityVOMems = _capacityDao.search(capacityMem, null);

      if (capacityVOMems != null && !capacityVOMems.isEmpty()) {
        CapacityVO CapacityVOMem = capacityVOMems.get(0);
        long newTotalMem = (long) ((server.getTotalMemory()) * memoryOvercommitRatio);
        if (CapacityVOMem.getTotalCapacity() <= newTotalMem
            || (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity()
                <= newTotalMem)) {
          CapacityVOMem.setTotalCapacity(newTotalMem);
        } else if (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity()
                > newTotalMem
            && CapacityVOMem.getUsedCapacity() < newTotalMem) {
          CapacityVOMem.setReservedCapacity(0);
          CapacityVOMem.setTotalCapacity(newTotalMem);
        } else {
          s_logger.debug(
              "Unexpected new total memory capacity "
                  + newTotalMem
                  + "; existing used/reserved/total is "
                  + CapacityVOMem.getUsedCapacity()
                  + "/"
                  + CapacityVOMem.getReservedCapacity()
                  + "/"
                  + CapacityVOMem.getTotalCapacity());
        }
        _capacityDao.update(CapacityVOMem.getId(), CapacityVOMem);
      } else {
        CapacityVO capacity =
            new CapacityVO(
                server.getId(),
                server.getDataCenterId(),
                server.getPodId(),
                server.getClusterId(),
                0L,
                server.getTotalMemory(),
                CapacityVO.CAPACITY_TYPE_MEMORY);
        _capacityDao.persist(capacity);
      }
    }
  }
  @DB
  @Override
  public void updateCapacityForHost(HostVO host) {
    // prepare the service offerings
    List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved();
    Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>();
    for (ServiceOfferingVO offering : offerings) {
      offeringsMap.put(offering.getId(), offering);
    }

    long usedCpu = 0;
    long usedMemory = 0;
    long reservedMemory = 0;
    long reservedCpu = 0;

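    // Tally capacity used by VMs currently running on this host.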
    List<VMInstanceVO> vms = _vmDao.listUpByHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
      s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId());
    }

    for (VMInstanceVO vm : vms) {
      ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
      usedMemory += so.getRamSize() * 1024L * 1024L;
      usedCpu += so.getCpu() * so.getSpeed();
    }

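    // VMs whose last host was this one still count as reserved capacity until the release
    // interval expires.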
    List<VMInstanceVO> vmsByLastHostId = _vmDao.listByLastHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
      s_logger.debug(
          "Found " + vmsByLastHostId.size() + " VMs not running on host " + host.getId());
    }
    for (VMInstanceVO vm : vmsByLastHostId) {
      long secondsSinceLastUpdate =
          (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000;
      if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) {
        ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
        reservedMemory += so.getRamSize() * 1024L * 1024L;
        reservedCpu += so.getCpu() * so.getSpeed();
      }
    }

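    // Calibrate the existing CPU and memory capacity rows if present; otherwise create them
    // in a single transaction.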
    CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), CapacityVO.CAPACITY_TYPE_CPU);
    CapacityVO memCap =
        _capacityDao.findByHostIdType(host.getId(), CapacityVO.CAPACITY_TYPE_MEMORY);

    if (cpuCap != null && memCap != null) {
      if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) {
        s_logger.debug(
            "No need to calibrate cpu capacity, host:"
                + host.getId()
                + " usedCpu: "
                + cpuCap.getUsedCapacity()
                + " reservedCpu: "
                + cpuCap.getReservedCapacity());
      } else if (cpuCap.getReservedCapacity() != reservedCpu) {
        s_logger.debug(
            "Calibrate reserved cpu for host: "
                + host.getId()
                + " old reservedCpu:"
                + cpuCap.getReservedCapacity()
                + " new reservedCpu:"
                + reservedCpu);
        cpuCap.setReservedCapacity(reservedCpu);
      } else if (cpuCap.getUsedCapacity() != usedCpu) {
        s_logger.debug(
            "Calibrate used cpu for host: "
                + host.getId()
                + " old usedCpu:"
                + cpuCap.getUsedCapacity()
                + " new usedCpu:"
                + usedCpu);
        cpuCap.setUsedCapacity(usedCpu);
      }

      if (memCap.getUsedCapacity() == usedMemory
          && memCap.getReservedCapacity() == reservedMemory) {
        s_logger.debug(
            "No need to calibrate memory capacity, host:"
                + host.getId()
                + " usedMem: "
                + memCap.getUsedCapacity()
                + " reservedMem: "
                + memCap.getReservedCapacity());
      } else if (memCap.getReservedCapacity() != reservedMemory) {
        s_logger.debug(
            "Calibrate reserved memory for host: "
                + host.getId()
                + " old reservedMem:"
                + memCap.getReservedCapacity()
                + " new reservedMem:"
                + reservedMemory);
        memCap.setReservedCapacity(reservedMemory);
      } else if (memCap.getUsedCapacity() != usedMemory) {
        /*
         * Used-memory calibration can be inaccurate here because VMs may be in a transitional
         * state (Starting/Migrating) where it is not known on which host they are allocated.
         */
        s_logger.debug(
            "Calibrate used memory for host: "
                + host.getId()
                + " old usedMem: "
                + memCap.getUsedCapacity()
                + " new usedMem: "
                + usedMemory);
        memCap.setUsedCapacity(usedMemory);
      }

      try {
        _capacityDao.update(cpuCap.getId(), cpuCap);
        _capacityDao.update(memCap.getId(), memCap);
      } catch (Exception e) {
        s_logger.error(
            "Caught exception while updating cpu/memory capacity for the host " + host.getId(), e);
      }
    } else {
      Transaction txn = Transaction.currentTxn();
      CapacityState capacityState =
          _configMgr.findClusterAllocationState(ApiDBUtils.findClusterById(host.getClusterId()))
                  == AllocationState.Disabled
              ? CapacityState.Disabled
              : CapacityState.Enabled;
      txn.start();
      CapacityVO capacity =
          new CapacityVO(
              host.getId(),
              host.getDataCenterId(),
              host.getPodId(),
              host.getClusterId(),
              usedMemory,
              host.getTotalMemory(),
              CapacityVO.CAPACITY_TYPE_MEMORY);
      capacity.setReservedCapacity(reservedMemory);
      capacity.setCapacityState(capacityState);
      _capacityDao.persist(capacity);

      capacity =
          new CapacityVO(
              host.getId(),
              host.getDataCenterId(),
              host.getPodId(),
              host.getClusterId(),
              usedCpu,
              (long) (host.getCpus().longValue() * host.getSpeed().longValue()),
              CapacityVO.CAPACITY_TYPE_CPU);
      capacity.setReservedCapacity(reservedCpu);
      capacity.setCapacityState(capacityState);
      _capacityDao.persist(capacity);
      txn.commit();
    }
  }
    @Override
    public void run() {
      try {
        SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.Storage.toString());

        ConcurrentHashMap<Long, StorageStats> storageStats =
            new ConcurrentHashMap<Long, StorageStats>();
        List<HostVO> hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
          Answer answer = _agentMgr.easySend(host.getId(), command);
          if (answer != null && answer.getResult()) {
            storageStats.put(host.getId(), (StorageStats) answer);
          }
        }

        sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.SecondaryStorage.toString());

        hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
          Answer answer = _agentMgr.easySend(host.getId(), command);
          if (answer != null && answer.getResult()) {
            storageStats.put(host.getId(), (StorageStats) answer);
          }
        }
        _storageStats = storageStats;

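        // Collect stats for primary storage pools by routing the command through each pool.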
        ConcurrentHashMap<Long, StorageStats> storagePoolStats =
            new ConcurrentHashMap<Long, StorageStats>();

        List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
        for (StoragePoolVO pool : storagePools) {
          GetStorageStatsCommand command =
              new GetStorageStatsCommand(pool.getUuid(), pool.getPoolType(), pool.getPath());
          Answer answer = _storageManager.sendToPool(pool, command);
          if (answer != null && answer.getResult()) {
            storagePoolStats.put(pool.getId(), (StorageStats) answer);
          }
        }
        _storagePoolStats = storagePoolStats;

        // a list to store the new capacity entries that will be committed once everything is
        // calculated
        List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();

        // Updating the storage entries and creating new ones if they don't exist.
        Transaction txn = Transaction.open(Transaction.CLOUD_DB);
        try {
          if (s_logger.isTraceEnabled()) {
            s_logger.trace("recalculating system storage capacity");
          }
          txn.start();
          for (Long hostId : storageStats.keySet()) {
            StorageStats stats = storageStats.get(hostId);
            short capacityType = -1;
            HostVO host = _hostDao.findById(hostId);
            host.setTotalSize(stats.getCapacityBytes());
            _hostDao.update(host.getId(), host);

            SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
            capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
            capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, host.getDataCenterId());

            if (Host.Type.SecondaryStorage.equals(host.getType())) {
              capacityType = CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE;
            } else if (Host.Type.Storage.equals(host.getType())) {
              capacityType = CapacityVO.CAPACITY_TYPE_STORAGE;
            }
            if (-1 != capacityType) {
              capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
              List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
              if (capacities.size() == 0) { // Create a new one
                CapacityVO capacity =
                    new CapacityVO(
                        host.getId(),
                        host.getDataCenterId(),
                        host.getPodId(),
                        stats.getByteUsed(),
                        stats.getCapacityBytes(),
                        capacityType);
                _capacityDao.persist(capacity);
              } else { // Update if it already exists.
                CapacityVO capacity = capacities.get(0);
                capacity.setUsedCapacity(stats.getByteUsed());
                capacity.setTotalCapacity(stats.getCapacityBytes());
                _capacityDao.update(capacity.getId(), capacity);
              }
            }
          } // End of for
          txn.commit();
        } catch (Exception ex) {
          txn.rollback();
          s_logger.error("Unable to update storage capacity", ex);
        } finally {
          txn.close();
        }

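        // Write the collected pool stats back to each storage pool and refresh its capacity entry.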
        for (Long poolId : storagePoolStats.keySet()) {
          StorageStats stats = storagePoolStats.get(poolId);
          StoragePoolVO pool = _storagePoolDao.findById(poolId);

          if (pool == null) {
            continue;
          }

          pool.setCapacityBytes(stats.getCapacityBytes());
          long available = stats.getCapacityBytes() - stats.getByteUsed();
          if (available < 0) {
            available = 0;
          }
          pool.setAvailableBytes(available);
          _storagePoolDao.update(pool.getId(), pool);

          _storageManager.createCapacityEntry(pool, 0L);
        }
      } catch (Throwable t) {
        s_logger.error("Error trying to retrieve storage stats", t);
      }
    }
  @Override
  public Boolean isVmAlive(VirtualMachine vm, Host host) {
    if (!vm.getType().isUsedBySystem()) {
      s_logger.debug("Not a System VM, unable to determine state of " + vm + ", returning null");
      return null;
    }

    if (s_logger.isDebugEnabled()) {
      s_logger.debug("Testing if " + vm + " is alive");
    }

    if (vm.getHostId() == null) {
      s_logger.debug("There's no host id for " + vm);
      return null;
    }

    HostVO vmHost = _hostDao.findById(vm.getHostId());
    if (vmHost == null) {
      s_logger.debug("Unable to retrieve the host by using id " + vm.getHostId());
      return null;
    }

    List<? extends Nic> nics = _networkMgr.getNicsForTraffic(vm.getId(), TrafficType.Management);
    if (nics.size() == 0) {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug(
            "Unable to find a management nic, cannot ping this system VM, unable to determine state of "
                + vm
                + " returning null");
      }
      return null;
    }

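    // Ping the VM's management IP from other hosts in the same pod; if the VM does not answer
    // but its host does, report the VM as down.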
    for (Nic nic : nics) {
      if (nic.getIp4Address() == null) {
        continue;
      }
      // get the data center IP address, find a host on the pod, use that host to ping the data
      // center IP address
      List<Long> otherHosts = findHostByPod(vmHost.getPodId(), vm.getHostId());
      for (Long otherHost : otherHosts) {
        Status vmState = testIpAddress(otherHost, nic.getIp4Address());
        if (vmState == null) {
          // can't get information from that host, try the next one
          continue;
        }
        if (vmState == Status.Up) {
          if (s_logger.isDebugEnabled()) {
            s_logger.debug(
                "successfully pinged vm's private IP ("
                    + vm.getPrivateIpAddress()
                    + "), returning that the VM is up");
          }
          return Boolean.TRUE;
        } else if (vmState == Status.Down) {
          // We can't ping the VM directly...if we can ping the host, then report the VM down.
          // If we can't ping the host, then we don't have enough information.
          Status vmHostState = testIpAddress(otherHost, vmHost.getPrivateIpAddress());
          if ((vmHostState != null) && (vmHostState == Status.Up)) {
            if (s_logger.isDebugEnabled()) {
              s_logger.debug(
                  "successfully pinged vm's host IP ("
                      + vmHost.getPrivateIpAddress()
                      + "), but could not ping VM, returning that the VM is down");
            }
            return Boolean.FALSE;
          }
        }
      }
    }

    if (s_logger.isDebugEnabled()) {
      s_logger.debug("unable to determine state of " + vm + " returning null");
    }
    return null;
  }
  protected Long restart(HaWorkVO work) {
    List<HaWorkVO> items = _haDao.listFutureHaWorkForVm(work.getInstanceId(), work.getId());
    if (items.size() > 0) {
      StringBuilder str =
          new StringBuilder(
              "Cancelling this work item because newer ones have been scheduled.  Work Ids = [");
      for (HaWorkVO item : items) {
        str.append(item.getId()).append(", ");
      }
      str.delete(str.length() - 2, str.length()).append("]");
      s_logger.info(str.toString());
      return null;
    }

    items = _haDao.listRunningHaWorkForVm(work.getInstanceId());
    if (items.size() > 0) {
      StringBuilder str =
          new StringBuilder(
              "Waiting because there's HA work being executed on an item currently.  Work Ids =[");
      for (HaWorkVO item : items) {
        str.append(item.getId()).append(", ");
      }
      str.delete(str.length() - 2, str.length()).append("]");
      s_logger.info(str.toString());
      return (System.currentTimeMillis() >> 10) + _investigateRetryInterval;
    }

    long vmId = work.getInstanceId();

    VMInstanceVO vm = _itMgr.findByIdAndType(work.getType(), work.getInstanceId());
    if (vm == null) {
      s_logger.info("Unable to find vm: " + vmId);
      return null;
    }

    s_logger.info("HA on " + vm);
    if (vm.getState() != work.getPreviousState() || vm.getUpdated() != work.getUpdateTime()) {
      s_logger.info(
          "VM "
              + vm
              + " has been changed.  Current State = "
              + vm.getState()
              + " Previous State = "
              + work.getPreviousState()
              + " last updated = "
              + vm.getUpdated()
              + " previous updated = "
              + work.getUpdateTime());
      return null;
    }

    short alertType = AlertManager.ALERT_TYPE_USERVM;
    if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) {
      alertType = AlertManager.ALERT_TYPE_DOMAIN_ROUTER;
    } else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) {
      alertType = AlertManager.ALERT_TYPE_CONSOLE_PROXY;
    } else if (VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType())) {
      alertType = AlertManager.ALERT_TYPE_SSVM;
    }

    HostVO host = _hostDao.findById(work.getHostId());
    boolean isHostRemoved = false;
    if (host == null) {
      host = _hostDao.findByIdIncludingRemoved(work.getHostId());
      if (host != null) {
        s_logger.debug(
            "VM "
                + vm.toString()
                + " is now no longer on host "
                + work.getHostId()
                + " as the host is removed");
        isHostRemoved = true;
      }
    }

    DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
    HostPodVO podVO = _podDao.findById(host.getPodId());
    String hostDesc =
        "name: "
            + host.getName()
            + "(id:"
            + host.getId()
            + "), availability zone: "
            + dcVO.getName()
            + ", pod: "
            + podVO.getName();

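    // Investigating step: ask each investigator whether the VM is alive; if none can tell,
    // try to fence it off before scheduling a restart.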
    Boolean alive = null;
    if (work.getStep() == Step.Investigating) {
      if (!isHostRemoved) {
        if (vm.getHostId() == null || vm.getHostId() != work.getHostId()) {
          s_logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId());
          return null;
        }

        Enumeration<Investigator> en = _investigators.enumeration();
        Investigator investigator = null;
        while (en.hasMoreElements()) {
          investigator = en.nextElement();
          alive = investigator.isVmAlive(vm, host);
          s_logger.info(investigator.getName() + " found " + vm + " to be alive? " + alive);
          if (alive != null) {
            break;
          }
        }
        boolean fenced = false;
        if (alive == null) {
          s_logger.debug("Fencing off VM that we don't know the state of");
          Enumeration<FenceBuilder> enfb = _fenceBuilders.enumeration();
          while (enfb.hasMoreElements()) {
            FenceBuilder fb = enfb.nextElement();
            Boolean result = fb.fenceOff(vm, host);
            s_logger.info("Fencer " + fb.getName() + " returned " + result);
            if (result != null && result) {
              fenced = true;
              break;
            }
          }
        } else if (!alive) {
          fenced = true;
        } else {
          s_logger.debug(
              "VM " + vm.getHostName() + " is found to be alive by " + investigator.getName());
          if (host.getStatus() == Status.Up) {
            s_logger.info(vm + " is alive and host is up. No need to restart it.");
            return null;
          } else {
            s_logger.debug("Rescheduling because the host is not up but the vm is alive");
            return (System.currentTimeMillis() >> 10) + _investigateRetryInterval;
          }
        }

        if (!fenced) {
          s_logger.debug("We were unable to fence off the VM " + vm);
          _alertMgr.sendAlert(
              alertType,
              vm.getDataCenterIdToDeployIn(),
              vm.getPodIdToDeployIn(),
              "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc,
              "Unable to fence off VM, name: "
                  + vm.getHostName()
                  + ", id: "
                  + vmId
                  + " which was running on host "
                  + hostDesc);
          return (System.currentTimeMillis() >> 10) + _restartRetryInterval;
        }

        try {
          _itMgr.advanceStop(vm, true, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount());
        } catch (ResourceUnavailableException e) {
          assert false : "How do we hit this when force is true?";
          throw new CloudRuntimeException("Caught exception even though it should be handled.", e);
        } catch (OperationTimedoutException e) {
          assert false : "How do we hit this when force is true?";
          throw new CloudRuntimeException("Caught exception even though it should be handled.", e);
        } catch (ConcurrentOperationException e) {
          assert false : "How do we hit this when force is true?";
          throw new CloudRuntimeException("Caught exception even though it should be handled.", e);
        }

        work.setStep(Step.Scheduled);
        _haDao.update(work.getId(), work);
      } else {
        s_logger.debug(
            "HA step is Investigating but the host has been removed; calling forced stop on the VM anyway");
        try {
          _itMgr.advanceStop(vm, true, _accountMgr.getSystemUser(), _accountMgr.getSystemAccount());
        } catch (ResourceUnavailableException e) {
          assert false : "How do we hit this when force is true?";
          throw new CloudRuntimeException("Caught exception even though it should be handled.", e);
        } catch (OperationTimedoutException e) {
          assert false : "How do we hit this when force is true?";
          throw new CloudRuntimeException("Caught exception even though it should be handled.", e);
        } catch (ConcurrentOperationException e) {
          assert false : "How do we hit this when force is true?";
          throw new CloudRuntimeException("Caught exception even though it should be handled.", e);
        }
      }
    }

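    // Decide whether the VM should actually be restarted: HA must be enabled (or forced),
    // storage must allow a restart elsewhere, and the retry budget must not be exhausted.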
    vm = _itMgr.findByIdAndType(vm.getType(), vm.getId());

    if (!_forceHA && !vm.isHaEnabled()) {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug("VM is not HA enabled so we're done.");
      }
      return null; // VM doesn't require HA
    }

    if (!_storageMgr.canVmRestartOnAnotherServer(vm.getId())) {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug("VM can not restart on another server.");
      }
      return null;
    }

    if (work.getTimesTried() > _maxRetries) {
      s_logger.warn("Retried the max number of times, so deleting work item for VM: " + vmId);
      return null;
    }

    try {
      VMInstanceVO started =
          _itMgr.advanceStart(
              vm,
              new HashMap<VirtualMachineProfile.Param, Object>(),
              _accountMgr.getSystemUser(),
              _accountMgr.getSystemAccount());
      if (started != null) {
        s_logger.info("VM is now restarted: " + vmId + " on " + started.getHostId());
        return null;
      }

      if (s_logger.isDebugEnabled()) {
        s_logger.debug(
            "Rescheduling VM " + vm.toString() + " to try again in " + _restartRetryInterval);
      }
    } catch (final InsufficientCapacityException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterIdToDeployIn(),
          vm.getPodIdToDeployIn(),
          "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc,
          "Insufficient capacity to restart VM, name: "
              + vm.getHostName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
    } catch (final ResourceUnavailableException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterIdToDeployIn(),
          vm.getPodIdToDeployIn(),
          "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc,
          "The Storage is unavailable for trying to restart VM, name: "
              + vm.getHostName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
    } catch (ConcurrentOperationException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterIdToDeployIn(),
          vm.getPodIdToDeployIn(),
          "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc,
          "A concurrent operation prevented restarting VM, name: "
              + vm.getHostName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
    } catch (OperationTimedoutException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterIdToDeployIn(),
          vm.getPodIdToDeployIn(),
          "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc,
          "The operation timed out while trying to restart VM, name: "
              + vm.getHostName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
    }
    vm = _itMgr.findByIdAndType(vm.getType(), vm.getId());
    work.setUpdateTime(vm.getUpdated());
    work.setPreviousState(vm.getState());
    return (System.currentTimeMillis() >> 10) + _restartRetryInterval;
  }
  public Long migrate(final HaWorkVO work) {
    final long vmId = work.getInstanceId();

    final VirtualMachineGuru<VMInstanceVO> mgr = findManager(work.getType());

    VMInstanceVO vm = mgr.get(vmId);
    if (vm == null || vm.getRemoved() != null) {
      s_logger.debug("Unable to find the vm " + vmId);
      return null;
    }

    s_logger.info("Migrating vm: " + vm.toString());
    if (vm.getHostId() == null || vm.getHostId() != work.getHostId()) {
      s_logger.info("VM is no longer running on the current hostId");
      return null;
    }

    short alertType = AlertManager.ALERT_TYPE_USERVM_MIGRATE;
    if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) {
      alertType = AlertManager.ALERT_TYPE_DOMAIN_ROUTER_MIGRATE;
    } else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) {
      alertType = AlertManager.ALERT_TYPE_CONSOLE_PROXY_MIGRATE;
    }

    HostVO fromHost = _hostDao.findById(vm.getHostId());
    String fromHostName = ((fromHost == null) ? "unknown" : fromHost.getName());
    HostVO toHost = null;
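    // Scheduled step: find a destination host and advance the work item to the Migrating step.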
    if (work.getStep() == Step.Scheduled) {
      if (vm.getState() != State.Running) {
        s_logger.info(
            "VM's state is not ready for migration. "
                + vm.toString()
                + " State is "
                + vm.getState().toString());
        return (System.currentTimeMillis() >> 10) + _migrateRetryInterval;
      }

      DataCenterVO dcVO = _dcDao.findById(fromHost.getDataCenterId());
      HostPodVO podVO = _podDao.findById(fromHost.getPodId());

      try {
        toHost = mgr.prepareForMigration(vm);
        if (toHost == null) {
          if (s_logger.isDebugEnabled()) {
            s_logger.debug("Unable to find a host for migrating vm " + vmId);
          }
          _alertMgr.sendAlert(
              alertType,
              vm.getDataCenterId(),
              vm.getPodId(),
              "Unable to migrate vm "
                  + vm.getName()
                  + " from host "
                  + fromHostName
                  + " in zone "
                  + dcVO.getName()
                  + " and pod "
                  + podVO.getName(),
              "Unable to find a suitable host");
        }
      } catch (final InsufficientCapacityException e) {
        s_logger.warn("Unable to migrate due to insufficient capacity " + vm.toString());
        _alertMgr.sendAlert(
            alertType,
            vm.getDataCenterId(),
            vm.getPodId(),
            "Unable to migrate vm "
                + vm.getName()
                + " from host "
                + fromHostName
                + " in zone "
                + dcVO.getName()
                + " and pod "
                + podVO.getName(),
            "Insufficient capacity");
      } catch (final StorageUnavailableException e) {
        s_logger.warn("Storage is unavailable: " + vm.toString());
        _alertMgr.sendAlert(
            alertType,
            vm.getDataCenterId(),
            vm.getPodId(),
            "Unable to migrate vm "
                + vm.getName()
                + " from host "
                + fromHostName
                + " in zone "
                + dcVO.getName()
                + " and pod "
                + podVO.getName(),
            "Storage is gone.");
      }

      if (toHost == null) {
        _agentMgr.maintenanceFailed(vm.getHostId());
        return null;
      }

      if (s_logger.isDebugEnabled()) {
        s_logger.debug("Migrating from " + work.getHostId() + " to " + toHost.getId());
      }
      work.setStep(Step.Migrating);
      work.setHostId(toHost.getId());
      _haDao.update(work.getId(), work);
    }

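    // Migrating step: send the MigrateCommand to the source host; on failure, alert and roll
    // the state back.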
    if (work.getStep() == Step.Migrating) {
      vm = mgr.get(vmId); // let's see if anything has changed.
      boolean migrated = false;
      if (vm == null
          || vm.getRemoved() != null
          || vm.getHostId() == null
          || !_itMgr.stateTransitTo(vm, Event.MigrationRequested, vm.getHostId())) {
        s_logger.info("Migration cancelled because state has changed: " + vm.toString());
      } else {
        try {
          boolean isWindows =
              _guestOSCategoryDao
                  .findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId())
                  .getName()
                  .equalsIgnoreCase("Windows");
          MigrateCommand cmd =
              new MigrateCommand(vm.getInstanceName(), toHost.getPrivateIpAddress(), isWindows);
          Answer answer = _agentMgr.send(fromHost.getId(), cmd);
          if (answer != null && answer.getResult()) {
            migrated = true;
            _storageMgr.unshare(vm, fromHost);
            work.setStep(Step.Investigating);
            _haDao.update(work.getId(), work);
          }
        } catch (final AgentUnavailableException e) {
          s_logger.debug("host became unavailable");
        } catch (final OperationTimedoutException e) {
          s_logger.debug("operation timed out");
          if (e.isActive()) {
            scheduleRestart(vm, true);
          }
        }
      }

      if (!migrated) {
        s_logger.info("Migration was unsuccessful.  Cleaning up: " + vm.toString());

        DataCenterVO dcVO = _dcDao.findById(vm.getDataCenterId());
        HostPodVO podVO = _podDao.findById(vm.getPodId());
        _alertMgr.sendAlert(
            alertType,
            fromHost.getDataCenterId(),
            fromHost.getPodId(),
            "Unable to migrate vm "
                + vm.getName()
                + " from host "
                + fromHost.getName()
                + " in zone "
                + dcVO.getName()
                + " and pod "
                + podVO.getName(),
            "Migrate Command failed.  Please check logs.");

        _itMgr.stateTransitTo(vm, Event.MigrationFailedOnSource, toHost.getId());
        _agentMgr.maintenanceFailed(vm.getHostId());

        Command cleanup = mgr.cleanup(vm, null);
        _agentMgr.easySend(toHost.getId(), cleanup);
        _storageMgr.unshare(vm, toHost);

        return null;
      }
    }

    if (toHost == null) {
      toHost = _hostDao.findById(work.getHostId());
    }
    DataCenterVO dcVO = _dcDao.findById(toHost.getDataCenterId());
    HostPodVO podVO = _podDao.findById(toHost.getPodId());

    try {
      if (!mgr.completeMigration(vm, toHost)) {
        _alertMgr.sendAlert(
            alertType,
            toHost.getDataCenterId(),
            toHost.getPodId(),
            "Unable to migrate "
                + vmId
                + " to host "
                + toHost.getName()
                + " in zone "
                + dcVO.getName()
                + " and pod "
                + podVO.getName(),
            "Migration not completed");
        s_logger.warn("Unable to complete migration: " + vm.toString());
      } else {
        s_logger.info("Migration is complete: " + vm.toString());
      }
      return null;
    } catch (final AgentUnavailableException e) {
      s_logger.warn("Agent is unavailable for " + vm.toString());
    } catch (final OperationTimedoutException e) {
      s_logger.warn("Operation timed out for " + vm.toString());
    }
    _itMgr.stateTransitTo(vm, Event.MigrationFailedOnDest, toHost.getId());
    return (System.currentTimeMillis() >> 10) + _migrateRetryInterval;
  }
  protected Long restart(final HaWorkVO work) {
    final long vmId = work.getInstanceId();

    final VirtualMachineGuru<VMInstanceVO> mgr = findManager(work.getType());
    if (mgr == null) {
      s_logger.warn(
          "Unable to find a handler for " + work.getType().toString() + ", throwing out " + vmId);
      return null;
    }

    VMInstanceVO vm = mgr.get(vmId);
    if (vm == null) {
      s_logger.info("Unable to find vm: " + vmId);
      return null;
    }

    s_logger.info("HA on " + vm.toString());
    if (vm.getState() != work.getPreviousState() || vm.getUpdated() != work.getUpdateTime()) {
      s_logger.info(
          "VM "
              + vm.toString()
              + " has been changed.  Current State = "
              + vm.getState()
              + " Previous State = "
              + work.getPreviousState()
              + " last updated = "
              + vm.getUpdated()
              + " previous updated = "
              + work.getUpdateTime());
      return null;
    }

    final HostVO host = _hostDao.findById(work.getHostId());

    DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());
    HostPodVO podVO = _podDao.findById(host.getPodId());
    String hostDesc =
        "name: "
            + host.getName()
            + "(id:"
            + host.getId()
            + "), availability zone: "
            + dcVO.getName()
            + ", pod: "
            + podVO.getName();

    short alertType = AlertManager.ALERT_TYPE_USERVM;
    if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) {
      alertType = AlertManager.ALERT_TYPE_DOMAIN_ROUTER;
    } else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) {
      alertType = AlertManager.ALERT_TYPE_CONSOLE_PROXY;
    }

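    // Investigating step: determine whether the VM is alive; fence it off if it is dead or its
    // state is unknown, then mark the work item Scheduled.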
    Boolean alive = null;
    if (work.getStep() == Step.Investigating) {
      if (vm.getHostId() == null || vm.getHostId() != work.getHostId()) {
        s_logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId());
        if (vm.getState() == State.Starting && vm.getUpdated() == work.getUpdateTime()) {
          _itMgr.stateTransitTo(vm, Event.AgentReportStopped, null);
        }
        return null;
      }

      Enumeration<Investigator> en = _investigators.enumeration();
      Investigator investigator = null;
      while (en.hasMoreElements()) {
        investigator = en.nextElement();
        alive = investigator.isVmAlive(vm, host);
        if (alive != null) {
          s_logger.debug(
              investigator.getName() + " found VM " + vm.getName() + " to be alive? " + alive);
          break;
        }
      }
      if (alive != null && alive) {
        s_logger.debug("VM " + vm.getName() + " is found to be alive by " + investigator.getName());
        if (host.getStatus() == Status.Up) {
          compareState(vm, new AgentVmInfo(vm.getInstanceName(), mgr, State.Running), false);
          return null;
        } else {
          s_logger.debug("Rescheduling because the host is not up but the vm is alive");
          return (System.currentTimeMillis() >> 10) + _investigateRetryInterval;
        }
      }

      boolean fenced = false;
      if (alive == null || !alive) {
        fenced = true;
        s_logger.debug("Fencing off VM that we don't know the state of");
        Enumeration<FenceBuilder> enfb = _fenceBuilders.enumeration();
        while (enfb.hasMoreElements()) {
          final FenceBuilder fb = enfb.nextElement();
          Boolean result = fb.fenceOff(vm, host);
          if (result != null && !result) {
            fenced = false;
          }
        }
      }

      if (alive == null && !fenced) {
        s_logger.debug("We were unable to fence off the VM " + vm.toString());
        _alertMgr.sendAlert(
            alertType,
            vm.getDataCenterId(),
            vm.getPodId(),
            "Unable to restart " + vm.getName() + " which was running on host " + hostDesc,
            "Unable to fence off VM, name: "
                + vm.getName()
                + ", id: "
                + vmId
                + " which was running on host "
                + hostDesc);
        return (System.currentTimeMillis() >> 10) + _restartRetryInterval;
      }

      mgr.completeStopCommand(vm);

      work.setStep(Step.Scheduled);
      _haDao.update(work.getId(), work);
    }

    // send an alert for VMs that stop unexpectedly
    _alertMgr.sendAlert(
        alertType,
        vm.getDataCenterId(),
        vm.getPodId(),
        "VM (name: "
            + vm.getName()
            + ", id: "
            + vmId
            + ") stopped unexpectedly on host "
            + hostDesc,
        "Virtual Machine "
            + vm.getName()
            + " (id: "
            + vm.getId()
            + ") running on host ["
            + hostDesc
            + "] stopped unexpectedly.");

    vm = mgr.get(vm.getId());

    if (!_forceHA && !vm.isHaEnabled()) {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug("VM is not HA enabled so we're done.");
      }
      return null; // VM doesn't require HA
    }

    if (!_storageMgr.canVmRestartOnAnotherServer(vm.getId())) {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug("VM can not restart on another server.");
      }
      return null;
    }

    if (work.getTimesTried() > _maxRetries) {
      s_logger.warn("Retried the max number of times, so deleting work item for VM: " + vmId);
      return null;
    }

    try {
      VMInstanceVO started = mgr.start(vm.getId(), 0);
      if (started != null) {
        s_logger.info("VM is now restarted: " + vmId + " on " + started.getHostId());
        return null;
      }

      if (s_logger.isDebugEnabled()) {
        s_logger.debug(
            "Rescheduling VM " + vm.toString() + " to try again in " + _restartRetryInterval);
      }
      vm = mgr.get(vm.getId());
      work.setUpdateTime(vm.getUpdated());
      work.setPreviousState(vm.getState());
      return (System.currentTimeMillis() >> 10) + _restartRetryInterval;
    } catch (final InsufficientCapacityException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterId(),
          vm.getPodId(),
          "Unable to restart " + vm.getName() + " which was running on host " + hostDesc,
          "Insufficient capacity to restart VM, name: "
              + vm.getName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
      return null;
    } catch (final StorageUnavailableException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterId(),
          vm.getPodId(),
          "Unable to restart " + vm.getName() + " which was running on host " + hostDesc,
          "The Storage is unavailable for trying to restart VM, name: "
              + vm.getName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
      return null;
    } catch (ConcurrentOperationException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterId(),
          vm.getPodId(),
          "Unable to restart " + vm.getName() + " which was running on host " + hostDesc,
          "A concurrent operation prevented restarting VM, name: "
              + vm.getName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
      return null;
    } catch (ExecutionException e) {
      s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage());
      _alertMgr.sendAlert(
          alertType,
          vm.getDataCenterId(),
          vm.getPodId(),
          "Unable to restart " + vm.getName() + " which was running on host " + hostDesc,
          "Execution failed while trying to restart VM, name: "
              + vm.getName()
              + ", id: "
              + vmId
              + " which was running on host "
              + hostDesc);
      return null;
    }
  }
  @Test(priority = -1)
  public void setUp() {
    ComponentContext.initComponentsLifeCycle();

    host = hostDao.findByGuid(this.getHostGuid());
    if (host != null) {
      dcId = host.getDataCenterId();
      clusterId = host.getClusterId();
      podId = host.getPodId();
      return;
    }
    // create data center
    DataCenterVO dc =
        new DataCenterVO(
            UUID.randomUUID().toString(),
            "test",
            "8.8.8.8",
            null,
            "10.0.0.1",
            null,
            "10.0.0.1/24",
            null,
            null,
            NetworkType.Basic,
            null,
            null,
            true,
            true,
            null,
            null);
    dc = dcDao.persist(dc);
    dcId = dc.getId();
    // create pod

    HostPodVO pod =
        new HostPodVO(
            UUID.randomUUID().toString(),
            dc.getId(),
            this.getHostGateway(),
            this.getHostCidr(),
            8,
            "test");
    pod = podDao.persist(pod);
    podId = pod.getId();
    // create xen cluster
    ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster");
    cluster.setHypervisorType(HypervisorType.XenServer.toString());
    cluster.setClusterType(ClusterType.CloudManaged);
    cluster.setManagedState(ManagedState.Managed);
    cluster = clusterDao.persist(cluster);
    clusterId = cluster.getId();
    // create xen host

    host = new HostVO(this.getHostGuid());
    host.setName("devcloud xen host");
    host.setType(Host.Type.Routing);
    host.setPrivateIpAddress(this.getHostIp());
    host.setDataCenterId(dc.getId());
    host.setVersion("6.0.1");
    host.setAvailable(true);
    host.setSetup(true);
    host.setPodId(podId);
    host.setLastPinged(0);
    host.setResourceState(ResourceState.Enabled);
    host.setHypervisorType(HypervisorType.XenServer);
    host.setClusterId(cluster.getId());

    host = hostDao.persist(host);

    imageStore = new ImageStoreVO();
    imageStore.setName("test");
    imageStore.setDataCenterId(dcId);
    imageStore.setProviderName("CloudStack ImageStore Provider");
    imageStore.setRole(DataStoreRole.Image);
    imageStore.setUrl(this.getSecondaryStorage());
    imageStore.setUuid(UUID.randomUUID().toString());
    imageStore = imageStoreDao.persist(imageStore);
  }
  @Test(priority = -1)
  public void setUp() {
    ComponentContext.initComponentsLifeCycle();

    host = hostDao.findByGuid(this.getHostGuid());
    if (host != null) {
      dcId = host.getDataCenterId();
      clusterId = host.getClusterId();
      podId = host.getPodId();
      imageStore = this.imageStoreDao.findByName(imageStoreName);
    } else {
      // create data center
      DataCenterVO dc =
          new DataCenterVO(
              UUID.randomUUID().toString(),
              "test",
              "8.8.8.8",
              null,
              "10.0.0.1",
              null,
              "10.0.0.1/24",
              null,
              null,
              NetworkType.Basic,
              null,
              null,
              true,
              true,
              null,
              null);
      dc = dcDao.persist(dc);
      dcId = dc.getId();
      // create pod

      HostPodVO pod =
          new HostPodVO(
              UUID.randomUUID().toString(),
              dc.getId(),
              this.getHostGateway(),
              this.getHostCidr(),
              8,
              "test");
      pod = podDao.persist(pod);
      podId = pod.getId();
      // create VMware cluster
      ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster");
      cluster.setHypervisorType(HypervisorType.VMware.toString());
      cluster.setClusterType(ClusterType.ExternalManaged);
      cluster.setManagedState(ManagedState.Managed);
      cluster = clusterDao.persist(cluster);
      clusterId = cluster.getId();

      // setup vcenter
      ClusterDetailsVO clusterDetailVO = new ClusterDetailsVO(cluster.getId(), "url", null);
      this.clusterDetailsDao.persist(clusterDetailVO);
      clusterDetailVO = new ClusterDetailsVO(cluster.getId(), "username", null);
      this.clusterDetailsDao.persist(clusterDetailVO);
      clusterDetailVO = new ClusterDetailsVO(cluster.getId(), "password", null);
      this.clusterDetailsDao.persist(clusterDetailVO);
      // create VMware host

      host = new HostVO(this.getHostGuid());
      host.setName("devcloud vmware host");
      host.setType(Host.Type.Routing);
      host.setPrivateIpAddress(this.getHostIp());
      host.setDataCenterId(dc.getId());
      host.setVersion("6.0.1");
      host.setAvailable(true);
      host.setSetup(true);
      host.setPodId(podId);
      host.setLastPinged(0);
      host.setResourceState(ResourceState.Enabled);
      host.setHypervisorType(HypervisorType.VMware);
      host.setClusterId(cluster.getId());

      host = hostDao.persist(host);

      imageStore = new ImageStoreVO();
      imageStore.setName(imageStoreName);
      imageStore.setDataCenterId(dcId);
      imageStore.setProviderName("CloudStack ImageStore Provider");
      imageStore.setRole(DataStoreRole.Image);
      imageStore.setUrl(this.getSecondaryStorage());
      imageStore.setUuid(UUID.randomUUID().toString());
      imageStore.setProtocol("nfs");
      imageStore = imageStoreDao.persist(imageStore);
    }

    image = new VMTemplateVO();
    image.setTemplateType(TemplateType.USER);
    image.setUrl(this.getTemplateUrl());
    image.setUniqueName(UUID.randomUUID().toString());
    image.setName(UUID.randomUUID().toString());
    image.setPublicTemplate(true);
    image.setFeatured(true);
    image.setRequiresHvm(true);
    image.setBits(64);
    image.setFormat(Storage.ImageFormat.VHD);
    image.setEnablePassword(true);
    image.setEnableSshKey(true);
    image.setGuestOSId(1);
    image.setBootable(true);
    image.setPrepopulate(true);
    image.setCrossZones(true);
    image.setExtractable(true);

    image = imageDataDao.persist(image);

    /*
     * TemplateDataStoreVO templateStore = new TemplateDataStoreVO();
     *
     * templateStore.setDataStoreId(imageStore.getId());
     * templateStore.setDownloadPercent(100);
     * templateStore.setDownloadState(Status.DOWNLOADED);
     * templateStore.setDownloadUrl(imageStore.getUrl());
     * templateStore.setInstallPath(this.getImageInstallPath());
     * templateStore.setTemplateId(image.getId());
     * templateStoreDao.persist(templateStore);
     */

    DataStore store = this.dataStoreMgr.getDataStore(imageStore.getId(), DataStoreRole.Image);
    TemplateInfo template = templateFactory.getTemplate(image.getId(), DataStoreRole.Image);
    DataObject templateOnStore = store.create(template);
    TemplateObjectTO to = new TemplateObjectTO();
    to.setPath(this.getImageInstallPath());
    CopyCmdAnswer answer = new CopyCmdAnswer(to);
    templateOnStore.processEvent(Event.CreateOnlyRequested);
    templateOnStore.processEvent(Event.OperationSuccessed, answer);
  }
  protected void loadResource(Long hostId) {
    HostVO host = hostDao.findById(hostId);
    Map<String, Object> params = new HashMap<String, Object>();
    params.put("guid", host.getGuid());
    params.put("ipaddress", host.getPrivateIpAddress());
    params.put("username", "root");
    params.put("password", "password");
    params.put("zone", String.valueOf(host.getDataCenterId()));
    params.put("pod", String.valueOf(host.getPodId()));

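    // Instantiate and configure the ServerResource matching the host's hypervisor type.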
    ServerResource resource = null;
    if (host.getHypervisorType() == HypervisorType.XenServer) {
      resource = new XcpOssResource();
      try {
        resource.configure(host.getName(), params);

      } catch (ConfigurationException e) {
        logger.debug("Failed to load resource:" + e.toString());
      }
    } else if (host.getHypervisorType() == HypervisorType.KVM) {
      resource = new LibvirtComputingResource();
      try {
        params.put("public.network.device", "cloudbr0");
        params.put("private.network.device", "cloudbr0");
        resource.configure(host.getName(), params);
      } catch (ConfigurationException e) {
        logger.debug("Failed to configure KVM resource: " + e.toString());
      }
    } else if (host.getHypervisorType() == HypervisorType.VMware) {
      ClusterVO cluster = clusterDao.findById(host.getClusterId());
      String url = clusterDetailsDao.findDetail(cluster.getId(), "url").getValue();
      URI uri;
      try {
        uri = new URI(url);
        String userName = clusterDetailsDao.findDetail(cluster.getId(), "username").getValue();
        String password = clusterDetailsDao.findDetail(cluster.getId(), "password").getValue();
        VmwareServerDiscoverer discover = new VmwareServerDiscoverer();

        Map<? extends ServerResource, Map<String, String>> resources =
            discover.find(
                host.getDataCenterId(),
                host.getPodId(),
                host.getClusterId(),
                uri,
                userName,
                password,
                null);
        for (Map.Entry<? extends ServerResource, Map<String, String>> entry :
            resources.entrySet()) {
          resource = entry.getKey();
        }
        if (resource == null) {
          throw new CloudRuntimeException("can't find resource");
        }
      } catch (DiscoveryException e) {
        logger.debug("Failed to discover VMware resource: " + e.toString());
      } catch (URISyntaxException e) {
        logger.debug("Invalid vCenter URL " + url + ": " + e.toString());
      }
    }

    hostResourcesMap.put(hostId, resource);
    HostEnvironment env = new HostEnvironment();
    SetupCommand cmd = new SetupCommand(env);
    cmd.setNeedSetup(true);

    resource.executeRequest(cmd);
  }