    @Override
    public void run() {
      try {
        s_logger.debug("HostStatsCollector is running...");

        SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.Storage.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.ConsoleProxy.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.SecondaryStorage.toString());
        ConcurrentHashMap<Long, HostStats> hostStats = new ConcurrentHashMap<Long, HostStats>();
        List<HostVO> hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          if (host.getId() != null) {
            HostStatsEntry stats = (HostStatsEntry) _agentMgr.getHostStatistics(host.getId());
            if (stats != null) {
              hostStats.put(host.getId(), stats);
            } else {
              s_logger.warn("Received invalid host stats for host: " + host.getId());
            }
          } else {
            s_logger.warn("Host: " + host.getId() + " does not exist, skipping host statistics");
          }
        }
        _hostStats = hostStats;
      } catch (Throwable t) {
        s_logger.error("Error trying to retrieve host stats", t);
      }
    }
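The collector above is just a Runnable; how it gets scheduled is not shown here. A minimal sketch of the typical wiring, using a plain ScheduledExecutorService (the class name, method, and interval parameter below are illustrative, not the actual StatsCollector fields):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class StatsCollectorBootstrap {
  private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);

  void start(Runnable hostStatsCollector, long intervalSeconds) {
    // Fixed-delay scheduling avoids overlapping runs if a collection cycle is slow.
    executor.scheduleWithFixedDelay(hostStatsCollector, 15L, intervalSeconds, TimeUnit.SECONDS);
  }
}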
  @Override
  public boolean attachCluster(DataStore store, ClusterScope scope) {
    PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
    // Check if there is host up in this cluster
    List<HostVO> allHosts =
        _resourceMgr.listAllUpAndEnabledHosts(
            Host.Type.Routing,
            primarystore.getClusterId(),
            primarystore.getPodId(),
            primarystore.getDataCenterId());
    if (allHosts.isEmpty()) {
      primaryDataStoreDao.expunge(primarystore.getId());
      throw new CloudRuntimeException(
          "No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
    }

    if (primarystore.getPoolType() == StoragePoolType.OCFS2
        && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) {
      s_logger.warn(
          "Can not create storage pool "
              + primarystore
              + " on cluster "
              + primarystore.getClusterId());
      primaryDataStoreDao.expunge(primarystore.getId());
      return false;
    }

    boolean success = false;
    for (HostVO h : allHosts) {
      success = createStoragePool(h.getId(), primarystore);
      if (success) {
        break;
      }
    }

    s_logger.debug("In createPool Adding the pool to each of the hosts");
    List<HostVO> poolHosts = new ArrayList<HostVO>();
    for (HostVO h : allHosts) {
      try {
        storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId());
        poolHosts.add(h);
      } catch (Exception e) {
        s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e);
      }
    }

    if (poolHosts.isEmpty()) {
      s_logger.warn(
          "No host can access storage pool "
              + primarystore
              + " on cluster "
              + primarystore.getClusterId());
      primaryDataStoreDao.expunge(primarystore.getId());
      throw new CloudRuntimeException("Failed to access storage pool");
    }

    dataStoreHelper.attachCluster(store);
    return true;
  }
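attachCluster() follows a best-effort pattern: connect every up host, tolerate individual failures, and only fail (and expunge the pool) if no host connects. A generic sketch of that pattern under an assumed HostConnector interface, independent of the CloudStack managers used above:

interface HostConnector {
  void connect(long hostId) throws Exception;

  static int connectHosts(java.util.List<Long> hostIds, HostConnector connector) {
    int connected = 0;
    for (Long id : hostIds) {
      try {
        connector.connect(id);
        connected++;
      } catch (Exception e) {
        // log and keep going; one unreachable host should not fail the whole attach
      }
    }
    return connected; // callers treat 0 as a hard failure and expunge the pool
  }
}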
Example #3
  @Override
  public Boolean fenceOff(VirtualMachine vm, Host host) {
    if (host.getHypervisorType() != HypervisorType.KVM
        && host.getHypervisorType() != HypervisorType.LXC) {
      s_logger.warn("Don't know how to fence non kvm hosts " + host.getHypervisorType());
      return null;
    }

    List<HostVO> hosts = _resourceMgr.listAllHostsInCluster(host.getClusterId());
    FenceCommand fence = new FenceCommand(vm, host);

    int i = 0;
    for (HostVO h : hosts) {
      if (h.getHypervisorType() == HypervisorType.KVM
          || h.getHypervisorType() == HypervisorType.LXC) {
        if (h.getStatus() != Status.Up) {
          continue;
        }

        i++;

        if (h.getId() == host.getId()) {
          continue;
        }
        FenceAnswer answer;
        try {
          answer = (FenceAnswer) _agentMgr.send(h.getId(), fence);
        } catch (AgentUnavailableException e) {
          s_logger.info("Moving on to the next host because " + h.toString() + " is unavailable");
          continue;
        } catch (OperationTimedoutException e) {
          s_logger.info("Moving on to the next host because " + h.toString() + " is unavailable");
          continue;
        }
        if (answer != null && answer.getResult()) {
          return true;
        }
      }
    }

    _alertMgr.sendAlert(
        AlertManager.AlertType.ALERT_TYPE_HOST,
        host.getDataCenterId(),
        host.getPodId(),
        "Unable to fence off host: " + host.getId(),
        "Fencing off host "
            + host.getId()
            + " did not succeed after asking "
            + i
            + " hosts. "
            + "Check Agent logs for more information.");

    s_logger.error("Unable to fence off " + vm.toString() + " on " + host.toString());

    return false;
  }
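The Boolean return is deliberately tri-state: null means this fencer does not handle the host's hypervisor type, true means a peer host confirmed the fence, and false means every reachable peer was asked and none succeeded. A hedged sketch of how a caller might walk a list of fencers (the loop is illustrative, not the actual HA manager code; FenceBuilder, VirtualMachine, and Host are the types used above):

static boolean tryFence(java.util.List<FenceBuilder> fencers, VirtualMachine vm, Host host) {
  for (FenceBuilder fencer : fencers) {
    Boolean result = fencer.fenceOff(vm, host);
    if (result == null) {
      continue; // this fencer does not handle the host's hypervisor type; ask the next one
    }
    return result; // true: a peer host confirmed the fence; false: fencing failed
  }
  return false; // no fencer could make a determination
}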
  @Override
  public void scheduleRestartForVmsOnHost(final HostVO host) {

    if (host.getType() != Host.Type.Routing) {
      return;
    }
    s_logger.warn("Scheduling restart for VMs on host " + host.getId());

    final List<VMInstanceVO> vms = _instanceDao.listByHostId(host.getId());
    final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId());

    // send an email alert that the host is down
    StringBuilder sb = null;
    if ((vms != null) && !vms.isEmpty()) {
      sb = new StringBuilder();
      sb.append("  Starting HA on the following VMs: ");
      // collect list of vm names for the alert email
      VMInstanceVO vm = vms.get(0);
      if (vm.isHaEnabled()) {
        sb.append(" " + vm.getName());
      }
      for (int i = 1; i < vms.size(); i++) {
        vm = vms.get(i);
        if (vm.isHaEnabled()) {
          sb.append(" " + vm.getName());
        }
      }
    }

    // send an email alert that the host is down, include VMs
    HostPodVO podVO = _podDao.findById(host.getPodId());
    String hostDesc =
        "name: "
            + host.getName()
            + " (id:"
            + host.getId()
            + "), availability zone: "
            + dcVO.getName()
            + ", pod: "
            + podVO.getName();

    _alertMgr.sendAlert(
        AlertManager.ALERT_TYPE_HOST,
        host.getDataCenterId(),
        host.getPodId(),
        "Host is down, " + hostDesc,
        "Host [" + hostDesc + "] is down." + ((sb != null) ? sb.toString() : ""));

    for (final VMInstanceVO vm : vms) {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug("Notifying HA Mgr of to investigate vm " + vm.getId() + "-" + vm.getName());
      }
      scheduleRestart(vm, true);
    }
  }
  @Override
  public Status isAgentAlive(HostVO agent) {
    if (s_logger.isDebugEnabled()) {
      s_logger.debug("checking if agent (" + agent.getId() + ") is alive");
    }

    if (agent.getPodId() == null) {
      return null;
    }

    List<Long> otherHosts = findHostByPod(agent.getPodId(), agent.getId());

    for (Long hostId : otherHosts) {

      if (s_logger.isDebugEnabled()) {
        s_logger.debug(
            "sending ping from ("
                + hostId
                + ") to agent's host ip address ("
                + agent.getPrivateIpAddress()
                + ")");
      }
      Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress());
      if (hostState == null) {
        continue;
      }
      if (hostState == Status.Up) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug(
              "ping from ("
                  + hostId
                  + ") to agent's host ip address ("
                  + agent.getPrivateIpAddress()
                  + ") successful, returning that agent is disconnected");
        }
        // The computing host IP is ping-able but the computing agent is down,
        // so report that the agent is disconnected.
        return Status.Disconnected;
      } else if (hostState == Status.Down) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug("returning host state: " + hostState);
        }
        return hostState;
      }
    }

    // could not reach agent, could not reach agent's host, unclear what the problem is but it'll
    // require more investigation...
    if (s_logger.isDebugEnabled()) {
      s_logger.debug(
          "could not reach agent, could not reach agent's host, returning that we don't have enough information");
    }
    return null;
  }
Example #6
  @DB
  public void handleDownloadEvent(HostVO host, VolumeVO volume, Status dnldStatus) {
    if ((dnldStatus == VMTemplateStorageResourceAssoc.Status.DOWNLOADED)
        || (dnldStatus == Status.ABANDONED)) {
      VolumeHostVO volumeHost = new VolumeHostVO(host.getId(), volume.getId());
      synchronized (_listenerVolumeMap) {
        _listenerVolumeMap.remove(volumeHost);
      }
    }

    VolumeHostVO volumeHost = _volumeHostDao.findByHostVolume(host.getId(), volume.getId());

    Transaction txn = Transaction.currentTxn();
    txn.start();

    if (dnldStatus == Status.DOWNLOADED) {

      // Create usage event
      long size = -1;
      if (volumeHost != null) {
        size = volumeHost.getSize();
        volume.setSize(size);
        this._volumeDao.update(volume.getId(), volume);
      } else {
        s_logger.warn("Failed to get size for volume" + volume.getName());
      }
      String eventType = EventTypes.EVENT_VOLUME_UPLOAD;
      if (volume.getAccountId() != Account.ACCOUNT_ID_SYSTEM) {
        UsageEventUtils.publishUsageEvent(
            eventType,
            volume.getAccountId(),
            host.getDataCenterId(),
            volume.getId(),
            volume.getName(),
            null,
            0l,
            size,
            volume.getClass().getName(),
            volume.getUuid());
      }
    } else if (dnldStatus == Status.DOWNLOAD_ERROR
        || dnldStatus == Status.ABANDONED
        || dnldStatus == Status.UNKNOWN) {
      // Decrement the volume and secondary storage space count
      _resourceLimitMgr.decrementResourceCount(
          volume.getAccountId(), com.cloud.configuration.Resource.ResourceType.volume);
      _resourceLimitMgr.recalculateResourceCount(
          volume.getAccountId(),
          volume.getDomainId(),
          com.cloud.configuration.Resource.ResourceType.secondary_storage.getOrdinal());
    }
    txn.commit();
  }
Example #7
  @Override
  public Boolean fenceOff(VMInstanceVO vm, HostVO host) {
    if (host.getHypervisorType() != HypervisorType.Ovm) {
      s_logger.debug("Don't know how to fence non Ovm hosts " + host.getHypervisorType());
      return null;
    }

    List<HostVO> hosts = _resourceMgr.listAllHostsInCluster(host.getClusterId());
    FenceCommand fence = new FenceCommand(vm, host);

    for (HostVO h : hosts) {
      if (h.getHypervisorType() != HypervisorType.Ovm) {
        continue;
      }

      if (h.getStatus() != Status.Up) {
        continue;
      }

      if (h.getId() == host.getId()) {
        continue;
      }

      FenceAnswer answer;
      try {
        answer = (FenceAnswer) _agentMgr.send(h.getId(), fence);
      } catch (AgentUnavailableException e) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable");
        }
        continue;
      } catch (OperationTimedoutException e) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable");
        }
        continue;
      }

      if (answer != null && answer.getResult()) {
        return true;
      }
    }

    if (s_logger.isDebugEnabled()) {
      s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString());
    }

    return false;
  }
  @Override
  public void shutdown(final NetworkProfile profile, final NetworkOffering offering) {
    final NetworkVO networkObject = networkDao.findById(profile.getId());
    if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Lswitch
        || networkObject.getBroadcastUri() == null) {
      s_logger.warn(
          "BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText());
      return;
    }

    final List<NiciraNvpDeviceVO> devices =
        niciraNvpDao.listByPhysicalNetwork(networkObject.getPhysicalNetworkId());
    if (devices.isEmpty()) {
      s_logger.error(
          "No NiciraNvp Controller on physical network " + networkObject.getPhysicalNetworkId());
      return;
    }
    final NiciraNvpDeviceVO niciraNvpDevice = devices.get(0);
    final HostVO niciraNvpHost = hostDao.findById(niciraNvpDevice.getHostId());

    final DeleteLogicalSwitchCommand cmd =
        new DeleteLogicalSwitchCommand(
            BroadcastDomainType.getValue(networkObject.getBroadcastUri()));
    final DeleteLogicalSwitchAnswer answer =
        (DeleteLogicalSwitchAnswer) agentMgr.easySend(niciraNvpHost.getId(), cmd);

    if (answer == null || !answer.getResult()) {
      s_logger.error("DeleteLogicalSwitchCommand failed");
    }

    super.shutdown(profile, offering);
  }
Example #9
 @Override
 public void addSystemVMTemplatesToHost(HostVO host, Map<String, TemplateInfo> templateInfos) {
   if (templateInfos == null) {
     return;
   }
   Long hostId = host.getId();
   List<VMTemplateVO> rtngTmplts = _templateDao.listAllSystemVMTemplates();
   for (VMTemplateVO tmplt : rtngTmplts) {
     TemplateInfo tmpltInfo = templateInfos.get(tmplt.getUniqueName());
     if (tmpltInfo == null) {
       continue;
     }
     VMTemplateHostVO tmpltHost = _vmTemplateHostDao.findByHostTemplate(hostId, tmplt.getId());
     if (tmpltHost == null) {
       tmpltHost =
           new VMTemplateHostVO(
               hostId,
               tmplt.getId(),
               new Date(),
               100,
               Status.DOWNLOADED,
               null,
               null,
               null,
               tmpltInfo.getInstallPath(),
               tmplt.getUrl());
       tmpltHost.setSize(tmpltInfo.getSize());
       tmpltHost.setPhysicalSize(tmpltInfo.getPhysicalSize());
       _vmTemplateHostDao.persist(tmpltHost);
     }
   }
 }
  @Override
  public boolean deleteCiscoVnmcResource(DeleteCiscoVnmcResourceCmd cmd) {
    Long vnmcResourceId = cmd.getCiscoVnmcResourceId();
    CiscoVnmcControllerVO vnmcResource = _ciscoVnmcDao.findById(vnmcResourceId);
    if (vnmcResource == null) {
      throw new InvalidParameterValueException(
          "Could not find a Cisco VNMC appliance with id " + vnmcResourceId);
    }

    // Check if there any ASA 1000v appliances
    Long physicalNetworkId = vnmcResource.getPhysicalNetworkId();
    PhysicalNetworkVO physicalNetwork = _physicalNetworkDao.findById(physicalNetworkId);
    if (physicalNetwork != null) {
      List<CiscoAsa1000vDeviceVO> responseList =
          _ciscoAsa1000vDao.listByPhysicalNetwork(physicalNetworkId);
      if (responseList.size() > 0) {
        throw new CloudRuntimeException(
            "Cisco VNMC appliance with id "
                + vnmcResourceId
                + " cannot be deleted as there Cisco ASA 1000v appliances using it");
      }
    }

    HostVO vnmcHost = _hostDao.findById(vnmcResource.getHostId());
    Long hostId = vnmcHost.getId();
    vnmcHost.setResourceState(ResourceState.Maintenance);
    _hostDao.update(hostId, vnmcHost);
    _resourceMgr.deleteHost(hostId, false, false);
    _ciscoVnmcDao.remove(vnmcResourceId);

    return true;
  }
Example #11
  @DB
  public void handleDownloadEvent(HostVO host, VMTemplateVO template, Status dnldStatus) {
    if ((dnldStatus == VMTemplateStorageResourceAssoc.Status.DOWNLOADED)
        || (dnldStatus == Status.ABANDONED)) {
      VMTemplateHostVO vmTemplateHost = new VMTemplateHostVO(host.getId(), template.getId());
      synchronized (_listenerMap) {
        _listenerMap.remove(vmTemplateHost);
      }
    }

    VMTemplateHostVO vmTemplateHost =
        _vmTemplateHostDao.findByHostTemplate(host.getId(), template.getId());

    Transaction txn = Transaction.currentTxn();
    txn.start();

    if (dnldStatus == Status.DOWNLOADED) {
      long size = -1;
      if (vmTemplateHost != null) {
        size = vmTemplateHost.getPhysicalSize();
        template.setSize(size);
        this._templateDao.update(template.getId(), template);
      } else {
        s_logger.warn("Failed to get size for template" + template.getName());
      }
      String eventType = EventTypes.EVENT_TEMPLATE_CREATE;
      if ((template.getFormat()).equals(ImageFormat.ISO)) {
        eventType = EventTypes.EVENT_ISO_CREATE;
      }
      if (template.getAccountId() != Account.ACCOUNT_ID_SYSTEM) {
        UsageEventUtils.publishUsageEvent(
            eventType,
            template.getAccountId(),
            host.getDataCenterId(),
            template.getId(),
            template.getName(),
            null,
            template.getSourceTemplateId(),
            size,
            template.getClass().getName(),
            template.getUuid());
      }
    }
    txn.commit();
  }
Example #12
  @Override
  public void handleSysTemplateDownload(HostVO host) {
    List<HypervisorType> hypers =
        _resourceMgr.listAvailHypervisorInZone(host.getId(), host.getDataCenterId());
    HypervisorType hostHyper = host.getHypervisorType();
    if (hypers.contains(hostHyper)) {
      return;
    }

    Set<VMTemplateVO> toBeDownloaded = new HashSet<VMTemplateVO>();
    List<HostVO> ssHosts =
        _resourceMgr.listAllUpAndEnabledHostsInOneZoneByType(
            Host.Type.SecondaryStorage, host.getDataCenterId());
    if (ssHosts == null || ssHosts.isEmpty()) {
      return;
    }
    /*Download all the templates in zone with the same hypervisortype*/
    for (HostVO ssHost : ssHosts) {
      List<VMTemplateVO> rtngTmplts = _templateDao.listAllSystemVMTemplates();
      List<VMTemplateVO> defaultBuiltin = _templateDao.listDefaultBuiltinTemplates();

      for (VMTemplateVO rtngTmplt : rtngTmplts) {
        if (rtngTmplt.getHypervisorType() == hostHyper) {
          toBeDownloaded.add(rtngTmplt);
        }
      }

      for (VMTemplateVO builtinTmplt : defaultBuiltin) {
        if (builtinTmplt.getHypervisorType() == hostHyper) {
          toBeDownloaded.add(builtinTmplt);
        }
      }

      for (VMTemplateVO template : toBeDownloaded) {
        VMTemplateHostVO tmpltHost =
            _vmTemplateHostDao.findByHostTemplate(ssHost.getId(), template.getId());
        if (tmpltHost == null || tmpltHost.getDownloadState() != Status.DOWNLOADED) {
          downloadTemplateToStorage(template, ssHost);
        }
      }
    }
  }
 @Override
 public boolean agentStatusTransitTo(HostVO host, Event e, long msId) {
   try {
     return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao);
   } catch (NoTransitionException e1) {
     e1.printStackTrace(); // swallow the failed transition and fall through to return true
   }
   return true;
 }
 @Override
 public void processConnect(HostVO agent, StartupCommand cmd, boolean forRebalance) {
   if ((cmd instanceof StartupStorageCommand)) {
     StartupStorageCommand scmd = (StartupStorageCommand) cmd;
     if (scmd.getResourceType() == Storage.StorageResourceType.SECONDARY_STORAGE) {
       _ssVmMgr.generateSetupCommand(agent.getId());
       return;
     }
   } else if (cmd instanceof StartupSecondaryStorageCommand) {
     if (s_logger.isInfoEnabled()) {
       s_logger.info("Received a host startup notification " + cmd);
     }
     _ssVmMgr.onAgentConnect(agent.getDataCenterId(), cmd);
     _ssVmMgr.generateSetupCommand(agent.getId());
     _ssVmMgr.generateFirewallConfiguration(agent.getId());
     _ssVmMgr.generateVMSetupCommand(agent.getId());
     return;
   }
   return;
 }
Example #15
  private Map<Long, TemplateInfo> listVolume(HostVO ssHost) {
    ListVolumeCommand cmd = new ListVolumeCommand(ssHost.getStorageUrl());
    Answer answer = _agentMgr.sendToSecStorage(ssHost, cmd);
    if (answer != null && answer.getResult()) {
      ListVolumeAnswer tanswer = (ListVolumeAnswer) answer;
      return tanswer.getTemplateInfo();
    } else {
      if (s_logger.isDebugEnabled()) {
        s_logger.debug("Can not list volumes for secondary storage host " + ssHost.getId());
      }
    }

    return null;
  }
Example #16
 @Override
 public boolean checkIfHostReachMaxGuestLimit(HostVO host) {
   Long vmCount = _vmDao.countRunningByHostId(host.getId());
   HypervisorType hypervisorType = host.getHypervisorType();
   String hypervisorVersion = host.getHypervisorVersion();
   Long maxGuestLimit =
       _hypervisorCapabilitiesDao.getMaxGuestsLimit(hypervisorType, hypervisorVersion);
   if (vmCount.longValue() >= maxGuestLimit.longValue()) {
     if (s_logger.isDebugEnabled()) {
       s_logger.debug(
           "Host name: "
               + host.getName()
               + ", hostId: "
               + host.getId()
               + " already reached max Running VMs(count includes system VMs), limit is: "
               + maxGuestLimit
               + ",Running VM counts is: "
               + vmCount.longValue());
     }
     return true;
   }
   return false;
 }
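A hypothetical consumer of this check (not the actual host allocator code): hosts that already run their hypervisor's maximum number of guests are skipped as placement candidates.

private List<HostVO> filterByGuestLimit(List<HostVO> hosts) {
  List<HostVO> candidates = new ArrayList<HostVO>();
  for (HostVO h : hosts) {
    if (checkIfHostReachMaxGuestLimit(h)) {
      continue; // host already runs as many guests as its hypervisor/version allows
    }
    candidates.add(h);
  }
  return candidates;
}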
 @Override
 public void processConnect(HostVO agent, StartupCommand cmd, boolean forRebalance) {
   if (cmd instanceof StartupTrafficMonitorCommand) {
     long agentId = agent.getId();
     s_logger.debug("Sending RecurringNetworkUsageCommand to " + agentId);
     RecurringNetworkUsageCommand watch = new RecurringNetworkUsageCommand(_interval);
     try {
       _agentMgr.send(agentId, new Commands(watch), this);
     } catch (AgentUnavailableException e) {
       s_logger.debug("Can not process connect for host " + agentId, e);
     }
   }
   return;
 }
  public boolean isZoneReady(Map<Long, ZoneHostInfo> zoneHostInfoMap, long dataCenterId) {
    ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId);
    if (zoneHostInfo != null
        && (zoneHostInfo.getFlags() & RunningHostInfoAgregator.ZoneHostInfo.ROUTING_HOST_MASK)
            != 0) {
      VMTemplateVO template = _templateDao.findSystemVMTemplate(dataCenterId);
      HostVO secHost = _hostDao.findSecondaryStorageHost(dataCenterId);
      if (secHost == null) {
        if (s_logger.isDebugEnabled()) {
          s_logger.debug(
              "No secondary storage available in zone "
                  + dataCenterId
                  + ", wait until it is ready to launch secondary storage vm");
        }
        return false;
      }

      boolean templateReady = false;
      if (template != null) {
        VMTemplateHostVO templateHostRef =
            _vmTemplateHostDao.findByHostTemplate(secHost.getId(), template.getId());
        templateReady =
            (templateHostRef != null) && (templateHostRef.getDownloadState() == Status.DOWNLOADED);
      }

      if (templateReady) {

        List<Pair<Long, Integer>> l =
            _storagePoolHostDao.getDatacenterStoragePoolHostInfo(dataCenterId, !_useLocalStorage);
        if (l != null && l.size() > 0 && l.get(0).second().intValue() > 0) {

          return true;
        } else {
          if (s_logger.isDebugEnabled()) {
            s_logger.debug(
                "Primary storage is not ready, wait until it is ready to launch secondary storage vm");
          }
        }
      } else {
        if (s_logger.isTraceEnabled()) {
          s_logger.trace("Zone host is ready, but secondary storage vm template is not ready");
        }
      }
    }
    return false;
  }
  @Override
  public boolean shutdown(Network network, ReservationContext context, boolean cleanup)
      throws ConcurrentOperationException, ResourceUnavailableException {

    unassignAsa1000vFromNetwork(network);

    String vlan = network.getBroadcastUri().getHost();
    long vlanId = Long.parseLong(vlan);
    List<CiscoVnmcControllerVO> devices =
        _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId());
    if (!devices.isEmpty()) {
      CiscoVnmcControllerVO ciscoVnmcDevice = devices.get(0);
      HostVO ciscoVnmcHost = _hostDao.findById(ciscoVnmcDevice.getHostId());
      cleanupLogicalEdgeFirewall(vlanId, ciscoVnmcHost.getId());
    }

    return true;
  }
Example #20
  @Override
  public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage)
      throws UnableDeleteHostException {
    if (host.getType() != Host.Type.Routing || host.getHypervisorType() != HypervisorType.KVM) {
      return null;
    }

    _resourceMgr.deleteRoutingHost(host, isForced, isForceDeleteStorage);
    try {
      ShutdownCommand cmd = new ShutdownCommand(ShutdownCommand.DeleteHost, null);
      _agentMgr.send(host.getId(), cmd);
    } catch (AgentUnavailableException e) {
      s_logger.warn("Sending ShutdownCommand failed: ", e);
    } catch (OperationTimedoutException e) {
      s_logger.warn("Sending ShutdownCommand failed: ", e);
    }

    return new DeleteHostAnswer(true);
  }
Example #21
 @Override
 protected void injectMockito() {
   if (host == null) {
     return;
   }
   List<HostVO> results = new ArrayList<HostVO>();
   results.add(host);
   Mockito.when(hostDao.listAll()).thenReturn(results);
   Mockito.when(hostDao.findById(Matchers.anyLong())).thenReturn(host);
   Mockito.when(hostDao.findHypervisorHostInCluster(Matchers.anyLong())).thenReturn(results);
   List<EndPoint> eps = new ArrayList<EndPoint>();
   eps.add(
       RemoteHostEndPoint.getHypervisorHostEndPoint(
           host.getId(), host.getPrivateIpAddress(), host.getPublicIpAddress()));
   Mockito.when(selector.selectAll(Matchers.any(DataStore.class))).thenReturn(eps);
   Mockito.when(selector.select(Matchers.any(DataObject.class))).thenReturn(eps.get(0));
   Mockito.when(selector.select(Matchers.any(DataObject.class), Matchers.any(DataObject.class)))
       .thenReturn(eps.get(0));
 }
Example #22
 @Override
 public boolean downloadTemplateToStorage(VMTemplateVO template, Long zoneId) {
   List<DataCenterVO> dcs = new ArrayList<DataCenterVO>();
   if (zoneId == null) {
     dcs.addAll(_dcDao.listAll());
   } else {
     dcs.add(_dcDao.findById(zoneId));
   }
   long templateId = template.getId();
   boolean isPublic = template.isFeatured() || template.isPublicTemplate();
   for (DataCenterVO dc : dcs) {
     List<HostVO> ssHosts = _ssvmMgr.listAllTypesSecondaryStorageHostsInOneZone(dc.getId());
     for (HostVO ssHost : ssHosts) {
       if (isTemplateUpdateable(templateId, ssHost.getId())) {
         initiateTemplateDownload(templateId, ssHost);
         if (!isPublic) {
           break;
         }
       }
     }
   }
   return true;
 }
 @Override
 public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
   List<HostVO> hosts =
       _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(
           hypervisorType, scope.getScopeId());
   s_logger.debug("In createPool. Attaching the pool to each of the hosts.");
   List<HostVO> poolHosts = new ArrayList<HostVO>();
   for (HostVO host : hosts) {
     try {
       this.storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
       poolHosts.add(host);
     } catch (Exception e) {
       s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
     }
   }
   if (poolHosts.isEmpty()) {
     s_logger.warn("No host can access storage pool " + dataStore + " in this zone.");
     primaryDataStoreDao.expunge(dataStore.getId());
     throw new CloudRuntimeException(
         "Failed to create storage pool as it is not accessible to hosts.");
   }
   this.dataStoreHelper.attachZone(dataStore, hypervisorType);
   return true;
 }
Example #24
  // TODO: Get rid of this case once we've determined that the capacity listeners above have all
  // the changes.
  // Create capacity entries if none exist for this server.
  private void createCapacityEntry(StartupCommand startup, HostVO server) {
    SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
    capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId());
    capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId());
    capacitySC.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());

    if (startup instanceof StartupRoutingCommand) {
      SearchCriteria<CapacityVO> capacityCPU = _capacityDao.createSearchCriteria();
      capacityCPU.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId());
      capacityCPU.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId());
      capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());
      capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU);
      List<CapacityVO> capacityVOCpus = _capacityDao.search(capacityCPU, null);
      Float cpuovercommitratio =
          Float.parseFloat(
              _clusterDetailsDao
                  .findDetail(server.getClusterId(), "cpuOvercommitRatio")
                  .getValue());
      Float memoryOvercommitRatio =
          Float.parseFloat(
              _clusterDetailsDao
                  .findDetail(server.getClusterId(), "memoryOvercommitRatio")
                  .getValue());

      if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) {
        CapacityVO CapacityVOCpu = capacityVOCpus.get(0);
        long newTotalCpu =
            (long)
                (server.getCpus().longValue() * server.getSpeed().longValue() * cpuovercommitratio);
        if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu)
            || ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity())
                <= newTotalCpu)) {
          CapacityVOCpu.setTotalCapacity(newTotalCpu);
        } else if ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()
                > newTotalCpu)
            && (CapacityVOCpu.getUsedCapacity() < newTotalCpu)) {
          CapacityVOCpu.setReservedCapacity(0);
          CapacityVOCpu.setTotalCapacity(newTotalCpu);
        } else {
          s_logger.debug(
              "What? new cpu is :"
                  + newTotalCpu
                  + ", old one is "
                  + CapacityVOCpu.getUsedCapacity()
                  + ","
                  + CapacityVOCpu.getReservedCapacity()
                  + ","
                  + CapacityVOCpu.getTotalCapacity());
        }
        _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu);
      } else {
        CapacityVO capacity =
            new CapacityVO(
                server.getId(),
                server.getDataCenterId(),
                server.getPodId(),
                server.getClusterId(),
                0L,
                (long) (server.getCpus().longValue() * server.getSpeed().longValue()),
                CapacityVO.CAPACITY_TYPE_CPU);
        _capacityDao.persist(capacity);
      }

      SearchCriteria<CapacityVO> capacityMem = _capacityDao.createSearchCriteria();
      capacityMem.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId());
      capacityMem.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId());
      capacityMem.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId());
      capacityMem.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_MEMORY);
      List<CapacityVO> capacityVOMems = _capacityDao.search(capacityMem, null);

      if (capacityVOMems != null && !capacityVOMems.isEmpty()) {
        CapacityVO CapacityVOMem = capacityVOMems.get(0);
        long newTotalMem = (long) ((server.getTotalMemory()) * memoryOvercommitRatio);
        if (CapacityVOMem.getTotalCapacity() <= newTotalMem
            || (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity()
                <= newTotalMem)) {
          CapacityVOMem.setTotalCapacity(newTotalMem);
        } else if (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity()
                > newTotalMem
            && CapacityVOMem.getUsedCapacity() < newTotalMem) {
          CapacityVOMem.setReservedCapacity(0);
          CapacityVOMem.setTotalCapacity(newTotalMem);
        } else {
          s_logger.debug(
              "What? new cpu is :"
                  + newTotalMem
                  + ", old one is "
                  + CapacityVOMem.getUsedCapacity()
                  + ","
                  + CapacityVOMem.getReservedCapacity()
                  + ","
                  + CapacityVOMem.getTotalCapacity());
        }
        _capacityDao.update(CapacityVOMem.getId(), CapacityVOMem);
      } else {
        CapacityVO capacity =
            new CapacityVO(
                server.getId(),
                server.getDataCenterId(),
                server.getPodId(),
                server.getClusterId(),
                0L,
                server.getTotalMemory(),
                CapacityVO.CAPACITY_TYPE_MEMORY);
        _capacityDao.persist(capacity);
      }
    }
  }
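The overcommit arithmetic above is straight multiplication; a worked example with illustrative numbers (not taken from the source):

  // Hypothetical host: 16 cores at 2000 MHz, 64 GiB RAM, with the cluster details
  // cpuOvercommitRatio = 4.0 and memoryOvercommitRatio = 1.5.
  long cores = 16, speedMhz = 2000;
  float cpuOvercommitRatio = 4.0f;
  long newTotalCpu = (long) (cores * speedMhz * cpuOvercommitRatio); // 128000 MHz advertised

  long totalMemoryBytes = 64L * 1024 * 1024 * 1024;
  float memoryOvercommitRatio = 1.5f;
  long newTotalMem = (long) (totalMemoryBytes * memoryOvercommitRatio); // 96 GiB advertised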
Example #25
  @DB
  @Override
  public void updateCapacityForHost(HostVO host) {
    // prepare the service offerings
    List<ServiceOfferingVO> offerings = _offeringsDao.listAllIncludingRemoved();
    Map<Long, ServiceOfferingVO> offeringsMap = new HashMap<Long, ServiceOfferingVO>();
    for (ServiceOfferingVO offering : offerings) {
      offeringsMap.put(offering.getId(), offering);
    }

    long usedCpu = 0;
    long usedMemory = 0;
    long reservedMemory = 0;
    long reservedCpu = 0;

    List<VMInstanceVO> vms = _vmDao.listUpByHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
      s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId());
    }

    for (VMInstanceVO vm : vms) {
      ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
      usedMemory += so.getRamSize() * 1024L * 1024L;
      usedCpu += so.getCpu() * so.getSpeed();
    }

    List<VMInstanceVO> vmsByLastHostId = _vmDao.listByLastHostId(host.getId());
    if (s_logger.isDebugEnabled()) {
      s_logger.debug(
          "Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId());
    }
    for (VMInstanceVO vm : vmsByLastHostId) {
      long secondsSinceLastUpdate =
          (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000;
      if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) {
        ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
        reservedMemory += so.getRamSize() * 1024L * 1024L;
        reservedCpu += so.getCpu() * so.getSpeed();
      }
    }

    CapacityVO cpuCap = _capacityDao.findByHostIdType(host.getId(), CapacityVO.CAPACITY_TYPE_CPU);
    CapacityVO memCap =
        _capacityDao.findByHostIdType(host.getId(), CapacityVO.CAPACITY_TYPE_MEMORY);

    if (cpuCap != null && memCap != null) {
      if (cpuCap.getUsedCapacity() == usedCpu && cpuCap.getReservedCapacity() == reservedCpu) {
        s_logger.debug(
            "No need to calibrate cpu capacity, host:"
                + host.getId()
                + " usedCpu: "
                + cpuCap.getUsedCapacity()
                + " reservedCpu: "
                + cpuCap.getReservedCapacity());
      } else if (cpuCap.getReservedCapacity() != reservedCpu) {
        s_logger.debug(
            "Calibrate reserved cpu for host: "
                + host.getId()
                + " old reservedCpu:"
                + cpuCap.getReservedCapacity()
                + " new reservedCpu:"
                + reservedCpu);
        cpuCap.setReservedCapacity(reservedCpu);
      } else if (cpuCap.getUsedCapacity() != usedCpu) {
        s_logger.debug(
            "Calibrate used cpu for host: "
                + host.getId()
                + " old usedCpu:"
                + cpuCap.getUsedCapacity()
                + " new usedCpu:"
                + usedCpu);
        cpuCap.setUsedCapacity(usedCpu);
      }

      if (memCap.getUsedCapacity() == usedMemory
          && memCap.getReservedCapacity() == reservedMemory) {
        s_logger.debug(
            "No need to calibrate memory capacity, host:"
                + host.getId()
                + " usedMem: "
                + memCap.getUsedCapacity()
                + " reservedMem: "
                + memCap.getReservedCapacity());
      } else if (memCap.getReservedCapacity() != reservedMemory) {
        s_logger.debug(
            "Calibrate reserved memory for host: "
                + host.getId()
                + " old reservedMem:"
                + memCap.getReservedCapacity()
                + " new reservedMem:"
                + reservedMemory);
        memCap.setReservedCapacity(reservedMemory);
      } else if (memCap.getUsedCapacity() != usedMemory) {
        /*
         * Didn't calibrate for used memory, because VMs can be in state(starting/migrating) that I don't know on which host they are
         * allocated
         */
        s_logger.debug(
            "Calibrate used memory for host: "
                + host.getId()
                + " old usedMem: "
                + memCap.getUsedCapacity()
                + " new usedMem: "
                + usedMemory);
        memCap.setUsedCapacity(usedMemory);
      }

      try {
        _capacityDao.update(cpuCap.getId(), cpuCap);
        _capacityDao.update(memCap.getId(), memCap);
      } catch (Exception e) {
        s_logger.error(
            "Caught exception while updating cpu/memory capacity for the host " + host.getId(), e);
      }
    } else {
      Transaction txn = Transaction.currentTxn();
      CapacityState capacityState =
          _configMgr.findClusterAllocationState(ApiDBUtils.findClusterById(host.getClusterId()))
                  == AllocationState.Disabled
              ? CapacityState.Disabled
              : CapacityState.Enabled;
      txn.start();
      CapacityVO capacity =
          new CapacityVO(
              host.getId(),
              host.getDataCenterId(),
              host.getPodId(),
              host.getClusterId(),
              usedMemory,
              host.getTotalMemory(),
              CapacityVO.CAPACITY_TYPE_MEMORY);
      capacity.setReservedCapacity(reservedMemory);
      capacity.setCapacityState(capacityState);
      _capacityDao.persist(capacity);

      capacity =
          new CapacityVO(
              host.getId(),
              host.getDataCenterId(),
              host.getPodId(),
              host.getClusterId(),
              usedCpu,
              (long) (host.getCpus().longValue() * host.getSpeed().longValue()),
              CapacityVO.CAPACITY_TYPE_CPU);
      capacity.setReservedCapacity(reservedCpu);
      capacity.setCapacityState(capacityState);
      _capacityDao.persist(capacity);
      txn.commit();
    }
  }
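Whether a departed VM still reserves capacity above hinges on a release window: VMs whose last update is newer than _vmCapacityReleaseInterval keep counting against reserved CPU and RAM. That check in isolation (a sketch; the method and parameter names are hypothetical):

  static boolean stillReservesCapacity(java.util.Date lastUpdate, long releaseIntervalSeconds) {
    long secondsSinceLastUpdate = (System.currentTimeMillis() - lastUpdate.getTime()) / 1000;
    return secondsSinceLastUpdate < releaseIntervalSeconds;
  }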
  @Override
  @DB
  public boolean delete(TemplateProfile profile) {
    boolean success = true;

    VMTemplateVO template = profile.getTemplate();
    Long zoneId = profile.getZoneId();
    Long templateId = template.getId();

    String zoneName;
    List<HostVO> secondaryStorageHosts;
    if (!template.isCrossZones() && zoneId != null) {
      DataCenterVO zone = _dcDao.findById(zoneId);
      zoneName = zone.getName();
      secondaryStorageHosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId);
    } else {
      zoneName = "(all zones)";
      secondaryStorageHosts = _ssvmMgr.listSecondaryStorageHostsInAllZones();
    }

    s_logger.debug(
        "Attempting to mark template host refs for template: "
            + template.getName()
            + " as destroyed in zone: "
            + zoneName);

    // Make sure the template is downloaded to all the necessary secondary storage hosts
    for (HostVO secondaryStorageHost : secondaryStorageHosts) {
      long hostId = secondaryStorageHost.getId();
      List<VMTemplateHostVO> templateHostVOs = _tmpltHostDao.listByHostTemplate(hostId, templateId);
      for (VMTemplateHostVO templateHostVO : templateHostVOs) {
        if (templateHostVO.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
          String errorMsg = "Please specify a template that is not currently being downloaded.";
          s_logger.debug(
              "Template: "
                  + template.getName()
                  + " is currently being downloaded to secondary storage host: "
                  + secondaryStorageHost.getName()
                  + "; cant' delete it.");
          throw new CloudRuntimeException(errorMsg);
        }
      }
    }

    Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId());
    String eventType = "";

    if (template.getFormat().equals(ImageFormat.ISO)) {
      eventType = EventTypes.EVENT_ISO_DELETE;
    } else {
      eventType = EventTypes.EVENT_TEMPLATE_DELETE;
    }

    // Iterate through all necessary secondary storage hosts and mark the template on each host as
    // destroyed
    for (HostVO secondaryStorageHost : secondaryStorageHosts) {
      long hostId = secondaryStorageHost.getId();
      long sZoneId = secondaryStorageHost.getDataCenterId();
      List<VMTemplateHostVO> templateHostVOs = _tmpltHostDao.listByHostTemplate(hostId, templateId);
      for (VMTemplateHostVO templateHostVO : templateHostVOs) {
        VMTemplateHostVO lock = _tmpltHostDao.acquireInLockTable(templateHostVO.getId());
        try {
          if (lock == null) {
            s_logger.debug(
                "Failed to acquire lock when deleting templateHostVO with ID: "
                    + templateHostVO.getId());
            success = false;
            break;
          }
          UsageEventVO usageEvent =
              new UsageEventVO(eventType, account.getId(), sZoneId, templateId, null);
          _usageEventDao.persist(usageEvent);
          templateHostVO.setDestroyed(true);
          _tmpltHostDao.update(templateHostVO.getId(), templateHostVO);
          String installPath = templateHostVO.getInstallPath();
          if (installPath != null) {
            Answer answer =
                _agentMgr.sendToSecStorage(
                    secondaryStorageHost,
                    new DeleteTemplateCommand(secondaryStorageHost.getStorageUrl(), installPath));

            if (answer == null || !answer.getResult()) {
              s_logger.debug(
                  "Failed to delete "
                      + templateHostVO
                      + " due to "
                      + ((answer == null) ? "answer is null" : answer.getDetails()));
            } else {
              _tmpltHostDao.remove(templateHostVO.getId());
              s_logger.debug("Deleted template at: " + installPath);
            }
          } else {
            _tmpltHostDao.remove(templateHostVO.getId());
          }
          VMTemplateZoneVO templateZone = _tmpltZoneDao.findByZoneTemplate(sZoneId, templateId);

          if (templateZone != null) {
            _tmpltZoneDao.remove(templateZone.getId());
          }
        } finally {
          if (lock != null) {
            _tmpltHostDao.releaseFromLockTable(lock.getId());
          }
        }
      }

      if (!success) {
        break;
      }
    }

    s_logger.debug(
        "Successfully marked template host refs for template: "
            + template.getName()
            + " as destroyed in zone: "
            + zoneName);

    // If there are no more non-destroyed template host entries for this template, delete it
    if (success && (_tmpltHostDao.listByTemplateId(templateId).size() == 0)) {
      long accountId = template.getAccountId();

      VMTemplateVO lock = _tmpltDao.acquireInLockTable(templateId);

      try {
        if (lock == null) {
          s_logger.debug("Failed to acquire lock when deleting template with ID: " + templateId);
          success = false;
        } else if (_tmpltDao.remove(templateId)) {
          // Decrement the number of templates
          _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.template);
        }

      } finally {
        if (lock != null) {
          _tmpltDao.releaseFromLockTable(lock.getId());
        }
      }

      s_logger.debug(
          "Removed template: "
              + template.getName()
              + " because all of its template host refs were marked as destroyed.");
    }

    return success;
  }
    @Override
    public void run() {
      try {
        SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.Storage.toString());

        ConcurrentHashMap<Long, StorageStats> storageStats =
            new ConcurrentHashMap<Long, StorageStats>();
        List<HostVO> hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
          Answer answer = _agentMgr.easySend(host.getId(), command);
          if (answer != null && answer.getResult()) {
            storageStats.put(host.getId(), (StorageStats) answer);
          }
        }

        sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.SecondaryStorage.toString());

        hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
          Answer answer = _agentMgr.easySend(host.getId(), command);
          if (answer != null && answer.getResult()) {
            storageStats.put(host.getId(), (StorageStats) answer);
          }
        }
        _storageStats = storageStats;

        ConcurrentHashMap<Long, StorageStats> storagePoolStats =
            new ConcurrentHashMap<Long, StorageStats>();

        List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
        for (StoragePoolVO pool : storagePools) {
          GetStorageStatsCommand command =
              new GetStorageStatsCommand(pool.getUuid(), pool.getPoolType(), pool.getPath());
          Answer answer = _storageManager.sendToPool(pool, command);
          if (answer != null && answer.getResult()) {
            storagePoolStats.put(pool.getId(), (StorageStats) answer);
          }
        }
        _storagePoolStats = storagePoolStats;

        // a list to store the new capacity entries that will be committed once everything is
        // calculated
        List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();

        // Updating the storage entries and creating new ones if they dont exist.
        Transaction txn = Transaction.open(Transaction.CLOUD_DB);
        try {
          if (s_logger.isTraceEnabled()) {
            s_logger.trace("recalculating system storage capacity");
          }
          txn.start();
          for (Long hostId : storageStats.keySet()) {
            StorageStats stats = storageStats.get(hostId);
            short capacityType = -1;
            HostVO host = _hostDao.findById(hostId);
            host.setTotalSize(stats.getCapacityBytes());
            _hostDao.update(host.getId(), host);

            SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
            capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
            capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, host.getDataCenterId());

            if (Host.Type.SecondaryStorage.equals(host.getType())) {
              capacityType = CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE;
            } else if (Host.Type.Storage.equals(host.getType())) {
              capacityType = CapacityVO.CAPACITY_TYPE_STORAGE;
            }
            if (-1 != capacityType) {
              capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
              List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
              if (capacities.size() == 0) { // Create a new one
                CapacityVO capacity =
                    new CapacityVO(
                        host.getId(),
                        host.getDataCenterId(),
                        host.getPodId(),
                        stats.getByteUsed(),
                        stats.getCapacityBytes(),
                        capacityType);
                _capacityDao.persist(capacity);
              } else { // Update if it already exists.
                CapacityVO capacity = capacities.get(0);
                capacity.setUsedCapacity(stats.getByteUsed());
                capacity.setTotalCapacity(stats.getCapacityBytes());
                _capacityDao.update(capacity.getId(), capacity);
              }
            }
          } // End of for
          txn.commit();
        } catch (Exception ex) {
          txn.rollback();
          s_logger.error("Unable to start transaction for storage capacity update");
        } finally {
          txn.close();
        }

        for (Long poolId : storagePoolStats.keySet()) {
          StorageStats stats = storagePoolStats.get(poolId);
          StoragePoolVO pool = _storagePoolDao.findById(poolId);

          if (pool == null) {
            continue;
          }

          pool.setCapacityBytes(stats.getCapacityBytes());
          long available = stats.getCapacityBytes() - stats.getByteUsed();
          if (available < 0) {
            available = 0;
          }
          pool.setAvailableBytes(available);
          _storagePoolDao.update(pool.getId(), pool);

          _storageManager.createCapacityEntry(pool, 0L);
        }
      } catch (Throwable t) {
        s_logger.error("Error trying to retrieve storage stats", t);
      }
    }
    @Override
    public void run() {
      try {
        s_logger.debug("VmStatsCollector is running...");

        SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.Storage.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.ConsoleProxy.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.SecondaryStorage.toString());
        List<HostVO> hosts = _hostDao.search(sc, null);

        for (HostVO host : hosts) {
          List<UserVmVO> vms = _userVmDao.listRunningByHostId(host.getId());
          List<Long> vmIds = new ArrayList<Long>();

          for (UserVmVO vm : vms) {
            vmIds.add(vm.getId());
          }

          try {
            HashMap<Long, VmStatsEntry> vmStatsById =
                _userVmMgr.getVirtualMachineStatistics(host.getId(), host.getName(), vmIds);

            if (vmStatsById != null) {
              VmStatsEntry statsInMemory = null;

              Set<Long> vmIdSet = vmStatsById.keySet();
              for (Long vmId : vmIdSet) {
                VmStatsEntry statsForCurrentIteration = vmStatsById.get(vmId);
                statsInMemory = (VmStatsEntry) _VmStats.get(vmId);

                if (statsInMemory == null) {
                  // no stats exist for this vm, directly persist
                  _VmStats.put(vmId, statsForCurrentIteration);
                } else {
                  // update each field
                  statsInMemory.setCPUUtilization(statsForCurrentIteration.getCPUUtilization());
                  statsInMemory.setNumCPUs(statsForCurrentIteration.getNumCPUs());
                  statsInMemory.setNetworkReadKBs(
                      statsInMemory.getNetworkReadKBs()
                          + statsForCurrentIteration.getNetworkReadKBs());
                  statsInMemory.setNetworkWriteKBs(
                      statsInMemory.getNetworkWriteKBs()
                          + statsForCurrentIteration.getNetworkWriteKBs());

                  _VmStats.put(vmId, statsInMemory);
                }
              }
            }

          } catch (Exception e) {
            s_logger.debug("Failed to get VM stats for host with ID: " + host.getId());
            continue;
          }
        }

      } catch (Throwable t) {
        s_logger.error("Error trying to retrieve VM stats", t);
      }
    }
  @Override
  public final void processConnect(
      final Host agent, final StartupCommand cmd, final boolean forRebalance)
      throws ConnectionException {
    // Limit the commands we can process
    if (!(cmd instanceof StartupRoutingCommand)) {
      return;
    }

    StartupRoutingCommand startup = (StartupRoutingCommand) cmd;

    // assert
    if (startup.getHypervisorType() != HypervisorType.Hyperv) {
      s_logger.debug("Not Hyper-V hypervisor, so moving on.");
      return;
    }

    long agentId = agent.getId();
    HostVO host = _hostDao.findById(agentId);

    // Our Hyper-V machines are not participating in pools, and the pool id
    // we provide them is not persisted.
    // This means the pool id can vary.
    ClusterVO cluster = _clusterDao.findById(host.getClusterId());
    if (cluster.getGuid() == null) {
      cluster.setGuid(startup.getPool());
      _clusterDao.update(cluster.getId(), cluster);
    }

    if (s_logger.isDebugEnabled()) {
      s_logger.debug("Setting up host " + agentId);
    }

    HostEnvironment env = new HostEnvironment();
    SetupCommand setup = new SetupCommand(env);
    if (!host.isSetup()) {
      setup.setNeedSetup(true);
    }

    try {
      SetupAnswer answer = (SetupAnswer) _agentMgr.send(agentId, setup);
      if (answer != null && answer.getResult()) {
        host.setSetup(true);
        // TODO: clean up magic numbers below
        host.setLastPinged((System.currentTimeMillis() >> 10) - 5 * 60);
        _hostDao.update(host.getId(), host);
        if (answer.needReconnect()) {
          throw new ConnectionException(false, "Reinitialize agent after setup.");
        }
        return;
      } else {
        String reason = (answer != null) ? answer.getDetails() : null;
        if (reason == null) {
          reason = "details were null";
        }
        s_logger.warn("Unable to setup agent " + agentId + " due to " + reason);
      }
      // Error handling borrowed from XcpServerDiscoverer, may need to be
      // updated.
    } catch (AgentUnavailableException e) {
      s_logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e);
    } catch (OperationTimedoutException e) {
      s_logger.warn("Unable to setup agent " + agentId + " because it timed out", e);
    }
    throw new ConnectionException(true, "Reinitialize agent after setup.");
  }
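The magic numbers flagged in the TODO above unpack simply (illustrative only): shifting milliseconds right by 10 divides by 1024, a cheap approximation of seconds since the epoch, and subtracting 5 * 60 backdates the last-ping timestamp by roughly five minutes.

  long approxNowSeconds = System.currentTimeMillis() >> 10; // ms / 1024, roughly seconds
  long lastPinged = approxNowSeconds - 5 * 60;              // about five minutes in the past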
  @Override
  public void cancelScheduledMigrations(final HostVO host) {
    WorkType type = host.getType() == HostVO.Type.Storage ? WorkType.Stop : WorkType.Migration;

    _haDao.deleteMigrationWorkItems(host.getId(), type, _serverId);
  }