@Override
    public void run() {
      try {
        s_logger.debug("HostStatsCollector is running...");

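        // Gather host statistics from all Up hosts, excluding storage, console proxy and secondary storage hosts.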
        SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.Storage.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.ConsoleProxy.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.SecondaryStorage.toString());
        ConcurrentHashMap<Long, HostStats> hostStats = new ConcurrentHashMap<Long, HostStats>();
        List<HostVO> hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          if (host.getId() != null) {
            HostStatsEntry stats = (HostStatsEntry) _agentMgr.getHostStatistics(host.getId());
            if (stats != null) {
              hostStats.put(host.getId(), stats);
            } else {
              s_logger.warn("Received invalid host stats for host: " + host.getId());
            }
          } else {
            s_logger.warn("Host: " + host.getId() + " does not exist, skipping host statistics");
          }
        }
        _hostStats = hostStats;
      } catch (Throwable t) {
        s_logger.error("Error trying to retrieve host stats", t);
      }
    }
    @Override
    public void run() {
      try {
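        // Collect storage statistics from Up hosts of type Storage.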
        SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.Storage.toString());

        ConcurrentHashMap<Long, StorageStats> storageStats =
            new ConcurrentHashMap<Long, StorageStats>();
        List<HostVO> hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
          Answer answer = _agentMgr.easySend(host.getId(), command);
          if (answer != null && answer.getResult()) {
            storageStats.put(host.getId(), (StorageStats) answer);
          }
        }

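        // Repeat the collection for Up secondary storage hosts.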
        sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.SecondaryStorage.toString());

        hosts = _hostDao.search(sc, null);
        for (HostVO host : hosts) {
          GetStorageStatsCommand command = new GetStorageStatsCommand(host.getGuid());
          Answer answer = _agentMgr.easySend(host.getId(), command);
          if (answer != null && answer.getResult()) {
            storageStats.put(host.getId(), (StorageStats) answer);
          }
        }
        _storageStats = storageStats;

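        // Collect statistics for each primary storage pool through the storage manager.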
        ConcurrentHashMap<Long, StorageStats> storagePoolStats =
            new ConcurrentHashMap<Long, StorageStats>();

        List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
        for (StoragePoolVO pool : storagePools) {
          GetStorageStatsCommand command =
              new GetStorageStatsCommand(pool.getUuid(), pool.getPoolType(), pool.getPath());
          Answer answer = _storageManager.sendToPool(pool, command);
          if (answer != null && answer.getResult()) {
            storagePoolStats.put(pool.getId(), (StorageStats) answer);
          }
        }
        _storagePoolStats = storagePoolStats;

        // a list to store the new capacity entries that will be committed once everything is
        // calculated
        List<CapacityVO> newCapacities = new ArrayList<CapacityVO>();

        // Update the storage capacity entries, creating new ones if they don't exist.
        Transaction txn = Transaction.open(Transaction.CLOUD_DB);
        try {
          if (s_logger.isTraceEnabled()) {
            s_logger.trace("recalculating system storage capacity");
          }
          txn.start();
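          // Refresh each storage host's total size and its corresponding capacity table entry.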
          for (Long hostId : storageStats.keySet()) {
            StorageStats stats = storageStats.get(hostId);
            short capacityType = -1;
            HostVO host = _hostDao.findById(hostId);
            host.setTotalSize(stats.getCapacityBytes());
            _hostDao.update(host.getId(), host);

            SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
            capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
            capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, host.getDataCenterId());

            if (Host.Type.SecondaryStorage.equals(host.getType())) {
              capacityType = CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE;
            } else if (Host.Type.Storage.equals(host.getType())) {
              capacityType = CapacityVO.CAPACITY_TYPE_STORAGE;
            }
            if (-1 != capacityType) {
              capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
              List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
              if (capacities.size() == 0) { // Create a new one
                CapacityVO capacity =
                    new CapacityVO(
                        host.getId(),
                        host.getDataCenterId(),
                        host.getPodId(),
                        stats.getByteUsed(),
                        stats.getCapacityBytes(),
                        capacityType);
                _capacityDao.persist(capacity);
              } else { // Update if it already exists.
                CapacityVO capacity = capacities.get(0);
                capacity.setUsedCapacity(stats.getByteUsed());
                capacity.setTotalCapacity(stats.getCapacityBytes());
                _capacityDao.update(capacity.getId(), capacity);
              }
            }
          } // End of for
          txn.commit();
        } catch (Exception ex) {
          txn.rollback();
          s_logger.error("Unable to start transaction for storage capacity update");
        } finally {
          txn.close();
        }

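        // Persist the refreshed capacity and available bytes for each storage pool.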
        for (Long poolId : storagePoolStats.keySet()) {
          StorageStats stats = storagePoolStats.get(poolId);
          StoragePoolVO pool = _storagePoolDao.findById(poolId);

          if (pool == null) {
            continue;
          }

          pool.setCapacityBytes(stats.getCapacityBytes());
          long available = stats.getCapacityBytes() - stats.getByteUsed();
          if (available < 0) {
            available = 0;
          }
          pool.setAvailableBytes(available);
          _storagePoolDao.update(pool.getId(), pool);

          _storageManager.createCapacityEntry(pool, 0L);
        }
      } catch (Throwable t) {
        s_logger.error("Error trying to retrieve storage stats", t);
      }
    }
    @Override
    public void run() {
      try {
        s_logger.debug("VmStatsCollector is running...");

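        // Collect VM statistics from all Up hosts, excluding storage, console proxy and secondary storage hosts.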
        SearchCriteria<HostVO> sc = _hostDao.createSearchCriteria();
        sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.Storage.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.ConsoleProxy.toString());
        sc.addAnd("type", SearchCriteria.Op.NEQ, Host.Type.SecondaryStorage.toString());
        List<HostVO> hosts = _hostDao.search(sc, null);

        for (HostVO host : hosts) {
          List<UserVmVO> vms = _userVmDao.listRunningByHostId(host.getId());
          List<Long> vmIds = new ArrayList<Long>();

          for (UserVmVO vm : vms) {
            vmIds.add(vm.getId());
          }

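          // Fetch statistics for all running VMs on this host in a single call.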
          try {
            HashMap<Long, VmStatsEntry> vmStatsById =
                _userVmMgr.getVirtualMachineStatistics(host.getId(), host.getName(), vmIds);

            if (vmStatsById != null) {
              VmStatsEntry statsInMemory = null;

              Set<Long> vmIdSet = vmStatsById.keySet();
              for (Long vmId : vmIdSet) {
                VmStatsEntry statsForCurrentIteration = vmStatsById.get(vmId);
                statsInMemory = (VmStatsEntry) _VmStats.get(vmId);

                if (statsInMemory == null) {
                  // no stats exist for this vm, directly persist
                  _VmStats.put(vmId, statsForCurrentIteration);
                } else {
                  // update each field
                  statsInMemory.setCPUUtilization(statsForCurrentIteration.getCPUUtilization());
                  statsInMemory.setNumCPUs(statsForCurrentIteration.getNumCPUs());
                  statsInMemory.setNetworkReadKBs(
                      statsInMemory.getNetworkReadKBs()
                          + statsForCurrentIteration.getNetworkReadKBs());
                  statsInMemory.setNetworkWriteKBs(
                      statsInMemory.getNetworkWriteKBs()
                          + statsForCurrentIteration.getNetworkWriteKBs());

                  _VmStats.put(vmId, statsInMemory);
                }
              }
            }

          } catch (Exception e) {
            s_logger.debug("Failed to get VM stats for host with ID: " + host.getId());
            continue;
          }
        }

      } catch (Throwable t) {
        s_logger.error("Error trying to retrieve VM stats", t);
      }
    }