@Test
  public void testPoolStateIsNotUp() {
    try {
      createDb();

      StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
      pool.setScope(ScopeType.ZONE);
      pool.setStatus(StoragePoolStatus.Maintenance);
      storagePoolDao.update(pool.getId(), pool);

      DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
      VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
      Mockito.when(
              storageMgr.storagePoolHasEnoughSpace(
                  Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class)))
          .thenReturn(true);
      DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
      int foundAcct = 0;
      for (StoragePoolAllocator allocator : allocators) {
        List<StoragePool> pools =
            allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
        if (!pools.isEmpty()) {
          Assert.assertEquals(pools.get(0).getId(), storage.getId());
          foundAcct++;
        }
      }

      // The pool is in Maintenance, so no allocator should have returned it.
      if (foundAcct > 0) {
        Assert.fail();
      }
    } catch (Exception e) {
      cleanDb();
      Assert.fail();
    }
  }
  @Override
  public long getAllocatedPoolCapacity(StoragePoolVO pool, VMTemplateVO templateForVmCreation) {

    // Get size for all the volumes
    Pair<Long, Long> sizes = _volumeDao.getCountAndTotalByPool(pool.getId());
    long totalAllocatedSize = sizes.second() + sizes.first() * _extraBytesPerVolume;

    // Get size for VM Snapshots
    totalAllocatedSize = totalAllocatedSize + getVMSnapshotAllocatedCapacity(pool);

    // Iterate through all templates on this storage pool
    boolean tmpinstalled = false;
    List<VMTemplateStoragePoolVO> templatePoolVOs;
    templatePoolVOs = _templatePoolDao.listByPoolId(pool.getId());

    for (VMTemplateStoragePoolVO templatePoolVO : templatePoolVOs) {
      if ((templateForVmCreation != null)
          && !tmpinstalled
          && (templatePoolVO.getTemplateId() == templateForVmCreation.getId())) {
        tmpinstalled = true;
      }
      long templateSize = templatePoolVO.getTemplateSize();
      totalAllocatedSize += templateSize + _extraBytesPerVolume;
    }

    // Add the size for the templateForVmCreation if it's not already present
    /*if ((templateForVmCreation != null) && !tmpinstalled) {

    }*/

    return totalAllocatedSize;
  }
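  // A minimal sketch (not from the original source) of the step the commented-out block
  // above leaves open: if templateForVmCreation is not yet installed on this pool, its size
  // would also be reserved. It assumes VMTemplateVO exposes a getSize() accessor, which is
  // not shown in this snippet.
  //
  //   if ((templateForVmCreation != null) && !tmpinstalled) {
  //     totalAllocatedSize += templateForVmCreation.getSize() + _extraBytesPerVolume;
  //   }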
  @Override
  public void createAsync(
      DataStore dataStore,
      DataObject dataObject,
      AsyncCompletionCallback<CreateCmdResult> callback) {
    String iqn = null;
    String errMsg = null;

    if (dataObject.getType() == DataObjectType.VOLUME) {
      VolumeInfo volumeInfo = (VolumeInfo) dataObject;
      AccountVO account = _accountDao.findById(volumeInfo.getAccountId());
      String sfAccountName = getSfAccountName(account.getUuid(), account.getAccountId());

      long storagePoolId = dataStore.getId();
      SolidFireConnection sfConnection = getSolidFireConnection(storagePoolId);

      if (!sfAccountExists(sfAccountName, sfConnection)) {
        SolidFireUtil.SolidFireAccount sfAccount =
            createSolidFireAccount(sfAccountName, sfConnection);

        updateCsDbWithAccountInfo(account.getId(), sfAccount);
      }

      SolidFireUtil.SolidFireVolume sfVolume = createSolidFireVolume(volumeInfo, sfConnection);

      iqn = sfVolume.getIqn();

      VolumeVO volume = this._volumeDao.findById(volumeInfo.getId());

      volume.set_iScsiName(iqn);
      volume.setFolder(String.valueOf(sfVolume.getId()));
      volume.setPoolType(StoragePoolType.IscsiLUN);
      volume.setPoolId(storagePoolId);

      _volumeDao.update(volume.getId(), volume);

      StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());

      long capacityBytes = storagePool.getCapacityBytes();
      long usedBytes = storagePool.getUsedBytes();

      usedBytes += volumeInfo.getSize();

      storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);

      _storagePoolDao.update(storagePoolId, storagePool);
    } else {
      errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
    }

    // path = iqn
    // size is pulled from DataObject instance, if errMsg is null
    CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg));

    result.setResult(errMsg);

    callback.complete(result);
  }
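  // A brief illustration of the used-bytes clamp above, with hypothetical numbers: if
  // capacityBytes is 100 GB, usedBytes is 95 GB and the new volume is 10 GB, the pool's
  // used-bytes figure is stored as 100 GB rather than 105 GB, so it never exceeds capacity.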
Example #4
  // @Test(priority=3)
  public void tearDown() {
    List<StoragePoolVO> ds = primaryStoreDao.findPoolByName(this.primaryName);
    for (StoragePoolVO store : ds) {
      store.setUuid(null);
      primaryStoreDao.remove(store.getId());
      primaryStoreDao.expunge(store.getId());
    }
  }
  @Override
  public void deleteAsync(
      DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CommandResult> callback) {
    String errMsg = null;

    if (dataObject.getType() == DataObjectType.VOLUME) {
      VolumeInfo volumeInfo = (VolumeInfo) dataObject;
      AccountVO account = _accountDao.findById(volumeInfo.getAccountId());
      AccountDetailVO accountDetails =
          _accountDetailsDao.findDetail(account.getAccountId(), SolidFireUtil.ACCOUNT_ID);
      long sfAccountId = Long.parseLong(accountDetails.getValue());

      long storagePoolId = dataStore.getId();
      SolidFireConnection sfConnection = getSolidFireConnection(storagePoolId);

      deleteSolidFireVolume(volumeInfo, sfConnection);

      _volumeDao.deleteVolumesByInstance(volumeInfo.getId());

      //            if (!sfAccountHasVolume(sfAccountId, sfConnection)) {
      //                // delete the account from the SolidFire SAN
      //                deleteSolidFireAccount(sfAccountId, sfConnection);
      //
      //                // delete the info in the account_details table
      //                // that's related to the SolidFire account
      //                _accountDetailsDao.deleteDetails(account.getAccountId());
      //            }

      StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);

      long usedBytes = storagePool.getUsedBytes();

      usedBytes -= volumeInfo.getSize();

      storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);

      _storagePoolDao.update(storagePoolId, storagePool);
    } else {
      errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync";
    }

    CommandResult result = new CommandResult();

    result.setResult(errMsg);

    callback.complete(result);
  }
  private long getVMSnapshotAllocatedCapacity(StoragePoolVO pool) {
    List<VolumeVO> volumes = _volumeDao.findByPoolId(pool.getId());
    long totalSize = 0;
    for (VolumeVO volume : volumes) {
      if (volume.getInstanceId() == null) {
        continue;
      }
      Long vmId = volume.getInstanceId();
      UserVm vm = _userVMDao.findById(vmId);
      if (vm == null) {
        continue;
      }
      ServiceOffering offering = _offeringsDao.findById(vm.getServiceOfferingId());
      List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
      long pathCount = 0;
      long memorySnapshotSize = 0;
      for (VMSnapshotVO vmSnapshotVO : vmSnapshots) {
        // Only leaf snapshots (those with no children) start a new delta chain.
        if (_vmSnapshotDao.listByParent(vmSnapshotVO.getId()).isEmpty()) {
          pathCount++;
        }
        if (vmSnapshotVO.getType() == VMSnapshot.Type.DiskAndMemory) {
          // Use long arithmetic so offerings with 2 GB of RAM or more do not overflow.
          memorySnapshotSize += offering.getRamSize() * 1024L * 1024L;
        }
      }
      if (pathCount <= 1) {
        totalSize += memorySnapshotSize;
      } else {
        totalSize += volume.getSize() * (pathCount - 1) + memorySnapshotSize;
      }
    }
    return totalSize;
  }
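  // A worked illustration of the sizing above, with hypothetical numbers: a 20 GB volume
  // whose VM has three leaf VM snapshots, one of them DiskAndMemory on a 1 GB-RAM offering,
  // contributes 20 GB * (3 - 1) + 1 GB = 41 GB of allocated VM-snapshot capacity.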
  @Test
  public void testClusterAllocatorWithTags() {
    try {
      createDb();
      StoragePoolDetailVO detailVO = new StoragePoolDetailVO(this.storagePoolId, "high", "true");
      poolDetailsDao.persist(detailVO);
      DiskOfferingVO diskOff = this.diskOfferingDao.findById(diskOffering.getId());
      List<String> tags = new ArrayList<String>();
      tags.add("high");
      diskOff.setTagsArray(tags);
      diskOfferingDao.update(diskOff.getId(), diskOff);

      DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
      VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
      Mockito.when(
              storageMgr.storagePoolHasEnoughSpace(
                  Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class)))
          .thenReturn(true);
      DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
      int foundAcct = 0;
      for (StoragePoolAllocator allocator : allocators) {
        List<StoragePool> pools =
            allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
        if (!pools.isEmpty()) {
          Assert.assertEquals(pools.get(0).getId(), storage.getId());
          foundAcct++;
        }
      }

      if (foundAcct > 1 || foundAcct == 0) {
        Assert.fail();
      }
    } catch (Exception e) {
      cleanDb();
      Assert.fail();
    }
  }
  @Override
  protected List<StoragePool> select(
      DiskProfile dskCh,
      VirtualMachineProfile vmProfile,
      DeploymentPlan plan,
      ExcludeList avoid,
      int returnUpTo) {

    List<StoragePool> suitablePools = new ArrayList<StoragePool>();

    s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");

    if (!dskCh.useLocalStorage()) {
      return suitablePools;
    }

    // data disk and host identified from deploying vm (attach volume case)
    if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
      List<StoragePoolHostVO> hostPools = _poolHostDao.listByHostId(plan.getHostId());
      for (StoragePoolHostVO hostPool : hostPools) {
        StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId());
        if (pool != null && pool.isLocal()) {
          StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
          if (filter(avoid, pol, dskCh, plan)) {
            s_logger.debug(
                "Found suitable local storage pool " + pool.getId() + ", adding to list");
            suitablePools.add(pol);
          } else {
            avoid.addPool(pool.getId());
          }
        }

        if (suitablePools.size() == returnUpTo) {
          break;
        }
      }
    } else {
      if (plan.getPodId() == null) {
        // zone wide primary storage deployment
        return null;
      }
      List<StoragePoolVO> availablePools =
          _storagePoolDao.findLocalStoragePoolsByTags(
              plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());
      for (StoragePoolVO pool : availablePools) {
        if (suitablePools.size() == returnUpTo) {
          break;
        }
        StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
        if (filter(avoid, pol, dskCh, plan)) {
          suitablePools.add(pol);
        } else {
          avoid.addPool(pool.getId());
        }
      }

      // Add the remaining pools in the cluster that did not match the tags to the avoid set.
      List<StoragePoolVO> allPools =
          _storagePoolDao.findLocalStoragePoolsByTags(
              plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), null);
      allPools.removeAll(availablePools);
      for (StoragePoolVO pool : allPools) {
        avoid.addPool(pool.getId());
      }
    }

    if (s_logger.isDebugEnabled()) {
      s_logger.debug(
          "LocalStoragePoolAllocator returning "
              + suitablePools.size()
              + " suitable storage pools");
    }

    return suitablePools;
  }
  protected void createDb() {
    DataCenterVO dc =
        new DataCenterVO(
            UUID.randomUUID().toString(),
            "test",
            "8.8.8.8",
            null,
            "10.0.0.1",
            null,
            "10.0.0.1/24",
            null,
            null,
            NetworkType.Basic,
            null,
            null,
            true,
            true,
            null,
            null);
    dc = dcDao.persist(dc);
    dcId = dc.getId();

    HostPodVO pod =
        new HostPodVO(UUID.randomUUID().toString(), dc.getId(), "255.255.255.255", "", 8, "test");
    pod = podDao.persist(pod);
    podId = pod.getId();

    ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster");
    cluster.setHypervisorType(HypervisorType.XenServer.toString());
    cluster.setClusterType(ClusterType.CloudManaged);
    cluster.setManagedState(ManagedState.Managed);
    cluster = clusterDao.persist(cluster);
    clusterId = cluster.getId();

    DataStoreProvider provider =
        providerMgr.getDataStoreProvider("ancient primary data store provider");
    storage = new StoragePoolVO();
    storage.setDataCenterId(dcId);
    storage.setPodId(podId);
    storage.setPoolType(StoragePoolType.NetworkFilesystem);
    storage.setClusterId(clusterId);
    storage.setStatus(StoragePoolStatus.Up);
    storage.setScope(ScopeType.CLUSTER);
    storage.setAvailableBytes(1000);
    storage.setCapacityBytes(20000);
    storage.setHostAddress(UUID.randomUUID().toString());
    storage.setPath(UUID.randomUUID().toString());
    storage.setStorageProviderName(provider.getName());
    storage = storagePoolDao.persist(storage);
    storagePoolId = storage.getId();

    storageMgr.createCapacityEntry(storage.getId());

    diskOffering = new DiskOfferingVO();
    diskOffering.setDiskSize(500);
    diskOffering.setName("test-disk");
    diskOffering.setSystemUse(false);
    diskOffering.setUseLocalStorage(false);
    diskOffering.setCustomized(false);
    diskOffering.setRecreatable(false);
    diskOffering = diskOfferingDao.persist(diskOffering);
    diskOfferingId = diskOffering.getId();

    volume =
        new VolumeVO(
            Volume.Type.ROOT,
            "volume",
            dcId,
            1,
            1,
            diskOffering.getId(),
            diskOffering.getDiskSize());
    volume = volumeDao.persist(volume);
    volumeId = volume.getId();
  }
  @Test
  public void testClusterAllocatorMultiplePools() {
    Long newStorageId = null;
    try {
      createDb();

      DataStoreProvider provider =
          providerMgr.getDataStoreProvider("ancient primary data store provider");
      storage = new StoragePoolVO();
      storage.setDataCenterId(dcId);
      storage.setPodId(podId);
      storage.setPoolType(StoragePoolType.NetworkFilesystem);
      storage.setClusterId(clusterId);
      storage.setStatus(StoragePoolStatus.Up);
      storage.setScope(ScopeType.CLUSTER);
      storage.setAvailableBytes(1000);
      storage.setCapacityBytes(20000);
      storage.setHostAddress(UUID.randomUUID().toString());
      storage.setPath(UUID.randomUUID().toString());
      storage.setStorageProviderName(provider.getName());
      StoragePoolVO newStorage = storagePoolDao.persist(storage);
      newStorageId = newStorage.getId();

      DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
      VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
      Mockito.when(
              storageMgr.storagePoolHasEnoughSpace(
                  Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class)))
          .thenReturn(true);
      DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
      int foundAcct = 0;
      for (StoragePoolAllocator allocator : allocators) {
        List<StoragePool> pools =
            allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
        if (!pools.isEmpty()) {
          Assert.assertEquals(pools.size(), 1);
          foundAcct++;
        }
      }

      if (foundAcct > 1 || foundAcct == 0) {
        Assert.fail();
      }
    } catch (Exception e) {
      cleanDb();

      if (newStorageId != null) {
        storagePoolDao.remove(newStorageId);
      }
      Assert.fail();
    }
  }
Example #11
  public DataStore createPrimaryDataStore() {
    try {
      String uuid = UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString();
      List<StoragePoolVO> pools = primaryDataStoreDao.findPoolByName(this.primaryName);
      if (pools.size() > 0) {
        return this.dataStoreMgr.getPrimaryDataStore(pools.get(0).getId());
      }

      /*
       * DataStoreProvider provider =
       * dataStoreProviderMgr.getDataStoreProvider
       * ("cloudstack primary data store provider"); Map<String, Object>
       * params = new HashMap<String, Object>(); URI uri = new
       * URI(this.getPrimaryStorageUrl()); params.put("url",
       * this.getPrimaryStorageUrl()); params.put("server",
       * uri.getHost()); params.put("path", uri.getPath());
       * params.put("protocol",
       * Storage.StoragePoolType.NetworkFilesystem); params.put("zoneId",
       * dcId); params.put("clusterId", clusterId); params.put("name",
       * this.primaryName); params.put("port", 1); params.put("podId",
       * this.podId); params.put("roles",
       * DataStoreRole.Primary.toString()); params.put("uuid", uuid);
       * params.put("providerName", String.valueOf(provider.getName()));
       *
       * DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
       * DataStore store = lifeCycle.initialize(params); ClusterScope
       * scope = new ClusterScope(clusterId, podId, dcId);
       * lifeCycle.attachCluster(store, scope);
       */

      StoragePoolVO pool = new StoragePoolVO();
      pool.setClusterId(clusterId);
      pool.setDataCenterId(dcId);
      URI uri = new URI(this.getPrimaryStorageUrl());
      pool.setHostAddress(uri.getHost());
      pool.setPath(uri.getPath());
      pool.setPort(0);
      pool.setName(this.primaryName);
      pool.setUuid(this.getPrimaryStorageUuid());
      pool.setStatus(StoragePoolStatus.Up);
      pool.setPoolType(StoragePoolType.VMFS);
      pool.setPodId(podId);
      pool.setScope(ScopeType.CLUSTER);
      pool.setStorageProviderName("cloudstack primary data store provider");
      pool = this.primaryStoreDao.persist(pool);
      DataStore store = this.dataStoreMgr.getPrimaryDataStore(pool.getId());
      return store;
    } catch (Exception e) {
      return null;
    }
  }
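  // A minimal usage sketch (an assumed test flow, not taken from the source): the store
  // created here would back a test case and later be cleaned up by the tearDown() method
  // shown earlier, which removes and expunges pools found by name.
  //
  //   DataStore primaryStore = createPrimaryDataStore();
  //   Assert.assertNotNull("primary data store was not created", primaryStore);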