  /**
   * Creates the BlockObject BlockSnapshot data.
   *
   * @param name the base label to use for each snapshot
   * @param numSnapshots the number of snapshots to create
   * @throws Exception if the snapshot data cannot be created
   */
  private void prepareBlockSnapshotData(String name, int numSnapshots) throws Exception {
    // Create the volume for the snapshots
    Volume volume = new Volume();
    URI volumeURI = URIUtil.createId(Volume.class);

    StorageSystem storageSystem = createStorageSystem(false);
    volume.setId(volumeURI);
    volume.setStorageController(storageSystem.getId());
    String volName = "blockSnapshotVolume";
    volume.setLabel(volName);
    BlockConsistencyGroup cg =
        createBlockConsistencyGroup(
            "blockSnapshotConsistencyGroup", storageSystem.getId(), Types.LOCAL.name(), true);
    volume.setConsistencyGroup(cg.getId());
    _dbClient.createObject(volume);

    for (int i = 1; i <= numSnapshots; i++) {
      BlockSnapshot blockSnapshot = new BlockSnapshot();
      URI blockSnapshotURI = URIUtil.createId(BlockSnapshot.class);
      blockSnapshotURIs.add(blockSnapshotURI);
      blockSnapshot.setId(blockSnapshotURI);
      blockSnapshot.setLabel(name + i);
      blockSnapshot.setSnapsetLabel(name + i);
      blockSnapshot.setParent(new NamedURI(volume.getId(), name + i));
      blockSnapshot.addConsistencyGroup(cg.getId().toString());
      _dbClient.createObject(blockSnapshot);
    }
  }
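As a rough usage sketch (not part of the original example), a test might call this helper and read the snapshots back through the same DbClient API used above; the JUnit-style assertions and the surrounding test context are assumptions.

  // Hypothetical test fragment: create three snapshots, then verify each one round-trips from the DB.
  prepareBlockSnapshotData("testSnap", 3);
  for (URI snapshotURI : blockSnapshotURIs) {
    BlockSnapshot snapshot = _dbClient.queryObject(BlockSnapshot.class, snapshotURI);
    Assert.assertNotNull(snapshot);
    // Labels were built as name + index, e.g. "testSnap1".
    Assert.assertTrue(snapshot.getLabel().startsWith("testSnap"));
  }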
Code Example #2
 @Override
 public void process() {
   DbClient dbClient = getDbClient();
   List<URI> volumeURIs = dbClient.queryByType(Volume.class, false);
   Iterator<Volume> volumesIter = dbClient.queryIterativeObjects(Volume.class, volumeURIs);
   while (volumesIter.hasNext()) {
     Volume volume = volumesIter.next();
     URI systemURI = volume.getStorageController();
     if (!NullColumnValueGetter.isNullURI(systemURI)) {
       StorageSystem system = dbClient.queryObject(StorageSystem.class, systemURI);
       if ((system != null)
           && (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType()))) {
         // This is a VPLEX volume. If not already set,
         // set the protocols to FC.
         StringSet protocols = volume.getProtocol();
         if (protocols == null) {
           protocols = new StringSet();
           protocols.add(StorageProtocol.Block.FC.name());
           volume.setProtocol(protocols);
           dbClient.persistObject(volume);
         }
       }
     }
   }
 }
Code Example #3
  @Override
  public void removeInitiator(
      StorageSystem storage,
      URI exportMaskId,
      List<Initiator> initiators,
      List<URI> targets,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} removeInitiator START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);
    log.info("removeInitiator: initiators : {}", initiators);
    log.info("removeInitiator: targets : {}", targets);

    try {

      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumeList = new ArrayList<Volume>();
      StringMap volumes = exportMask.getUserAddedVolumes();

      for (String vol : volumes.values()) {
        Volume volume = dbClient.queryObject(Volume.class, URI.create(vol));
        volumeList.add(volume);
      }

      detachVolumesFromInitiators(storage, volumeList, initiators);

      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in RemoveInitiators: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doRemoveInitiators", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} removeInitiator END...", storage.getSerialNumber());
  }
Code Example #4
  @Override
  public void addVolume(
      StorageSystem storage,
      URI exportMaskId,
      VolumeURIHLU[] volumeURIHLUs,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} addVolume START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);
    log.info("addVolume: volume-HLU pairs: {}", volumeURIHLUs);
    log.info("User assigned HLUs will be ignored as Cinder does not support it.");

    try {

      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumes = new ArrayList<Volume>();
      List<Initiator> initiatorList = new ArrayList<Initiator>();
      // map to store target LUN id generated for each volume
      Map<URI, Integer> volumeToTargetLunMap = new HashMap<URI, Integer>();
      StringMap initiators = exportMask.getUserAddedInitiators();

      for (VolumeURIHLU volumeURIHLU : volumeURIHLUs) {
        URI volumeURI = volumeURIHLU.getVolumeURI();
        Volume volume = dbClient.queryObject(Volume.class, volumeURI);
        volumes.add(volume);
      }
      for (String ini : initiators.values()) {
        Initiator initiator = dbClient.queryObject(Initiator.class, URI.create(ini));
        initiatorList.add(initiator);
      }

      // Map to store volume to initiatorTargetMap
      Map<Volume, Map<String, List<String>>> volumeToFCInitiatorTargetMap =
          new HashMap<Volume, Map<String, List<String>>>();

      attachVolumesToInitiators(
          storage,
          volumes,
          initiatorList,
          volumeToTargetLunMap,
          volumeToFCInitiatorTargetMap,
          exportMask);

      // Update targets in the export mask
      if (!volumeToFCInitiatorTargetMap.isEmpty()) {
        updateTargetsInExportMask(
            storage, volumes.get(0), volumeToFCInitiatorTargetMap, initiatorList, exportMask);
      }
      updateTargetLunIdInExportMask(volumeToTargetLunMap, exportMask);
      dbClient.updateAndReindexObject(exportMask);

      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in AddVolumes: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doAddVolumes", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} addVolume END...", storage.getSerialNumber());
  }
Code Example #5
  /**
   * Deactivate Quota directory of file system, this will move the Quota directory to a
   * "marked-for-delete" state
   *
   * <p>NOTE: This is an asynchronous operation.
   *
   * @param id the URN of the QuotaDirectory
   * @param param QuotaDirectory delete param for optional force delete
   * @brief Delete file system Quota Dir
   * @return Task resource representation
   * @throws com.emc.storageos.svcs.errorhandling.resources.InternalException
   */
  @POST
  @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  @Path("/{id}/deactivate")
  @CheckPermission(
      roles = {Role.TENANT_ADMIN},
      acls = {ACL.OWN, ACL.ALL})
  public TaskResourceRep deactivateQuotaDirectory(
      @PathParam("id") URI id, QuotaDirectoryDeleteParam param) throws InternalException {

    _log.info("FileService::deactivateQtree Request recieved {}", id);
    String task = UUID.randomUUID().toString();
    ArgValidator.checkFieldUriType(id, QuotaDirectory.class, "id");
    QuotaDirectory quotaDirectory = queryResource(id);
    FileShare fs = queryFileShareResource(quotaDirectory.getParent().getURI());
    ArgValidator.checkFieldNotNull(fs, "filesystem");

    // <TODO> Implement Force delete option when shares and exports for Quota Directory are
    // supported

    Operation op = new Operation();
    op.setResourceType(ResourceOperationTypeEnum.DELETE_FILE_SYSTEM_QUOTA_DIR);
    quotaDirectory.getOpStatus().createTaskStatus(task, op);
    fs.setOpStatus(new OpStatusMap());
    fs.getOpStatus().createTaskStatus(task, op);
    _dbClient.persistObject(fs);
    _dbClient.persistObject(quotaDirectory);

    // Now get ready to make calls into the controller
    StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice());
    FileController controller = getController(FileController.class, device.getSystemType());
    try {
      controller.deleteQuotaDirectory(device.getId(), quotaDirectory.getId(), fs.getId(), task);
      // If delete operation is successful, then remove obj from ViPR db by setting inactive=true
      quotaDirectory.setInactive(true);
      _dbClient.persistObject(quotaDirectory);

    } catch (InternalException e) {
      // Treating all controller exceptions as internal errors for now. The controller
      // should discriminate between validation problems and internal errors.

      throw e;
    }

    auditOp(
        OperationTypeEnum.DELETE_FILE_SYSTEM_QUOTA_DIR,
        true,
        AuditLogManager.AUDITOP_BEGIN,
        quotaDirectory.getLabel(),
        quotaDirectory.getId().toString(),
        fs.getId().toString());

    fs = _dbClient.queryObject(FileShare.class, fs.getId());
    _log.debug(
        "FileService::Quota directory Before sending response, FS ID : {}, Task : {} ; Status {}",
        fs.getId(),
        fs.getOpStatus().get(task),
        fs.getOpStatus().get(task).getStatus());

    return toTask(quotaDirectory, task, op);
  }
Code Example #6
  @Override
  public void deleteSingleVolumeMirror(
      StorageSystem storage, URI mirror, TaskCompleter taskCompleter)
      throws DeviceControllerException {
    _log.info("deleteSingleVolumeMirror operation START");
    try {
      BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
      if (storage.checkIfVmax3()) {
        _helper.removeVolumeFromParkingSLOStorageGroup(storage, mirrorObj.getNativeId(), false);
        _log.info(
            "Done invoking remove volume {} from parking SLO storage group",
            mirrorObj.getNativeId());
      }

      CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
      CIMObjectPath configSvcPath = _cimPath.getConfigSvcPath(storage);
      CIMArgument[] inArgs = _helper.getDeleteMirrorInputArguments(storage, mirrorPath);
      CIMArgument[] outArgs = new CIMArgument[5];
      _helper.invokeMethod(
          storage, configSvcPath, SmisConstants.RETURN_TO_STORAGE_POOL, inArgs, outArgs);
      CIMObjectPath job = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
      if (job != null) {
        ControllerServiceImpl.enqueueJob(
            new QueueJob(new SmisBlockDeleteMirrorJob(job, storage.getId(), taskCompleter)));
      }
    } catch (Exception e) {
      _log.info("Problem making SMI-S call: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
      taskCompleter.error(_dbClient, serviceError);
    }
  }
Code Example #7
 /*
  * (non-Javadoc)
  *
  * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExportRemoveVolumes(com.emc.storageos.db.client.model.StorageSystem,
  * com.emc.storageos.db.client.model.ExportMask, java.util.List, com.emc.storageos.volumecontroller.TaskCompleter)
  */
 @Override
 public void doExportRemoveVolumes(
     StorageSystem storage, ExportMask exportMask, List<URI> volumes, TaskCompleter taskCompleter)
     throws DeviceControllerException {
   log.info("{} doExportRemoveVolume START ...", storage.getSerialNumber());
   exportMaskOperationsHelper.removeVolume(storage, exportMask.getId(), volumes, taskCompleter);
   log.info("{} doExportRemoveVolume END ...", storage.getSerialNumber());
 }
Code Example #8
  @POST
  @Path("/{id}/deactivate")
  @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  @CheckPermission(roles = {Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN})
  public Response deleteStorageProvider(@PathParam("id") URI id) {
    // Validate the provider
    ArgValidator.checkFieldUriType(id, StorageProvider.class, "id");
    StorageProvider provider = _dbClient.queryObject(StorageProvider.class, id);
    ArgValidator.checkEntityNotNull(provider, id, isIdEmbeddedInURL(id));
    // Verify the provider can be removed without leaving "dangling" storage systems.
    StringSet providerStorageSystems = provider.getStorageSystems();
    if (null != providerStorageSystems && !providerStorageSystems.isEmpty()) {
      // First we need to verify that all related storage systems have at least 2 providers
      for (String system : providerStorageSystems) {
        StorageSystem storageSys = _dbClient.queryObject(StorageSystem.class, URI.create(system));
        if (storageSys != null
            && !storageSys.getInactive()
            && storageSys.getProviders() != null
            && storageSys.getProviders().size() == 1) {
          throw APIException.badRequests.cannotDeleteProviderWithManagedStorageSystems(
              storageSys.getId());
        }
      }
      // Next we can clear this provider from storage systems.
      for (String system : providerStorageSystems) {
        StorageSystem storageSys = _dbClient.queryObject(StorageSystem.class, URI.create(system));
        provider.removeStorageSystem(_dbClient, storageSys);
      }
    }

    StringSet decommissionedSystems = provider.getDecommissionedSystems();
    if (null != decommissionedSystems && !decommissionedSystems.isEmpty()) {
      for (String decommissioned : decommissionedSystems) {
        DecommissionedResource oldRes =
            _dbClient.queryObject(DecommissionedResource.class, URI.create(decommissioned));
        if (oldRes != null) {
          _dbClient.markForDeletion(oldRes);
        }
      }
    }

    // Set to inactive.
    _dbClient.markForDeletion(provider);

    auditOp(
        OperationTypeEnum.DELETE_STORAGEPROVIDER,
        true,
        null,
        provider.getId().toString(),
        provider.getLabel(),
        provider.getIPAddress(),
        provider.getPortNumber(),
        provider.getUserName(),
        provider.getInterfaceType());

    return Response.ok().build();
  }
Code Example #9
  /*
   * (non-Javadoc)
   *
   * @see
   * com.emc.storageos.volumecontroller.BlockStorageDevice#doWaitForSynchronized
   * (java.lang.Class, com.emc.storageos.db.client.model.StorageSystem,
   * java.net.URI, com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void doWaitForSynchronized(
      Class<? extends BlockObject> clazz,
      StorageSystem storageObj,
      URI target,
      TaskCompleter completer) {
    log.info("START waitForSynchronized for {}", target);

    try {
      Volume targetObj = dbClient.queryObject(Volume.class, target);
      // Source could be either Volume or BlockSnapshot
      BlockObject sourceObj = BlockObject.fetch(dbClient, targetObj.getAssociatedSourceVolume());

      // We split the pair, which causes the data to be synchronized.
      // When the split is complete, the data is synchronized.
      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storageObj),
              storageObj.getSmisUserName(),
              storageObj.getSmisPassword());
      HDSApiProtectionManager hdsApiProtectionManager = hdsApiClient.getHdsApiProtectionManager();
      String replicationGroupObjectID = hdsApiProtectionManager.getReplicationGroupObjectId();
      ReplicationInfo replicationInfo =
          hdsApiProtectionManager.getReplicationInfoFromSystem(
                  sourceObj.getNativeId(), targetObj.getNativeId())
              .first;
      hdsApiProtectionManager.modifyShadowImagePair(
          replicationGroupObjectID,
          replicationInfo.getObjectID(),
          HDSApiProtectionManager.ShadowImageOperationType.split);

      // Update state in case we are waiting for synchronization
      // after creation of a new full copy that was not created
      // inactive.
      String state = targetObj.getReplicaState();
      if (!ReplicationState.SYNCHRONIZED.name().equals(state)) {
        targetObj.setSyncActive(true);
        targetObj.setReplicaState(ReplicationState.SYNCHRONIZED.name());
        dbClient.persistObject(targetObj);
      }

      // Queue job to wait for replication status to move to split.
      ControllerServiceImpl.enqueueJob(
          new QueueJob(
              new HDSReplicationSyncJob(
                  storageObj.getId(),
                  sourceObj.getNativeId(),
                  targetObj.getNativeId(),
                  ReplicationStatus.SPLIT,
                  completer)));
    } catch (Exception e) {
      log.error("Exception occurred while waiting for synchronization", e);
      ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
      completer.error(dbClient, serviceError);
    }
    log.info("completed doWaitForSynchronized");
  }
Code Example #10
  @Override
  public void addInitiator(
      StorageSystem storage,
      URI exportMaskId,
      List<Initiator> initiators,
      List<URI> targets,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} addInitiator START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);
    log.info("addInitiator: initiators : {}", initiators);
    log.info("addInitiator: targets : {}", targets);

    try {

      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumeList = new ArrayList<Volume>();
      // map to store target LUN id generated for each volume
      Map<URI, Integer> volumeToTargetLunMap = new HashMap<URI, Integer>();
      StringMap volumes = exportMask.getUserAddedVolumes();

      for (String vol : volumes.values()) {
        Volume volume = dbClient.queryObject(Volume.class, URI.create(vol));
        volumeList.add(volume);
      }

      // Map to store volume to initiatorTargetMap
      Map<Volume, Map<String, List<String>>> volumeToFCInitiatorTargetMap =
          new HashMap<Volume, Map<String, List<String>>>();

      attachVolumesToInitiators(
          storage,
          volumeList,
          initiators,
          volumeToTargetLunMap,
          volumeToFCInitiatorTargetMap,
          exportMask);

      // Update targets in the export mask
      if (!volumeToFCInitiatorTargetMap.isEmpty()) {
        updateTargetsInExportMask(
            storage, volumeList.get(0), volumeToFCInitiatorTargetMap, initiators, exportMask);
        dbClient.updateAndReindexObject(exportMask);
      }

      // TODO: update volumeToTargetLunMap in the export mask?
      // Do we get a different LUN ID for the new initiators from the same host?
      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in AddInitiators: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doAddInitiators", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} addInitiator END...", storage.getSerialNumber());
  }
Code Example #11
  /*
   * (non-Javadoc)
   *
   * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExpandVolume(com.emc.storageos.db.client.model.StorageSystem,
   * com.emc.storageos.db.client.model.StoragePool, com.emc.storageos.db.client.model.Volume, java.lang.Long,
   * com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void doExpandVolume(
      StorageSystem storageSystem,
      StoragePool storagePool,
      Volume volume,
      Long size,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {

    log.info(
        String.format(
            "Expand Volume Start - Array: %s, Pool: %s, Volume: %s, New size: %d",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid(), volume.getLabel(), size));
    try {
      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storageSystem),
              storageSystem.getSmisUserName(),
              storageSystem.getSmisPassword());
      String systemObjectID = HDSUtils.getSystemObjectID(storageSystem);
      String asyncTaskMessageId = null;

      if (volume.getThinlyProvisioned()) {
        asyncTaskMessageId =
            hdsApiClient.modifyThinVolume(
                systemObjectID,
                HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storageSystem),
                size);
      }

      if (null != asyncTaskMessageId) {
        HDSJob expandVolumeJob =
            new HDSVolumeExpandJob(
                asyncTaskMessageId,
                storageSystem.getId(),
                storagePool.getId(),
                taskCompleter,
                "ExpandVolume");
        ControllerServiceImpl.enqueueJob(new QueueJob(expandVolumeJob));
      }
    } catch (final InternalException e) {
      log.error("Problem in doExpandVolume: ", e);
      taskCompleter.error(dbClient, e);
    } catch (final Exception e) {
      log.error("Problem in doExpandVolume: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.hds.methodFailed("doExpandVolume", e.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info(
        String.format(
            "Expand Volume End - Array: %s, Pool: %s, Volume: %s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid(), volume.getLabel()));
  }
Code Example #12
  /**
   * Re-validate the ExportMask.
   *
   * <p>This is required because the ExportMask gets updated by reading the cinder export
   * volume response.
   *
   * @param varrayURI
   * @param initiatorPortMap
   * @param exportMaskURI
   * @param directorToInitiatorIds
   * @param idToInitiatorMap
   * @param portWwnToClusterMap
   * @param vplex
   * @param array
   * @param clusterId
   * @param stepId
   */
  public void updateZoningMapAndvalidateExportMask(
      URI varrayURI,
      Map<URI, List<StoragePort>> initiatorPortMap,
      URI exportMaskURI,
      Map<String, Set<String>> directorToInitiatorIds,
      Map<String, Initiator> idToInitiatorMap,
      Map<String, String> portWwnToClusterMap,
      StorageSystem vplex,
      StorageSystem array,
      String clusterId,
      String stepId) {

    try {
      WorkflowStepCompleter.stepExecuting(stepId);
      // Export Mask is updated, read it from DB
      ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);

      // First step would be to update the zoning map based on the connectivity
      updateZoningMap(initiatorPortMap, directorToInitiatorIds, exportMask);

      boolean passed =
          VPlexBackEndOrchestratorUtil.validateExportMask(
              varrayURI,
              initiatorPortMap,
              exportMask,
              null,
              directorToInitiatorIds,
              idToInitiatorMap,
              _dbClient,
              portWwnToClusterMap);

      if (!passed) {
        // Mark this mask as inactive, so that we don't pick it in the next iteration
        exportMask.setInactive(Boolean.TRUE);
        _dbClient.persistObject(exportMask);

        _log.error("Export Mask is not suitable for VPLEX to backend storage system");
        WorkflowStepCompleter.stepFailed(
            stepId,
            VPlexApiException.exceptions.couldNotFindValidArrayExportMask(
                vplex.getNativeGuid(), array.getNativeGuid(), clusterId));
        throw VPlexApiException.exceptions.couldNotFindValidArrayExportMask(
            vplex.getNativeGuid(), array.getNativeGuid(), clusterId);
      }

      WorkflowStepCompleter.stepSucceded(stepId);

    } catch (Exception ex) {
      _log.error("Failed to validate export mask for cinder: ", ex);
      VPlexApiException vplexex =
          DeviceControllerExceptions.vplex.failedToValidateExportMask(exportMaskURI.toString(), ex);
      WorkflowStepCompleter.stepFailed(stepId, vplexex);
    }
  }
Code Example #13
  /**
   * This method cleans up UnManagedVolumes in the DB that have been deleted manually from the
   * array:
   *
   * <ol>
   *   <li>Get all UnManagedVolumes from the DB.
   *   <li>Store the URIs of the unmanaged volumes returned from the provider in
   *       unManagedVolumesBookKeepingList.
   *   <li>If an unmanaged volume is found only in the DB, but not in
   *       unManagedVolumesBookKeepingList, then set the unmanaged volume to inactive.
   * </ol>
   *
   * <p>DB | Provider
   *
   * <p>x,y,z | y,z,a [a --> new entry has been added but indexes didn't get added yet into DB]
   *
   * <p>x --> will be set to inactive
   *
   * @param storageSystem
   * @param discoveredUnManagedVolumes
   * @param dbClient
   * @param partitionManager
   */
  public static void markInActiveUnManagedVolumes(
      StorageSystem storageSystem,
      Set<URI> discoveredUnManagedVolumes,
      DbClient dbClient,
      PartitionManager partitionManager) {

    _log.info(
        " -- Processing {} discovered UnManaged Volumes Objects from -- {}",
        discoveredUnManagedVolumes.size(),
        storageSystem.getLabel());
    if (discoveredUnManagedVolumes.isEmpty()) {
      return;
    }
    // Get all available existing unmanaged Volume URIs for this array from DB
    URIQueryResultList allAvailableUnManagedVolumesInDB = new URIQueryResultList();
    dbClient.queryByConstraint(
        ContainmentConstraint.Factory.getStorageDeviceUnManagedVolumeConstraint(
            storageSystem.getId()),
        allAvailableUnManagedVolumesInDB);

    Set<URI> unManagedVolumesInDBSet = new HashSet<URI>();
    Iterator<URI> allAvailableUnManagedVolumesItr = allAvailableUnManagedVolumesInDB.iterator();
    while (allAvailableUnManagedVolumesItr.hasNext()) {
      unManagedVolumesInDBSet.add(allAvailableUnManagedVolumesItr.next());
    }

    SetView<URI> onlyAvailableinDB =
        Sets.difference(unManagedVolumesInDBSet, discoveredUnManagedVolumes);

    _log.info("Diff :" + Joiner.on("\t").join(onlyAvailableinDB));
    if (!onlyAvailableinDB.isEmpty()) {
      List<UnManagedVolume> unManagedVolumeTobeDeleted = new ArrayList<UnManagedVolume>();
      Iterator<UnManagedVolume> unManagedVolumes =
          dbClient.queryIterativeObjects(
              UnManagedVolume.class, new ArrayList<URI>(onlyAvailableinDB));

      while (unManagedVolumes.hasNext()) {
        UnManagedVolume volume = unManagedVolumes.next();
        if (null == volume || volume.getInactive()) {
          continue;
        }

        _log.info("Setting unManagedVolume {} inactive", volume.getId());
        volume.setStoragePoolUri(NullColumnValueGetter.getNullURI());
        volume.setStorageSystemUri(NullColumnValueGetter.getNullURI());
        volume.setInactive(true);
        unManagedVolumeTobeDeleted.add(volume);
      }
      if (!unManagedVolumeTobeDeleted.isEmpty()) {
        partitionManager.updateAndReIndexInBatches(
            unManagedVolumeTobeDeleted, 1000, dbClient, UNMANAGED_VOLUME);
      }
    }
  }
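To make the DB-versus-provider bookkeeping described above concrete, here is a small stand-alone sketch of the same Guava Sets.difference step; the class name and URIs are invented for illustration.

import java.net.URI;
import java.util.HashSet;
import java.util.Set;

import com.google.common.collect.Sets;

public class UnManagedVolumeDiffExample {
  public static void main(String[] args) {
    // Unmanaged volumes currently known to the DB: x, y, z.
    Set<URI> unManagedVolumesInDB = new HashSet<URI>();
    unManagedVolumesInDB.add(URI.create("urn:example:x"));
    unManagedVolumesInDB.add(URI.create("urn:example:y"));
    unManagedVolumesInDB.add(URI.create("urn:example:z"));

    // Unmanaged volumes just reported by the provider: y, z, a.
    Set<URI> discoveredOnProvider = new HashSet<URI>();
    discoveredOnProvider.add(URI.create("urn:example:y"));
    discoveredOnProvider.add(URI.create("urn:example:z"));
    discoveredOnProvider.add(URI.create("urn:example:a"));

    // Only "x" is in the DB but no longer on the provider, so only "x" would be marked inactive.
    Set<URI> onlyAvailableInDB = Sets.difference(unManagedVolumesInDB, discoveredOnProvider);
    System.out.println(onlyAvailableInDB); // prints [urn:example:x]
  }
}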
Code Example #14
 /**
  * Create block volumes and associated local array consistency group.
  *
  * @throws Exception
  */
 private void prepareLocalArrayConsistencyGroupData() throws Exception {
   // Create a non-VPlex storage system
   StorageSystem storageSystem = createStorageSystem(false);
   // Create the block volumes that will be part of the cg
   List<Volume> blockVolumes = createBlockVolumes("blockVolume", 3, storageSystem.getId());
   // Create the consistency group and add the block volumes
   BlockConsistencyGroup localArrayCg =
       createBlockConsistencyGroup(
           "localArrayCg", storageSystem.getId(), Types.LOCAL.name(), true);
   localArrayConsistencyGroupURI = localArrayCg.getId();
   addVolumesToBlockConsistencyGroup(localArrayCg.getId(), blockVolumes);
 }
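The createBlockVolumes helper used above is not part of this listing; the following is only a plausible sketch of such a helper, assembled from the same Volume and DbClient calls shown in the first example, not the project's actual implementation.

  // Hypothetical helper: creates 'count' volumes on the given storage system and persists them.
  private List<Volume> createBlockVolumes(String baseLabel, int count, URI storageSystemURI) {
    List<Volume> volumes = new ArrayList<Volume>();
    for (int i = 1; i <= count; i++) {
      Volume volume = new Volume();
      volume.setId(URIUtil.createId(Volume.class));
      volume.setLabel(baseLabel + i);
      volume.setStorageController(storageSystemURI);
      _dbClient.createObject(volume);
      volumes.add(volume);
    }
    return volumes;
  }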
Code Example #15
  /**
   * Detaches volumes from initiators.
   *
   * @param storage the storage
   * @param volumes the volumes
   * @param initiators the initiators
   * @throws Exception the exception
   */
  private void detachVolumesFromInitiators(
      StorageSystem storage, List<Volume> volumes, List<Initiator> initiators) throws Exception {
    CinderEndPointInfo ep = CinderUtils.getCinderEndPoint(storage.getActiveProviderURI(), dbClient);
    log.debug("Getting the cinder APi for the provider with id {}", storage.getActiveProviderURI());
    CinderApi cinderApi = cinderApiFactory.getApi(storage.getActiveProviderURI(), ep);

    List<Initiator> iSCSIInitiators = new ArrayList<Initiator>();
    List<Initiator> fcInitiators = new ArrayList<Initiator>();
    splitInitiatorsByProtocol(initiators, iSCSIInitiators, fcInitiators);
    String host = getHostNameFromInitiators(initiators);

    Map<String, String[]> mapSettingVsValues = getFCInitiatorsArray(fcInitiators);
    String[] fcInitiatorsWwpns = mapSettingVsValues.get(WWPNS);
    String[] fcInitiatorsWwnns = mapSettingVsValues.get(WWNNS);

    for (Volume volume : volumes) {
      // cinder generated volume ID
      String volumeId = volume.getNativeId();

      // for iSCSI
      for (Initiator initiator : iSCSIInitiators) {
        String initiatorPort = initiator.getInitiatorPort();
        log.debug(
            String.format(
                "Detaching volume %s ( %s ) from initiator %s on Openstack cinder node",
                volumeId, volume.getId(), initiatorPort));
        cinderApi.detachVolume(volumeId, initiatorPort, null, null, host);

        // TODO : Do not use Job to poll status till we figure out how
        // to get detach status.
        /*
         * CinderJob detachJob = new CinderDetachVolumeJob(volumeId,
         * volume.getLabel(), storage.getId(),
         * CinderConstants.ComponentType.volume.name(), ep,
         * taskCompleter); ControllerServiceImpl.enqueueJob(new
         * QueueJob(detachJob));
         */
      }

      // for FC
      if (fcInitiatorsWwpns.length > 0) {
        log.debug(
            String.format(
                "Detaching volume %s ( %s ) from initiator %s on Openstack cinder node",
                volumeId, volume.getId(), fcInitiatorsWwpns));
        cinderApi.detachVolume(volumeId, null, fcInitiatorsWwpns, fcInitiatorsWwnns, host);
      }

      // If ITLs are added, remove them
      removeITLsFromVolume(volume);
    }
  }
Code Example #16
  /**
   * Invokes the FC or iSCSI ports operation based on the type of the export/attach operation
   *
   * @param attachResponse
   * @throws IOException
   */
  public void invoke(VolumeAttachResponse attachResponse) {
    logger.info(
        "Cinder Storage Port Invoke Operation Started for" + " the storage system : {}",
        storageSystem.getId());

    synchronized (this) {
      try {

        // Get the transport type
        String protocolType = attachResponse.connection_info.driver_volume_type;

        Map<String, List<String>> initiatorTargetMap = null;
        if (CinderConstants.ATTACH_RESPONSE_FC_TYPE.equalsIgnoreCase(protocolType)) {
          initiatorTargetMap = attachResponse.connection_info.data.initiator_target_map;
          if (null != initiatorTargetMap && !initiatorTargetMap.isEmpty()) {
            logger.debug("FC Initiator and Target mappings : {} ", initiatorTargetMap.toString());
            performFCOperation(initiatorTargetMap);
          }
        }

        String iqn = null;
        if (CinderConstants.ATTACH_RESPONSE_ISCSI_TYPE.equalsIgnoreCase(protocolType)) {
          iqn = attachResponse.connection_info.data.target_iqn;
          logger.debug("iSCSI target IQN is :{}", iqn);
          performISCSIOperation(iqn);
        }

        // Update the port to network associations for modified ports and newly created ports.
        if (!modifiedStoragePortsList.isEmpty()) {
          StoragePortAssociationHelper.updatePortAssociations(modifiedStoragePortsList, dbClient);
        }

        if (!newStoragePortsList.isEmpty()) {
          StoragePortAssociationHelper.updatePortAssociations(newStoragePortsList, dbClient);
        }
      } catch (Exception e) {
        logger.error(
            "There is an error while creating/modifying ports after export/attach,"
                + " Reason:"
                + e.getMessage(),
            e);
      } finally {
        // clear modified and new ports list
        modifiedStoragePortsList.clear();
        newStoragePortsList.clear();
      }
    }

    logger.info(
        "Cinder Storage Port Invoke Operation completed for" + " the storage system :{} ",
        storageSystem.getId());
  }
Code Example #17
 /*
  * (non-Javadoc)
  *
  * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExportAddInitiator(com.emc.storageos.db.client.model.StorageSystem,
  * com.emc.storageos.db.client.model.ExportMask, com.emc.storageos.db.client.model.Initiator, java.util.List,
  * com.emc.storageos.volumecontroller.TaskCompleter)
  */
 @Override
 public void doExportAddInitiator(
     StorageSystem storage,
     ExportMask exportMask,
     Initiator initiator,
     List<URI> targets,
     TaskCompleter taskCompleter)
     throws DeviceControllerException {
   log.info("{} doExportAddInitiator START ...", storage.getSerialNumber());
   exportMaskOperationsHelper.addInitiator(
       storage, exportMask.getId(), Arrays.asList(initiator), targets, taskCompleter);
   log.info("{} doExportAddInitiator END ...", storage.getSerialNumber());
 }
Code Example #18
 /*
  * (non-Javadoc)
  *
  * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExportRemoveInitiators(com.emc.storageos.db.client.model.StorageSystem,
  * com.emc.storageos.db.client.model.ExportMask, java.util.List, java.util.List, com.emc.storageos.volumecontroller.TaskCompleter)
  */
 @Override
 public void doExportRemoveInitiators(
     StorageSystem storage,
     ExportMask exportMask,
     List<Initiator> initiators,
     List<URI> targets,
     TaskCompleter taskCompleter)
     throws DeviceControllerException {
   log.info("{} doExportRemoveInitiator START ...", storage.getSerialNumber());
   exportMaskOperationsHelper.removeInitiator(
       storage, exportMask.getId(), initiators, targets, taskCompleter);
   log.info("{} doExportRemoveInitiator END ...", storage.getSerialNumber());
 }
Code Example #19
  /**
   * Gets the instance from the map if already created, otherwise creates one.
   *
   * @param system the storage system for which the operations instance is needed
   * @param dbc the database client
   * @return the CinderStoragePortOperations instance for the given storage system
   */
  public static CinderStoragePortOperations getInstance(StorageSystem system, DbClient dbc) {
    CinderStoragePortOperations instance = instancesMap.get(system.getId());
    if (null == instance) {
      synchronized (instancesMap) {
        // Re-check the map inside the lock so concurrent callers do not create duplicate instances.
        instance = instancesMap.get(system.getId());
        if (null == instance) {
          instance = new CinderStoragePortOperations(system, dbc);
          instancesMap.put(system.getId(), instance);
        }
      }
    }

    return instance;
  }
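The null re-check inside the synchronized block is the classic double-checked initialization pattern. On Java 8 or later the same per-system caching could be written without explicit locking if instancesMap were a ConcurrentHashMap; the field declaration below is an assumption, since the excerpt does not show it.

  // Assumed field (not shown in the excerpt above):
  // private static final ConcurrentHashMap<URI, CinderStoragePortOperations> instancesMap =
  //     new ConcurrentHashMap<URI, CinderStoragePortOperations>();

  public static CinderStoragePortOperations getInstance(StorageSystem system, DbClient dbc) {
    // computeIfAbsent creates the instance at most once per storage system id.
    return instancesMap.computeIfAbsent(
        system.getId(), id -> new CinderStoragePortOperations(system, dbc));
  }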
Code Example #20
  /**
   * Include only Unified, Virtual [Thin] and Device Storage Pools (Thick Pool).
   *
   * @param poolInstance
   * @param system
   * @return String[] array with the pool class name (as the first element) and the supported
   *     volume types (as the second element), or null if the pool class is not supported
   */
  private String[] determinePoolClassNameAndSupportedVolumeTypes(
      CIMInstance poolInstance, StorageSystem system) {

    if (StoragePool.PoolClassNames.Clar_DeviceStoragePool.toString()
        .equalsIgnoreCase(poolInstance.getClassName())) {
      return new String[] {
        StoragePool.PoolClassNames.Clar_DeviceStoragePool.toString(),
        StoragePool.SupportedResourceTypes.THICK_ONLY.toString()
      };
    } else if (StoragePool.PoolClassNames.Clar_UnifiedStoragePool.toString()
        .equalsIgnoreCase(poolInstance.getClassName())) {
      return new String[] {
        StoragePool.PoolClassNames.Clar_UnifiedStoragePool.toString(),
        StoragePool.SupportedResourceTypes.THIN_AND_THICK.toString()
      };
    }

    if (!system.checkIfVmax3()) {
      if (StoragePool.PoolClassNames.Symm_DeviceStoragePool.toString()
              .equalsIgnoreCase(poolInstance.getClassName())
          && !SupportedProvisioningTypes.THIN
              .toString()
              .equalsIgnoreCase(system.getSupportedProvisioningType())) {
        return new String[] {
          StoragePool.PoolClassNames.Symm_DeviceStoragePool.toString(),
          StoragePool.SupportedResourceTypes.THICK_ONLY.toString()
        };
      } else if (StoragePool.PoolClassNames.Symm_VirtualProvisioningPool.toString()
              .equalsIgnoreCase(poolInstance.getClassName())
          && !SupportedProvisioningTypes.THICK
              .toString()
              .equalsIgnoreCase(system.getSupportedProvisioningType())) {
        return new String[] {
          StoragePool.PoolClassNames.Symm_VirtualProvisioningPool.toString(),
          StoragePool.SupportedResourceTypes.THIN_ONLY.toString()
        };
      }
    } else {
      // VMAX3 has Storage Resource Pools (SRP). These are composed of ThinPools, which we can
      // discover, but would not have write access to. So, we will only discover SRP pools
      // and skip over other pool discoveries.
      if (StoragePool.PoolClassNames.Symm_SRPStoragePool.toString()
          .equalsIgnoreCase(poolInstance.getClassName())) {
        return new String[] {
          StoragePool.PoolClassNames.Symm_SRPStoragePool.toString(),
          StoragePool.SupportedResourceTypes.THIN_ONLY.toString()
        };
      }
    }
    return null;
  }
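A minimal sketch of how a discovery caller might consume the two-element result; poolInstance and system are assumed to come from the surrounding SMI-S discovery context.

    // Illustrative only.
    String[] poolClassAndTypes = determinePoolClassNameAndSupportedVolumeTypes(poolInstance, system);
    if (poolClassAndTypes != null) {
      String poolClassName = poolClassAndTypes[0]; // e.g. Clar_UnifiedStoragePool
      String supportedVolumeTypes = poolClassAndTypes[1]; // e.g. THIN_AND_THICK
      // ... continue pool discovery with the resolved class name and volume types ...
    } else {
      // Unsupported pool class for this system type; skip this pool.
    }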
Code Example #21
  /**
   * Checks the UnManaged Volume's policy with vPool's policy.
   *
   * @param vPool the vPool
   * @param autoTierPolicyId the auto tier policy id on unmanaged volume
   * @param system the system
   * @return true, if matching, false otherwise
   */
  public static boolean checkVPoolValidForUnManagedVolumeAutoTieringPolicy(
      VirtualPool vPool, String autoTierPolicyId, StorageSystem system) {

    _log.debug("Policy Id: {}, vPool: {}", autoTierPolicyId, vPool);
    boolean policyMatching = false;
    String policyIdfromVPool = vPool.getAutoTierPolicyName();
    if (autoTierPolicyId != null) {
      if (policyIdfromVPool != null) {
        if (vPool.getUniquePolicyNames()
            || DiscoveredDataObject.Type.vnxblock.name().equalsIgnoreCase(system.getSystemType())) {
          // Unique Policy names field will not be set for VNX. vPool will have policy name, not the
          // policy's nativeGuid
          policyIdfromVPool =
              NativeGUIDGenerator.generateAutoTierPolicyNativeGuid(
                  system.getNativeGuid(),
                  policyIdfromVPool,
                  NativeGUIDGenerator.getTieringPolicyKeyForSystem(system));
          _log.debug("Policy Id generated: {}", policyIdfromVPool);
        }
        if (autoTierPolicyId.equalsIgnoreCase(policyIdfromVPool)) {
          policyMatching = true;
        }
      }
    } else if ((policyIdfromVPool == null) || (policyIdfromVPool.equalsIgnoreCase("none"))) {
      // if policy is not set in both unmanaged volume and vPool. Note
      // that the value in the vpool could be set to "none".
      policyMatching = true;
    }

    // Default policy for VNX - match volume with default policy to vPool with no policy as well
    if (!policyMatching
        && DiscoveredDataObject.Type.vnxblock.name().equalsIgnoreCase(system.getSystemType())) {
      if (autoTierPolicyId != null
          && autoTierPolicyId.contains(VnxFastPolicy.DEFAULT_START_HIGH_THEN_AUTOTIER.name())
          && policyIdfromVPool == null) {
        policyMatching = true;
      }
    }

    // Default policy for HDS - match volume with default policy to vPool with no policy as well
    if (!policyMatching
        && DiscoveredDataObject.Type.hds.name().equalsIgnoreCase(system.getSystemType())) {
      if (autoTierPolicyId != null
          && autoTierPolicyId.contains(HitachiTieringPolicy.All.name())
          && policyIdfromVPool == null) {
        policyMatching = true;
      }
    }

    return policyMatching;
  }
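During unmanaged volume ingestion this check would typically decide whether a vPool is offered as a match; a hedged sketch of such a call site follows, with illustrative variable names.

    // Illustrative call site: autoTierPolicyId is read from the unmanaged volume's discovered properties.
    if (!checkVPoolValidForUnManagedVolumeAutoTieringPolicy(vPool, autoTierPolicyId, system)) {
      _log.info("Auto-tiering policy {} does not match vPool {}; skipping this vPool.",
          autoTierPolicyId, vPool);
      // Do not offer this vPool as a candidate for the unmanaged volume.
    }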
Code Example #22
  @Override
  public void deleteExportMask(
      StorageSystem storage,
      URI exportMaskId,
      List<URI> volumeURIList,
      List<URI> targetURIList,
      List<Initiator> initiatorList,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} deleteExportMask START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);

    try {
      // There is no masking concept on Cinder to delete the export mask.
      // But before marking the task completer as ready,
      // detach the volumes from the initiators that are there in the export mask.
      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumeList = new ArrayList<Volume>();
      StringMap volumes = exportMask.getUserAddedVolumes();
      StringMap initiators = exportMask.getUserAddedInitiators();
      if (volumes != null) {
        for (String vol : volumes.values()) {
          URI volumeURI = URI.create(vol);
          volumeURIList.add(volumeURI);
          Volume volume = dbClient.queryObject(Volume.class, volumeURI);
          volumeList.add(volume);
        }
      }
      if (initiators != null) {
        for (String ini : initiators.values()) {
          Initiator initiatorObj = dbClient.queryObject(Initiator.class, URI.create(ini));
          initiatorList.add(initiatorObj);
        }
      }

      log.info("deleteExportMask: volumes:  {}", volumeURIList);
      log.info("deleteExportMask: assignments: {}", targetURIList);
      log.info("deleteExportMask: initiators: {}", initiatorList);

      detachVolumesFromInitiators(storage, volumeList, initiatorList);

      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in DetachVolumes: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doDetachVolumes", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} deleteExportMask END...", storage.getSerialNumber());
  }
Code Example #23
 /*
  * (non-Javadoc)
  *
  * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExportGroupDelete(com.emc.storageos.db.client.model.StorageSystem,
  * com.emc.storageos.db.client.model.ExportMask, com.emc.storageos.volumecontroller.TaskCompleter)
  */
 @Override
 public void doExportGroupDelete(
     StorageSystem storage, ExportMask exportMask, TaskCompleter taskCompleter)
     throws DeviceControllerException {
   log.info("{} doExportGroupDelete START ...", storage.getSerialNumber());
   exportMaskOperationsHelper.deleteExportMask(
       storage,
       exportMask.getId(),
       new ArrayList<URI>(),
       new ArrayList<URI>(),
       new ArrayList<Initiator>(),
       taskCompleter);
   log.info("{} doExportGroupDelete END ...", storage.getSerialNumber());
 }
Code Example #24
  /**
   * Determines if the storage system for the passed BlockSnapshot instance supports snapshot
   * sessions.
   *
   * @param snapshot A reference to the snapshot.
   * @return true if the system for the passed snapshot supports snapshot sessions, false otherwise.
   */
  private boolean isSnapshotSessionSupported(BlockSnapshot snapshot) {
    boolean isSupported = false;
    URI systemURI = snapshot.getStorageController();
    StorageSystem system = dbClient.queryObject(StorageSystem.class, systemURI);
    if ((system != null) && (system.checkIfVmax3())) {
      s_logger.info(
          "BlockSnapshotSession supported for snapshot {}:{}",
          snapshot.getId(),
          snapshot.getLabel());
      isSupported = true;
    }

    return isSupported;
  }
Code Example #25
  /**
   * Prepare the VPlex volumes and associated consistency group data.
   *
   * @throws Exception
   */
  private void prepareVPlexConsistencyGroupData() throws Exception {
    // Create a VPlex storage system
    StorageSystem storageSystem = createStorageSystem(true);

    // Create the VPlex volumes and add them to the VPlex consistency group
    List<Volume> vplexVolumes = createVPlexVolumes("vplexVolume", 3, storageSystem.getId());
    // Prior to 2.2, VPLEX-only CGs (nothing to do with RP) did not set the CG type to VPLEX, so
    // we pass false here.
    BlockConsistencyGroup vplexCg =
        createBlockConsistencyGroup("vplexCg", storageSystem.getId(), Types.VPLEX.name(), false);
    // Save a reference to the cg for migration verification
    vplexConsistencyGroupURI = vplexCg.getId();
    // Add the VPlex volumes to the VPlex consistency group
    addVolumesToBlockConsistencyGroup(vplexCg.getId(), vplexVolumes);
  }
Code Example #26
  @Override
  public void doExportAddVolumes(
      StorageSystem storage,
      ExportMask exportMask,
      Map<URI, Integer> volumes,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} doExportAddVolume START ...", storage.getSerialNumber());
    VolumeURIHLU[] volumeLunArray =
        ControllerUtils.getVolumeURIHLUArray(storage.getSystemType(), volumes, dbClient);

    exportMaskOperationsHelper.addVolume(
        storage, exportMask.getId(), volumeLunArray, taskCompleter);
    log.info("{} doExportAddVolume END ...", storage.getSerialNumber());
  }
Code Example #27
 @Override
 public void fractureSingleVolumeMirror(
     StorageSystem storage, URI mirror, Boolean sync, TaskCompleter taskCompleter)
     throws DeviceControllerException {
   _log.info("fractureSingleVolumeMirror operation START");
   CloseableIterator<CIMObjectPath> storageSyncRefs = null;
   try {
     BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
     CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
     // Get reference to the CIM_StorageSynchronized instance
     storageSyncRefs =
         _helper.getReference(storage, mirrorPath, SmisConstants.CIM_STORAGE_SYNCHRONIZED, null);
     boolean isVmax3 = storage.checkIfVmax3();
     while (storageSyncRefs.hasNext()) {
       CIMObjectPath storageSync = storageSyncRefs.next();
       CIMArgument[] inArgs =
           isVmax3
               ? _helper.getFractureMirrorInputArgumentsWithCopyState(storageSync, sync)
               : _helper.getFractureMirrorInputArguments(storageSync, sync);
       CIMArgument[] outArgs = new CIMArgument[5];
       // Invoke method to fracture the synchronization
       _helper.callModifyReplica(storage, inArgs, outArgs);
       taskCompleter.ready(_dbClient);
     }
   } catch (Exception e) {
     _log.info("Problem making SMI-S call", e);
     ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
     taskCompleter.error(_dbClient, serviceError);
   } finally {
     if (storageSyncRefs != null) {
       storageSyncRefs.close();
     }
   }
 }
Code Example #28
  @Override
  public void deleteSingleVolumeSnapshot(
      StorageSystem storage, URI snapshot, TaskCompleter taskCompleter)
      throws DeviceControllerException {

    try {
      BlockSnapshot snap = _dbClient.queryObject(BlockSnapshot.class, snapshot);
      VNXeApiClient apiClient = getVnxeClient(storage);
      VNXeLunSnap lunSnap = apiClient.getLunSnapshot(snap.getNativeId());
      if (lunSnap != null) {
        VNXeCommandJob job = apiClient.deleteLunSnap(lunSnap.getId());
        if (job != null) {
          ControllerServiceImpl.enqueueJob(
              new QueueJob(
                  new VNXeBlockDeleteSnapshotJob(job.getId(), storage.getId(), taskCompleter)));
        }
      } else {
        // Perhaps, it's already been deleted or was deleted on the array.
        // In that case, we'll just say all is well, so that this operation
        // is idempotent.
        snap.setInactive(true);
        snap.setIsSyncActive(false);
        _dbClient.updateObject(snap);
        taskCompleter.ready(_dbClient);
      }
    } catch (VNXeException e) {
      _log.error("Delete volume snapshot got the exception", e);
      taskCompleter.error(_dbClient, e);
    } catch (Exception ex) {
      _log.error("Delete volume snapshot got the exception", ex);
      ServiceError error = DeviceControllerErrors.vnxe.jobFailed("DeleteSnapshot", ex.getMessage());
      taskCompleter.error(_dbClient, error);
    }
  }
Code Example #29
 /*
  * (non-Javadoc)
  *
  * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExportGroupCreate(com.emc.storageos.db.client.model.StorageSystem,
  * com.emc.storageos.db.client.model.ExportMask, java.util.Map, java.util.List, java.util.List,
  * com.emc.storageos.volumecontroller.TaskCompleter)
  */
 @Override
 public void doExportGroupCreate(
     StorageSystem storage,
     ExportMask exportMask,
     Map<URI, Integer> volumeMap,
     List<Initiator> initiators,
     List<URI> targets,
     TaskCompleter taskCompleter)
     throws DeviceControllerException {
   log.info("{} doExportGroupCreate START ...", storage.getSerialNumber());
   VolumeURIHLU[] volumeLunArray =
       ControllerUtils.getVolumeURIHLUArray(storage.getSystemType(), volumeMap, dbClient);
   exportMaskOperationsHelper.createExportMask(
       storage, exportMask.getId(), volumeLunArray, targets, initiators, taskCompleter);
   log.info("{} doExportGroupCreate END ...", storage.getSerialNumber());
 }
Code Example #30
  /** {@inheritDoc} */
  @Override
  public void validateFullCopyCreateRequest(List<BlockObject> fcSourceObjList, int count) {
    if (fcSourceObjList.size() > 0) {
      URI fcSourceObjURI = fcSourceObjList.get(0).getId();
      if (URIUtil.isType(fcSourceObjURI, BlockSnapshot.class)) {
        // Currently you cannot create a full copy of a VPLEX snapshot.
        throw APIException.badRequests.cantCreateFullCopyForVPlexSnapshot();
      } else {
        // Call super first.
        super.validateFullCopyCreateRequest(fcSourceObjList, count);

        // Platform specific checks.
        for (BlockObject fcSourceObj : fcSourceObjList) {
          Volume fcSourceVolume = (Volume) fcSourceObj;
          StorageSystem system =
              _dbClient.queryObject(StorageSystem.class, fcSourceVolume.getStorageController());
          if (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType())) {
            // If the volume is a VPLEX volume, then we need to be sure that the
            // storage pool of the source backend volume of the VPLEX volume,
            // which is the volume used to create the native full copy, supports
            // full copies.
            Volume srcBackendVolume =
                VPlexUtil.getVPLEXBackendVolume(fcSourceVolume, true, _dbClient, true);
            StoragePool storagePool =
                _dbClient.queryObject(StoragePool.class, srcBackendVolume.getPool());
            verifyFullCopySupportedForStoragePool(storagePool);

            // If the full copy source is itself a full copy, it is not
            // detached, and the native full copy i.e., the source side
            // backend volume, is VNX, then creating a full copy of the
            // volume will fail. As such, we prevent it.
            if ((BlockFullCopyUtils.isVolumeFullCopy(fcSourceVolume, _dbClient))
                && (!BlockFullCopyUtils.isFullCopyDetached(fcSourceVolume, _dbClient))) {
              URI backendSystemURI = srcBackendVolume.getStorageController();
              StorageSystem backendSystem =
                  _dbClient.queryObject(StorageSystem.class, backendSystemURI);
              if (DiscoveredDataObject.Type.vnxblock.name().equals(backendSystem.getSystemType())) {
                throw APIException.badRequests.cantCreateFullCopyOfVPlexFullCopyUsingVNX();
              }
            }
          }
        }
      }
    }
  }