@Override
  public void deleteSingleVolumeSnapshot(
      StorageSystem storage, URI snapshot, TaskCompleter taskCompleter)
      throws DeviceControllerException {

    try {
      BlockSnapshot snap = _dbClient.queryObject(BlockSnapshot.class, snapshot);
      VNXeApiClient apiClient = getVnxeClient(storage);
      VNXeLunSnap lunSnap = apiClient.getLunSnapshot(snap.getNativeId());
      if (lunSnap != null) {
        VNXeCommandJob job = apiClient.deleteLunSnap(lunSnap.getId());
        if (job != null) {
          ControllerServiceImpl.enqueueJob(
              new QueueJob(
                  new VNXeBlockDeleteSnapshotJob(job.getId(), storage.getId(), taskCompleter)));
        }
      } else {
        // Perhaps, it's already been deleted or was deleted on the array.
        // In that case, we'll just say all is well, so that this operation
        // is idempotent.
        snap.setInactive(true);
        snap.setIsSyncActive(false);
        _dbClient.updateObject(snap);
        taskCompleter.ready(_dbClient);
      }
    } catch (VNXeException e) {
      _log.error("Delete volume snapshot got the exception", e);
      taskCompleter.error(_dbClient, e);
    } catch (Exception ex) {
      _log.error("Delete volume snapshot got the exception", ex);
      ServiceError error = DeviceControllerErrors.vnxe.jobFailed("DeleteSnapshot", ex.getMessage());
      taskCompleter.error(_dbClient, error);
    }
  }
  @Override
  public void detachSingleVolumeMirror(
      StorageSystem storage, URI mirror, TaskCompleter taskCompleter)
      throws DeviceControllerException {
    _log.info("detachSingleVolumeMirror operation START");
    try {
      BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
      CIMArgument[] inArgs = _helper.getDetachSynchronizationInputArguments(storage, mirrorObj);
      CIMArgument[] outArgs = new CIMArgument[5];

      // Invoke method to detach the local mirror
      UnsignedInteger32 result =
          (UnsignedInteger32) _helper.callModifyReplica(storage, inArgs, outArgs);
      if (JOB_COMPLETED_NO_ERROR.equals(result)) {
        taskCompleter.ready(_dbClient);
      } else {
        String msg = String.format("SMI-S call returned unsuccessfully: %s", result);
        taskCompleter.error(_dbClient, DeviceControllerException.errors.smis.jobFailed(msg));
      }
    } catch (Exception e) {
      _log.error("Problem making SMI-S call: ", e);
      ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
      taskCompleter.error(_dbClient, serviceError);
    }
  }
  /*
   * (non-Javadoc)
   *
   * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExpandVolume(com.emc.storageos.db.client.model.StorageSystem,
   * com.emc.storageos.db.client.model.StoragePool, com.emc.storageos.db.client.model.Volume, java.lang.Long,
   * com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void doExpandVolume(
      StorageSystem storageSystem,
      StoragePool storagePool,
      Volume volume,
      Long size,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {

    log.info(
        String.format(
            "Expand Volume Start - Array: %s, Pool: %s, Volume: %s, New size: %d",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid(), volume.getLabel(), size));
    try {
      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storageSystem),
              storageSystem.getSmisUserName(),
              storageSystem.getSmisPassword());
      String systemObjectID = HDSUtils.getSystemObjectID(storageSystem);
      String asyncTaskMessageId = null;

      if (volume.getThinlyProvisioned()) {
        asyncTaskMessageId =
            hdsApiClient.modifyThinVolume(
                systemObjectID,
                HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storageSystem),
                size);
      }
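      // Note: only thinly provisioned volumes are expanded here; for thick volumes
      // asyncTaskMessageId remains null and no expand job is queued.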

      if (null != asyncTaskMessageId) {
        HDSJob expandVolumeJob =
            new HDSVolumeExpandJob(
                asyncTaskMessageId,
                storageSystem.getId(),
                storagePool.getId(),
                taskCompleter,
                "ExpandVolume");
        ControllerServiceImpl.enqueueJob(new QueueJob(expandVolumeJob));
      }
    } catch (final InternalException e) {
      log.error("Problem in doExpandVolume: ", e);
      taskCompleter.error(dbClient, e);
    } catch (final Exception e) {
      log.error("Problem in doExpandVolume: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.hds.methodFailed("doExpandVolume", e.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info(
        String.format(
            "Expand Volume End - Array: %s, Pool: %s, Volume: %s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid(), volume.getLabel()));
  }
  @Override
  public void deleteGroupSnapshots(StorageSystem storage, URI snapshot, TaskCompleter taskCompleter)
      throws DeviceControllerException {
    try {
      List<BlockSnapshot> snapshots =
          _dbClient.queryObject(BlockSnapshot.class, Arrays.asList(snapshot));
      BlockSnapshot snapshotObj = snapshots.get(0);

      VNXeApiClient apiClient = getVnxeClient(storage);
      VNXeLunGroupSnap lunGroupSnap =
          apiClient.getLunGroupSnapshot(snapshotObj.getReplicationGroupInstance());
      if (lunGroupSnap != null) {
        VNXeCommandJob job = apiClient.deleteLunGroupSnap(lunGroupSnap.getId());
        if (job != null) {
          ControllerServiceImpl.enqueueJob(
              new QueueJob(
                  new VNXeBlockDeleteSnapshotJob(job.getId(), storage.getId(), taskCompleter)));
        } else {
          // Should not take this path, but treat as an error if we do
          // happen to get a null job due to some error in the client.
          _log.error("Unexpected null job from VNXe client call to delete group snapshot.");
          ServiceCoded sc =
              DeviceControllerExceptions.vnxe.nullJobForDeleteGroupSnapshot(
                  snapshotObj.forDisplay(), snapshotObj.getReplicationGroupInstance());
          taskCompleter.error(_dbClient, sc);
        }
      } else {
        // Treat as in the single volume snapshot case and presume
        // the group snapshot has already been deleted.
        List<BlockSnapshot> grpSnapshots =
            ControllerUtils.getSnapshotsPartOfReplicationGroup(snapshotObj, _dbClient);
        for (BlockSnapshot grpSnapshot : grpSnapshots) {
          grpSnapshot.setInactive(true);
          grpSnapshot.setIsSyncActive(false);
        }
        _dbClient.updateObject(grpSnapshots);
        taskCompleter.ready(_dbClient);
      }
    } catch (VNXeException e) {
      _log.error("Delete group snapshot got the exception", e);
      taskCompleter.error(_dbClient, e);
    } catch (Exception ex) {
      _log.error("Delete group snapshot got the exception", ex);
      ServiceError error =
          DeviceControllerErrors.vnxe.jobFailed("DeletGroupSnapshot", ex.getMessage());
      taskCompleter.error(_dbClient, error);
    }
  }
 @Override
 public void fractureSingleVolumeMirror(
     StorageSystem storage, URI mirror, Boolean sync, TaskCompleter taskCompleter)
     throws DeviceControllerException {
   _log.info("fractureSingleVolumeMirror operation START");
   CloseableIterator<CIMObjectPath> storageSyncRefs = null;
   try {
     BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
     CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
     // Get reference to the CIM_StorageSynchronized instance
     storageSyncRefs =
         _helper.getReference(storage, mirrorPath, SmisConstants.CIM_STORAGE_SYNCHRONIZED, null);
     boolean isVmax3 = storage.checkIfVmax3();
     while (storageSyncRefs.hasNext()) {
       CIMObjectPath storageSync = storageSyncRefs.next();
       CIMArgument[] inArgs =
           isVmax3
               ? _helper.getFractureMirrorInputArgumentsWithCopyState(storageSync, sync)
               : _helper.getFractureMirrorInputArguments(storageSync, sync);
       CIMArgument[] outArgs = new CIMArgument[5];
       // Invoke method to fracture the synchronization
       _helper.callModifyReplica(storage, inArgs, outArgs);
       taskCompleter.ready(_dbClient);
     }
   } catch (Exception e) {
     _log.info("Problem making SMI-S call", e);
     ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
     taskCompleter.error(_dbClient, serviceError);
   } finally {
     if (storageSyncRefs != null) {
       storageSyncRefs.close();
     }
   }
 }
  @Override
  public void removeInitiator(
      StorageSystem storage,
      URI exportMaskId,
      List<Initiator> initiators,
      List<URI> targets,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} removeInitiator START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);
    log.info("removeInitiator: initiators : {}", initiators);
    log.info("removeInitiator: targets : {}", targets);

    try {

      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumeList = new ArrayList<Volume>();
      StringMap volumes = exportMask.getUserAddedVolumes();
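      // Collect the user-added volumes in the export mask; these will be detached
      // from the initiators being removed.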

      for (String vol : volumes.values()) {
        Volume volume = dbClient.queryObject(Volume.class, URI.create(vol));
        volumeList.add(volume);
      }

      detachVolumesFromInitiators(storage, volumeList, initiators);

      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in RemoveInitiators: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doRemoveInitiators", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} removeInitiator END...", storage.getSerialNumber());
  }
  @Override
  public void addVolume(
      StorageSystem storage,
      URI exportMaskId,
      VolumeURIHLU[] volumeURIHLUs,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} addVolume START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);
    log.info("addVolume: volume-HLU pairs: {}", volumeURIHLUs);
    log.info("User assigned HLUs will be ignored as Cinder does not support it.");

    try {

      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumes = new ArrayList<Volume>();
      List<Initiator> initiatorList = new ArrayList<Initiator>();
      // map to store target LUN id generated for each volume
      Map<URI, Integer> volumeToTargetLunMap = new HashMap<URI, Integer>();
      StringMap initiators = exportMask.getUserAddedInitiators();

      for (VolumeURIHLU volumeURIHLU : volumeURIHLUs) {
        URI volumeURI = volumeURIHLU.getVolumeURI();
        Volume volume = dbClient.queryObject(Volume.class, volumeURI);
        volumes.add(volume);
      }
      for (String ini : initiators.values()) {
        Initiator initiator = dbClient.queryObject(Initiator.class, URI.create(ini));
        initiatorList.add(initiator);
      }

      // Map to store volume to initiatorTargetMap
      Map<Volume, Map<String, List<String>>> volumeToFCInitiatorTargetMap =
          new HashMap<Volume, Map<String, List<String>>>();

      attachVolumesToInitiators(
          storage,
          volumes,
          initiatorList,
          volumeToTargetLunMap,
          volumeToFCInitiatorTargetMap,
          exportMask);

      // Update targets in the export mask
      if (!volumeToFCInitiatorTargetMap.isEmpty()) {
        updateTargetsInExportMask(
            storage, volumes.get(0), volumeToFCInitiatorTargetMap, initiatorList, exportMask);
      }
      updateTargetLunIdInExportMask(volumeToTargetLunMap, exportMask);
      dbClient.updateAndReindexObject(exportMask);

      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in AddVolumes: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doAddVolumes", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} addVolume END...", storage.getSerialNumber());
  }
  @Override
  public void deleteSingleVolumeMirror(
      StorageSystem storage, URI mirror, TaskCompleter taskCompleter)
      throws DeviceControllerException {
    _log.info("deleteSingleVolumeMirror operation START");
    try {
      BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
      if (storage.checkIfVmax3()) {
        _helper.removeVolumeFromParkingSLOStorageGroup(storage, mirrorObj.getNativeId(), false);
        _log.info(
            "Done invoking remove volume {} from parking SLO storage group",
            mirrorObj.getNativeId());
      }

      CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
      CIMObjectPath configSvcPath = _cimPath.getConfigSvcPath(storage);
      CIMArgument[] inArgs = _helper.getDeleteMirrorInputArguments(storage, mirrorPath);
      CIMArgument[] outArgs = new CIMArgument[5];
      _helper.invokeMethod(
          storage, configSvcPath, SmisConstants.RETURN_TO_STORAGE_POOL, inArgs, outArgs);
      CIMObjectPath job = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
      if (job != null) {
        ControllerServiceImpl.enqueueJob(
            new QueueJob(new SmisBlockDeleteMirrorJob(job, storage.getId(), taskCompleter)));
      }
    } catch (Exception e) {
      _log.info("Problem making SMI-S call: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
      taskCompleter.error(_dbClient, serviceError);
    }
  }
  /*
   * (non-Javadoc)
   *
   * @see
   * com.emc.storageos.volumecontroller.BlockStorageDevice#doWaitForSynchronized
   * (java.lang.Class, com.emc.storageos.db.client.model.StorageSystem,
   * java.net.URI, com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void doWaitForSynchronized(
      Class<? extends BlockObject> clazz,
      StorageSystem storageObj,
      URI target,
      TaskCompleter completer) {
    log.info("START waitForSynchronized for {}", target);

    try {
      Volume targetObj = dbClient.queryObject(Volume.class, target);
      // Source could be either Volume or BlockSnapshot
      BlockObject sourceObj = BlockObject.fetch(dbClient, targetObj.getAssociatedSourceVolume());

      // Splitting the pair forces the data to be synchronized;
      // once the split completes, the target is fully synchronized.
      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storageObj),
              storageObj.getSmisUserName(),
              storageObj.getSmisPassword());
      HDSApiProtectionManager hdsApiProtectionManager = hdsApiClient.getHdsApiProtectionManager();
      String replicationGroupObjectID = hdsApiProtectionManager.getReplicationGroupObjectId();
      ReplicationInfo replicationInfo =
          hdsApiProtectionManager.getReplicationInfoFromSystem(
                  sourceObj.getNativeId(), targetObj.getNativeId())
              .first;
      hdsApiProtectionManager.modifyShadowImagePair(
          replicationGroupObjectID,
          replicationInfo.getObjectID(),
          HDSApiProtectionManager.ShadowImageOperationType.split);

      // Update state in case we are waiting for synchronization
      // after creation of a new full copy that was not created
      // inactive.
      String state = targetObj.getReplicaState();
      if (!ReplicationState.SYNCHRONIZED.name().equals(state)) {
        targetObj.setSyncActive(true);
        targetObj.setReplicaState(ReplicationState.SYNCHRONIZED.name());
        dbClient.persistObject(targetObj);
      }

      // Queue job to wait for replication status to move to split.
      ControllerServiceImpl.enqueueJob(
          new QueueJob(
              new HDSReplicationSyncJob(
                  storageObj.getId(),
                  sourceObj.getNativeId(),
                  targetObj.getNativeId(),
                  ReplicationStatus.SPLIT,
                  completer)));
    } catch (Exception e) {
      log.error("Exception occurred while waiting for synchronization", e);
      ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
      completer.error(dbClient, serviceError);
    }
    log.info("completed doWaitForSynchronized");
  }
  @Override
  public void addInitiator(
      StorageSystem storage,
      URI exportMaskId,
      List<Initiator> initiators,
      List<URI> targets,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} addInitiator START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);
    log.info("addInitiator: initiators : {}", initiators);
    log.info("addInitiator: targets : {}", targets);

    try {

      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumeList = new ArrayList<Volume>();
      // map to store target LUN id generated for each volume
      Map<URI, Integer> volumeToTargetLunMap = new HashMap<URI, Integer>();
      StringMap volumes = exportMask.getUserAddedVolumes();
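      // Collect the user-added volumes in the export mask so the new initiators
      // can be attached to them.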

      for (String vol : volumes.values()) {
        Volume volume = dbClient.queryObject(Volume.class, URI.create(vol));
        volumeList.add(volume);
      }

      // Map to store volume to initiatorTargetMap
      Map<Volume, Map<String, List<String>>> volumeToFCInitiatorTargetMap =
          new HashMap<Volume, Map<String, List<String>>>();

      attachVolumesToInitiators(
          storage,
          volumeList,
          initiators,
          volumeToTargetLunMap,
          volumeToFCInitiatorTargetMap,
          exportMask);

      // Update targets in the export mask
      if (!volumeToFCInitiatorTargetMap.isEmpty()) {
        updateTargetsInExportMask(
            storage, volumeList.get(0), volumeToFCInitiatorTargetMap, initiators, exportMask);
        dbClient.updateAndReindexObject(exportMask);
      }

      // TODO: update volumeToTargetLunMap in the export mask?
      // Do we get a different LUN ID for the new initiators from the same host?
      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in AddInitiators: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doAddInitiators", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} addInitiator END...", storage.getSerialNumber());
  }
  @Override
  public void createGroupSnapshots(
      StorageSystem storage,
      List<URI> snapshotList,
      Boolean createInactive,
      Boolean readOnly,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    try {
      URI snapshot = snapshotList.get(0);
      BlockSnapshot snapshotObj = _dbClient.queryObject(BlockSnapshot.class, snapshot);

      Volume volume = _dbClient.queryObject(Volume.class, snapshotObj.getParent());
      TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, volume.getTenant().getURI());
      String tenantName = tenant.getLabel();
      String snapLabelToUse =
          _nameGenerator.generate(
              tenantName,
              snapshotObj.getLabel(),
              snapshot.toString(),
              '-',
              SmisConstants.MAX_SNAPSHOT_NAME_LENGTH);
      String groupName = getConsistencyGroupName(snapshotObj);
      VNXeApiClient apiClient = getVnxeClient(storage);
      VNXeCommandJob job = apiClient.createLunGroupSnap(groupName, snapLabelToUse);
      if (job != null) {
        ControllerServiceImpl.enqueueJob(
            new QueueJob(
                new VNXeBlockCreateCGSnapshotJob(
                    job.getId(), storage.getId(), !createInactive, taskCompleter)));
      }

    } catch (VNXeException e) {
      _log.error("Create volume snapshot got the exception", e);
      taskCompleter.error(_dbClient, e);

    } catch (Exception ex) {
      _log.error("Create volume snapshot got the exception", ex);
      ServiceError error =
          DeviceControllerErrors.vnxe.jobFailed("CreateCGSnapshot", ex.getMessage());
      taskCompleter.error(_dbClient, error);
    }
  }
  @Override
  public void deleteExportMask(
      StorageSystem storage,
      URI exportMaskId,
      List<URI> volumeURIList,
      List<URI> targetURIList,
      List<Initiator> initiatorList,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    log.info("{} deleteExportMask START...", storage.getSerialNumber());
    log.info("Export mask id: {}", exportMaskId);

    try {
      // Cinder has no masking concept, so there is no export mask to delete on the array.
      // Before marking the task completer as ready, detach the volumes from the
      // initiators that are in the export mask.
      ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
      List<Volume> volumeList = new ArrayList<Volume>();
      StringMap volumes = exportMask.getUserAddedVolumes();
      StringMap initiators = exportMask.getUserAddedInitiators();
      if (volumes != null) {
        for (String vol : volumes.values()) {
          URI volumeURI = URI.create(vol);
          volumeURIList.add(volumeURI);
          Volume volume = dbClient.queryObject(Volume.class, volumeURI);
          volumeList.add(volume);
        }
      }
      if (initiators != null) {
        for (String ini : initiators.values()) {
          Initiator initiatorObj = dbClient.queryObject(Initiator.class, URI.create(ini));
          initiatorList.add(initiatorObj);
        }
      }

      log.info("deleteExportMask: volumes:  {}", volumeURIList);
      log.info("deleteExportMask: assignments: {}", targetURIList);
      log.info("deleteExportMask: initiators: {}", initiatorList);

      detachVolumesFromInitiators(storage, volumeList, initiatorList);

      taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
      log.error("Problem in DetachVolumes: ", ex);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("doDetachVolumes", ex.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} deleteExportMask END...", storage.getSerialNumber());
  }
  @Override
  public void restoreGroupSnapshots(
      StorageSystem storage, URI volume, URI snapshot, TaskCompleter taskCompleter)
      throws DeviceControllerException {

    try {
      BlockSnapshot snapshotObj = _dbClient.queryObject(BlockSnapshot.class, snapshot);

      VNXeApiClient apiClient = getVnxeClient(storage);
      VNXeLunGroupSnap lunGroupSnap =
          apiClient.getLunGroupSnapshot(snapshotObj.getReplicationGroupInstance());
      // Error out if the snapshot is attached
      if (lunGroupSnap.getIsAttached()) {
        _log.error(
            "Snapshot {} is attached and cannot be used for restore", snapshotObj.getLabel());
        ServiceError error =
            DeviceControllerErrors.vnxe.cannotRestoreAttachedSnapshot(snapshot.toString());
        taskCompleter.error(_dbClient, error);
        return;
      }
      VNXeCommandJob job = apiClient.restoreLunGroupSnap(lunGroupSnap.getId());
      if (job != null) {
        ControllerServiceImpl.enqueueJob(
            new QueueJob(
                new VNXeBlockRestoreSnapshotJob(job.getId(), storage.getId(), taskCompleter)));
      }

    } catch (VNXeException e) {
      _log.error("Restore group snapshot got the exception", e);
      taskCompleter.error(_dbClient, e);

    } catch (Exception ex) {
      _log.error("Restore group snapshot got the exception", ex);
      ServiceError error =
          DeviceControllerErrors.vnxe.jobFailed("RestoreSnapshotJob", ex.getMessage());
      taskCompleter.error(_dbClient, error);
    }
  }
 @Override
 public void resumeSingleVolumeMirror(
     StorageSystem storage, URI mirror, TaskCompleter taskCompleter)
     throws DeviceControllerException {
   _log.info("resumeSingleVolumeMirror operation START");
   CloseableIterator<CIMObjectPath> storageSyncRefs = null;
   try {
     BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
     CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
     // Get reference to the CIM_StorageSynchronized instance
     storageSyncRefs =
         _helper.getReference(storage, mirrorPath, SmisConstants.CIM_STORAGE_SYNCHRONIZED, null);
     if (!storageSyncRefs.hasNext()) {
       _log.error("No synchronization instance found for {}", mirror);
       taskCompleter.error(
           _dbClient, DeviceControllerException.exceptions.resumeVolumeMirrorFailed(mirror));
       return;
     }
     boolean isVmax3 = storage.checkIfVmax3();
     while (storageSyncRefs.hasNext()) {
       CIMObjectPath storageSync = storageSyncRefs.next();
       _log.debug(storageSync.toString());
        /**
         * JIRA CTRL-11855: A mirror was created and paused with SMI-S provider 4.6.2, and the
         * provider was later upgraded to 8.0.3. Resuming the mirror then failed because the
         * persisted mirrorObj.getSynchronizedInstance() value still contained
         * SystemName="SYMMETRIX+000195701573", whereas 8.0.3 expects
         * SystemName="SYMMETRIX-+-000195701573". To resolve this, the value collected from the
         * current SMI-S provider is set here.
         */
       mirrorObj.setSynchronizedInstance(storageSync.toString());
       _dbClient.persistObject(mirrorObj);
       CIMArgument[] inArgs =
           isVmax3
               ? _helper.getResumeSynchronizationInputArgumentsWithCopyState(storageSync)
               : _helper.getResumeSynchronizationInputArguments(storageSync);
       CIMArgument[] outArgs = new CIMArgument[5];
       _helper.callModifyReplica(storage, inArgs, outArgs);
       CIMObjectPath job = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
       if (job != null) {
         ControllerServiceImpl.enqueueJob(
             new QueueJob(new SmisBlockResumeMirrorJob(job, storage.getId(), taskCompleter)));
       } else {
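          // No async job was returned; refresh the mirror's sync state from the
          // provider and complete the task directly.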
         CIMInstance syncObject =
             _helper.getInstance(
                 storage, storageSync, false, false, new String[] {SmisConstants.CP_SYNC_STATE});
         mirrorObj.setSyncState(
             CIMPropertyFactory.getPropertyValue(syncObject, SmisConstants.CP_SYNC_STATE));
         _dbClient.persistObject(mirrorObj);
         taskCompleter.ready(_dbClient);
       }
     }
   } catch (Exception e) {
     _log.error("Failed to resume single volume mirror: {}", mirror);
     ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
     taskCompleter.error(_dbClient, serviceError);
   } finally {
     if (storageSyncRefs != null) {
       storageSyncRefs.close();
     }
   }
 }
  private void createOrUpdateVcenterCluster(
      boolean createCluster,
      AsyncTask task,
      URI clusterUri,
      URI[] addHostUris,
      URI[] removeHostUris,
      URI[] volumeUris)
      throws InternalException {
    TaskCompleter completer = null;
    try {
      _log.info(
          "createOrUpdateVcenterCluster "
              + createCluster
              + " "
              + task
              + " "
              + clusterUri
              + " "
              + addHostUris
              + " "
              + removeHostUris);

      if (task == null) {
        _log.error("AsyncTask is null");
        throw new Exception("AsyncTask is null");
      }
      URI vcenterDataCenterId = task._id;
      VcenterDataCenter vcenterDataCenter =
          _dbClient.queryObject(VcenterDataCenter.class, vcenterDataCenterId);

      if (clusterUri == null) {
        _log.error("Cluster URI is null");
        throw new Exception("Cluster URI is null");
      }
      Cluster cluster = _dbClient.queryObject(Cluster.class, clusterUri);

      Vcenter vcenter = _dbClient.queryObject(Vcenter.class, vcenterDataCenter.getVcenter());
      _log.info(
          "Request to create or update cluster "
              + vcenter.getIpAddress()
              + "/"
              + vcenterDataCenter.getLabel()
              + "/"
              + cluster.getLabel());

      Collection<Host> addHosts = new ArrayList<Host>();
      if (addHostUris == null || addHostUris.length == 0) {
        _log.info("Add host URIs is null or empty - Cluster will be created without hosts");
      } else {
        for (URI hostUri : addHostUris) {
          _log.info("createOrUpdateVcenterCluster " + clusterUri + " with add host " + hostUri);
        }
        addHosts = _dbClient.queryObject(Host.class, addHostUris);
      }

      Collection<Host> removeHosts = new ArrayList<Host>();
      if (removeHostUris == null || removeHostUris.length == 0) {
        _log.info("Remove host URIs is null or empty - Cluster will have no removed hosts");
      } else {
        for (URI hostUri : removeHostUris) {
          _log.info("createOrUpdateVcenterCluster " + clusterUri + " with remove host " + hostUri);
        }
        removeHosts = _dbClient.queryObject(Host.class, removeHostUris);
      }

      Collection<Volume> volumes = new ArrayList<Volume>();
      if (volumeUris == null || volumeUris.length == 0) {
        _log.info("Volume URIs is null or empty - Cluster will be created without datastores");
      } else {
        for (URI volumeUri : volumeUris) {
          _log.info("createOrUpdateVcenterCluster " + clusterUri + " with volume " + volumeUri);
        }
        volumes = _dbClient.queryObject(Volume.class, volumeUris);
      }

      completer =
          new VcenterClusterCompleter(
              vcenterDataCenterId,
              task._opId,
              OperationTypeEnum.CREATE_UPDATE_VCENTER_CLUSTER,
              "VCENTER_CONTROLLER");
      Workflow workflow =
          _workflowService.getNewWorkflow(
              this, "CREATE_UPDATE_VCENTER_CLUSTER_WORKFLOW", true, task._opId);
      String clusterStep =
          workflow.createStep(
              "CREATE_UPDATE_VCENTER_CLUSTER_STEP",
              String.format(
                  "vCenter cluster operation in vCenter datacenter %s", vcenterDataCenterId),
              null,
              vcenterDataCenterId,
              vcenterDataCenterId.toString(),
              this.getClass(),
              new Workflow.Method(
                  "createUpdateVcenterClusterOperation",
                  createCluster,
                  vcenter.getId(),
                  vcenterDataCenter.getId(),
                  cluster.getId()),
              null,
              null);

      String lastStep = clusterStep;
      if (removeHosts.size() > 0) {
        for (Host host : removeHosts) {
          String hostStep =
              workflow.createStep(
                  "VCENTER_CLUSTER_REMOVE_HOST",
                  String.format("vCenter cluster remove host operation %s", host.getId()),
                  clusterStep,
                  vcenterDataCenterId,
                  vcenterDataCenterId.toString(),
                  this.getClass(),
                  new Workflow.Method(
                      "vcenterClusterRemoveHostOperation",
                      vcenter.getId(),
                      vcenterDataCenter.getId(),
                      cluster.getId(),
                      host.getId()),
                  null,
                  null);
          lastStep = hostStep; // add host will wait on last of these
        }
      }

      if (addHosts.size() > 0) {
        for (Host host : addHosts) {
          String hostStep =
              workflow.createStep(
                  "VCENTER_CLUSTER_ADD_HOST",
                  String.format("vCenter cluster add host operation %s", host.getId()),
                  lastStep,
                  vcenterDataCenterId,
                  vcenterDataCenterId.toString(),
                  this.getClass(),
                  new Workflow.Method(
                      "vcenterClusterAddHostOperation",
                      vcenter.getId(),
                      vcenterDataCenter.getId(),
                      cluster.getId(),
                      host.getId()),
                  null,
                  null);
        }

        if (volumes.size() > 0) {
          // Once all hosts are in the cluster, select one host to use for shared storage operations
          String selectHostForStorageOperationsStep =
              workflow.createStep(
                  "VCENTER_CLUSTER_SELECT_HOST",
                  String.format(
                      "vCenter cluster select host for storage operations operation vCenter datacenter %s",
                      vcenterDataCenterId),
                  "VCENTER_CLUSTER_ADD_HOST",
                  vcenterDataCenterId,
                  vcenterDataCenterId.toString(),
                  this.getClass(),
                  new Workflow.Method(
                      "vcenterClusterSelectHostOperation",
                      vcenter.getId(),
                      vcenterDataCenter.getId(),
                      cluster.getId(),
                      addHostUris),
                  null,
                  null);

          // Do not run datastore creation in parallel: the first datastore waits on the
          // selectHostForStorageOperationsStep step, and each subsequent datastore waits
          // on the previous datastore step.
          String volumeStep = null;
          for (Volume volume : volumes) {
            volumeStep =
                workflow.createStep(
                    "VCENTER_CLUSTER_CREATE_DATASTORE",
                    String.format("vCenter cluster create datastore operation %s", volume.getId()),
                    volumeStep == null ? selectHostForStorageOperationsStep : volumeStep,
                    vcenterDataCenterId,
                    vcenterDataCenterId.toString(),
                    this.getClass(),
                    new Workflow.Method(
                        "vcenterClusterCreateDatastoreOperation",
                        vcenter.getId(),
                        vcenterDataCenter.getId(),
                        cluster.getId(),
                        volume.getId(),
                        selectHostForStorageOperationsStep),
                    null,
                    null);
          }
        }
      }

      workflow.executePlan(completer, "Success");
    } catch (Exception e) {
      _log.error("createOrUpdateVcenterCluster caught an exception.", e);
      ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
      // completer may still be null if the exception occurred before it was constructed
      if (completer != null) {
        completer.error(_dbClient, serviceError);
      }
    }
  }
  /*
   * (non-Javadoc)
   *
   * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doDeleteVolumes(com.emc.storageos.db.client.model.StorageSystem,
   * java.lang.String, java.util.List, com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void doDeleteVolumes(
      StorageSystem storageSystem, String opId, List<Volume> volumes, TaskCompleter taskCompleter)
      throws DeviceControllerException {

    try {
      StringBuilder logMsgBuilder =
          new StringBuilder(
              String.format("Delete Volume Start - Array:%s", storageSystem.getSerialNumber()));
      MultiVolumeTaskCompleter multiVolumeTaskCompleter = (MultiVolumeTaskCompleter) taskCompleter;
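      // Thick and thin logical units are deleted through different HDS API calls,
      // so collect their object IDs separately.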
      Set<String> thickLogicalUnitIdList = new HashSet<String>();
      Set<String> thinLogicalUnitIdList = new HashSet<String>();
      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storageSystem),
              storageSystem.getSmisUserName(),
              storageSystem.getSmisPassword());
      String systemObjectId = HDSUtils.getSystemObjectID(storageSystem);
      log.info("volumes size: {}", volumes.size());
      for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
        String logicalUnitObjectId =
            HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storageSystem);
        LogicalUnit logicalUnit =
            hdsApiClient.getLogicalUnitInfo(systemObjectId, logicalUnitObjectId);
        if (logicalUnit == null) {
          // The logical unit no longer exists on the array (already deleted),
          // so mark the volume inactive and skip further processing.
          log.info(String.format("Volume %s is already deleted", volume.getNativeId()));
          volume.setInactive(true);
          dbClient.persistObject(volume);
          VolumeTaskCompleter deleteTaskCompleter =
              multiVolumeTaskCompleter.skipTaskCompleter(volume.getId());
          deleteTaskCompleter.ready(dbClient);
          continue;
        }
        if (volume.getThinlyProvisioned()) {
          thinLogicalUnitIdList.add(logicalUnitObjectId);
        } else {
          thickLogicalUnitIdList.add(logicalUnitObjectId);
        }
      }
      log.info(logMsgBuilder.toString());
      if (!multiVolumeTaskCompleter.isVolumeTaskCompletersEmpty()) {
        if (null != thickLogicalUnitIdList && !thickLogicalUnitIdList.isEmpty()) {
          String asyncThickLUsJobId =
              hdsApiClient.deleteThickLogicalUnits(systemObjectId, thickLogicalUnitIdList);
          if (null != asyncThickLUsJobId) {
            ControllerServiceImpl.enqueueJob(
                new QueueJob(
                    new HDSDeleteVolumeJob(
                        asyncThickLUsJobId, volumes.get(0).getStorageController(), taskCompleter)));
          }
        }

        if (null != thinLogicalUnitIdList && !thinLogicalUnitIdList.isEmpty()) {
          String asyncThinHDSJobId =
              hdsApiClient.deleteThinLogicalUnits(systemObjectId, thinLogicalUnitIdList);

          // Note: it is unclear whether tracking two separate async jobs (thick and
          // thin deletes) with a single task completer works correctly.
          if (null != asyncThinHDSJobId) {
            ControllerServiceImpl.enqueueJob(
                new QueueJob(
                    new HDSDeleteVolumeJob(
                        asyncThinHDSJobId, volumes.get(0).getStorageController(), taskCompleter)));
          }
        }
      } else {
        // If we are here, there are no volumes to delete, we have
        // invoked ready() for the VolumeDeleteCompleter, and told
        // the multiVolumeTaskCompleter to skip these completers.
        // In this case, the multiVolumeTaskCompleter complete()
        // method will not be invoked and the result is that the
        // workflow that initiated this delete request will never
        // be updated. So, here we just call complete() on the
        // multiVolumeTaskCompleter to ensure the workflow status is
        // updated.
        multiVolumeTaskCompleter.ready(dbClient);
      }
    } catch (Exception e) {
      log.error("Problem in doDeleteVolume: ", e);
      ServiceError error =
          DeviceControllerErrors.hds.methodFailed("doDeleteVolume", e.getMessage());
      taskCompleter.error(dbClient, error);
    }
    StringBuilder logMsgBuilder =
        new StringBuilder(
            String.format("Delete Volume End - Array: %s", storageSystem.getSerialNumber()));
    for (Volume volume : volumes) {
      logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
    }
    log.info(logMsgBuilder.toString());
  }
  /*
   * (non-Javadoc)
   *
   * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doCreateVolumes(com.emc.storageos.db.client.model.StorageSystem,
   * com.emc.storageos.db.client.model.StoragePool, java.lang.String, java.util.List,
   * com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper, com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void doCreateVolumes(
      StorageSystem storageSystem,
      StoragePool storagePool,
      String opId,
      List<Volume> volumes,
      VirtualPoolCapabilityValuesWrapper capabilities,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    String label = null;
    Long capacity = null;
    boolean isThinVolume = false;
    boolean opCreationFailed = false;
    StringBuilder logMsgBuilder =
        new StringBuilder(
            String.format(
                "Create Volume Start - Array:%s, Pool:%s",
                storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
      logMsgBuilder.append(
          String.format(
              "%nVolume:%s , IsThinlyProvisioned: %s",
              volume.getLabel(), volume.getThinlyProvisioned()));

      if ((label == null) && (volumes.size() == 1)) {
        String tenantName = "";
        try {
          TenantOrg tenant = dbClient.queryObject(TenantOrg.class, volume.getTenant().getURI());
          tenantName = tenant.getLabel();
        } catch (DatabaseException e) {
          log.error("Error lookup TenantOrb object", e);
        }
        label =
            nameGenerator.generate(
                tenantName,
                volume.getLabel(),
                volume.getId().toString(),
                '-',
                HDSConstants.MAX_VOLUME_NAME_LENGTH);
      }

      if (capacity == null) {
        capacity = volume.getCapacity();
      }
      isThinVolume = volume.getThinlyProvisioned();
    }
    log.info(logMsgBuilder.toString());
    try {
      multiVolumeCheckForHitachiModel(volumes, storageSystem);

      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storageSystem),
              storageSystem.getSmisUserName(),
              storageSystem.getSmisPassword());
      String systemObjectID = HDSUtils.getSystemObjectID(storageSystem);
      String poolObjectID = HDSUtils.getPoolObjectID(storagePool);
      String asyncTaskMessageId = null;

      // isThinVolume = true, creates VirtualVolumes
      // isThinVolume = false, creates LogicalUnits
      if (isThinVolume) {
        asyncTaskMessageId =
            hdsApiClient.createThinVolumes(
                systemObjectID,
                storagePool.getNativeId(),
                capacity,
                volumes.size(),
                label,
                QUICK_FORMAT_TYPE,
                storageSystem.getModel());
      } else {
        asyncTaskMessageId =
            hdsApiClient.createThickVolumes(
                systemObjectID,
                poolObjectID,
                capacity,
                volumes.size(),
                label,
                null,
                storageSystem.getModel(),
                null);
      }

      if (asyncTaskMessageId != null) {
        HDSJob createHDSJob =
            (volumes.size() > 1)
                ? new HDSCreateMultiVolumeJob(
                    asyncTaskMessageId,
                    volumes.get(0).getStorageController(),
                    storagePool.getId(),
                    volumes.size(),
                    taskCompleter)
                : new HDSCreateVolumeJob(
                    asyncTaskMessageId,
                    volumes.get(0).getStorageController(),
                    storagePool.getId(),
                    taskCompleter);
        ControllerServiceImpl.enqueueJob(new QueueJob(createHDSJob));
      }
    } catch (final InternalException e) {
      log.error("Problem in doCreateVolumes: ", e);
      opCreationFailed = true;
      taskCompleter.error(dbClient, e);
    } catch (final Exception e) {
      log.error("Problem in doCreateVolumes: ", e);
      opCreationFailed = true;
      ServiceError serviceError =
          DeviceControllerErrors.hds.methodFailed("doCreateVolumes", e.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
    if (opCreationFailed) {
      for (Volume vol : volumes) {
        vol.setInactive(true);
        dbClient.persistObject(vol);
      }
    }

    logMsgBuilder =
        new StringBuilder(
            String.format(
                "Create Volumes End - Array:%s, Pool:%s",
                storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
      logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
    }
    log.info(logMsgBuilder.toString());
  }
  /*
   * (non-Javadoc)
   *
   * @see com.emc.storageos.volumecontroller.CloneOperations#createSingleClone(
   * com.emc.storageos.db.client.model.StorageSystem, java.net.URI, java.net.URI,
   * java.lang.Boolean,
   * com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void createSingleClone(
      StorageSystem storageSystem,
      URI sourceObject,
      URI cloneVolume,
      Boolean createInactive,
      TaskCompleter taskCompleter) {
    log.info("START createSingleClone operation");
    boolean isVolumeClone = true;
    try {
      BlockObject sourceObj = BlockObject.fetch(dbClient, sourceObject);
      URI tenantUri = null;
      // In case of a snapshot, get the tenant from its parent volume
      if (sourceObj instanceof BlockSnapshot) {
        NamedURI parentVolUri = ((BlockSnapshot) sourceObj).getParent();
        Volume parentVolume = dbClient.queryObject(Volume.class, parentVolUri);
        tenantUri = parentVolume.getTenant().getURI();
        isVolumeClone = false;
      } else { // This is a default flow
        tenantUri = ((Volume) sourceObj).getTenant().getURI();
        isVolumeClone = true;
      }

      Volume cloneObj = dbClient.queryObject(Volume.class, cloneVolume);
      StoragePool targetPool = dbClient.queryObject(StoragePool.class, cloneObj.getPool());
      TenantOrg tenantOrg = dbClient.queryObject(TenantOrg.class, tenantUri);
      // String cloneLabel = generateLabel(tenantOrg, cloneObj);

      CinderEndPointInfo ep =
          CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
      log.info(
          "Getting the cinder APi for the provider with id "
              + storageSystem.getActiveProviderURI());
      CinderApi cinderApi = cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);

      String volumeId = "";
      if (isVolumeClone) {
        volumeId =
            cinderApi.cloneVolume(
                cloneObj.getLabel(),
                (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
                targetPool.getNativeId(),
                sourceObj.getNativeId());
      } else {
        volumeId =
            cinderApi.createVolumeFromSnapshot(
                cloneObj.getLabel(),
                (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
                targetPool.getNativeId(),
                sourceObj.getNativeId());
      }

      log.debug("Creating volume with the id " + volumeId + " on Openstack cinder node");
      if (volumeId != null) {
        Map<String, URI> volumeIds = new HashMap<String, URI>();
        volumeIds.put(volumeId, cloneObj.getId());
        ControllerServiceImpl.enqueueJob(
            new QueueJob(
                new CinderSingleVolumeCreateJob(
                    volumeId,
                    cloneObj.getLabel(),
                    storageSystem.getId(),
                    CinderConstants.ComponentType.volume.name(),
                    ep,
                    taskCompleter,
                    targetPool.getId(),
                    volumeIds)));
      }
    } catch (InternalException e) {
      String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
      log.error(errorMsg, e);
      taskCompleter.error(dbClient, e);
    } catch (Exception e) {
      String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
      log.error(errorMsg, e);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("createSingleClone", e.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
  }
  @Override
  public void deleteGroupMirrors(
      StorageSystem storage, List<URI> mirrorList, TaskCompleter taskCompleter)
      throws DeviceControllerException {
    _log.info("deleteGroupMirrors operation START");
    if (!((storage.getUsingSmis80() && storage.deviceIsType(Type.vmax))
        || storage.deviceIsType(Type.vnxblock))) {
      throw DeviceControllerException.exceptions.blockDeviceOperationNotSupported();
    }

    try {
      String[] deviceIds = null;
      BlockMirror firstMirror = _dbClient.queryObject(BlockMirror.class, mirrorList.get(0));
      String repGroupName = firstMirror.getReplicationGroupInstance();
      if (NullColumnValueGetter.isNotNullValue(repGroupName)) {
        CIMObjectPath repGroupPath = _cimPath.getReplicationGroupPath(storage, repGroupName);
        Set<String> deviceIdsSet =
            _helper.getVolumeDeviceIdsFromStorageGroup(storage, repGroupPath);
        deviceIds = deviceIdsSet.toArray(new String[deviceIdsSet.size()]);

        // Delete replication group
        ReplicationUtils.deleteReplicationGroup(
            storage, repGroupName, _dbClient, _helper, _cimPath);
        // Set mirrors replication group to null
        List<BlockMirror> mirrors = _dbClient.queryObject(BlockMirror.class, mirrorList);
        for (BlockMirror mirror : mirrors) {
          mirror.setConsistencyGroup(NullColumnValueGetter.getNullURI());
          mirror.setReplicationGroupInstance(NullColumnValueGetter.getNullStr());
        }

        _dbClient.persistObject(mirrors);
      } else {
        deviceIds = _helper.getBlockObjectNativeIds(mirrorList);
      }

      if (storage.checkIfVmax3()) {
        for (String deviceId : deviceIds) {
          _helper.removeVolumeFromParkingSLOStorageGroup(storage, deviceId, false);
          _log.info("Done invoking remove volume {} from parking SLO storage group", deviceId);
        }
      }

      CIMObjectPath[] mirrorPaths = _cimPath.getVolumePaths(storage, deviceIds);
      CIMObjectPath configSvcPath = _cimPath.getConfigSvcPath(storage);
      CIMArgument[] inArgs = null;
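      // VNX takes the basic argument list; for other arrays (VMAX), the request is built
      // to continue past elements that no longer exist on the array.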
      if (storage.deviceIsType(Type.vnxblock)) {
        inArgs = _helper.getReturnElementsToStoragePoolArguments(mirrorPaths);
      } else {
        inArgs =
            _helper.getReturnElementsToStoragePoolArguments(
                mirrorPaths, SmisConstants.CONTINUE_ON_NONEXISTENT_ELEMENT);
      }
      CIMArgument[] outArgs = new CIMArgument[5];
      _helper.invokeMethod(
          storage, configSvcPath, SmisConstants.RETURN_ELEMENTS_TO_STORAGE_POOL, inArgs, outArgs);
      CIMObjectPath job = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
      ControllerServiceImpl.enqueueJob(
          new QueueJob(new SmisBlockDeleteCGMirrorJob(job, storage.getId(), taskCompleter)));
    } catch (Exception e) {
      _log.error("Problem making SMI-S call: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
      taskCompleter.error(_dbClient, serviceError);
    }
  }
  @Override
  public void createSingleVolumeMirror(
      StorageSystem storage, URI mirror, Boolean createInactive, TaskCompleter taskCompleter)
      throws DeviceControllerException {
    _log.info("createSingleVolumeMirror operation START");
    try {
      BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
      StoragePool targetPool = _dbClient.queryObject(StoragePool.class, mirrorObj.getPool());
      Volume source = _dbClient.queryObject(Volume.class, mirrorObj.getSource());
      TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, source.getTenant().getURI());
      String tenantName = tenant.getLabel();
      String targetLabelToUse =
          _nameGenerator.generate(
              tenantName,
              mirrorObj.getLabel(),
              mirror.toString(),
              '-',
              SmisConstants.MAX_VOLUME_NAME_LENGTH);
      CIMObjectPath replicationSvcPath = _cimPath.getControllerReplicationSvcPath(storage);
      CIMArgument[] inArgs = null;
      if (storage.checkIfVmax3()) {
        CIMObjectPath volumeGroupPath = _helper.getVolumeGroupPath(storage, source, targetPool);
        CIMInstance replicaSettingData = getDefaultReplicationSettingData(storage);
        inArgs =
            _helper.getCreateElementReplicaMirrorInputArguments(
                storage,
                source,
                targetPool,
                createInactive,
                targetLabelToUse,
                volumeGroupPath,
                replicaSettingData);
      } else {
        inArgs =
            _helper.getCreateElementReplicaMirrorInputArguments(
                storage, source, targetPool, createInactive, targetLabelToUse);
      }

      CIMArgument[] outArgs = new CIMArgument[5];
      _helper.invokeMethod(
          storage, replicationSvcPath, SmisConstants.CREATE_ELEMENT_REPLICA, inArgs, outArgs);
      CIMObjectPath job = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
      if (job != null) {
        ControllerServiceImpl.enqueueJob(
            new QueueJob(
                new SmisBlockCreateMirrorJob(
                    job, storage.getId(), !createInactive, taskCompleter)));
        // Resynchronizing state applies to the initial copy as well as future
        // re-synchronizations.
        mirrorObj.setSyncState(SynchronizationState.RESYNCHRONIZING.toString());
        _dbClient.persistObject(mirrorObj);
      }
    } catch (final InternalException e) {
      _log.info("Problem making SMI-S call: ", e);
      taskCompleter.error(_dbClient, e);
    } catch (Exception e) {
      _log.info("Problem making SMI-S call: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
      taskCompleter.error(_dbClient, serviceError);
    }
  }
  @Override
  public void doModifyVolumes(
      StorageSystem storage,
      StoragePool storagePool,
      String opId,
      List<Volume> volumes,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    StringBuilder logMsgBuilder =
        new StringBuilder(
            String.format(
                "Modify Volume Start - Array:%s, Pool:%s",
                storage.getSerialNumber(), storagePool.getNativeGuid()));

    String systemObjectID = HDSUtils.getSystemObjectID(storage);
    for (Volume volume : volumes) {
      try {
        HDSApiClient hdsApiClient =
            hdsApiFactory.getClient(
                HDSUtils.getHDSServerManagementServerInfo(storage),
                storage.getSmisUserName(),
                storage.getSmisPassword());
        logMsgBuilder.append(
            String.format(
                "%nVolume:%s , IsThinlyProvisioned: %s, tieringPolicy: %s",
                volume.getLabel(),
                volume.getThinlyProvisioned(),
                volume.getAutoTieringPolicyUri()));
        LogicalUnit logicalUnit =
            hdsApiClient.getLogicalUnitInfo(
                systemObjectID, HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storage));
        String policyName = ControllerUtils.getAutoTieringPolicyName(volume.getId(), dbClient);
        String autoTierPolicyName = null;
        if (policyName.equals(Constants.NONE)) {
          autoTierPolicyName = null;
        } else {
          autoTierPolicyName =
              HitachiTieringPolicy.getPolicy(
                      policyName.replaceAll(
                          HDSConstants.SLASH_OPERATOR, HDSConstants.UNDERSCORE_OPERATOR))
                  .getKey();
        }
        if (null != logicalUnit
            && null != logicalUnit.getLdevList()
            && !logicalUnit.getLdevList().isEmpty()) {
          Iterator<LDEV> ldevItr = logicalUnit.getLdevList().iterator();
          if (ldevItr.hasNext()) {
            LDEV ldev = ldevItr.next();
            String asyncMessageId =
                hdsApiClient.modifyThinVolumeTieringPolicy(
                    systemObjectID,
                    logicalUnit.getObjectID(),
                    ldev.getObjectID(),
                    autoTierPolicyName);
            if (null != asyncMessageId) {
              HDSJob modifyHDSJob =
                  new HDSModifyVolumeJob(
                      asyncMessageId,
                      volume.getStorageController(),
                      taskCompleter,
                      HDSModifyVolumeJob.VOLUME_MODIFY_JOB);
              ControllerServiceImpl.enqueueJob(new QueueJob(modifyHDSJob));
            }
          }
        } else {
          String errorMsg = String.format("No LDEV's found for volume: %s", volume.getId());
          log.info(errorMsg);
          ServiceError serviceError =
              DeviceControllerErrors.hds.methodFailed("doModifyVolumes", errorMsg);
          taskCompleter.error(dbClient, serviceError);
        }
      } catch (final InternalException e) {
        log.error("Problem in doModifyVolumes: ", e);
        taskCompleter.error(dbClient, e);
      } catch (final Exception e) {
        log.error("Problem in doModifyVolumes: ", e);
        ServiceError serviceError =
            DeviceControllerErrors.hds.methodFailed("doModifyVolumes", e.getMessage());
        taskCompleter.error(dbClient, serviceError);
      }
    }
  }