Code Example #1
  /** {@inheritDoc} */
  @Override
  public List<BlockObject> getAllSourceObjectsForFullCopyRequest(BlockObject fcSourceObj) {

    // Treat full copies of snapshots as is done in the base class.
    if (URIUtil.isType(fcSourceObj.getId(), BlockSnapshot.class)) {
      return super.getAllSourceObjectsForFullCopyRequest(fcSourceObj);
    }

    // By default, if the passed volume is in a consistency group,
    // all volumes in the consistency group should be copied.
    List<BlockObject> fcSourceObjList = new ArrayList<BlockObject>();
    URI cgURI = fcSourceObj.getConsistencyGroup();
    if (!NullColumnValueGetter.isNullURI(cgURI)) {
      BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, cgURI);
      // If there is no corresponding native CG for the VPLEX
      // CG, then this is a CG created prior to 2.2, and in this
      // case we want full copies treated like snapshots: that is,
      // only create a copy of the passed object.
      if (!cg.checkForType(Types.LOCAL)) {
        fcSourceObjList.add(fcSourceObj);
      } else {
        fcSourceObjList.addAll(getActiveCGVolumes(cg));
      }
    } else {
      fcSourceObjList.add(fcSourceObj);
    }

    return fcSourceObjList;
  }
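
The method above resolves a full-copy request to one of three source sets: delegate snapshots to the base class, fan out to every active volume in the consistency group when the CG has a local (native) backing group, or fall back to just the passed object. A minimal, self-contained sketch of that decision rule, using plain stand-in types rather than the real ViPR classes (the boolean flags stand in for the URIUtil.isType, NullColumnValueGetter.isNullURI, and cg.checkForType(Types.LOCAL) checks):

  import java.util.Collections;
  import java.util.List;

  final class FullCopySourceRule {
    // Sketch only: flags replace the database lookups in the real method.
    static List<String> resolve(
        String source, boolean isSnapshot, boolean inCg, boolean cgHasLocalType,
        List<String> activeCgVolumes) {
      if (isSnapshot) {
        return Collections.singletonList(source); // delegated to the base class in the real code
      }
      if (inCg && cgHasLocalType) {
        return activeCgVolumes;                   // native CG: copy every volume in the group
      }
      return Collections.singletonList(source);   // pre-2.2 CG or no CG: only the passed object
    }
  }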
Code Example #2
  /** {@inheritDoc} */
  @Override
  public void validateFullCopyCreateRequest(List<BlockObject> fcSourceObjList, int count) {
    // Call super first.
    super.validateFullCopyCreateRequest(fcSourceObjList, count);

    // For VMAX3 you cannot have active snapshot and full copy sessions
    // at the same time, so verify there are no active snapshots for the
    // volume. Note that we know the source is a volume, because full
    // copies are not allowed for VMAX snapshots, which would have been
    // caught in the call to super.
    for (BlockObject fcSourceObj : fcSourceObjList) {
      BlockServiceUtils.validateVMAX3ActiveSnapSessionsExists(
          fcSourceObj.getId(), _dbClient, FULLCOPIES);
    }
  }
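
The shape here is a template-method chain: the subclass runs the common validation in super first, then layers a platform-specific rule on top. A self-contained sketch of that shape (class and method names are illustrative, not ViPR APIs):

  import java.util.List;

  class BaseFullCopyValidator {
    void validateCreate(List<String> sources, int count) {
      if (count < 1) {
        throw new IllegalArgumentException("count must be at least 1");
      }
    }
  }

  class Vmax3FullCopyValidator extends BaseFullCopyValidator {
    @Override
    void validateCreate(List<String> sources, int count) {
      super.validateCreate(sources, count); // common checks first
      for (String source : sources) {
        // Stand-in for the active-snap-session check; the real code calls
        // BlockServiceUtils.validateVMAX3ActiveSnapSessionsExists.
        if (hasActiveSnapSession(source)) {
          throw new IllegalStateException("active snap session on " + source);
        }
      }
    }

    private boolean hasActiveSnapSession(String source) {
      return false; // placeholder: the real check queries the database
    }
  }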
Code Example #3
  /**
   * Routine contains logic to create an export mask on the array
   *
   * @param workflow - Workflow object to create steps against
   * @param previousStep - [optional] Identifier of workflow step to wait for
   * @param device - BlockStorageDevice implementation
   * @param storage - StorageSystem object representing the underlying array
   * @param exportGroup - ExportGroup object representing Bourne-level masking
   * @param initiatorURIs - List of Initiator URIs
   * @param volumeMap - Map of Volume URIs to requested Integer HLUs
   * @param zoningStepNeeded - Not required for HDS
   * @param token - Identifier for the operation
   * @return true if the workflow steps were created; false if the operation failed and the task completer was marked in error
   * @throws Exception
   */
  public boolean determineExportGroupCreateSteps(
      Workflow workflow,
      String previousStep,
      BlockStorageDevice device,
      StorageSystem storage,
      ExportGroup exportGroup,
      List<URI> initiatorURIs,
      Map<URI, Integer> volumeMap,
      boolean zoningStepNeeded,
      String token)
      throws Exception {
    Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
    List<URI> volumeURIs = new ArrayList<URI>();
    volumeURIs.addAll(volumeMap.keySet());
    Map<URI, URI> hostToExistingExportMaskMap = new HashMap<URI, URI>();
    List<URI> hostURIs = new ArrayList<URI>();
    List<String> portNames = new ArrayList<String>();
    List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
    // Populate the port WWN/IQNs (portNames) and the
    // mapping of the WWN/IQNs to Initiator URIs
    processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);

    // We always want to have the full list of initiators for the hosts involved in
    // this export. This will allow the export operation to always find any
    // existing exports for a given host.
    queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);

    // Find the export masks that are associated with any or all of the ports
    // in portNames. We will have to do processing differently based on
    // whether or not there are existing ExportMasks.
    Map<String, Set<URI>> matchingExportMaskURIs =
        device.findExportMasks(storage, portNames, false);
    if (matchingExportMaskURIs.isEmpty()) {

      _log.info(
          String.format(
              "No existing mask found w/ initiators { %s }", Joiner.on(",").join(portNames)));

      createNewExportMaskWorkflowForInitiators(
          initiatorURIs, exportGroup, workflow, volumeMap, storage, token, previousStep);
    } else {
      _log.info(
          String.format(
              "Mask(s) found w/ initiators {%s}. "
                  + "MatchingExportMaskURIs {%s}, portNameToInitiators {%s}",
              Joiner.on(",").join(portNames),
              Joiner.on(",").join(matchingExportMaskURIs.values()),
              Joiner.on(",").join(portNameToInitiatorURI.entrySet())));
      // There are some initiators that already exist. We need to create a
      // workflow that creates new masking containers or updates existing
      // containers as necessary.

      // These data structures will be used to track new initiators - ones
      // that don't already exist on the array
      List<URI> initiatorURIsCopy = new ArrayList<URI>();
      initiatorURIsCopy.addAll(initiatorURIs);

      // This loop will determine a list of volumes to update per export mask
      Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes =
          new HashMap<URI, Map<URI, Integer>>();
      Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators =
          new HashMap<URI, Set<Initiator>>();
      for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
        URI initiatorURI = portNameToInitiatorURI.get(entry.getKey());
        Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
        // Keep track of those initiators that have been found to exist already
        // in some export mask on the array
        initiatorURIsCopy.remove(initiatorURI);
        // Get a list of the ExportMasks that were matched to the initiator
        List<URI> exportMaskURIs = new ArrayList<URI>();
        exportMaskURIs.addAll(entry.getValue());
        List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
        _log.info(
            String.format(
                "initiator %s masks {%s}",
                initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
        for (ExportMask mask : masks) {
          // The ExportMask was created outside of ViPR. Set the mask name if it doesn't have one.
          if (null == mask.getMaskName()) {
            String maskName =
                ExportMaskUtils.getMaskName(_dbClient, initiators, exportGroup, storage);
            _log.info("Generated mask name: {}", maskName);
            mask.setMaskName(maskName);
          }

          // Check for NO_VIPR.  If found, avoid this mask.
          if (mask.getMaskName() != null
              && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
            _log.info(
                String.format(
                    "ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it",
                    mask.getMaskName(), ExportUtils.NO_VIPR));
            continue;
          }

          _log.info(
              String.format(
                  "mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
          if (mask.getCreatedBySystem()) {
            _log.info(
                String.format(
                    "initiator %s is in persisted mask %s",
                    initiator.getInitiatorPort(), mask.getMaskName()));

            // We're still OK if the mask contains ONLY initiators that can be found
            // in our export group, because we would simply add to them.
            if (mask.getInitiators() != null) {
              for (String existingMaskInitiatorStr : mask.getInitiators()) {

                // Now look at it from a different angle: which of our export
                // group initiators are NOT in the current mask? If one belongs
                // to the same host as an existing initiator, we should add it
                // to this mask.
                Iterator<URI> initiatorIter = initiatorURIsCopy.iterator();
                while (initiatorIter.hasNext()) {
                  Initiator initiatorCopy =
                      _dbClient.queryObject(Initiator.class, initiatorIter.next());

                  if (initiatorCopy != null
                      && initiatorCopy.getId() != null
                      && !mask.hasInitiator(initiatorCopy.getId().toString())) {
                    Initiator existingMaskInitiator =
                        _dbClient.queryObject(
                            Initiator.class, URI.create(existingMaskInitiatorStr));
                    if (existingMaskInitiator != null
                        && initiatorCopy.getHost() != null
                        && initiatorCopy.getHost().equals(existingMaskInitiator.getHost())) {
                      // Add to the list of initiators we need to add to this mask
                      Set<Initiator> existingMaskInitiators =
                          existingMasksToUpdateWithNewInitiators.get(mask.getId());
                      if (existingMaskInitiators == null) {
                        existingMaskInitiators = new HashSet<Initiator>();
                        existingMasksToUpdateWithNewInitiators.put(
                            mask.getId(), existingMaskInitiators);
                      }
                      existingMaskInitiators.add(initiatorCopy);
                      // Remove this initiator from the list of initiators
                      // we'll make a new mask from.
                      initiatorIter.remove();
                    }
                  }
                }
              }
            }
          } else {
            // Insert this initiator into the mask's list of initiators managed by the system.
            // This will get persisted below.
            mask.addInitiator(initiator);
            if (!NullColumnValueGetter.isNullURI(initiator.getHost())) {
              hostToExistingExportMaskMap.put(initiator.getHost(), mask.getId());
            }
          }

          // We need to see if the volume also exists in the mask;
          // if it doesn't, then we'll add it to the list of volumes to add.
          for (URI boURI : volumeURIs) {
            BlockObject bo = BlockObject.fetch(_dbClient, boURI);
            if (!mask.hasExistingVolume(bo)) {
              _log.info(
                  String.format(
                      "volume %s is not in mask %s", bo.getNativeGuid(), mask.getMaskName()));
              // The volume doesn't exist, so we have to add it to
              // the masking container.
              Map<URI, Integer> newVolumes = existingMasksToUpdateWithNewVolumes.get(mask.getId());
              if (newVolumes == null) {
                newVolumes = new HashMap<URI, Integer>();
                existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumes);
              }
              // Check if the requested HLU for the volume is
              // already taken by a pre-existing volume.
              Integer requestedHLU = volumeMap.get(bo.getId());
              StringMap existingVolumesInMask = mask.getExistingVolumes();
              if (existingVolumesInMask != null
                  && existingVolumesInMask.containsValue(requestedHLU.toString())) {
                ExportOrchestrationTask completer =
                    new ExportOrchestrationTask(exportGroup.getId(), token);
                ServiceError serviceError =
                    DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(
                        boURI.toString(), requestedHLU.toString());
                completer.error(_dbClient, serviceError);
                return false;
              }
              newVolumes.put(bo.getId(), requestedHLU);
              mask.addToUserCreatedVolumes(bo);
            }
          }

          // Update the list of volumes and initiators for the mask
          Map<URI, Integer> volumeMapForExistingMask =
              existingMasksToUpdateWithNewVolumes.get(mask.getId());
          if (volumeMapForExistingMask != null && !volumeMapForExistingMask.isEmpty()) {
            mask.addVolumes(volumeMapForExistingMask);
          }

          Set<Initiator> initiatorSetForExistingMask =
              existingMasksToUpdateWithNewInitiators.get(mask.getId());
          if (initiatorSetForExistingMask != null && !initiatorSetForExistingMask.isEmpty()) {
            mask.addInitiators(initiatorSetForExistingMask);
          }

          updateZoningMap(exportGroup, mask);
          _dbClient.updateAndReindexObject(mask);
          // TODO: All export group modifications should be moved to completers
          exportGroup.addExportMask(mask.getId());
          _dbClient.updateAndReindexObject(exportGroup);
        }
      }

      // The initiatorURIsCopy was used in the foreach initiator loop to see
      // which initiators already exist in a mask. If it is non-empty,
      // then it means there are initiators that are new,
      // so let's add them to the main tracker
      Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
      if (!initiatorURIsCopy.isEmpty()) {
        for (URI newExportMaskInitiator : initiatorURIsCopy) {

          Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
          List<URI> initiatorSet = hostInitiatorMap.get(initiator.getHost());
          if (initiatorSet == null) {
            initiatorSet = new ArrayList<URI>();
            hostInitiatorMap.put(initiator.getHost(), initiatorSet);
          }
          initiatorSet.add(initiator.getId());

          _log.info(
              String.format(
                  "host = %s, "
                      + "initiators to add: %d, "
                      + "existingMasksToUpdateWithNewVolumes.size = %d",
                  initiator.getHost(),
                  hostInitiatorMap.get(initiator.getHost()).size(),
                  existingMasksToUpdateWithNewVolumes.size()));
        }
      }

      _log.info(
          String.format(
              "existingMasksToUpdateWithNewVolumes.size = %d",
              existingMasksToUpdateWithNewVolumes.size()));

      // At this point we have the necessary data structures populated to
      // determine the workflow steps. We are going to create new masks
      // and/or add volumes to existing masks.
      if (!hostInitiatorMap.isEmpty()) {
        for (URI hostID : hostInitiatorMap.keySet()) {
          // Check if there is an existing mask (created outside of ViPR) for
          // the host. If there is, we will need to add the initiators
          // associated with that host to the list
          if (hostToExistingExportMaskMap.containsKey(hostID)) {
            URI existingExportMaskURI = hostToExistingExportMaskMap.get(hostID);
            Set<Initiator> toAddInits = new HashSet<Initiator>();
            List<URI> hostInitiatorList = hostInitiatorMap.get(hostID);
            for (URI initURI : hostInitiatorList) {
              Initiator initiator = _dbClient.queryObject(Initiator.class, initURI);
              if (!initiator.getInactive()) {
                toAddInits.add(initiator);
              }
            }
            _log.info(
                String.format(
                    "Need to add new initiators to existing mask %s, %s",
                    existingExportMaskURI.toString(), Joiner.on(',').join(hostInitiatorList)));
            existingMasksToUpdateWithNewInitiators.put(existingExportMaskURI, toAddInits);
            continue;
          }
          // We have some brand-new initiators; let's add them to new masks
          _log.info(
              String.format(
                  "new export masks %s", Joiner.on(",").join(hostInitiatorMap.get(hostID))));

          generateExportMaskCreateWorkflow(
              workflow,
              previousStep,
              storage,
              exportGroup,
              hostInitiatorMap.get(hostID),
              volumeMap,
              token);
        }
      }

      Map<URI, String> stepMap = new HashMap<URI, String>();
      for (Map.Entry<URI, Map<URI, Integer>> entry :
          existingMasksToUpdateWithNewVolumes.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
        Map<URI, Integer> volumesToAdd = entry.getValue();
        _log.info(
            String.format(
                "adding these volumes %s to mask %s",
                Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
        stepMap.put(
            entry.getKey(),
            generateExportMaskAddVolumesWorkflow(
                workflow, null, storage, exportGroup, mask, volumesToAdd));
      }

      for (Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
        Set<Initiator> initiatorsToAdd = entry.getValue();
        List<URI> initiatorsURIs = new ArrayList<URI>();
        for (Initiator initiator : initiatorsToAdd) {
          initiatorsURIs.add(initiator.getId());
        }
        _log.info(
            String.format(
                "adding these initiators %s to mask %s",
                Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
        previousStep =
            stepMap.get(entry.getKey()) == null ? previousStep : stepMap.get(entry.getKey());
        generateExportMaskAddInitiatorsWorkflow(
            workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, null, token);
      }
    }
    return true;
  }
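
Example #3 uses the same get-or-create idiom three times (for existingMasksToUpdateWithNewInitiators, existingMasksToUpdateWithNewVolumes, and hostInitiatorMap): look up a collection by key, create and register it if absent, then add to it. On Java 8 and later the idiom collapses to Map.computeIfAbsent; a runnable sketch with String stand-ins for the ViPR URI and Initiator types:

  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;

  public class GetOrCreateDemo {
    public static void main(String[] args) {
      Map<String, Set<String>> masksToInitiators = new HashMap<>();
      // Each call creates the set on first use, then adds to it.
      masksToInitiators.computeIfAbsent("mask-1", k -> new HashSet<>()).add("init-A");
      masksToInitiators.computeIfAbsent("mask-1", k -> new HashSet<>()).add("init-B");
      System.out.println(masksToInitiators); // {mask-1=[init-A, init-B]}
    }
  }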
Code Example #4
  /** {@inheritDoc} */
  @Override
  public TaskList create(
      List<BlockObject> fcSourceObjList,
      VirtualArray varray,
      String name,
      boolean createInactive,
      int count,
      String taskId) {

    // Populate the descriptors list with all volumes required
    // to create the VPLEX volume copies.
    int sourceCounter = 0;
    URI vplexSrcSystemId = null;
    TaskList taskList = new TaskList();
    List<Volume> vplexCopyVolumes = new ArrayList<Volume>();
    List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
    List<BlockObject> sortedSourceObjectList = sortFullCopySourceList(fcSourceObjList);
    for (BlockObject fcSourceObj : sortedSourceObjectList) {
      URI fcSourceURI = fcSourceObj.getId();
      if (URIUtil.isType(fcSourceURI, BlockSnapshot.class)) {
        // Full copy of snapshots is not supported for VPLEX.
        return super.create(sortedSourceObjectList, varray, name, createInactive, count, taskId);
      }

      Volume vplexSrcVolume = (Volume) fcSourceObj;
      String copyName = name + (sortedSourceObjectList.size() > 1 ? "-" + ++sourceCounter : "");

      // Create a volume descriptor for the source VPLEX volume being copied
      // and add it to the descriptors list. Be sure to identify this VPLEX
      // volume as the source volume being copied.
      vplexSrcSystemId = fcSourceObj.getStorageController();
      VolumeDescriptor vplexSrcVolumeDescr =
          new VolumeDescriptor(
              VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexSrcSystemId, fcSourceURI, null, null);
      Map<String, Object> descrParams = new HashMap<String, Object>();
      descrParams.put(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID, Boolean.TRUE);
      vplexSrcVolumeDescr.setParameters(descrParams);
      volumeDescriptors.add(vplexSrcVolumeDescr);

      // Get some info about the VPLEX volume being copied and its storage
      // system.
      Project vplexSrcProject =
          BlockFullCopyUtils.queryFullCopySourceProject(fcSourceObj, _dbClient);
      StorageSystem vplexSrcSystem = _dbClient.queryObject(StorageSystem.class, vplexSrcSystemId);
      Project vplexSystemProject =
          VPlexBlockServiceApiImpl.getVplexProject(vplexSrcSystem, _dbClient, _tenantsService);

      // For the VPLEX volume being copied, determine which of the associated
      // backend volumes is the primary and, for distributed volumes, which
      // is the HA volume. The primary volume will be natively copied, and
      // we need to place and prepare a volume to hold the copy. This copy
      // will be the primary backend volume for the VPLEX volume copy. For
      // a distributed virtual volume, we will need to place and prepare
      // a volume to hold the HA volume of the VPLEX volume copy.
      Volume vplexSrcPrimaryVolume = null;
      Volume vplexSrcHAVolume = null;
      StringSet assocVolumeURIs = vplexSrcVolume.getAssociatedVolumes();
      Iterator<String> assocVolumeURIsIter = assocVolumeURIs.iterator();
      while (assocVolumeURIsIter.hasNext()) {
        URI assocVolumeURI = URI.create(assocVolumeURIsIter.next());
        Volume assocVolume = _dbClient.queryObject(Volume.class, assocVolumeURI);
        if (assocVolume.getVirtualArray().toString().equals(varray.getId().toString())) {
          vplexSrcPrimaryVolume = assocVolume;
        } else {
          vplexSrcHAVolume = assocVolume;
        }
      }

      // Get the capabilities
      VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(fcSourceObj, _dbClient);
      VirtualPoolCapabilityValuesWrapper capabilities =
          getCapabilitiesForFullCopyCreate(fcSourceObj, vpool, count);

      // Get the number of copies to create and the size of the volumes.
      // Note that for the size, we must use the actual provisioned size
      // of the source side backend volume. The size passed in the
      // capabilities will be the size of the VPLEX volume. When the
      // source side backend volume for the copy is provisioned, you
      // might not get that actual size. On VMAX, the size will be slightly
      // larger while for VNX the size will be exactly what is requested.
      // So, if the source side is a VMAX, the source side for the copy
      // will be slightly larger than the size in the capabilities. If the HA
      // side is VNX and we use the size in the capabilities, then you will
      // get exactly that size for the HA backend volume. As a result, source
      // side backend volume for the copy will be slightly larger than the
      // HA side. A VPLEX copy is made by using native full copy to create
      // a copy of the source side backend volume and then provisioning the
      // HA side volume. The new source side backend copy is imported into
      // VPLEX in the same way as for a vpool change that imports a volume
      // to VPLEX. This code in the
      // VPLEX controller creates a local VPLEX volume using the source side
      // copy and for a distributed volume it then attaches as a remote
      // mirror the HA backend volume that is provisioned. If the HA volume
      // is slightly smaller, then this will fail on the VPLEX. So, we must
      // ensure that HA side volume is big enough by using the provisioned
      // capacity of the source side backend volume of the VPLEX volume being
      // copied.
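      // Worked illustration (numbers assumed for the example, not measured):
      // for a 10 GB VPLEX volume, a VMAX source side might provision slightly
      // more than 10 GB after rounding to its internal allocation unit, while
      // a VNX HA side provisions exactly 10 GB. Attaching the smaller HA
      // volume as a mirror of the larger source copy would then fail on the
      // VPLEX, which is why both sides use the provisioned capacity below.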
      long size = vplexSrcPrimaryVolume.getProvisionedCapacity();

      // Place and prepare a volume for each copy to serve as a native
      // copy of a VPLEX backend volume. The VPLEX backend volume that
      // is copied is the backend volume in the same virtual array as the
      // VPLEX volume, i.e., the primary backend volume. Create
      // descriptors for these prepared volumes and add them to the list.
      List<Volume> vplexCopyPrimaryVolumes =
          prepareFullCopyPrimaryVolumes(
              copyName, count, vplexSrcPrimaryVolume, capabilities, volumeDescriptors);

      // If the VPLEX volume being copied is distributed, then the VPLEX
      // HA volume should be non-null. We use the VPLEX scheduler to place
      // and then prepare volumes for the HA volumes of the VPLEX volume
      // copies. This should be done in the same manner as is done for the
      // import volume routine. This is because to form the VPLEX volume
      // copy we import the copy of the primary backend volume.
      List<Volume> vplexCopyHAVolumes = new ArrayList<Volume>();
      if (vplexSrcHAVolume != null) {
        vplexCopyHAVolumes.addAll(
            prepareFullCopyHAVolumes(
                copyName,
                count,
                size,
                vplexSrcSystem,
                vplexSystemProject,
                varray,
                vplexSrcHAVolume,
                taskId,
                volumeDescriptors));
      }

      // For each copy to be created, place and prepare a volume for the
      // primary backend volume copy. When copying a distributed VPLEX
      // volume, we also must place and prepare a volume for the HA
      // backend volume copy. Lastly, we must prepare a volume for the
      // VPLEX volume copy. Create descriptors for these prepared volumes
      // and add them to the volume descriptors list.
      for (int i = 0; i < count; i++) {
        // Prepare a new VPLEX volume for each copy.
        Volume vplexCopyPrimaryVolume = vplexCopyPrimaryVolumes.get(i);
        Volume vplexCopyHAVolume = null;
        if (!vplexCopyHAVolumes.isEmpty()) {
          vplexCopyHAVolume = vplexCopyHAVolumes.get(i);
        }
        Volume vplexCopyVolume =
            prepareFullCopyVPlexVolume(
                copyName,
                count,
                i,
                size,
                vplexSrcVolume,
                vplexSrcProject,
                varray,
                vpool,
                vplexSrcSystemId,
                vplexCopyPrimaryVolume,
                vplexCopyHAVolume,
                taskId,
                volumeDescriptors);
        vplexCopyVolumes.add(vplexCopyVolume);

        // Create task for each copy.
        Operation op = vplexCopyVolume.getOpStatus().get(taskId);
        TaskResourceRep task = toTask(vplexCopyVolume, taskId, op);
        taskList.getTaskList().add(task);
      }
    }

    // Invoke the VPLEX controller to create the copies.
    try {
      s_logger.info("Getting VPlex controller {}.", taskId);
      VPlexController controller =
          getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString());
      // TBD controller needs to be updated to handle CGs.
      controller.createFullCopy(vplexSrcSystemId, volumeDescriptors, taskId);
      s_logger.info("Successfully invoked controller.");
    } catch (InternalException e) {
      s_logger.error("Controller error", e);

      // Update the status for the VPLEX volume copies and their
      // corresponding tasks.
      for (Volume vplexCopyVolume : vplexCopyVolumes) {
        Operation op = vplexCopyVolume.getOpStatus().get(taskId);
        if (op != null) {
          op.error(e);
          vplexCopyVolume.getOpStatus().updateTaskStatus(taskId, op);
          _dbClient.persistObject(vplexCopyVolume);
          for (TaskResourceRep task : taskList.getTaskList()) {
            if (task.getResource().getId().equals(vplexCopyVolume.getId())) {
              task.setState(op.getStatus());
              task.setMessage(op.getMessage());
              break;
            }
          }
        }
      }

      // Mark all volumes inactive, except for the VPLEX volume
      // we were trying to copy.
      for (VolumeDescriptor descriptor : volumeDescriptors) {
        if (descriptor.getParameters().get(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID) == null) {
          Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
          volume.setInactive(true);
          _dbClient.persistObject(volume);
        }
      }
    }

    return taskList;
  }
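
The catch block above follows a compensation pattern: on controller failure, mark each copy's task failed, then retire every volume prepared for the operation, sparing only the volume flagged as the copy source. A simplified, self-contained sketch of that shape (the types are stand-ins for ViPR's VolumeDescriptor, Operation, and TaskResourceRep, not the real classes):

  import java.util.Arrays;
  import java.util.List;

  final class CopyRollbackSketch {
    static final class PreparedVolume {
      final String id;
      final boolean isCopySource;
      PreparedVolume(String id, boolean isCopySource) {
        this.id = id;
        this.isCopySource = isCopySource;
      }
    }

    static void createCopies(Runnable controllerCall, List<PreparedVolume> prepared) {
      try {
        controllerCall.run();
      } catch (RuntimeException e) {
        // Compensation: the per-copy task status would be set to error here;
        // then retire everything prepared, except the copy source itself.
        for (PreparedVolume v : prepared) {
          if (!v.isCopySource) {
            System.out.println("marking inactive: " + v.id);
          }
        }
      }
    }

    public static void main(String[] args) {
      List<PreparedVolume> prepared = Arrays.asList(
          new PreparedVolume("src-vplex-volume", true),
          new PreparedVolume("copy-primary", false),
          new PreparedVolume("copy-ha", false));
      createCopies(() -> { throw new RuntimeException("controller error"); }, prepared);
    }
  }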