/**
  * Validates that the target volume's virtual array is configured as a remote copy virtual array
  * in the already ingested source volume's virtual pool.
  *
  * @param unManagedVolume the unmanaged SRDF target volume being ingested
  * @param virtualArray the virtual array requested for the target volume
  */
 private void validateTargetVolumeVpoolWithSourceVolume(
     UnManagedVolume unManagedVolume, VirtualArray virtualArray) {
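    // Derive the native GUID of the managed source volume by swapping the
    // UNMANAGEDVOLUME prefix for the VOLUME prefix, then look it up by native GUID.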
   String sourceUnManagedVolumeId =
       PropertySetterUtil.extractValueFromStringSet(
           SupportedVolumeInformation.REMOTE_MIRROR_SOURCE_VOLUME.toString(),
           unManagedVolume.getVolumeInformation());
   String sourceVolumeId =
       sourceUnManagedVolumeId.replace(
           VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
   List<URI> sourceUris =
       _dbClient.queryByConstraint(
           AlternateIdConstraint.Factory.getVolumeNativeGuidConstraint(sourceVolumeId));
   if (sourceUris.isEmpty()) {
     _logger.info(
         "Source {} Not found for target {}", sourceVolumeId, unManagedVolume.getNativeGuid());
   } else {
      // The source volume has already been ingested, so validate against its vpool.
     Volume sourceVolume = _dbClient.queryObject(Volume.class, sourceUris.get(0));
      // Check whether the source volume's vpool actually has this target
      // volume's varray configured as a remote copy varray.
     VirtualPool sourceVPool =
         _dbClient.queryObject(VirtualPool.class, sourceVolume.getVirtualPool());
     Map<URI, VpoolRemoteCopyProtectionSettings> settings =
         sourceVPool.getRemoteProtectionSettings(sourceVPool, _dbClient);
     if (null == settings || settings.isEmpty() || !settings.containsKey(virtualArray.getId())) {
        _logger.info(
            "Target volume's varray {} does not match a remote copy varray of the already ingested source volume's virtual pool",
            virtualArray.getId());
       throw IngestionException.exceptions.unmanagedSRDFTargetVolumeVArrayMismatch(
           unManagedVolume.getLabel(), sourceVolume.getVirtualArray().toString());
     }
   }
 }
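  /**
   * {@inheritDoc}
   *
   * For VPLEX volumes, refreshes the cluster/varray caches when they have expired and verifies
   * that the unmanaged volume can be ingested into the requested virtual array before delegating
   * to the superclass implementation.
   */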
  @Override
  public <T extends BlockObject> T ingestBlockObjects(
      List<URI> systemCache,
      List<URI> poolCache,
      StorageSystem system,
      UnManagedVolume unManagedVolume,
      VirtualPool vPool,
      VirtualArray virtualArray,
      Project project,
      TenantOrg tenant,
      List<UnManagedVolume> unManagedVolumesToBeDeleted,
      Map<String, BlockObject> createdObjectMap,
      Map<String, List<DataObject>> updatedObjectMap,
      boolean unManagedVolumeExported,
      Class<T> clazz,
      Map<String, StringBuffer> taskStatusMap)
      throws IngestionException {
    // For VPLEX volumes, verify that it is OK to ingest the unmanaged
    // volume into the requested virtual array.

    long timeRightNow = new Date().getTime();
    if (timeRightNow > (cacheLastRefreshed + CACHE_TIMEOUT)) {
      _logger.debug("clearing vplex ingestion api info cache");
      clusterIdToNameMap.clear();
      varrayToClusterIdMap.clear();
      cacheLastRefreshed = timeRightNow;
    }

    if (!VolumeIngestionUtil.isValidVarrayForUnmanagedVolume(
        unManagedVolume,
        virtualArray.getId(),
        clusterIdToNameMap,
        varrayToClusterIdMap,
        _dbClient)) {
      _logger.warn(
          "UnManaged Volume {} cannot be ingested into the requested varray. Skipping Ingestion.",
          unManagedVolume.getLabel());

      throw IngestionException.exceptions.varrayIsInvalidForVplexVolume(
          virtualArray.getLabel(), unManagedVolume.getLabel());
    }

    return super.ingestBlockObjects(
        systemCache,
        poolCache,
        system,
        unManagedVolume,
        vPool,
        virtualArray,
        project,
        tenant,
        unManagedVolumesToBeDeleted,
        createdObjectMap,
        updatedObjectMap,
        unManagedVolumeExported,
        clazz,
        taskStatusMap);
  }
  /**
   * Allocate, initialize and persist the state of the Bucket being created.
   *
   * @param param the Bucket creation request parameters
   * @param project the project that will own the bucket
   * @param tenantOrg the tenant organization that will own the bucket
   * @param neighborhood the virtual array in which the bucket is placed
   * @param vpool the virtual pool used for the bucket
   * @param flags internal object flags to apply, if any
   * @param placement the storage placement recommendation for the bucket
   * @return the newly created and persisted Bucket
   */
  private Bucket prepareBucket(
      BucketParam param,
      Project project,
      TenantOrg tenantOrg,
      VirtualArray neighborhood,
      VirtualPool vpool,
      DataObject.Flag[] flags,
      BucketRecommendation placement) {
    _log.debug("Preparing Bucket creation for Param : {}", param);
    StoragePool pool = null;
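    // Populate the new Bucket from the request parameters and the owning
    // tenant, project, virtual pool and virtual array.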
    Bucket bucket = new Bucket();
    bucket.setId(URIUtil.createId(Bucket.class));
    bucket.setLabel(param.getLabel().replaceAll(SPECIAL_CHAR_REGEX, ""));
    bucket.setHardQuota(SizeUtil.translateSize(param.getHardQuota()));
    bucket.setSoftQuota(SizeUtil.translateSize(param.getSoftQuota()));
    bucket.setRetention(Integer.valueOf(param.getRetention()));
    bucket.setOwner(getOwner(param.getOwner()));
    bucket.setNamespace(tenantOrg.getNamespace());
    bucket.setVirtualPool(param.getVpool());
    if (project != null) {
      bucket.setProject(new NamedURI(project.getId(), bucket.getLabel()));
    }
    bucket.setTenant(new NamedURI(tenantOrg.getId(), param.getLabel()));
    bucket.setVirtualArray(neighborhood.getId());

    if (null != placement.getSourceStoragePool()) {
      pool = _dbClient.queryObject(StoragePool.class, placement.getSourceStoragePool());
      if (null != pool) {
        bucket.setProtocol(new StringSet());
        bucket
            .getProtocol()
            .addAll(
                VirtualPoolUtil.getMatchingProtocols(vpool.getProtocols(), pool.getProtocols()));
      }
    }

    bucket.setStorageDevice(placement.getSourceStorageSystem());
    bucket.setPool(placement.getSourceStoragePool());
    bucket.setOpStatus(new OpStatusMap());

    // Bucket name to be used at Storage System
    String bucketName = project.getLabel() + UNDER_SCORE + param.getLabel();
    bucket.setName(bucketName.replaceAll(SPECIAL_CHAR_REGEX, ""));

    // Update Bucket path
    StringBuilder bucketPath = new StringBuilder();
    bucketPath
        .append(tenantOrg.getNamespace())
        .append(SLASH)
        .append(project.getLabel())
        .append(SLASH)
        .append(param.getLabel());
    bucket.setPath(bucketPath.toString());

    if (flags != null) {
      bucket.addInternalFlags(flags);
    }
    _dbClient.createObject(bucket);
    return bucket;
  }
  /**
   * Get device registered status of a virtual array
   *
   * @param id the URN of a ViPR varray
   * @return the VirtualArrayInternalFlags
   */
  @GET
  @Path("/{id}/deviceRegistered")
  @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  public VirtualArrayInternalFlags getDeviceRegistered(@PathParam("id") URI id) {
    VirtualArray varray = getVirtualArrayById(id, true);

    VirtualArrayInternalFlags varrayInternalFlags = new VirtualArrayInternalFlags();
    varrayInternalFlags.setDeviceRegistered(varray.getDeviceRegistered());

    auditOp(
        OperationTypeEnum.GET_VARRAY_REGISTERED,
        true,
        null,
        id.toString(),
        varray.getLabel(),
        String.valueOf(varray.getDeviceRegistered()));
    return varrayInternalFlags;
  }
  /**
   * Unset protection type assigned to the varray
   *
    * @param id the URN of a ViPR varray
   * @prereq none
   * @brief unset protection type field
   * @return No data returned in response body
   */
  @DELETE
  @Path("/{id}/protectionType")
  public Response unsetProtectionType(@PathParam("id") URI id) {
    VirtualArray varray = getVirtualArrayById(id, true);

    String origProtectionType =
        (varray.getProtectionType() == null) ? "" : varray.getProtectionType();

    varray.setProtectionType("");
    _dbClient.persistObject(varray);

    auditOp(
        OperationTypeEnum.UNSET_VARRAY_PROTECTIONTYPE,
        true,
        null,
        id.toString(),
        varray.getLabel(),
        origProtectionType);
    return Response.ok().build();
  }
  /**
   * Set device registered flag for varray
   *
   * @param id the URN of a ViPR varray
    * @param deviceRegistered the device registered status
   * @return the updated virtual array info
   */
  @PUT
  @Path("/{id}/deviceRegistered")
  @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  public VirtualArrayRestRep setDeviceRegistered(
      @PathParam("id") URI id, @QueryParam("value") boolean deviceRegistered) {

    VirtualArray varray = getVirtualArrayById(id, true);

    varray.setDeviceRegistered(deviceRegistered);
    _dbClient.persistObject(varray);

    auditOp(
        OperationTypeEnum.SET_VARRAY_REGISTERED,
        true,
        null,
        id.toString(),
        varray.getLabel(),
        String.valueOf(deviceRegistered));
    return map(varray);
  }
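  /**
   * Creates test data: a virtual array, a network assigned to that virtual array, and a second
   * network with no virtual array.
   */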
  @SuppressWarnings("deprecation")
  @Override
  protected void prepareData() throws Exception {

    // Create a virtual array.
    VirtualArray varray = new VirtualArray();
    varrayURI = URIUtil.createId(VirtualArray.class);
    varray.setId(varrayURI);
    _dbClient.createObject(varray);

    // Create a network and set the virtual array.
    Network network = new Network();
    network.setId(URIUtil.createId(Network.class));
    network.setLabel("NetworkWithVarray");
    network.setVirtualArray(varrayURI);
    _dbClient.createObject(network);

    // Create another network without a virtual array.
    network = new Network();
    network.setId(URIUtil.createId(Network.class));
    network.setLabel("NetworkWithoutVArray");
    _dbClient.createObject(network);
  }
  /**
    * Get the protection type assigned to a virtual array
   *
   * @param id the URN of a ViPR varray
   * @return the VirtualArrayInternalFlags
   */
  @GET
  @Path("/{id}/protectionType")
  @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  public VirtualArrayInternalFlags getProtectionType(@PathParam("id") URI id) {

    String protectionType = "";
    VirtualArray varray = getVirtualArrayById(id, true);

    if (varray.getProtectionType() != null) {
      protectionType = varray.getProtectionType();
    }

    VirtualArrayInternalFlags varrayInternalFlags = new VirtualArrayInternalFlags();
    varrayInternalFlags.setProtectionType(protectionType);

    auditOp(
        OperationTypeEnum.GET_VARRAY_PROTECTIONTYPE,
        true,
        null,
        id.toString(),
        varray.getLabel(),
        protectionType);
    return varrayInternalFlags;
  }
  /**
   * Set protection type for varray
   *
   * @param id the URN of a ViPR varray
    * @param protectionType the protection type value
   * @return the updated virtual array info
   */
  @PUT
  @Path("/{id}/protectionType")
  @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  public VirtualArrayRestRep setProtectionType(
      @PathParam("id") URI id, @QueryParam("value") String protectionType) {
    if (protectionType == null || protectionType.isEmpty()) {
      throw APIException.badRequests.invalidParameterProtectionTypeIsEmpty();
    }

    VirtualArray varray = getVirtualArrayById(id, true);

    varray.setProtectionType(protectionType);
    _dbClient.persistObject(varray);

    auditOp(
        OperationTypeEnum.SET_VARRAY_PROTECTIONTYPE,
        true,
        null,
        id.toString(),
        varray.getLabel(),
        protectionType);
    return map(varray);
  }
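 /**
  * {@inheritDoc}
  *
  * If the unmanaged VPLEX volume belongs to a consistency group, verifies that a matching
  * consistency group can be identified; otherwise ingestion is aborted with an exception.
  */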
 @Override
 protected void checkUnManagedVolumeAddedToCG(
     UnManagedVolume unManagedVolume,
     VirtualArray virtualArray,
     TenantOrg tenant,
     Project project,
     VirtualPool vPool) {
   if (VolumeIngestionUtil.checkUnManagedResourceAddedToConsistencyGroup(unManagedVolume)) {
     URI consistencyGroupUri =
         VolumeIngestionUtil.getVplexConsistencyGroup(
             unManagedVolume,
             vPool,
             project.getId(),
             tenant.getId(),
             virtualArray.getId(),
             _dbClient);
     if (null == consistencyGroupUri) {
       _logger.warn(
           "A Consistency Group for the VPLEX volume could not be determined. Skipping Ingestion.");
       throw IngestionException.exceptions
           .unmanagedVolumeVplexConsistencyGroupCouldNotBeIdentified(unManagedVolume.getLabel());
     }
   }
 }
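  /**
   * Validates the bucket creation request, selects a storage placement recommendation, prepares
   * and persists the Bucket, and invokes the object controller to create it on the storage system.
   *
   * @param param the Bucket creation request parameters
   * @param project the project that will own the bucket
   * @param tenant the tenant organization that will own the bucket
   * @param flags internal object flags to apply, if any
   * @return the task tracking the bucket creation
   * @throws InternalException on an internal error
   */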
  private TaskResourceRep initiateBucketCreation(
      BucketParam param, Project project, TenantOrg tenant, DataObject.Flag[] flags)
      throws InternalException {
    ArgValidator.checkFieldUriType(param.getVpool(), VirtualPool.class, "vpool");
    ArgValidator.checkFieldUriType(param.getVarray(), VirtualArray.class, "varray");

    Long softQuota = SizeUtil.translateSize(param.getSoftQuota());
    Long hardQuota = SizeUtil.translateSize(param.getHardQuota());
    Integer retention = Integer.valueOf(param.getRetention());

    // Hard quota must be greater than the soft quota
    verifyQuotaValues(softQuota, hardQuota, param.getLabel());

    // check varray
    VirtualArray neighborhood = _dbClient.queryObject(VirtualArray.class, param.getVarray());
    ArgValidator.checkEntity(neighborhood, param.getVarray(), false);
    _permissionsHelper.checkTenantHasAccessToVirtualArray(tenant.getId(), neighborhood);

    // check vpool reference
    VirtualPool cos = _dbClient.queryObject(VirtualPool.class, param.getVpool());
    _permissionsHelper.checkTenantHasAccessToVirtualPool(tenant.getId(), cos);
    ArgValidator.checkEntity(cos, param.getVpool(), false);
    if (!VirtualPool.Type.object.name().equals(cos.getType())) {
      throw APIException.badRequests.virtualPoolNotForObjectStorage(VirtualPool.Type.object.name());
    }

    // Verify retention. It is validated only when both the requested retention and the
    // vpool's maximum retention are configured.
    if (retention != 0 && cos.getMaxRetention() != 0 && retention > cos.getMaxRetention()) {
      throw APIException.badRequests.insufficientRetentionForVirtualPool(cos.getLabel(), "bucket");
    }

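    // Build placement capabilities: request a single resource with thin provisioning disabled.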
    VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();

    capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
    capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.FALSE);

    List<BucketRecommendation> placement =
        _bucketScheduler.placeBucket(neighborhood, cos, capabilities);
    if (placement.isEmpty()) {
      throw APIException.badRequests.noMatchingStoragePoolsForVpoolAndVarray(
          cos.getId(), neighborhood.getId());
    }

    // Randomly select a recommended pool
    Collections.shuffle(placement);
    BucketRecommendation recommendation = placement.get(0);

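    // Generate a task id and prepare/persist the Bucket before invoking the controller.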
    String task = UUID.randomUUID().toString();
    Bucket bucket = prepareBucket(param, project, tenant, neighborhood, cos, flags, recommendation);

    _log.info(
        String.format(
            "createBucket --- Bucket: %1$s, StoragePool: %2$s, StorageSystem: %3$s",
            bucket.getId(),
            recommendation.getSourceStoragePool(),
            recommendation.getSourceStorageSystem()));

    Operation op =
        _dbClient.createTaskOpStatus(
            Bucket.class, bucket.getId(), task, ResourceOperationTypeEnum.CREATE_BUCKET);
    op.setDescription("Bucket Create");

    // Controller invocation
    StorageSystem system =
        _dbClient.queryObject(StorageSystem.class, recommendation.getSourceStorageSystem());
    ObjectController controller = getController(ObjectController.class, system.getSystemType());
    controller.createBucket(
        recommendation.getSourceStorageSystem(),
        recommendation.getSourceStoragePool(),
        bucket.getId(),
        bucket.getName(),
        bucket.getNamespace(),
        bucket.getRetention(),
        bucket.getHardQuota(),
        bucket.getSoftQuota(),
        bucket.getOwner(),
        task);

    auditOp(
        OperationTypeEnum.CREATE_BUCKET,
        true,
        AuditLogManager.AUDITOP_BEGIN,
        param.getLabel(),
        param.getHardQuota(),
        neighborhood.getId().toString(),
        project == null ? null : project.getId().toString());

    return toTask(bucket, task, op);
  }
  /** {@inheritDoc} */
  @Override
  public TaskList create(
      List<BlockObject> fcSourceObjList,
      VirtualArray varray,
      String name,
      boolean createInactive,
      int count,
      String taskId) {

    // Populate the descriptors list with all volumes required
    // to create the VPLEX volume copies.
    int sourceCounter = 0;
    URI vplexSrcSystemId = null;
    TaskList taskList = new TaskList();
    List<Volume> vplexCopyVolumes = new ArrayList<Volume>();
    List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
    List<BlockObject> sortedSourceObjectList = sortFullCopySourceList(fcSourceObjList);
    for (BlockObject fcSourceObj : sortedSourceObjectList) {
      URI fcSourceURI = fcSourceObj.getId();
      if (URIUtil.isType(fcSourceURI, BlockSnapshot.class)) {
        // Full copy of snapshots is not supported for VPLEX.
        return super.create(sortedSourceObjectList, varray, name, createInactive, count, taskId);
      }

      Volume vplexSrcVolume = (Volume) fcSourceObj;
      String copyName = name + (sortedSourceObjectList.size() > 1 ? "-" + ++sourceCounter : "");

      // Create a volume descriptor for the source VPLEX volume being copied
      // and add it to the descriptors list. Be sure to identify this VPLEX
      // volume as the source volume being copied.
      vplexSrcSystemId = fcSourceObj.getStorageController();
      VolumeDescriptor vplexSrcVolumeDescr =
          new VolumeDescriptor(
              VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexSrcSystemId, fcSourceURI, null, null);
      Map<String, Object> descrParams = new HashMap<String, Object>();
      descrParams.put(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID, Boolean.TRUE);
      vplexSrcVolumeDescr.setParameters(descrParams);
      volumeDescriptors.add(vplexSrcVolumeDescr);

      // Get some info about the VPLEX volume being copied and its storage
      // system.
      Project vplexSrcProject =
          BlockFullCopyUtils.queryFullCopySourceProject(fcSourceObj, _dbClient);
      StorageSystem vplexSrcSystem = _dbClient.queryObject(StorageSystem.class, vplexSrcSystemId);
      Project vplexSystemProject =
          VPlexBlockServiceApiImpl.getVplexProject(vplexSrcSystem, _dbClient, _tenantsService);

      // For the VPLEX volume being copied, determine which of the associated
      // backend volumes is the primary and, for distributed volumes, which
      // is the HA volume. The primary volume will be natively copied and
      // we need to place and prepare a volume to hold the copy. This copy
      // will be the primary backend volume for the VPLEX volume copy. For
      // a distributed virtual volume, we will need to place and prepare
      // a volume to hold the HA volume of the VPLEX volume copy.
      Volume vplexSrcPrimaryVolume = null;
      Volume vplexSrcHAVolume = null;
      StringSet assocVolumeURIs = vplexSrcVolume.getAssociatedVolumes();
      Iterator<String> assocVolumeURIsIter = assocVolumeURIs.iterator();
      while (assocVolumeURIsIter.hasNext()) {
        URI assocVolumeURI = URI.create(assocVolumeURIsIter.next());
        Volume assocVolume = _dbClient.queryObject(Volume.class, assocVolumeURI);
        if (assocVolume.getVirtualArray().toString().equals(varray.getId().toString())) {
          vplexSrcPrimaryVolume = assocVolume;
        } else {
          vplexSrcHAVolume = assocVolume;
        }
      }

      // Get the capabilities
      VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(fcSourceObj, _dbClient);
      VirtualPoolCapabilityValuesWrapper capabilities =
          getCapabilitiesForFullCopyCreate(fcSourceObj, vpool, count);

      // Get the number of copies to create and the size of the volumes.
      // Note that for the size, we must use the actual provisioned size
      // of the source side backend volume. The size passed in the
      // capabilities will be the size of the VPLEX volume. When the
      // source side backend volume for the copy is provisioned, you
      // might not get that actual size. On VMAX, the size will be slightly
      // larger while for VNX the size will be exactly what is requested.
      // So, if the source side is a VMAX, the source side for the copy
      // will be slightly larger than the size in the capabilities. If the HA
      // side is VNX and we use the size in the capabilities, then you will
      // get exactly that size for the HA backend volume. As a result, source
      // side backend volume for the copy will be slightly larger than the
      // HA side. A VPLEX copy is made by using native full copy to create
      // a copy of the source side backend volume. It then provisions the
      // HA side volume. The new source side
      // backend copy is then imported into VPLEX in the same way as is done
      // for a vpool change that imports a volume to VPLEX. This code in the
      // VPLEX controller creates a local VPLEX volume using the source side
      // copy and for a distributed volume it then attaches as a remote
      // mirror the HA backend volume that is provisioned. If the HA volume
      // is slightly smaller, then this will fail on the VPLEX. So, we must
      // ensure that HA side volume is big enough by using the provisioned
      // capacity of the source side backend volume of the VPLEX volume being
      // copied.
      long size = vplexSrcPrimaryVolume.getProvisionedCapacity();

      // Place and prepare a volume for each copy to serve as a native
      // copy of a VPLEX backend volume. The VPLEX backend volume that
      // is copied is the backend volume in the same virtual array as the
      // VPLEX volume, i.e., the primary backend volume. Create
      // descriptors for these prepared volumes and add them to the list.
      List<Volume> vplexCopyPrimaryVolumes =
          prepareFullCopyPrimaryVolumes(
              copyName, count, vplexSrcPrimaryVolume, capabilities, volumeDescriptors);

      // If the VPLEX volume being copied is distributed, then the VPLEX
      // HA volume should be non-null. We use the VPLEX scheduler to place
      // and then prepare volumes for the HA volumes of the VPLEX volume
      // copies. This should be done in the same manner as is done for the
      // import volume routine. This is because to form the VPLEX volume
      // copy we import the copy of the primary backend volume.
      List<Volume> vplexCopyHAVolumes = new ArrayList<Volume>();
      if (vplexSrcHAVolume != null) {
        vplexCopyHAVolumes.addAll(
            prepareFullCopyHAVolumes(
                copyName,
                count,
                size,
                vplexSrcSystem,
                vplexSystemProject,
                varray,
                vplexSrcHAVolume,
                taskId,
                volumeDescriptors));
      }

      // For each copy to be created, place and prepare a volume for the
      // primary backend volume copy. When copying a distributed VPLEX
      // volume, we also must place and prepare a volume for the HA
      // backend volume copy. Lastly, we must prepare a volume for the
      // VPLEX volume copy. Create descriptors for these prepared volumes
      // and add them to the volume descriptors list.
      for (int i = 0; i < count; i++) {
        // Prepare a new VPLEX volume for each copy.
        Volume vplexCopyPrimaryVolume = vplexCopyPrimaryVolumes.get(i);
        Volume vplexCopyHAVolume = null;
        if (vplexCopyHAVolumes.size() != 0) {
          vplexCopyHAVolume = vplexCopyHAVolumes.get(i);
        }
        Volume vplexCopyVolume =
            prepareFullCopyVPlexVolume(
                copyName,
                count,
                i,
                size,
                vplexSrcVolume,
                vplexSrcProject,
                varray,
                vpool,
                vplexSrcSystemId,
                vplexCopyPrimaryVolume,
                vplexCopyHAVolume,
                taskId,
                volumeDescriptors);
        vplexCopyVolumes.add(vplexCopyVolume);

        // Create task for each copy.
        Operation op = vplexCopyVolume.getOpStatus().get(taskId);
        TaskResourceRep task = toTask(vplexCopyVolume, taskId, op);
        taskList.getTaskList().add(task);
      }
    }

    // Invoke the VPLEX controller to create the copies.
    try {
      s_logger.info("Getting VPlex controller {}.", taskId);
      VPlexController controller =
          getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString());
      // TBD controller needs to be updated to handle CGs.
      controller.createFullCopy(vplexSrcSystemId, volumeDescriptors, taskId);
      s_logger.info("Successfully invoked controller.");
    } catch (InternalException e) {
      s_logger.error("Controller error", e);

      // Update the status for the VPLEX volume copies and their
      // corresponding tasks.
      for (Volume vplexCopyVolume : vplexCopyVolumes) {
        Operation op = vplexCopyVolume.getOpStatus().get(taskId);
        if (op != null) {
          op.error(e);
          vplexCopyVolume.getOpStatus().updateTaskStatus(taskId, op);
          _dbClient.persistObject(vplexCopyVolume);
          for (TaskResourceRep task : taskList.getTaskList()) {
            if (task.getResource().getId().equals(vplexCopyVolume.getId())) {
              task.setState(op.getStatus());
              task.setMessage(op.getMessage());
              break;
            }
          }
        }
      }

      // Mark all volumes inactive, except for the VPLEX volume
      // we were trying to copy.
      for (VolumeDescriptor descriptor : volumeDescriptors) {
        if (descriptor.getParameters().get(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID) == null) {
          Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
          volume.setInactive(true);
          _dbClient.persistObject(volume);
        }
      }
    }

    return taskList;
  }