Code Example #1
 @Override
 public void process() {
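   // Walk every Volume in the database and, for volumes that reside on a VPLEX
   // storage system, default the protocol set to FC when it is not already set.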
   DbClient dbClient = getDbClient();
   List<URI> volumeURIs = dbClient.queryByType(Volume.class, false);
   Iterator<Volume> volumesIter = dbClient.queryIterativeObjects(Volume.class, volumeURIs);
   while (volumesIter.hasNext()) {
     Volume volume = volumesIter.next();
     URI systemURI = volume.getStorageController();
     if (!NullColumnValueGetter.isNullURI(systemURI)) {
       StorageSystem system = dbClient.queryObject(StorageSystem.class, systemURI);
       if ((system != null)
           && (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType()))) {
         // This is a VPLEX volume. If not already set,
         // set the protocols to FC.
         StringSet protocols = volume.getProtocol();
         if (protocols == null) {
           protocols = new StringSet();
           protocols.add(StorageProtocol.Block.FC.name());
           volume.setProtocol(protocols);
           dbClient.persistObject(volume);
         }
       }
     }
   }
 }
Code Example #2
   /**
   * Verify the RP+VPlex consistency group and its volumes have been properly migrated.
   *
   * @throws Exception
   */
  private void verifyRpVplexConsistencyGroupMigration() throws Exception {
    log.info("Verifying RP+VPlex BlockConsistencyGroup and associated volume migration.");
    List<BlockObject> blockObjects = new ArrayList<BlockObject>();

    BlockConsistencyGroup rpVplexPrimaryCg =
        _dbClient.queryObject(BlockConsistencyGroup.class, rpVplexPrimaryConsistencyGroupURI);

    // Verify the RP+VPLEX consistency group was properly migrated
    verifyConsistencyGroupMigration(rpVplexPrimaryCg, Types.RP.name(), Types.VPLEX.name());

    Assert.assertNotNull(
        "The RP+VPlex BlockConsistencyGroup.systemConsistencyGroups field should be populated.",
        rpVplexPrimaryCg.getSystemConsistencyGroups());
    Assert.assertNotNull(
        "The RP+VPlex BlockConsistencyGroup.systemConsistencyGroups field should contain an entry for "
            + protectionSystemURI.toString(),
        rpVplexPrimaryCg.getSystemConsistencyGroups().get(protectionSystemURI.toString()));
    Assert.assertTrue(
        "The RP+VPlex BlockConsistencyGroup.systemConsistencyGroups field should contain a mapping for "
            + protectionSystemURI.toString()
            + "-> ViPR-"
            + rpVplexPrimaryCg.getLabel(),
        rpVplexPrimaryCg
            .getSystemConsistencyGroups()
            .get(protectionSystemURI.toString())
            .contains("ViPR-" + rpVplexPrimaryCg.getLabel()));

     // Verify that the primary CG has a mapping reference for each VPlex storage system/CG name pair.
    for (URI rpVplexVolumeId : rpVplexVolumeToCgMapping.keySet()) {
      Volume rpVplexVolume = _dbClient.queryObject(Volume.class, rpVplexVolumeId);
      blockObjects.add(rpVplexVolume);

      // Get the VPlex consistency group
      URI cgUri = rpVplexVolumeToCgMapping.get(rpVplexVolumeId);
      BlockConsistencyGroup vplexCg = _dbClient.queryObject(BlockConsistencyGroup.class, cgUri);

      String cgName = vplexCg.getLabel();
      String clusterName = getVPlexClusterFromVolume(rpVplexVolume);
      String storageSystem = rpVplexVolume.getStorageController().toString();
      String clusterCgName = BlockConsistencyGroupUtils.buildClusterCgName(clusterName, cgName);

      // Verify that primary CG contains the correct mapping
      Assert.assertTrue(
          "The RP+VPlex BlockConsistencyGroup.systemConsistencyGroups field should contain a mapping for "
              + storageSystem
              + "->"
              + clusterCgName,
          rpVplexPrimaryCg.getSystemConsistencyGroups().get(storageSystem).contains(clusterCgName));

      // Verify that the VPlex CG has been marked for deletion
      Assert.assertTrue(
          "The VPlex BlockConsistencyGroup " + vplexCg.getLabel() + "should be inactive.",
          vplexCg.getInactive());
    }

    // Verify the volume migration took place correctly
    verifyBlockObjects(blockObjects);
  }
Code Example #3
  /** {@inheritDoc} */
  @Override
  public void validateFullCopyCreateRequest(List<BlockObject> fcSourceObjList, int count) {
    if (fcSourceObjList.size() > 0) {
      URI fcSourceObjURI = fcSourceObjList.get(0).getId();
      if (URIUtil.isType(fcSourceObjURI, BlockSnapshot.class)) {
        // Currently you cannot create a full copy of a VPLEX snapshot.
        throw APIException.badRequests.cantCreateFullCopyForVPlexSnapshot();
      } else {
        // Call super first.
        super.validateFullCopyCreateRequest(fcSourceObjList, count);

        // Platform specific checks.
        for (BlockObject fcSourceObj : fcSourceObjList) {
          Volume fcSourceVolume = (Volume) fcSourceObj;
          StorageSystem system =
              _dbClient.queryObject(StorageSystem.class, fcSourceVolume.getStorageController());
          if (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType())) {
            // If the volume is a VPLEX volume, then we need to be sure that the
            // storage pool of the source backend volume of the VPLEX volume,
            // which is the volume used to create the native full copy, supports
            // full copies.
            Volume srcBackendVolume =
                VPlexUtil.getVPLEXBackendVolume(fcSourceVolume, true, _dbClient, true);
            StoragePool storagePool =
                _dbClient.queryObject(StoragePool.class, srcBackendVolume.getPool());
            verifyFullCopySupportedForStoragePool(storagePool);

            // If the full copy source is itself a full copy that is not yet
            // detached, and the native full copy, i.e., the source-side
            // backend volume, is on a VNX array, then creating a full copy of
            // the volume will fail. As such, we prevent it.
            if ((BlockFullCopyUtils.isVolumeFullCopy(fcSourceVolume, _dbClient))
                && (!BlockFullCopyUtils.isFullCopyDetached(fcSourceVolume, _dbClient))) {
              URI backendSystemURI = srcBackendVolume.getStorageController();
              StorageSystem backendSystem =
                  _dbClient.queryObject(StorageSystem.class, backendSystemURI);
              if (DiscoveredDataObject.Type.vnxblock.name().equals(backendSystem.getSystemType())) {
                throw APIException.badRequests.cantCreateFullCopyOfVPlexFullCopyUsingVNX();
              }
            }
          }
        }
      }
    }
  }
Code Example #4
  /**
   * Places and prepares the primary copy volumes when copying a VPLEX virtual volume.
   *
   * @param name The base name for the volume.
   * @param copyCount The number of copies to be made.
   * @param srcPrimaryVolume The primary volume of the VPLEX volume being copied.
   * @param srcCapabilities The capabilities of the primary volume.
   * @param volumeDescriptors The list of descriptors.
   * @return A list of the prepared primary volumes for the VPLEX volume copy.
   */
  private List<Volume> prepareFullCopyPrimaryVolumes(
      String name,
      int copyCount,
      Volume srcPrimaryVolume,
      VirtualPoolCapabilityValuesWrapper srcCapabilities,
      List<VolumeDescriptor> volumeDescriptors) {

    List<Volume> copyPrimaryVolumes = new ArrayList<Volume>();

    // Get the placement recommendations for the primary volume copies.
    // Use the same method as is done for native volume copy.
    VirtualArray vArray =
        _dbClient.queryObject(VirtualArray.class, srcPrimaryVolume.getVirtualArray());
    VirtualPool vPool = _dbClient.queryObject(VirtualPool.class, srcPrimaryVolume.getVirtualPool());
    List<VolumeRecommendation> recommendations =
        ((VPlexScheduler) _scheduler)
            .getBlockScheduler()
            .getRecommendationsForVolumeClones(vArray, vPool, srcPrimaryVolume, srcCapabilities);
    if (recommendations.isEmpty()) {
      throw APIException.badRequests.noStorageForPrimaryVolumesForVplexVolumeCopies();
    }

    // Prepare the copy volumes for each recommendation. Again, this is
    // done in the same manner as for a native volume copy.
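    // The copy volumes are named with a "-0" suffix; when more than one copy is
    // requested, an incrementing copy index is also supplied for each prepared copy.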
    StringBuilder nameBuilder = new StringBuilder(name);
    nameBuilder.append("-0");
    int copyIndex = (copyCount > 1) ? 1 : 0;
    for (VolumeRecommendation recommendation : recommendations) {
      Volume volume =
          StorageScheduler.prepareFullCopyVolume(
              _dbClient,
              nameBuilder.toString(),
              srcPrimaryVolume,
              recommendation,
              copyIndex++,
              srcCapabilities);
      volume.addInternalFlags(Flag.INTERNAL_OBJECT);
      _dbClient.persistObject(volume);
      copyPrimaryVolumes.add(volume);

      // Create the volume descriptor and add it to the passed list.
      VolumeDescriptor volumeDescriptor =
          new VolumeDescriptor(
              VolumeDescriptor.Type.VPLEX_IMPORT_VOLUME,
              volume.getStorageController(),
              volume.getId(),
              volume.getPool(),
              srcCapabilities);
      volumeDescriptors.add(volumeDescriptor);
    }

    return copyPrimaryVolumes;
  }
Code Example #5
  /**
   * Verify the VPlex consistency group and its volumes have been properly migrated.
   *
   * @throws Exception
   */
  private void verifyVplexConsistencyGroupMigration() throws Exception {
    log.info("Verifying VPlex BlockConsistencyGroup and associated volume migration.");

    BlockConsistencyGroup vplexCg =
        _dbClient.queryObject(BlockConsistencyGroup.class, vplexConsistencyGroupURI);
    Iterator<Volume> vplexVolumeItr =
        _dbClient.queryIterativeObjects(Volume.class, vplexVolumeURIs);

    // Verify the VPLEX consistency group was properly migrated
    verifyConsistencyGroupMigration(vplexCg, Types.VPLEX.name());

    // Collect each volume as it is visited; the iterator can only be traversed once.
    List<BlockObject> blockObjects = new ArrayList<BlockObject>();
    while (vplexVolumeItr.hasNext()) {
      Volume vplexVolume = vplexVolumeItr.next();
      blockObjects.add(vplexVolume);
      // Get the VPlex consistency group
      String cgName = vplexCg.getLabel();
      String clusterName = getVPlexClusterFromVolume(vplexVolume);
      String storageSystem = vplexVolume.getStorageController().toString();
      String clusterCgName = BlockConsistencyGroupUtils.buildClusterCgName(clusterName, cgName);

      // Verify that primary CG contains the correct mapping
      Assert.assertNotNull(
          "The VPlex BlockConsistencyGroup.vplexStorageSystemToCg field should be populated.",
          vplexCg.getSystemConsistencyGroups());
      Assert.assertTrue(
          "The VPlex BlockConsistencyGroup.vplexStorageSystemToCg should contain a key for storage system "
              + storageSystem,
          vplexCg.getSystemConsistencyGroups().containsKey(storageSystem));
      Assert.assertTrue(
          "The VPlex BlockConsistencyGroup.vplexStorageSystemToCg field should contain a mapping for "
              + storageSystem
              + "->"
              + clusterCgName,
          vplexCg.getSystemConsistencyGroups().get(storageSystem).contains(clusterCgName));
    }

    // Verify the volume migration took place correctly

    verifyBlockObjects(blockObjects);
  }
Code Example #6
  /**
   * Gets the volume to be updated after application add and remove operations. This could be the
   * volume passed in, if it is a simple block volume, or the VPLEX virtual volume, if the volume
   * passed in is a backing volume.
   *
   * @param voluri uri of the volume operated on during an add or remove volume from application
   *     operation
   * @param dbClient the database client
   * @return the volume to update
   */
  private Volume getVolume(URI voluri, DbClient dbClient) {
    // if this is a vplex volume, update the parent virtual volume
    List<Volume> vplexVolumes =
        CustomQueryUtility.queryActiveResourcesByConstraint(
            dbClient, Volume.class, getVolumesByAssociatedId(voluri.toString()));

    Volume volume = null;

    for (Volume vplexVolume : vplexVolumes) {
      URI storageURI = vplexVolume.getStorageController();
      StorageSystem storage = dbClient.queryObject(StorageSystem.class, storageURI);
      if (DiscoveredDataObject.Type.vplex.name().equals(storage.getSystemType())) {
        volume = vplexVolume;
      }
    }

    if (volume == null) {
      volume = dbClient.queryObject(Volume.class, voluri);
    }
    return volume;
  }
Code Example #7
  @Override
  public void doModifyVolumes(
      StorageSystem storage,
      StoragePool storagePool,
      String opId,
      List<Volume> volumes,
      TaskCompleter taskCompleter)
      throws DeviceControllerException {
    StringBuilder logMsgBuilder =
        new StringBuilder(
            String.format(
                "Modify Volume Start - Array:%s, Pool:%s",
                storage.getSerialNumber(), storagePool.getNativeGuid()));

    String systemObjectID = HDSUtils.getSystemObjectID(storage);
    for (Volume volume : volumes) {
      try {
        HDSApiClient hdsApiClient =
            hdsApiFactory.getClient(
                HDSUtils.getHDSServerManagementServerInfo(storage),
                storage.getSmisUserName(),
                storage.getSmisPassword());
        logMsgBuilder.append(
            String.format(
                "%nVolume:%s , IsThinlyProvisioned: %s, tieringPolicy: %s",
                volume.getLabel(),
                volume.getThinlyProvisioned(),
                volume.getAutoTieringPolicyUri()));
        LogicalUnit logicalUnit =
            hdsApiClient.getLogicalUnitInfo(
                systemObjectID, HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storage));
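        // Map the ViPR auto-tiering policy name, if one is set, to the corresponding
        // Hitachi tiering policy key (slashes in the name are replaced with underscores).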
        String policyName = ControllerUtils.getAutoTieringPolicyName(volume.getId(), dbClient);
        String autoTierPolicyName = null;
        if (policyName.equals(Constants.NONE)) {
          autoTierPolicyName = null;
        } else {
          autoTierPolicyName =
              HitachiTieringPolicy.getPolicy(
                      policyName.replaceAll(
                          HDSConstants.SLASH_OPERATOR, HDSConstants.UNDERSCORE_OPERATOR))
                  .getKey();
        }
        if (null != logicalUnit
            && null != logicalUnit.getLdevList()
            && !logicalUnit.getLdevList().isEmpty()) {
          Iterator<LDEV> ldevItr = logicalUnit.getLdevList().iterator();
          if (ldevItr.hasNext()) {
            LDEV ldev = ldevItr.next();
            String asyncMessageId =
                hdsApiClient.modifyThinVolumeTieringPolicy(
                    systemObjectID,
                    logicalUnit.getObjectID(),
                    ldev.getObjectID(),
                    autoTierPolicyName);
            if (null != asyncMessageId) {
              HDSJob modifyHDSJob =
                  new HDSModifyVolumeJob(
                      asyncMessageId,
                      volume.getStorageController(),
                      taskCompleter,
                      HDSModifyVolumeJob.VOLUME_MODIFY_JOB);
              ControllerServiceImpl.enqueueJob(new QueueJob(modifyHDSJob));
            }
          }
        } else {
          String errorMsg = String.format("No LDEV's found for volume: %s", volume.getId());
          log.info(errorMsg);
          ServiceError serviceError =
              DeviceControllerErrors.hds.methodFailed("doModifyVolumes", errorMsg);
          taskCompleter.error(dbClient, serviceError);
        }
      } catch (final InternalException e) {
        log.error("Problem in doModifyVolumes: ", e);
        taskCompleter.error(dbClient, e);
      } catch (final Exception e) {
        log.error("Problem in doModifyVolumes: ", e);
        ServiceError serviceError =
            DeviceControllerErrors.hds.methodFailed("doModifyVolumes", e.getMessage());
        taskCompleter.error(dbClient, serviceError);
      }
    }
    // Log the summary message accumulated for the processed volumes.
    log.info(logMsgBuilder.toString());
  }
Code Example #8
  /**
   * Called to update the job status when the volume expand job completes.
   *
   * @param jobContext The job context.
   */
  public void updateStatus(JobContext jobContext) throws Exception {
    CloseableIterator<CIMObjectPath> associatorIterator = null;
    CloseableIterator<CIMInstance> instanceIterator = null;
    JobStatus jobStatus = getJobStatus();

    try {
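      // Nothing further to do until the expand job reaches a terminal state.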
      if (jobStatus == JobStatus.IN_PROGRESS) {
        return;
      }

      DbClient dbClient = jobContext.getDbClient();
      CIMConnectionFactory cimConnectionFactory = jobContext.getCimConnectionFactory();
      WBEMClient client = getWBEMClient(dbClient, cimConnectionFactory);

      // If in a terminal state, update the storage pool capacity and remove the reservation for
      // the volume capacity from the pool's reserved capacity map.
      if (jobStatus == JobStatus.SUCCESS
          || jobStatus == JobStatus.FAILED
          || jobStatus == JobStatus.FATAL_ERROR) {
        SmisUtils.updateStoragePoolCapacity(dbClient, client, _storagePoolURI);

        StoragePool pool = dbClient.queryObject(StoragePool.class, _storagePoolURI);
        StringMap reservationMap = pool.getReservedCapacityMap();
        URI volumeId = getTaskCompleter().getId();
        // remove from reservation map
        reservationMap.remove(volumeId.toString());
        dbClient.persistObject(pool);
      }

      String opId = getTaskCompleter().getOpId();
      StringBuilder logMsgBuilder =
          new StringBuilder(
              String.format(
                  "Updating status of job %s to %s, task: %s",
                  this.getJobName(), jobStatus.name(), opId));

      if (jobStatus == JobStatus.SUCCESS) {
        VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
        Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
        // set requested capacity
        volume.setCapacity(taskCompleter.getSize());
        // set meta related properties
        volume.setTotalMetaMemberCapacity(taskCompleter.getTotalMetaMembersSize());
        volume.setMetaMemberCount(taskCompleter.getMetaMemberCount());
        volume.setMetaMemberSize(taskCompleter.getMetaMemberSize());
        volume.setIsComposite(taskCompleter.isComposite());
        volume.setCompositionType(taskCompleter.getMetaVolumeType());

        // set provisioned capacity
        associatorIterator =
            client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
        if (associatorIterator.hasNext()) {
          CIMObjectPath volumePath = associatorIterator.next();
          CIMInstance volumeInstance = client.getInstance(volumePath, true, false, null);
          if (volumeInstance != null) {
            CIMProperty consumableBlocks =
                volumeInstance.getProperty(SmisConstants.CP_CONSUMABLE_BLOCKS);
            CIMProperty blockSize = volumeInstance.getProperty(SmisConstants.CP_BLOCK_SIZE);
            // calculate provisionedCapacity = consumableBlocks * block size
            Long provisionedCapacity =
                Long.valueOf(consumableBlocks.getValue().toString())
                    * Long.valueOf(blockSize.getValue().toString());
            volume.setProvisionedCapacity(provisionedCapacity);
          }

          // set allocated capacity
          instanceIterator =
              client.referenceInstances(
                  volumePath,
                  SmisConstants.CIM_ALLOCATED_FROM_STORAGEPOOL,
                  null,
                  false,
                  SmisConstants.PS_SPACE_CONSUMED);
          if (instanceIterator.hasNext()) {
            CIMInstance allocatedFromStoragePoolPath = instanceIterator.next();
            CIMProperty spaceConsumed =
                allocatedFromStoragePoolPath.getProperty(SmisConstants.CP_SPACE_CONSUMED);
            if (null != spaceConsumed) {
              volume.setAllocatedCapacity(Long.valueOf(spaceConsumed.getValue().toString()));
            }
          }
        }
        logMsgBuilder.append(
            String.format(
                "%n   Capacity: %s, Provisioned capacity: %s, Allocated Capacity: %s",
                volume.getCapacity(),
                volume.getProvisionedCapacity(),
                volume.getAllocatedCapacity()));
        if (volume.getIsComposite()) {
          logMsgBuilder.append(
              String.format(
                  "%n   Is Meta: %s, Total meta member capacity: %s, Meta member count %s, Meta member size: %s",
                  volume.getIsComposite(),
                  volume.getTotalMetaMemberCapacity(),
                  volume.getMetaMemberCount(),
                  volume.getMetaMemberSize()));
        }

        _log.info(logMsgBuilder.toString());

        // Reset list of meta member volumes in the volume
        if (volume.getMetaVolumeMembers() != null) {
          volume.getMetaVolumeMembers().clear();
        }

        StorageSystem storageSystem =
            dbClient.queryObject(StorageSystem.class, volume.getStorageController());
        // set the RP tag on the volume if the volume is RP protected
        if (volume.checkForRp()
            && storageSystem.getSystemType() != null
            && storageSystem
                .getSystemType()
                .equalsIgnoreCase(DiscoveredDataObject.Type.vmax.toString())) {
          SmisCommandHelper helper = jobContext.getSmisCommandHelper();
          List<CIMObjectPath> volumePathList = new ArrayList<CIMObjectPath>();
          volumePathList.add(helper.getVolumeMember(storageSystem, volume));
          helper.setRecoverPointTag(storageSystem, volumePathList, true);
        }

        dbClient.persistObject(volume);
        // Reset the list of meta member native ids in the workflow step data (when the meta
        // volume is created, the meta members are removed from the array).
        WorkflowService.getInstance().storeStepData(opId, new ArrayList<String>());
      }
    } catch (Exception e) {
      _log.error("Caught an exception while trying to updateStatus for SmisVolumeExpandJob", e);
      setPostProcessingErrorStatus(
          "Encountered an internal error during volume expand job status processing : "
              + e.getMessage());
    } finally {
      _metaVolumeTaskCompleter.setLastStepStatus(jobStatus);
      if (associatorIterator != null) {
        associatorIterator.close();
      }
      if (instanceIterator != null) {
        instanceIterator.close();
      }
      super.updateStatus(jobContext);
    }
  }
Code Example #9
  /** {@inheritDoc} */
  @Override
  public TaskList resynchronizeCopy(Volume sourceVolume, Volume fullCopyVolume) {
    // Create the task list.
    TaskList taskList = new TaskList();

    // Create a unique task id.
    String taskId = UUID.randomUUID().toString();

    // If the source is in a CG, then we will resynchronize the corresponding
    // full copies for all the volumes in the CG. Since we did not allow
    // full copies for volumes or snaps in CGs prior to Jedi, there should
    // be a full copy for all volumes in the CG.
    Map<URI, Volume> fullCopyMap = getFullCopySetMap(sourceVolume, fullCopyVolume);
    Set<URI> fullCopyURIs = fullCopyMap.keySet();

    // Get the storage system for the source volume.
    StorageSystem sourceSystem =
        _dbClient.queryObject(StorageSystem.class, sourceVolume.getStorageController());
    URI sourceSystemURI = sourceSystem.getId();

    // Create the resynchronize task on the full copy volumes.
    for (URI fullCopyURI : fullCopyURIs) {
      Operation op =
          _dbClient.createTaskOpStatus(
              Volume.class,
              fullCopyURI,
              taskId,
              ResourceOperationTypeEnum.RESYNCHRONIZE_VOLUME_FULL_COPY);
      fullCopyMap.get(fullCopyURI).getOpStatus().put(taskId, op);
      TaskResourceRep fullCopyVolumeTask =
          TaskMapper.toTask(fullCopyMap.get(fullCopyURI), taskId, op);
      taskList.getTaskList().add(fullCopyVolumeTask);
    }

    // Invoke the controller.
    try {
      VPlexController controller =
          getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString());
      controller.resyncFullCopy(sourceSystemURI, new ArrayList<URI>(fullCopyURIs), taskId);
    } catch (InternalException ie) {
      s_logger.error("Controller error", ie);

      // Update the status for the VPLEX volume copies and their
      // corresponding tasks.
      for (Volume vplexFullCopy : fullCopyMap.values()) {
        Operation op = vplexFullCopy.getOpStatus().get(taskId);
        if (op != null) {
          op.error(ie);
          vplexFullCopy.getOpStatus().updateTaskStatus(taskId, op);
          _dbClient.persistObject(vplexFullCopy);
          for (TaskResourceRep task : taskList.getTaskList()) {
            if (task.getResource().getId().equals(vplexFullCopy.getId())) {
              task.setState(op.getStatus());
              task.setMessage(op.getMessage());
              break;
            }
          }
        }
      }
    }
    return taskList;
  }
Code Example #10
  /** {@inheritDoc} */
  @Override
  public TaskList detach(BlockObject fcSourceObj, Volume fullCopyVolume) {
    // If the full copy volume is already detached or was never
    // activated, the detach action completes successfully, as
    // handled by the base class. Otherwise, send a detach full
    // copy request to the controller.
    TaskList taskList = new TaskList();
    String taskId = UUID.randomUUID().toString();
    if ((BlockFullCopyUtils.isFullCopyDetached(fullCopyVolume, _dbClient))
        || (BlockFullCopyUtils.isFullCopyInactive(fullCopyVolume, _dbClient))) {
      taskList = super.detach(fcSourceObj, fullCopyVolume);
    } else {
      // You cannot create a full copy of a VPLEX snapshot, so
      // the source will be a volume.
      Volume sourceVolume = (Volume) fcSourceObj;

      // If the source is in a CG, then we will detach the corresponding
      // full copies for all the volumes in the CG. Since we did not allow
      // full copies for volumes or snaps in CGs prior to Jedi, there should
      // be a full copy for all volumes in the CG.
      Map<URI, Volume> fullCopyMap = getFullCopySetMap(sourceVolume, fullCopyVolume);
      Set<URI> fullCopyURIs = fullCopyMap.keySet();

      // Get the storage system for the source volume.
      StorageSystem sourceSystem =
          _dbClient.queryObject(StorageSystem.class, sourceVolume.getStorageController());
      URI sourceSystemURI = sourceSystem.getId();

      // Create the detach task on the full copy volumes.
      for (URI fullCopyURI : fullCopyURIs) {
        Operation op =
            _dbClient.createTaskOpStatus(
                Volume.class,
                fullCopyURI,
                taskId,
                ResourceOperationTypeEnum.DETACH_VOLUME_FULL_COPY);
        fullCopyMap.get(fullCopyURI).getOpStatus().put(taskId, op);
        TaskResourceRep fullCopyVolumeTask =
            TaskMapper.toTask(fullCopyMap.get(fullCopyURI), taskId, op);
        taskList.getTaskList().add(fullCopyVolumeTask);
      }

      // Invoke the controller.
      try {
        VPlexController controller =
            getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString());
        controller.detachFullCopy(sourceSystemURI, new ArrayList<URI>(fullCopyURIs), taskId);
      } catch (InternalException ie) {
        s_logger.error("Controller error", ie);

        // Update the status for the VPLEX volume copies and their
        // corresponding tasks.
        for (Volume vplexFullCopy : fullCopyMap.values()) {
          Operation op = vplexFullCopy.getOpStatus().get(taskId);
          if (op != null) {
            op.error(ie);
            vplexFullCopy.getOpStatus().updateTaskStatus(taskId, op);
            _dbClient.persistObject(vplexFullCopy);
            for (TaskResourceRep task : taskList.getTaskList()) {
              if (task.getResource().getId().equals(vplexFullCopy.getId())) {
                task.setState(op.getStatus());
                task.setMessage(op.getMessage());
                break;
              }
            }
          }
        }
      }
    }
    return taskList;
  }
Code Example #11
  /**
   * Places and prepares the HA volumes when copying a distributed VPLEX volume.
   *
   * @param name The base name for the volume.
   * @param copyCount The number of copies to be made.
   * @param size The size for the HA volume.
   * @param vplexSystem A reference to the VPLEX storage system.
   * @param vplexSystemProject A reference to the VPLEX system project.
   * @param srcVarray The virtual array for the VPLEX volume being copied.
   * @param srcHAVolume The HA volume of the VPLEX volume being copied.
   * @param taskId The task identifier.
   * @param volumeDescriptors The list of descriptors.
   * @return A list of the prepared HA volumes for the VPLEX volume copy.
   */
  private List<Volume> prepareFullCopyHAVolumes(
      String name,
      int copyCount,
      Long size,
      StorageSystem vplexSystem,
      Project vplexSystemProject,
      VirtualArray srcVarray,
      Volume srcHAVolume,
      String taskId,
      List<VolumeDescriptor> volumeDescriptors) {

    List<Volume> copyHAVolumes = new ArrayList<Volume>();

    // Get the storage placement recommendations for the volumes.
    // Placement must occur on the same VPLEX system
    Set<URI> vplexSystemURIS = new HashSet<URI>();
    vplexSystemURIS.add(vplexSystem.getId());
    VirtualArray haVarray =
        _dbClient.queryObject(VirtualArray.class, srcHAVolume.getVirtualArray());
    VirtualPool haVpool = _dbClient.queryObject(VirtualPool.class, srcHAVolume.getVirtualPool());
    VirtualPoolCapabilityValuesWrapper haCapabilities = new VirtualPoolCapabilityValuesWrapper();
    haCapabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, size);
    haCapabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, copyCount);
    VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(srcHAVolume, _dbClient);
    if (VirtualPool.ProvisioningType.Thin.toString()
        .equalsIgnoreCase(vpool.getSupportedProvisioningType())) {
      haCapabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.TRUE);
      // To guarantee that storage pool for a copy has enough physical
      // space to contain current allocated capacity of thin source volume
      haCapabilities.put(
          VirtualPoolCapabilityValuesWrapper.THIN_VOLUME_PRE_ALLOCATE_SIZE,
          BlockFullCopyUtils.getAllocatedCapacityForFullCopySource(srcHAVolume, _dbClient));
    }
    List<Recommendation> recommendations =
        ((VPlexScheduler) _scheduler)
            .scheduleStorageForImport(
                srcVarray, vplexSystemURIS, haVarray, haVpool, haCapabilities);
    if (recommendations.isEmpty()) {
      throw APIException.badRequests.noStorageForHaVolumesForVplexVolumeCopies();
    }

    // Prepare the HA volumes for the VPLEX volume copy.
    int copyIndex = 1;
    for (Recommendation recommendation : recommendations) {
      VPlexRecommendation haRecommendation = (VPlexRecommendation) recommendation;
      for (int i = 0; i < haRecommendation.getResourceCount(); i++) {
        // Determine the name for the HA volume copy.
        StringBuilder nameBuilder = new StringBuilder(name);
        nameBuilder.append("-1");
        if (copyCount > 1) {
          nameBuilder.append("-");
          nameBuilder.append(copyIndex++);
        }

        // Prepare the volume.
        Volume volume =
            VPlexBlockServiceApiImpl.prepareVolumeForRequest(
                size,
                vplexSystemProject,
                haVarray,
                haVpool,
                haRecommendation.getSourceDevice(),
                haRecommendation.getSourcePool(),
                nameBuilder.toString(),
                null,
                taskId,
                _dbClient);
        volume.addInternalFlags(Flag.INTERNAL_OBJECT);
        _dbClient.persistObject(volume);
        copyHAVolumes.add(volume);

        // Create the volume descriptor and add it to the passed list.
        VolumeDescriptor volumeDescriptor =
            new VolumeDescriptor(
                VolumeDescriptor.Type.BLOCK_DATA,
                volume.getStorageController(),
                volume.getId(),
                volume.getPool(),
                haCapabilities);
        volumeDescriptors.add(volumeDescriptor);
      }
    }

    return copyHAVolumes;
  }