/** {@inheritDoc} */
  @Override
  public List<BlockObject> getAllSourceObjectsForFullCopyRequest(BlockObject fcSourceObj) {

    // Treat full copies of snapshots as is done in the base class.
    if (URIUtil.isType(fcSourceObj.getId(), BlockSnapshot.class)) {
      return super.getAllSourceObjectsForFullCopyRequest(fcSourceObj);
    }

    // By default, if the passed volume is in a consistency group
    // all volumes in the consistency group should be copied.
    List<BlockObject> fcSourceObjList = new ArrayList<BlockObject>();
    URI cgURI = fcSourceObj.getConsistencyGroup();
    if (!NullColumnValueGetter.isNullURI(cgURI)) {
      BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, cgURI);
      // If there is no corresponding native CG for the VPLEX
      // CG, then this is a CG created prior to 2.2. In this
      // case we want full copies treated like snapshots, which
      // means only creating a copy of the passed object.
      if (!cg.checkForType(Types.LOCAL)) {
        fcSourceObjList.add(fcSourceObj);
      } else {
        fcSourceObjList.addAll(getActiveCGVolumes(cg));
      }
    } else {
      fcSourceObjList.add(fcSourceObj);
    }

    return fcSourceObjList;
  }
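 /**
  * Updates the device label, label, and native ID on the passed block object
  * using values from the given unmanaged volume. Note that the native ID is
  * set to the block object's native GUID, which appears intentional for this
  * ingestion path.
  */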
 @Override
 protected void updateBlockObjectNativeIds(
     BlockObject blockObject, UnManagedVolume unManagedVolume) {
   String label = unManagedVolume.getLabel();
   blockObject.setDeviceLabel(label);
   blockObject.setLabel(label);
   blockObject.setNativeId(blockObject.getNativeGuid());
 }
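 /**
  * Returns the consistency group name for the given block object on its
  * storage system, or null if the object is not in a consistency group.
  *
  * <p>A minimal usage sketch (assuming {@code volume} is a managed Volume in
  * a consistency group):
  *
  * <pre>{@code
  * String cgName = getConsistencyGroupName(volume);
  * if (cgName != null) {
  *   // cgName is the CG name as known on the volume's storage system.
  * }
  * }</pre>
  */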
 public String getConsistencyGroupName(BlockObject bo) {
   if (NullColumnValueGetter.isNullURI(bo.getConsistencyGroup())) {
     return null;
   }
   final BlockConsistencyGroup group =
       _dbClient.queryObject(BlockConsistencyGroup.class, bo.getConsistencyGroup());
   return getConsistencyGroupName(group, bo);
 }
  /*
   * (non-Javadoc)
   *
   * @see
   * com.emc.storageos.volumecontroller.BlockStorageDevice#doWaitForSynchronized
   * (java.lang.Class, com.emc.storageos.db.client.model.StorageSystem,
   * java.net.URI, com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void doWaitForSynchronized(
      Class<? extends BlockObject> clazz,
      StorageSystem storageObj,
      URI target,
      TaskCompleter completer) {
    log.info("START waitForSynchronized for {}", target);

    try {
      Volume targetObj = dbClient.queryObject(Volume.class, target);
      // Source could be either Volume or BlockSnapshot
      BlockObject sourceObj = BlockObject.fetch(dbClient, targetObj.getAssociatedSourceVolume());

      // We split the pair, which causes the data to be synchronized;
      // once the split completes, the data is in sync.
      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storageObj),
              storageObj.getSmisUserName(),
              storageObj.getSmisPassword());
      HDSApiProtectionManager hdsApiProtectionManager = hdsApiClient.getHdsApiProtectionManager();
      String replicationGroupObjectID = hdsApiProtectionManager.getReplicationGroupObjectId();
      ReplicationInfo replicationInfo =
          hdsApiProtectionManager.getReplicationInfoFromSystem(
                  sourceObj.getNativeId(), targetObj.getNativeId())
              .first;
      hdsApiProtectionManager.modifyShadowImagePair(
          replicationGroupObjectID,
          replicationInfo.getObjectID(),
          HDSApiProtectionManager.ShadowImageOperationType.split);

      // Update state in case we are waiting for synchronization
      // after creation of a new full copy that was not created
      // inactive.
      String state = targetObj.getReplicaState();
      if (!ReplicationState.SYNCHRONIZED.name().equals(state)) {
        targetObj.setSyncActive(true);
        targetObj.setReplicaState(ReplicationState.SYNCHRONIZED.name());
        dbClient.persistObject(targetObj);
      }

      // Queue job to wait for replication status to move to split.
      ControllerServiceImpl.enqueueJob(
          new QueueJob(
              new HDSReplicationSyncJob(
                  storageObj.getId(),
                  sourceObj.getNativeId(),
                  targetObj.getNativeId(),
                  ReplicationStatus.SPLIT,
                  completer)));
    } catch (Exception e) {
      log.error("Exception occurred while waiting for synchronization", e);
      ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
      completer.error(dbClient, serviceError);
    }
    log.info("completed doWaitForSynchronized");
  }
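  /**
   * Ingests an SRDF unmanaged volume. If no corresponding block object exists
   * in the database yet, ingestion is delegated to the base class; otherwise
   * the existing object's internal flags and SRDF relationships are updated.
   * In either case the resulting block object is decorated with SRDF
   * properties before being returned.
   */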
  @Override
  public <T extends BlockObject> T ingestBlockObjects(
      IngestionRequestContext requestContext, Class<T> clazz) throws IngestionException {

    UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();

    String volumeNativeGuid =
        unManagedVolume
            .getNativeGuid()
            .replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
    BlockObject blockObject =
        VolumeIngestionUtil.checkIfVolumeExistsInDB(volumeNativeGuid, _dbClient);

    // Validate SRDF block objects.
    validateUnManagedVolumeProperties(
        unManagedVolume,
        requestContext.getVarray(unManagedVolume),
        requestContext.getVpool(unManagedVolume),
        requestContext.getProject());
    // Check if the ingested volume has export masks pending ingestion.
    if (isExportIngestionPending(
        blockObject,
        unManagedVolume.getId(),
        requestContext.getVolumeContext().isVolumeExported())) {
      return clazz.cast(blockObject);
    }

    if (null == blockObject) {
      blockObject = super.ingestBlockObjects(requestContext, clazz);

      if (null == blockObject) {
        _logger.warn(
            "SRDF Volume ingestion failed for unmanagedVolume {}", unManagedVolume.getNativeGuid());
        throw IngestionException.exceptions.unmanagedVolumeMasksNotIngested(
            unManagedVolume.getNativeGuid(), "none.");
      }
    } else {
      // The block object has already been ingested; just update its internal
      // flags and SRDF relationships. Run this logic whenever the volume is
      // still marked NO_PUBLIC_ACCESS.
      if (markUnManagedVolumeInactive(requestContext, blockObject)) {
        _logger.info(
            "All the related replicas and the parent of unManagedVolume {} have been ingested",
            unManagedVolume.getNativeGuid());
        unManagedVolume.setInactive(true);
        requestContext.getUnManagedVolumesToBeDeleted().add(unManagedVolume);
      } else {
        _logger.info(
            "Not all of the parent/replicas of unManagedVolume {} have been ingested, hence marking it as internal",
            unManagedVolume.getNativeGuid());
        blockObject.addInternalFlags(INTERNAL_VOLUME_FLAGS);
      }
    }
    // Decorate the block object with SRDF properties.
    decorateBlockObjectWithSRDFProperties(blockObject, unManagedVolume);

    return clazz.cast(blockObject);
  }
  /** {@inheritDoc} */
  @Override
  public void validateFullCopyCreateRequest(List<BlockObject> fcSourceObjList, int count) {
    // Call super first.
    super.validateFullCopyCreateRequest(fcSourceObjList, count);

    // For VMAX3 you cannot have active snap and full copy sessions,
    // so verify there are no active snapshots for the volume. Note
    // that we know the source is a volume, because full copies are
    // not allowed for VMAX snapshots, which would have been caught
    // in the call to super.
    for (BlockObject fcSourceObj : fcSourceObjList) {
      BlockServiceUtils.validateVMAX3ActiveSnapSessionsExists(
          fcSourceObj.getId(), _dbClient, FULLCOPIES);
    }
  }
  /**
   * Verify the migration for BlockObjects. Ensure the consistencyGroups field has been collapsed
   * into the consistencyGroup field.
   *
   * @param blockObjects the BlockObjects to verify
   */
  private void verifyBlockObjects(List<BlockObject> blockObjects) {
    for (BlockObject blockObject : blockObjects) {
      log.info("Verifying BlockObject migration for " + blockObject.getLabel());

      // For RP+VPlex migrations, the BlockObjects will have a null consistencyGroups reference.
      // For non-RP+VPlex migrations, the BlockObjects will have an empty consistencyGroups
      // reference.
      // Both conditions indicate the field is no longer being used.
      Assert.assertTrue(
          "BlockObject.consistencyGroups field should be empty.",
          blockObject.getConsistencyGroups() == null
              || blockObject.getConsistencyGroups().isEmpty());
      Assert.assertNotNull(
          "BlockObject.consistencyGroup field should not be null.",
          blockObject.getConsistencyGroup());
    }
  }
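  /**
   * Returns the consistency group name as it is known on the storage system
   * of the passed block object.
   *
   * @param group the consistency group, may be null
   * @param bo the block object whose storage controller scopes the name, may be null
   * @return the CG name on the storage system, or null if either argument is null
   */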
  public String getConsistencyGroupName(final BlockConsistencyGroup group, BlockObject bo) {
    String groupName = null;

    if (group != null && bo != null) {
      groupName = group.getCgNameOnStorageSystem(bo.getStorageController());
    }

    return groupName;
  }
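  /**
   * Validates the volumes in this XtremIO ExportMask by comparing the volumes
   * present in the backing initiator groups against the block objects ViPR
   * knows about. Any volume found in an IG but unknown to ViPR is logged as a
   * discrepancy; based on the surrounding logic, checkForErrors() then fails
   * the validation only when validation is configured to be enforced.
   */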
  @Override
  public boolean validate() throws Exception {
    log.info("Initiating volume validation of XtremIO ExportMask: " + id);
    try {
      XtremIOClient client =
          XtremIOProvUtils.getXtremIOClient(getDbClient(), storage, getClientFactory());
      String xioClusterName = client.getClusterDetails(storage.getSerialNumber()).getName();
      Set<String> knownVolumes = new HashSet<>();
      Set<String> igVols = new HashSet<>();
      // get the volumes in the IGs and validate against passed impacted block objects
      for (BlockObject maskVolume : blockObjects) {
        knownVolumes.add(maskVolume.getDeviceLabel());
      }

      List<XtremIOVolume> igVolumes = new ArrayList<>();
      for (String igName : igNames) {
        igVolumes.addAll(XtremIOProvUtils.getInitiatorGroupVolumes(igName, xioClusterName, client));
      }

      for (XtremIOVolume igVolume : igVolumes) {
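        // Each vol-info entry appears to be a [href, name, index] triple in
        // the XtremIO REST response, so index 1 holds the volume name.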
        igVols.add(igVolume.getVolInfo().get(1));
      }

      log.info("ViPR known volumes present in IG: {}, volumes in IG: {}", knownVolumes, igVols);
      igVols.removeAll(knownVolumes);
      for (String igVol : igVols) {
        getLogger().logDiff(id, "volumes", ValidatorLogger.NO_MATCHING_ENTRY, igVol);
      }
    } catch (Exception ex) {
      log.error("Unexpected exception validating ExportMask volumes: " + ex.getMessage(), ex);
      if (getConfig().isValidationEnabled()) {
        throw DeviceControllerException.exceptions.unexpectedCondition(
            "Unexpected exception validating ExportMask volumes: " + ex.getMessage());
      }
    }

    checkForErrors();

    log.info("Completed volume validation of XtremIO ExportMask: " + id);

    return true;
  }
  /** {@inheritDoc} */
  @Override
  protected void verifyFullCopyRequestCount(BlockObject fcSourceObj, int count) {
    // Verify the requested copy count. You can only
    // have as many as is allowed by the source backend
    // volume.
    Volume fcSourceVolume = (Volume) fcSourceObj;
    Volume srcBackendVolume =
        VPlexUtil.getVPLEXBackendVolume(fcSourceVolume, true, _dbClient, true);
    // Verify if the source backend volume supports full copy
    URI systemURI = fcSourceObj.getStorageController();
    StorageSystem system = _dbClient.queryObject(StorageSystem.class, systemURI);
    int maxCount = Integer.MAX_VALUE;
    if (system != null) {
      maxCount = BlockFullCopyManager.getMaxFullCopiesForSystemType(system.getSystemType());
    }
    // If max count is 0, then the operation is not supported
    if (maxCount == 0) {
      throw APIException.badRequests.fullCopyNotSupportedByBackendSystem(fcSourceVolume.getId());
    }

    BlockFullCopyUtils.validateActiveFullCopyCount(srcBackendVolume, count, _dbClient);
  }
  /** {@inheritDoc} */
  @Override
  public void updateStatus(JobContext jobContext) throws Exception {
    JobStatus jobStatus = getJobStatus();
    CloseableIterator<CIMObjectPath> volumeIter = null;
    try {
      DbClient dbClient = jobContext.getDbClient();
      TaskCompleter completer = getTaskCompleter();
      BlockSnapshot snapshot = dbClient.queryObject(BlockSnapshot.class, _snapshotURI);
      if (jobStatus == JobStatus.IN_PROGRESS) {
        return;
      }
      if (jobStatus == JobStatus.SUCCESS) {
        s_logger.info(
            "Post-processing successful link snapshot session target {} for task {}",
            snapshot.getId(),
            completer.getOpId());
        // Get the snapshot session to which the target is being linked.
        BlockSnapshotSession snapSession =
            dbClient.queryObject(BlockSnapshotSession.class, completer.getId());

        // Get the snapshot device ID and set it against the BlockSnapshot object.
        BlockObject sourceObj = BlockObject.fetch(dbClient, snapshot.getParent().getURI());
        CIMConnectionFactory cimConnectionFactory = jobContext.getCimConnectionFactory();
        WBEMClient client = getWBEMClient(dbClient, cimConnectionFactory);
        volumeIter =
            client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
        while (volumeIter.hasNext()) {
          // Get the sync volume native device id
          CIMObjectPath volumePath = volumeIter.next();
          s_logger.info("volumePath: {}", volumePath.toString());
          CIMInstance volume = client.getInstance(volumePath, false, false, null);
          String volumeDeviceId =
              volumePath.getKey(SmisConstants.CP_DEVICE_ID).getValue().toString();
          s_logger.info("volumeDeviceId: {}", volumeDeviceId);
          if (volumeDeviceId.equals(sourceObj.getNativeId())) {
            // Don't want the source, we want the linked target.
            continue;
          }
          String volumeElementName =
              CIMPropertyFactory.getPropertyValue(volume, SmisConstants.CP_ELEMENT_NAME);
          s_logger.info("volumeElementName: {}", volumeElementName);
          String volumeWWN = CIMPropertyFactory.getPropertyValue(volume, SmisConstants.CP_WWN_NAME);
          s_logger.info("volumeWWN: {}", volumeWWN);
          String volumeAltName = CIMPropertyFactory.getPropertyValue(volume, SmisConstants.CP_NAME);
          s_logger.info("volumeAltName: {}", volumeAltName);
          StorageSystem system = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
          snapshot.setNativeId(volumeDeviceId);
          snapshot.setNativeGuid(NativeGUIDGenerator.generateNativeGuid(system, snapshot));
          snapshot.setDeviceLabel(volumeElementName);
          snapshot.setInactive(false);
          snapshot.setIsSyncActive(Boolean.TRUE);
          snapshot.setCreationTime(Calendar.getInstance());
          snapshot.setWWN(volumeWWN.toUpperCase());
          snapshot.setAlternateName(volumeAltName);
          snapshot.setSettingsInstance(snapSession.getSessionInstance());
          commonSnapshotUpdate(
              snapshot,
              volume,
              client,
              system,
              sourceObj.getNativeId(),
              volumeDeviceId,
              false,
              dbClient);
          s_logger.info(
              String.format(
                  "For target volume path %1$s, going to set blocksnapshot %2$s nativeId to %3$s (%4$s). Associated volume is %5$s (%6$s)",
                  volumePath.toString(),
                  snapshot.getId().toString(),
                  volumeDeviceId,
                  volumeElementName,
                  sourceObj.getNativeId(),
                  sourceObj.getDeviceLabel()));
          dbClient.updateObject(snapshot);
        }
      } else if (jobStatus == JobStatus.FAILED || jobStatus == JobStatus.FATAL_ERROR) {
        s_logger.info(
            "Failed to link snapshot session target {} for task {}",
            snapshot.getId(),
            completer.getOpId());
        snapshot.setInactive(true);
        dbClient.updateObject(snapshot);
      }
    } catch (Exception e) {
      setPostProcessingErrorStatus(
          "Encountered an internal error in link snapshot session target job status processing: "
              + e.getMessage());
      s_logger.error(
          "Encountered an internal error in link snapshot session target job status processing", e);
    } finally {
      if (volumeIter != null) {
        volumeIter.close();
      }
      super.updateStatus(jobContext);
    }
  }
  /*
   * (non-Javadoc)
   *
   * @see com.emc.storageos.volumecontroller.CloneOperations#createSingleClone(
   * com.emc.storageos.db.client.model.StorageSystem, java.net.URI, java.net.URI,
   * java.lang.Boolean,
   * com.emc.storageos.volumecontroller.TaskCompleter)
   */
  @Override
  public void createSingleClone(
      StorageSystem storageSystem,
      URI sourceObject,
      URI cloneVolume,
      Boolean createInactive,
      TaskCompleter taskCompleter) {
    log.info("START createSingleClone operation");
    boolean isVolumeClone = true;
    try {
      BlockObject sourceObj = BlockObject.fetch(dbClient, sourceObject);
      URI tenantUri = null;
      if (sourceObj instanceof BlockSnapshot) {
        // In case of a snapshot, get the tenant from its parent volume.
        NamedURI parentVolUri = ((BlockSnapshot) sourceObj).getParent();
        Volume parentVolume = dbClient.queryObject(Volume.class, parentVolUri);
        tenantUri = parentVolume.getTenant().getURI();
        isVolumeClone = false;
      } else { // Default flow: the source is a volume.
        tenantUri = ((Volume) sourceObj).getTenant().getURI();
        isVolumeClone = true;
      }

      Volume cloneObj = dbClient.queryObject(Volume.class, cloneVolume);
      StoragePool targetPool = dbClient.queryObject(StoragePool.class, cloneObj.getPool());
      TenantOrg tenantOrg = dbClient.queryObject(TenantOrg.class, tenantUri);
      // String cloneLabel = generateLabel(tenantOrg, cloneObj);

      CinderEndPointInfo ep =
          CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
      log.info(
          "Getting the Cinder API for the provider with id "
              + storageSystem.getActiveProviderURI());
      CinderApi cinderApi = cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);

      String volumeId = "";
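      // Cinder expects sizes in GB, so convert the capacity from bytes
      // (assuming getCapacity() returns bytes).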
      if (isVolumeClone) {
        volumeId =
            cinderApi.cloneVolume(
                cloneObj.getLabel(),
                (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
                targetPool.getNativeId(),
                sourceObj.getNativeId());
      } else {
        volumeId =
            cinderApi.createVolumeFromSnapshot(
                cloneObj.getLabel(),
                (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
                targetPool.getNativeId(),
                sourceObj.getNativeId());
      }

      log.debug("Creating volume with the id " + volumeId + " on the OpenStack Cinder node");
      if (volumeId != null) {
        Map<String, URI> volumeIds = new HashMap<String, URI>();
        volumeIds.put(volumeId, cloneObj.getId());
        ControllerServiceImpl.enqueueJob(
            new QueueJob(
                new CinderSingleVolumeCreateJob(
                    volumeId,
                    cloneObj.getLabel(),
                    storageSystem.getId(),
                    CinderConstants.ComponentType.volume.name(),
                    ep,
                    taskCompleter,
                    targetPool.getId(),
                    volumeIds)));
      }
    } catch (InternalException e) {
      String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
      log.error(errorMsg, e);
      taskCompleter.error(dbClient, e);
    } catch (Exception e) {
      String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
      log.error(errorMsg, e);
      ServiceError serviceError =
          DeviceControllerErrors.cinder.operationFailed("createSingleClone", e.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
  }
  /**
   * Routine contains logic to create an export mask on the array
   *
   * @param workflow - Workflow object to create steps against
   * @param previousStep - [optional] Identifier of workflow step to wait for
   * @param device - BlockStorageDevice implementation
   * @param storage - StorageSystem object representing the underlying array
   * @param exportGroup - ExportGroup object representing Bourne-level masking
   * @param initiatorURIs - List of Initiator URIs
   * @param volumeMap - Map of Volume URIs to requested Integer HLUs
   * @param zoningStepNeeded - Not required for HDS
   * @param token - Identifier for the operation
   * @return true if the export group steps were successfully created; false otherwise
   * @throws Exception
   */
  public boolean determineExportGroupCreateSteps(
      Workflow workflow,
      String previousStep,
      BlockStorageDevice device,
      StorageSystem storage,
      ExportGroup exportGroup,
      List<URI> initiatorURIs,
      Map<URI, Integer> volumeMap,
      boolean zoningStepNeeded,
      String token)
      throws Exception {
    Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
    List<URI> volumeURIs = new ArrayList<URI>();
    volumeURIs.addAll(volumeMap.keySet());
    Map<URI, URI> hostToExistingExportMaskMap = new HashMap<URI, URI>();
    List<URI> hostURIs = new ArrayList<URI>();
    List<String> portNames = new ArrayList<String>();
    List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
    // Populate the port WWN/IQNs (portNames) and the
    // mapping of the WWN/IQNs to Initiator URIs
    processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);

    // We always want to have the full list of initiators for the hosts involved in
    // this export. This will allow the export operation to always find any
    // existing exports for a given host.
    queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);

    // Find the export masks that are associated with any or all of the ports
    // in portNames. We will have to do processing differently based on
    // whether or not there are existing ExportMasks.
    Map<String, Set<URI>> matchingExportMaskURIs =
        device.findExportMasks(storage, portNames, false);
    if (matchingExportMaskURIs.isEmpty()) {

      _log.info(
          String.format(
              "No existing mask found w/ initiators { %s }", Joiner.on(",").join(portNames)));

      createNewExportMaskWorkflowForInitiators(
          initiatorURIs, exportGroup, workflow, volumeMap, storage, token, previousStep);
    } else {
      _log.info(
          String.format(
              "Mask(s) found w/ initiators {%s}. "
                  + "MatchingExportMaskURIs {%s}, portNameToInitiators {%s}",
              Joiner.on(",").join(portNames),
              Joiner.on(",").join(matchingExportMaskURIs.values()),
              Joiner.on(",").join(portNameToInitiatorURI.entrySet())));
      // There are some initiators that already exist. We need to create a
      // workflow that creates new masking containers or updates existing
      // masking containers as necessary.

      // These data structures will be used to track new initiators - ones
      // that don't already exist on the array
      List<URI> initiatorURIsCopy = new ArrayList<URI>();
      initiatorURIsCopy.addAll(initiatorURIs);

      // This loop will determine a list of volumes to update per export mask
      Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes =
          new HashMap<URI, Map<URI, Integer>>();
      Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators =
          new HashMap<URI, Set<Initiator>>();
      for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
        URI initiatorURI = portNameToInitiatorURI.get(entry.getKey());
        Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
        // Keep track of those initiators that have been found to exist already
        // in some export mask on the array
        initiatorURIsCopy.remove(initiatorURI);
        // Get a list of the ExportMasks that were matched to the initiator
        List<URI> exportMaskURIs = new ArrayList<URI>();
        exportMaskURIs.addAll(entry.getValue());
        List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
        _log.info(
            String.format(
                "initiator %s masks {%s}",
                initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
        for (ExportMask mask : masks) {
          // The ExportMask was created outside of ViPR. Set the mask name if it doesn't have one.
          if (null == mask.getMaskName()) {
            String maskName =
                ExportMaskUtils.getMaskName(_dbClient, initiators, exportGroup, storage);
            _log.info("Generated mask name: {}", maskName);
            mask.setMaskName(maskName);
          }

          // Check for NO_VIPR.  If found, avoid this mask.
          if (mask.getMaskName() != null
              && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
            _log.info(
                String.format(
                    "ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it",
                    mask.getMaskName(), ExportUtils.NO_VIPR));
            continue;
          }

          _log.info(
              String.format(
                  "mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
          if (mask.getCreatedBySystem()) {
            _log.info(
                String.format(
                    "initiator %s is in persisted mask %s",
                    initiator.getInitiatorPort(), mask.getMaskName()));

            // We're still OK if the mask contains ONLY initiators that can be found
            // in our export group, because we would simply add to them.
            if (mask.getInitiators() != null) {
              for (String existingMaskInitiatorStr : mask.getInitiators()) {

                // Now look at it from a different angle: which of our export
                // group initiators are NOT in the current mask? If one belongs
                // to the same host as an existing initiator, we should add it
                // to this mask.
                Iterator<URI> initiatorIter = initiatorURIsCopy.iterator();
                while (initiatorIter.hasNext()) {
                  Initiator initiatorCopy =
                      _dbClient.queryObject(Initiator.class, initiatorIter.next());

                  if (initiatorCopy != null
                      && initiatorCopy.getId() != null
                      && !mask.hasInitiator(initiatorCopy.getId().toString())) {
                    Initiator existingMaskInitiator =
                        _dbClient.queryObject(
                            Initiator.class, URI.create(existingMaskInitiatorStr));
                    if (existingMaskInitiator != null
                        && initiatorCopy.getHost() != null
                        && initiatorCopy.getHost().equals(existingMaskInitiator.getHost())) {
                      // Add to the list of initiators we need to add to this mask
                      Set<Initiator> existingMaskInitiators =
                          existingMasksToUpdateWithNewInitiators.get(mask.getId());
                      if (existingMaskInitiators == null) {
                        existingMaskInitiators = new HashSet<Initiator>();
                        existingMasksToUpdateWithNewInitiators.put(
                            mask.getId(), existingMaskInitiators);
                      }
                      existingMaskInitiators.add(initiatorCopy);
                      // Remove this from the list of initiators we'll make a new mask from.
                      initiatorIter.remove();
                    }
                  }
                }
              }
            }
          } else {
            // Insert this initiator into the mask's list of initiators managed by the system.
            // This will get persisted below.
            mask.addInitiator(initiator);
            if (!NullColumnValueGetter.isNullURI(initiator.getHost())) {
              hostToExistingExportMaskMap.put(initiator.getHost(), mask.getId());
            }
          }

          // We need to see if the volume also exists in the mask. If it
          // doesn't, then we'll add it to the list of volumes to add.
          for (URI boURI : volumeURIs) {
            BlockObject bo = BlockObject.fetch(_dbClient, boURI);
            if (!mask.hasExistingVolume(bo)) {
              _log.info(
                  String.format(
                      "volume %s is not in mask %s", bo.getNativeGuid(), mask.getMaskName()));
              // The volume doesn't exist, so we have to add it to
              // the masking container.
              Map<URI, Integer> newVolumes = existingMasksToUpdateWithNewVolumes.get(mask.getId());
              if (newVolumes == null) {
                newVolumes = new HashMap<URI, Integer>();
                existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumes);
              }
              // Check if the requested HLU for the volume is
              // already taken by a pre-existing volume.
              Integer requestedHLU = volumeMap.get(bo.getId());
              StringMap existingVolumesInMask = mask.getExistingVolumes();
              if (existingVolumesInMask != null
                  && existingVolumesInMask.containsValue(requestedHLU.toString())) {
                ExportOrchestrationTask completer =
                    new ExportOrchestrationTask(exportGroup.getId(), token);
                ServiceError serviceError =
                    DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(
                        boURI.toString(), requestedHLU.toString());
                completer.error(_dbClient, serviceError);
                return false;
              }
              newVolumes.put(bo.getId(), requestedHLU);
              mask.addToUserCreatedVolumes(bo);
            }
          }

          // Update the list of volumes and initiators for the mask
          Map<URI, Integer> volumeMapForExistingMask =
              existingMasksToUpdateWithNewVolumes.get(mask.getId());
          if (volumeMapForExistingMask != null && !volumeMapForExistingMask.isEmpty()) {
            mask.addVolumes(volumeMapForExistingMask);
          }

          Set<Initiator> initiatorSetForExistingMask =
              existingMasksToUpdateWithNewInitiators.get(mask.getId());
          if (initiatorSetForExistingMask != null && !initiatorSetForExistingMask.isEmpty()) {
            mask.addInitiators(initiatorSetForExistingMask);
          }

          updateZoningMap(exportGroup, mask);
          _dbClient.updateAndReindexObject(mask);
          // TODO: All export group modifications should be moved to completers
          exportGroup.addExportMask(mask.getId());
          _dbClient.updateAndReindexObject(exportGroup);
        }
      }

      // The initiatorURIsCopy was used in the per-initiator loop above to see
      // which initiators already exist in a mask. If it is non-empty, then
      // there are initiators that are new, so let's add them to the main
      // tracker.
      Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
      if (!initiatorURIsCopy.isEmpty()) {
        for (URI newExportMaskInitiator : initiatorURIsCopy) {

          Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
          List<URI> initiatorSet = hostInitiatorMap.get(initiator.getHost());
          if (initiatorSet == null) {
            initiatorSet = new ArrayList<URI>();
            hostInitiatorMap.put(initiator.getHost(), initiatorSet);
          }
          initiatorSet.add(initiator.getId());

          _log.info(
              String.format(
                  "host = %s, "
                      + "initiators to add: %d, "
                      + "existingMasksToUpdateWithNewVolumes.size = %d",
                  initiator.getHost(),
                  hostInitiatorMap.get(initiator.getHost()).size(),
                  existingMasksToUpdateWithNewVolumes.size()));
        }
      }

      _log.info(
          String.format(
              "existingMasksToUpdateWithNewVolumes.size = %d",
              existingMasksToUpdateWithNewVolumes.size()));

      // At this point we have the necessary data structures populated to
      // determine the workflow steps. We are going to create new masks
      // and/or add volumes to existing masks.
      if (!hostInitiatorMap.isEmpty()) {
        for (URI hostID : hostInitiatorMap.keySet()) {
          // Check if there is an existing mask (created outside of ViPR) for
          // the host. If there is, we will need to add the initiators
          // associated with that host to the list.
          if (hostToExistingExportMaskMap.containsKey(hostID)) {
            URI existingExportMaskURI = hostToExistingExportMaskMap.get(hostID);
            Set<Initiator> toAddInits = new HashSet<Initiator>();
            List<URI> hostInitiatorList = hostInitiatorMap.get(hostID);
            for (URI initURI : hostInitiatorList) {
              Initiator initiator = _dbClient.queryObject(Initiator.class, initURI);
              if (!initiator.getInactive()) {
                toAddInits.add(initiator);
              }
            }
            _log.info(
                String.format(
                    "Need to add new initiators to existing mask %s, %s",
                    existingExportMaskURI.toString(), Joiner.on(',').join(hostInitiatorList)));
            existingMasksToUpdateWithNewInitiators.put(existingExportMaskURI, toAddInits);
            continue;
          }
          // We have some brand new initiators, let's add them to new masks
          _log.info(
              String.format(
                  "new export masks %s", Joiner.on(",").join(hostInitiatorMap.get(hostID))));

          generateExportMaskCreateWorkflow(
              workflow,
              previousStep,
              storage,
              exportGroup,
              hostInitiatorMap.get(hostID),
              volumeMap,
              token);
        }
      }

      Map<URI, String> stepMap = new HashMap<URI, String>();
      for (Map.Entry<URI, Map<URI, Integer>> entry :
          existingMasksToUpdateWithNewVolumes.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
        Map<URI, Integer> volumesToAdd = entry.getValue();
        _log.info(
            String.format(
                "adding these volumes %s to mask %s",
                Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
        stepMap.put(
            entry.getKey(),
            generateExportMaskAddVolumesWorkflow(
                workflow, null, storage, exportGroup, mask, volumesToAdd));
      }

      for (Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
        Set<Initiator> initiatorsToAdd = entry.getValue();
        List<URI> initiatorsURIs = new ArrayList<URI>();
        for (Initiator initiator : initiatorsToAdd) {
          initiatorsURIs.add(initiator.getId());
        }
        _log.info(
            String.format(
                "adding these initiators %s to mask %s",
                Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
        previousStep =
            stepMap.get(entry.getKey()) == null ? previousStep : stepMap.get(entry.getKey());
        generateExportMaskAddInitiatorsWorkflow(
            workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, null, token);
      }
    }
    return true;
  }
  /** {@inheritDoc} */
  @Override
  public TaskList create(
      List<BlockObject> fcSourceObjList,
      VirtualArray varray,
      String name,
      boolean createInactive,
      int count,
      String taskId) {

    // Populate the descriptors list with all volumes required
    // to create the VPLEX volume copies.
    int sourceCounter = 0;
    URI vplexSrcSystemId = null;
    TaskList taskList = new TaskList();
    List<Volume> vplexCopyVolumes = new ArrayList<Volume>();
    List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
    List<BlockObject> sortedSourceObjectList = sortFullCopySourceList(fcSourceObjList);
    for (BlockObject fcSourceObj : sortedSourceObjectList) {
      URI fcSourceURI = fcSourceObj.getId();
      if (URIUtil.isType(fcSourceURI, BlockSnapshot.class)) {
        // Full copies of snapshots are not supported for VPLEX; delegate to the base class.
        return super.create(sortedSourceObjectList, varray, name, createInactive, count, taskId);
      }

      Volume vplexSrcVolume = (Volume) fcSourceObj;
      String copyName = name + (sortedSourceObjectList.size() > 1 ? "-" + ++sourceCounter : "");

      // Create a volume descriptor for the source VPLEX volume being copied
      // and add it to the descriptors list. Be sure to identify this VPLEX
      // volume as the source volume being copied.
      vplexSrcSystemId = fcSourceObj.getStorageController();
      VolumeDescriptor vplexSrcVolumeDescr =
          new VolumeDescriptor(
              VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexSrcSystemId, fcSourceURI, null, null);
      Map<String, Object> descrParams = new HashMap<String, Object>();
      descrParams.put(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID, Boolean.TRUE);
      vplexSrcVolumeDescr.setParameters(descrParams);
      volumeDescriptors.add(vplexSrcVolumeDescr);

      // Get some info about the VPLEX volume being copied and its storage
      // system.
      Project vplexSrcProject =
          BlockFullCopyUtils.queryFullCopySourceProject(fcSourceObj, _dbClient);
      StorageSystem vplexSrcSystem = _dbClient.queryObject(StorageSystem.class, vplexSrcSystemId);
      Project vplexSystemProject =
          VPlexBlockServiceApiImpl.getVplexProject(vplexSrcSystem, _dbClient, _tenantsService);

      // For the VPLEX volume being copied, determine which of the associated
      // backend volumes is the primary and, for distributed volumes, which
      // is the HA volume. The primary volume will be natively copied and
      // we need to place and prepare a volume to hold the copy. This copy
      // will be the primary backend volume for the VPLEX volume copy. For
      // a distributed virtual volume, we will need to place and prepare
      // a volume to hold the HA volume of the VPLEX volume copy.
      Volume vplexSrcPrimaryVolume = null;
      Volume vplexSrcHAVolume = null;
      StringSet assocVolumeURIs = vplexSrcVolume.getAssociatedVolumes();
      Iterator<String> assocVolumeURIsIter = assocVolumeURIs.iterator();
      while (assocVolumeURIsIter.hasNext()) {
        URI assocVolumeURI = URI.create(assocVolumeURIsIter.next());
        Volume assocVolume = _dbClient.queryObject(Volume.class, assocVolumeURI);
        if (assocVolume.getVirtualArray().toString().equals(varray.getId().toString())) {
          vplexSrcPrimaryVolume = assocVolume;
        } else {
          vplexSrcHAVolume = assocVolume;
        }
      }

      // Get the capabilities
      VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(fcSourceObj, _dbClient);
      VirtualPoolCapabilityValuesWrapper capabilities =
          getCapabilitiesForFullCopyCreate(fcSourceObj, vpool, count);

      // Get the number of copies to create and the size of the volumes.
      // Note that for the size, we must use the actual provisioned size
      // of the source side backend volume. The size passed in the
      // capabilities will be the size of the VPLEX volume. When the
      // source side backend volume for the copy is provisioned, you
      // might not get that actual size. On VMAX, the size will be slightly
      // larger while for VNX the size will be exactly what is requested.
      // So, if the source side is a VMAX, the source side for the copy
      // will be slightly larger than the size in the capabilities. If the HA
      // side is VNX and we use the size in the capabilities, then you will
      // get exactly that size for the HA backend volume. As a result, the
      // source side backend volume for the copy will be slightly larger than
      // the HA side. Now, a VPLEX copy is made by using native full copy to
      // create a copy of the source side backend
      // volume. It then provisions the HA side volume. The new source side
      // backend copy is then imported into VPLEX in the same way as is done
      // for a vpool change that imports a volume to VPLEX. This code in the
      // VPLEX controller creates a local VPLEX volume using the source side
      // copy and for a distributed volume it then attaches as a remote
      // mirror the HA backend volume that is provisioned. If the HA volume
      // is slightly smaller, then this will fail on the VPLEX. So, we must
      // ensure that HA side volume is big enough by using the provisioned
      // capacity of the source side backend volume of the VPLEX volume being
      // copied.
      long size = vplexSrcPrimaryVolume.getProvisionedCapacity();

      // Place and prepare a volume for each copy to serve as a native
      // copy of a VPLEX backend volume. The VPLEX backend volume that
      // is copied is the backend volume in the same virtual array as the
      // VPLEX volume, i.e., the primary backend volume. Create
      // descriptors for these prepared volumes and add them to the list.
      List<Volume> vplexCopyPrimaryVolumes =
          prepareFullCopyPrimaryVolumes(
              copyName, count, vplexSrcPrimaryVolume, capabilities, volumeDescriptors);

      // If the VPLEX volume being copied is distributed, then the VPLEX
      // HA volume should be non-null. We use the VPLEX scheduler to place
      // and then prepare volumes for the HA volumes of the VPLEX volume
      // copies. This should be done in the same manner as is done for the
      // import volume routine. This is because to form the VPLEX volume
      // copy we import the copy of the primary backend volume.
      List<Volume> vplexCopyHAVolumes = new ArrayList<Volume>();
      if (vplexSrcHAVolume != null) {
        vplexCopyHAVolumes.addAll(
            prepareFullCopyHAVolumes(
                copyName,
                count,
                size,
                vplexSrcSystem,
                vplexSystemProject,
                varray,
                vplexSrcHAVolume,
                taskId,
                volumeDescriptors));
      }

      // For each copy to be created, place and prepare a volume for the
      // primary backend volume copy. When copying a distributed VPLEX
      // volume, we also must place and prepare a volume for the HA
      // backend volume copy. Lastly, we must prepare a volume for the
      // VPLEX volume copy. Create descriptors for these prepared volumes
      // and add them to the volume descriptors list.
      for (int i = 0; i < count; i++) {
        // Prepare a new VPLEX volume for each copy.
        Volume vplexCopyPrimaryVolume = vplexCopyPrimaryVolumes.get(i);
        Volume vplexCopyHAVolume = null;
        if (!vplexCopyHAVolumes.isEmpty()) {
          vplexCopyHAVolume = vplexCopyHAVolumes.get(i);
        }
        Volume vplexCopyVolume =
            prepareFullCopyVPlexVolume(
                copyName,
                count,
                i,
                size,
                vplexSrcVolume,
                vplexSrcProject,
                varray,
                vpool,
                vplexSrcSystemId,
                vplexCopyPrimaryVolume,
                vplexCopyHAVolume,
                taskId,
                volumeDescriptors);
        vplexCopyVolumes.add(vplexCopyVolume);

        // Create task for each copy.
        Operation op = vplexCopyVolume.getOpStatus().get(taskId);
        TaskResourceRep task = toTask(vplexCopyVolume, taskId, op);
        taskList.getTaskList().add(task);
      }
    }

    // Invoke the VPLEX controller to create the copies.
    try {
      s_logger.info("Getting VPlex controller {}.", taskId);
      VPlexController controller =
          getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString());
      // TBD controller needs to be updated to handle CGs.
      controller.createFullCopy(vplexSrcSystemId, volumeDescriptors, taskId);
      s_logger.info("Successfully invoked controller.");
    } catch (InternalException e) {
      s_logger.error("Controller error", e);

      // Update the status for the VPLEX volume copies and their
      // corresponding tasks.
      for (Volume vplexCopyVolume : vplexCopyVolumes) {
        Operation op = vplexCopyVolume.getOpStatus().get(taskId);
        if (op != null) {
          op.error(e);
          vplexCopyVolume.getOpStatus().updateTaskStatus(taskId, op);
          _dbClient.persistObject(vplexCopyVolume);
          for (TaskResourceRep task : taskList.getTaskList()) {
            if (task.getResource().getId().equals(vplexCopyVolume.getId())) {
              task.setState(op.getStatus());
              task.setMessage(op.getMessage());
              break;
            }
          }
        }
      }

      // Mark all volumes inactive, except for the VPLEX volume
      // we were trying to copy.
      for (VolumeDescriptor descriptor : volumeDescriptors) {
        if (descriptor.getParameters().get(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID) == null) {
          Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
          volume.setInactive(true);
          _dbClient.persistObject(volume);
        }
      }
    }

    return taskList;
  }