/**
   * Verify the migration of BlockConsistencyGroups. Ensure the type and deviceName fields have been
   * migrated to the types and deviceNames fields.
   *
   * @param consistencyGroup The migrated BlockConsistencyGroup to verify.
   * @param types The types that should have been properly migrated.
   * @throws Exception
   */
  private void verifyConsistencyGroupMigration(
      BlockConsistencyGroup consistencyGroup, String... types) throws Exception {
    log.info("Verifying BlockConsistencyGroup migration for " + consistencyGroup.getLabel());

    // Verify that the primary CG now has the VPlex type added to its types list and that
    // the deprecated type field is null.
    Assert.assertTrue(
        "The BlockConsistencyGroup.type field should be null.",
        consistencyGroup.getType().equals(NullColumnValueGetter.getNullStr()));

    for (String type : types) {
      Assert.assertNotNull(
          "The " + type + " BlockConsistencyGroup.types field should be populated.",
          consistencyGroup.getTypes());
      Assert.assertTrue(
          "The BlockConsistencyGroup.types field for "
              + consistencyGroup.getLabel()
              + " should contain "
              + type,
          consistencyGroup.getTypes().contains(type));

      // Verify that the deprecated deviceName field has been cleared.
      Assert.assertTrue(
          "The local array BlockConsistencyGroup.deviceName field should be null.",
          consistencyGroup.getDeviceName().equals(NullColumnValueGetter.getNullStr()));
    }
  }
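  // A minimal usage sketch (hypothetical values): after running the migration callback, a test
  // might call verifyConsistencyGroupMigration(migratedCg, Types.VPLEX.name(), Types.RP.name());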
  /**
   * This method cleans up UnManaged Volumes in the DB that have been deleted manually from the
   * array:
   *
   * <p>1. Get all UnManagedVolumes from the DB.
   *
   * <p>2. Store the URIs of unmanaged volumes returned from the provider in
   * unManagedVolumesBookKeepingList.
   *
   * <p>3. If an unmanaged volume is found only in the DB, but not in
   * unManagedVolumesBookKeepingList, then set the unmanaged volume to inactive.
   *
   * <p>DB | Provider
   *
   * <p>x,y,z | y,z,a [a --> new entry has been added but its index entries are not yet in the DB]
   *
   * <p>x --> will be set to inactive
   *
   * @param storagePoolUri The URI of the storage pool whose unmanaged volumes are reconciled
   * @throws IOException
   */
  private void performStorageUnManagedVolumeBookKeeping(URI storagePoolUri) throws IOException {
    @SuppressWarnings("deprecation")
    List<URI> unManagedVolumesInDB =
        _dbClient.queryByConstraint(
            ContainmentConstraint.Factory.getPoolUnManagedVolumeConstraint(storagePoolUri));

    Set<URI> unManagedVolumesInDBSet = new HashSet<URI>(unManagedVolumesInDB);
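    // Anything present in the DB but not returned by the provider no longer exists on the array.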
    SetView<URI> onlyAvailableinDB =
        Sets.difference(unManagedVolumesInDBSet, unManagedVolumesReturnedFromProvider);

    _logger.info("Diff :" + Joiner.on("\t").join(onlyAvailableinDB));
    if (onlyAvailableinDB.size() > 0) {
      List<UnManagedVolume> unManagedVolumeTobeDeleted = new ArrayList<UnManagedVolume>();
      Iterator<UnManagedVolume> unManagedVolumes =
          _dbClient.queryIterativeObjects(
              UnManagedVolume.class, new ArrayList<URI>(onlyAvailableinDB));

      while (unManagedVolumes.hasNext()) {
        UnManagedVolume volume = unManagedVolumes.next();
        if (null == volume || volume.getInactive()) {
          continue;
        }

        _logger.info("Setting unManagedVolume {} inactive", volume.getId());
        volume.setStoragePoolUri(NullColumnValueGetter.getNullURI());
        volume.setStorageSystemUri(NullColumnValueGetter.getNullURI());
        volume.setInactive(true);
        unManagedVolumeTobeDeleted.add(volume);
      }
      if (unManagedVolumeTobeDeleted.size() > 0) {
        _partitionManager.updateAndReIndexInBatches(
            unManagedVolumeTobeDeleted, 1000, _dbClient, "UnManagedVolume");
      }
    }
  }
  /**
   * This method cleans up UnManaged Volumes in the DB that have been deleted manually from the
   * array:
   *
   * <p>1. Get all UnManagedVolumes from the DB.
   *
   * <p>2. Store the URIs of unmanaged volumes returned from the provider in
   * unManagedVolumesBookKeepingList.
   *
   * <p>3. If an unmanaged volume is found only in the DB, but not in
   * unManagedVolumesBookKeepingList, then set the unmanaged volume to inactive.
   *
   * <p>DB | Provider
   *
   * <p>x,y,z | y,z,a [a --> new entry has been added but its index entries are not yet in the DB]
   *
   * <p>x --> will be set to inactive
   *
   * @param storageSystem The storage system whose unmanaged volumes are reconciled
   * @param discoveredUnManagedVolumes The URIs of the unmanaged volumes found in this discovery
   * @param dbClient The database client
   * @param partitionManager The partition manager used to batch the updates
   */
  public static void markInActiveUnManagedVolumes(
      StorageSystem storageSystem,
      Set<URI> discoveredUnManagedVolumes,
      DbClient dbClient,
      PartitionManager partitionManager) {

    _log.info(
        " -- Processing {} discovered UnManaged Volumes Objects from -- {}",
        discoveredUnManagedVolumes.size(),
        storageSystem.getLabel());
    if (discoveredUnManagedVolumes.isEmpty()) {
      return;
    }
    // Get all available existing unmanaged Volume URIs for this array from DB
    URIQueryResultList allAvailableUnManagedVolumesInDB = new URIQueryResultList();
    dbClient.queryByConstraint(
        ContainmentConstraint.Factory.getStorageDeviceUnManagedVolumeConstraint(
            storageSystem.getId()),
        allAvailableUnManagedVolumesInDB);

    Set<URI> unManagedVolumesInDBSet = new HashSet<URI>();
    Iterator<URI> allAvailableUnManagedVolumesItr = allAvailableUnManagedVolumesInDB.iterator();
    while (allAvailableUnManagedVolumesItr.hasNext()) {
      unManagedVolumesInDBSet.add(allAvailableUnManagedVolumesItr.next());
    }

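    // Volumes present in the DB but absent from this discovery are stale and must be marked inactive.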
    SetView<URI> onlyAvailableinDB =
        Sets.difference(unManagedVolumesInDBSet, discoveredUnManagedVolumes);

    _log.info("Diff :" + Joiner.on("\t").join(onlyAvailableinDB));
    if (!onlyAvailableinDB.isEmpty()) {
      List<UnManagedVolume> unManagedVolumeTobeDeleted = new ArrayList<UnManagedVolume>();
      Iterator<UnManagedVolume> unManagedVolumes =
          dbClient.queryIterativeObjects(
              UnManagedVolume.class, new ArrayList<URI>(onlyAvailableinDB));

      while (unManagedVolumes.hasNext()) {
        UnManagedVolume volume = unManagedVolumes.next();
        if (null == volume || volume.getInactive()) {
          continue;
        }

        _log.info("Setting unManagedVolume {} inactive", volume.getId());
        volume.setStoragePoolUri(NullColumnValueGetter.getNullURI());
        volume.setStorageSystemUri(NullColumnValueGetter.getNullURI());
        volume.setInactive(true);
        unManagedVolumeTobeDeleted.add(volume);
      }
      if (!unManagedVolumeTobeDeleted.isEmpty()) {
        partitionManager.updateAndReIndexInBatches(
            unManagedVolumeTobeDeleted, 1000, dbClient, UNMANAGED_VOLUME);
      }
    }
  }
  /**
   * Lists the id and name of all the vCenters that belong to the given tenant organization if the
   * requesting user is a SysAdmin or SecAdmin. If the requesting user is a TenantAdmin and the user
   * belongs to the "tenant" given in the query param, then all the vCenters of that tenant are
   * returned; otherwise only the vCenters shared with the user's tenant are returned.
   *
   * @param tid tenant to filter the vCenters by. "No-Filter" or "null" indicates listing all the
   *     vCenters in the system. "Not-Assigned" indicates listing all the vCenters with no tenant
   *     assigned to them.
   * @return a list of vCenters that belong to the tenant organization.
   * @throws DatabaseException when a DB error occurs
   */
  @GET
  @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
  public VcenterList listVcenters(@QueryParam("tenant") final URI tid) throws DatabaseException {
    VcenterList list = new VcenterList();
    List<Vcenter> vcenters = null;

    if (isSecurityAdmin() || isSystemAdmin()) {
      _log.debug("Fetching vCenters for {}", tid);
      if (NullColumnValueGetter.isNullURI(tid)
          || Vcenter.NO_TENANT_SELECTOR.equalsIgnoreCase(tid.toString())) {
        vcenters = getDataObjects(Vcenter.class);
        list.setVcenters(
            map(
                ResourceTypeEnum.VCENTER,
                getNamedElementsList(Vcenter.class, DATAOBJECT_NAME_FIELD, vcenters)));
      } else if (Vcenter.TENANT_SELECTOR_FOR_UNASSIGNED.equalsIgnoreCase(tid.toString())) {
        vcenters = getDataObjects(Vcenter.class);
        list.setVcenters(
            map(
                ResourceTypeEnum.VCENTER,
                getNamedElementsWithNoAcls(Vcenter.class, DATAOBJECT_NAME_FIELD, vcenters)));
      } else {
        ArgValidator.checkEntity(_dbClient.queryObject(tid), tid, isIdEmbeddedInURL(tid));
        list.setVcenters(
            map(
                ResourceTypeEnum.VCENTER,
                listChildrenWithAcls(tid, Vcenter.class, DATAOBJECT_NAME_FIELD)));
      }
      return list;
    }

    vcenters = getDataObjects(Vcenter.class);
    if (!CollectionUtils.isEmpty(vcenters)) {
      List<Vcenter> tenantVcenterList = null;
      if (shouldTenantAdminUseTenantParam(tid)) {
        // If the tenant admin can use the tid query param, then the filtering should
        // happen based on the tenant query param.
        tenantVcenterList = filterVcentersByTenant(vcenters, tid);
      } else {
        // Get the vCenters based on the User's tenant org. If the user is not a tenant admin,
        // insufficient
        // permission exception will be thrown.
        tenantVcenterList = filterVcentersByTenant(vcenters, NullColumnValueGetter.getNullURI());
      }
      list.setVcenters(
          map(
              ResourceTypeEnum.VCENTER,
              getNamedElementsList(Vcenter.class, DATAOBJECT_NAME_FIELD, tenantVcenterList)));
    }
    return list;
  }
  @Override
  protected void complete(DbClient dbClient, Status status, ServiceCoded coded)
      throws DeviceControllerException {
    if (isNotifyWorkflow()) {
      // If there is a workflow, update the step to complete.
      updateWorkflowStatus(status, coded);
    }

    // if export updates were successful, remove all old initiators and deleted hosts
    if (status.equals(Status.ready)) {
      for (HostStateChange hostChange : changes) {
        for (URI initiatorId : hostChange.getOldInitiators()) {
          Initiator initiator = dbClient.queryObject(Initiator.class, initiatorId);
          dbClient.markForDeletion(initiator);
          _logger.info("Initiator marked for deletion: " + this.getId());
        }
      }

      for (URI hostId : deletedHosts) {
        Host host = dbClient.queryObject(Host.class, hostId);
        // don't delete the host if it was provisioned by ViPR
        if (!NullColumnValueGetter.isNullURI(host.getComputeElement())) {
          _logger.info(
              "do not delete provisioned host {} - disassociate it from vcenter", host.getLabel());
          host.setVcenterDataCenter(NullColumnValueGetter.getNullURI());
          dbClient.persistObject(host);
        } else {
          ComputeSystemHelper.doDeactivateHost(dbClient, host);
          _logger.info("Deactivating Host: " + host.getId());
        }
      }

      for (URI clusterId : deletedClusters) {
        Cluster cluster = dbClient.queryObject(Cluster.class, clusterId);
        List<URI> clusterHosts =
            ComputeSystemHelper.getChildrenUris(dbClient, clusterId, Host.class, "cluster");
        // don't delete cluster if all hosts weren't deleted (ex: hosts provisioned by ViPR)
        if (!clusterHosts.isEmpty()) {
          _logger.info(
              "do not delete cluster {} - it still has hosts - disassociate it from vcenter",
              cluster.getLabel());
          cluster.setVcenterDataCenter(NullColumnValueGetter.getNullURI());
          cluster.setExternalId(NullColumnValueGetter.getNullStr());
          dbClient.persistObject(cluster);
        } else {
          ComputeSystemHelper.doDeactivateCluster(dbClient, cluster);
          _logger.info("Deactivating Cluster: " + cluster.getId());
        }
      }
    }
  }
  /**
   * Adds the tenant to the vCenter acls if the tenant admin is creating it. This always sets the
   * vCenter tenant (the old, deprecated field) to null.
   *
   * @param tenant a valid tenant org if the tenant admin is creating it.
   * @param vcenter the vCenter being created.
   */
  private void addVcenterAclIfTenantAdmin(TenantOrg tenant, Vcenter vcenter) {
    // Always set the deprecated tenant field of a vCenter to null.
    vcenter.setTenant(NullColumnValueGetter.getNullURI());

    if (isSystemAdmin()) {
      return;
    }

    URI tenantId;
    if (tenant != null) {
      tenantId = tenant.getId();
    } else {
      // If the tenant org is not valid, try to use the
      // user's tenant org.
      tenantId = URI.create(getUserFromContext().getTenantId());
    }

    // If the User is an admin in the tenant org, allow the
    // operation otherwise, report the insufficient permission
    // exception.
    if (_permissionsHelper.userHasGivenRole(getUserFromContext(), tenantId, Role.TENANT_ADMIN)) {
      // Generate the acl entry and add to the vCenters acls.
      String aclKey = _permissionsHelper.getTenantUsePermissionKey(tenantId.toString());
      vcenter.addAcl(aclKey, ACL.USE.name());
      _log.debug("Adding {} to the vCenter {} acls", aclKey, vcenter.getLabel());
    }
  }
  /**
   * Filters the vCenters by the tenant. If the provided tenant is null or the tenant does not share
   * the vCenter, then the vCenters are filtered by the user's tenant.
   *
   * @param vcenters to be filtered by the tenant.
   * @param tenantId to be used for filtering the vCenter.
   * @return the list of vCenters that belong to the tenantId or the user's tenant org.
   */
  private List<Vcenter> filterVcentersByTenant(List<Vcenter> vcenters, URI tenantId) {
    List<Vcenter> tenantVcenterList = new ArrayList<Vcenter>();
    Iterator<Vcenter> vcenterIt = vcenters.iterator();
    while (vcenterIt.hasNext()) {
      Vcenter vcenter = vcenterIt.next();
      if (vcenter == null) {
        continue;
      }

      Set<URI> tenantUris = _permissionsHelper.getUsageURIsFromAcls(vcenter.getAcls());
      if (CollectionUtils.isEmpty(tenantUris)) {
        continue;
      }

      if (!NullColumnValueGetter.isNullURI(tenantId) && !tenantUris.contains(tenantId)) {
        // The tenantId is not a null URI and it is not available in the vCenter acls,
        // so, dont add to the filtered list.
        continue;
      }

      Iterator<URI> tenantUriIt = tenantUris.iterator();
      while (tenantUriIt.hasNext()) {
        if (verifyAuthorizedInTenantOrg(tenantUriIt.next())) {
          tenantVcenterList.add(vcenter);
        }
      }
    }
    return tenantVcenterList;
  }
 /**
  * Creates a new vCenter data center.
  *
  * @param id the URN of the parent vCenter
  * @param createParam the details of the data center
  * @prereq none
  * @brief Create vCenter data center
  * @return the details of the vCenter data center, including its id and link, when creation
  *     completes successfully.
  * @throws DatabaseException when a database error occurs.
  */
 @POST
 @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
 @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
 @CheckPermission(roles = {Role.SYSTEM_ADMIN, Role.TENANT_ADMIN})
 @Path("/{id}/vcenter-data-centers")
 public VcenterDataCenterRestRep createVcenterDataCenter(
     @PathParam("id") URI id, VcenterDataCenterCreate createParam) throws DatabaseException {
   Vcenter vcenter = queryObject(Vcenter.class, id, false);
   checkDuplicateChildName(
       id,
       VcenterDataCenter.class,
       DATAOBJECT_NAME_FIELD,
       "vcenter",
       createParam.getName(),
       _dbClient);
   VcenterDataCenter datacenter = new VcenterDataCenter();
   datacenter.setId(URIUtil.createId(VcenterDataCenter.class));
   datacenter.setLabel(createParam.getName());
   datacenter.setVcenter(id);
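   // If the vCenter was created by a tenant admin, the data center inherits the tenant from the
   // vCenter's ACLs; otherwise it is left unassigned.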
   if (vcenter.getTenantCreated()) {
     datacenter.setTenant(_permissionsHelper.getTenant(vcenter.getAcls()));
   } else {
     datacenter.setTenant(NullColumnValueGetter.getNullURI());
   }
   _dbClient.createObject(datacenter);
   auditOp(OperationTypeEnum.CREATE_VCENTER_DATACENTER, true, null, datacenter.auditParameters());
   return map(datacenter);
 }
  /**
   * Add the application to the volume applicationIds attribute
   *
   * @param voluri The volume that will be updated
   * @param dbClient
   */
  protected void addApplicationToVolume(URI voluri, DbClient dbClient) {
    Volume volume = dbClient.queryObject(Volume.class, voluri);
    StringSet applications = volume.getVolumeGroupIds();
    if (applications == null) {
      applications = new StringSet();
    }
    applications.add(getId().toString());
    volume.setVolumeGroupIds(applications);
    dbClient.updateObject(volume);

    // handle clones
    StringSet fullCopies = volume.getFullCopies();
    List<Volume> fullCopiesToUpdate = new ArrayList<Volume>();
    if (fullCopies != null && !fullCopies.isEmpty()) {
      for (String fullCopyId : fullCopies) {
        Volume fullCopy = dbClient.queryObject(Volume.class, URI.create(fullCopyId));
        if (fullCopy != null && NullColumnValueGetter.isNullValue(fullCopy.getFullCopySetName())) {
          fullCopy.setFullCopySetName(fullCopy.getReplicationGroupInstance());
          fullCopiesToUpdate.add(fullCopy);
        }
      }
    }

    if (!fullCopiesToUpdate.isEmpty()) {
      dbClient.updateObject(fullCopiesToUpdate);
    }
  }
 @Override
 public void process() {
   DbClient dbClient = getDbClient();
   List<URI> volumeURIs = dbClient.queryByType(Volume.class, false);
   Iterator<Volume> volumesIter = dbClient.queryIterativeObjects(Volume.class, volumeURIs);
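   // Walk all volumes and, for VPLEX volumes, make sure the FC protocol is recorded.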
   while (volumesIter.hasNext()) {
     Volume volume = volumesIter.next();
     URI systemURI = volume.getStorageController();
     if (!NullColumnValueGetter.isNullURI(systemURI)) {
       StorageSystem system = dbClient.queryObject(StorageSystem.class, systemURI);
       if ((system != null)
           && (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType()))) {
         // This is a VPLEX volume. If not already set,
         // set the protocols to FC.
         StringSet protocols = volume.getProtocol();
         if (protocols == null) {
           protocols = new StringSet();
           protocols.add(StorageProtocol.Block.FC.name());
           volume.setProtocol(protocols);
           dbClient.persistObject(volume);
         }
       }
     }
   }
 }
 /**
  * Prepare the ViPR BlockSnapshotSession instance for the passed BlockSnapshot instance.
  *
  * @param snapshot A reference to the snapshot.
  * @return A reference to the newly created snapshot session.
  */
 private BlockSnapshotSession prepareSnapshotSession(BlockSnapshot snapshot) {
   s_logger.info("Prepare BlockSnapshotSession for snapshot {}", snapshot.getId());
   BlockSnapshotSession snapshotSession = new BlockSnapshotSession();
   URI snapSessionURI = URIUtil.createId(BlockSnapshotSession.class);
   snapshotSession.setId(snapSessionURI);
   snapshotSession.setSessionLabel(getSessionLabelFromSettingsInstance(snapshot));
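   // A single-volume snapshot session is parented to the snapshot's source volume; a group snapshot
   // session is tied to the consistency group and its replication group instead.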
   URI cgURI = snapshot.getConsistencyGroup();
   if (NullColumnValueGetter.isNullURI(cgURI)) {
     snapshotSession.setParent(snapshot.getParent());
     snapshotSession.setLabel(snapshot.getLabel());
   } else {
     snapshotSession.setConsistencyGroup(cgURI);
     snapshotSession.setLabel(snapshot.getSnapsetLabel());
     Volume parent = getDbClient().queryObject(Volume.class, snapshot.getParent());
     if (parent != null) {
       snapshotSession.setReplicationGroupInstance(parent.getReplicationGroupInstance());
       snapshotSession.setSessionSetName(parent.getReplicationGroupInstance());
     }
   }
   snapshotSession.setProject(snapshot.getProject());
   snapshotSession.setStorageController(snapshot.getStorageController());
   snapshotSession.setSessionInstance(snapshot.getSettingsInstance());
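   // The snapshot itself becomes the first linked target of the new session.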
   StringSet linkedTargets = new StringSet();
   linkedTargets.add(snapshot.getId().toString());
   snapshotSession.setLinkedTargets(linkedTargets);
   return snapshotSession;
 }
  /** {@inheritDoc} */
  @Override
  public List<BlockObject> getAllSourceObjectsForFullCopyRequest(BlockObject fcSourceObj) {

    // Treats full copies of snapshots as is done in base class.
    if (URIUtil.isType(fcSourceObj.getId(), BlockSnapshot.class)) {
      return super.getAllSourceObjectsForFullCopyRequest(fcSourceObj);
    }

    // By default, if the passed volume is in a consistency group
    // all volumes in the consistency group should be copied.
    List<BlockObject> fcSourceObjList = new ArrayList<BlockObject>();
    URI cgURI = fcSourceObj.getConsistencyGroup();
    if (!NullColumnValueGetter.isNullURI(cgURI)) {
      BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, cgURI);
      // If there is no corresponding native CG for the VPLEX
      // CG, then this is a CG created prior to 2.2. In this
      // case we want full copies treated like snapshots, which
      // means we only create a copy of the passed object.
      if (!cg.checkForType(Types.LOCAL)) {
        fcSourceObjList.add(fcSourceObj);
      } else {
        fcSourceObjList.addAll(getActiveCGVolumes(cg));
      }
    } else {
      fcSourceObjList.add(fcSourceObj);
    }

    return fcSourceObjList;
  }
 private static void addNoneTenantOption(String id, List<TenantOrgRestRep> vCenterTenantOptions) {
   VcenterRestRep vcenterRestRep = VCenterUtils.getVCenter(uri(id));
   if (vcenterRestRep != null && !vcenterRestRep.getCascadeTenancy()) {
     TenantOrgRestRep noneTenantOption = new TenantOrgRestRep();
     noneTenantOption.setName("None");
     noneTenantOption.setId(NullColumnValueGetter.getNullURI());
     vCenterTenantOptions.add(noneTenantOption);
   }
 }
 /**
  * Checks if the tenant admin can use the "tenant" query param while listing the vCenters and
  * vCenter data centers. The tenant admin can use it if the "tenant" query param is not equal to
  * "No-Filter" or "Not-Assigned" and the tenant admin is an admin of the tenant represented by
  * the "tenant" query param.
  *
  * @param tid "tenant" query param to be validated if the tenant admin can use that or not
  * @return true if tenant admin can use the "tenant" query param to filter the vCenters or vCenter
  *     data centers and false otherwise.
  */
 private boolean shouldTenantAdminUseTenantParam(URI tid) {
   if (!NullColumnValueGetter.isNullURI(tid)
       && !TenantResource.TENANT_SELECTOR_FOR_UNASSIGNED.equalsIgnoreCase(tid.toString())
       && !TenantResource.NO_TENANT_SELECTOR.equalsIgnoreCase(tid.toString())
       && _permissionsHelper.userHasGivenRole(getUserFromContext(), tid, Role.TENANT_ADMIN)) {
     return true;
   }
   return false;
 }
 /*
  * (non-Javadoc)
  *
  * @see com.emc.storageos.volumecontroller.CloneOperations#detachSingleClone(
  * com.emc.storageos.db.client.model.StorageSystem,
  * java.net.URI,
  * com.emc.storageos.volumecontroller.TaskCompleter)
  */
 @Override
 public void detachSingleClone(
     StorageSystem storageSystem, URI cloneVolume, TaskCompleter taskCompleter) {
    // No array-side detach is required; just mark the clone as detached in the database.
   Volume clone = dbClient.queryObject(Volume.class, cloneVolume);
   clone.setAssociatedSourceVolume(NullColumnValueGetter.getNullURI());
   clone.setReplicaState(ReplicationState.DETACHED.name());
   dbClient.persistObject(clone);
   taskCompleter.ready(dbClient);
 }
  /** {@inheritDoc} */
  @Override
  public void process() throws MigrationCallbackException {
    s_logger.info("Executing BlockSnapshotSession migration callback.");
    try {
      DbClient dbClient = getDbClient();
      List<BlockSnapshotSession> snapshotSessions = new ArrayList<BlockSnapshotSession>();
      Map<URI, Map<String, BlockSnapshotSession>> groupSessionMap = new HashMap<>();
      List<URI> snapshotURIs = dbClient.queryByType(BlockSnapshot.class, true);
      Iterator<BlockSnapshot> snapshotsIter =
          dbClient.queryIterativeObjects(BlockSnapshot.class, snapshotURIs, true);
      while (snapshotsIter.hasNext()) {
        BlockSnapshot snapshot = snapshotsIter.next();
        if (isSnapshotSessionSupported(snapshot)) {
          // Check if this is a group snapshot.
          URI cgURI = snapshot.getConsistencyGroup();
          if (NullColumnValueGetter.isNullURI(cgURI)) {
            // The storage system for this single volume snapshot supports
            // snapshot sessions, so we need to prepare and create a
            // snapshot session for that snapshot and add the snapshot as
            // a linked target for the session.
            BlockSnapshotSession snapshotSession = prepareSnapshotSession(snapshot);
            snapshotSessions.add(snapshotSession);
          } else {
            // Create the group session if necessary and add the snapshot as a
            // linked target for that group session.
            String settingsInstance = snapshot.getSettingsInstance();
            Map<String, BlockSnapshotSession> grpSnapshotSessions = groupSessionMap.get(cgURI);
            if (grpSnapshotSessions != null) {
              BlockSnapshotSession snapshotSession = grpSnapshotSessions.get(settingsInstance);
              if (snapshotSession == null) {
                snapshotSession = prepareSnapshotSession(snapshot);
                grpSnapshotSessions.put(settingsInstance, snapshotSession);
                snapshotSessions.add(snapshotSession);
              } else {
                StringSet linkedTargets = snapshotSession.getLinkedTargets();
                linkedTargets.add(snapshot.getId().toString());
              }
            } else {
              grpSnapshotSessions = new HashMap<String, BlockSnapshotSession>();
              groupSessionMap.put(cgURI, grpSnapshotSessions);
              BlockSnapshotSession snapshotSession = prepareSnapshotSession(snapshot);
              grpSnapshotSessions.put(settingsInstance, snapshotSession);
              snapshotSessions.add(snapshotSession);
            }
          }
        }
      }

      if (!snapshotSessions.isEmpty()) {
        dbClient.createObject(snapshotSessions);
      }
    } catch (Exception e) {
      s_logger.error("Caught exception during BlockSnapshotSession migration", e);
    }
  }
  private void verifyEmptyConsistencyGroupMigration() throws Exception {
    log.info("Verifying empty/unused BlockConsistencyGroup.");
    BlockConsistencyGroup emptyCg = _dbClient.queryObject(BlockConsistencyGroup.class, emptyCgURI);

    // Verify that the empty CG's deprecated type field is null.
    Assert.assertTrue(
        "The empty BlockConsistencyGroup.type field should be null.",
        emptyCg.getType().equals(NullColumnValueGetter.getNullStr()));

    Assert.assertTrue(
        "The BlockConsistencyGroup.types field should be null.", emptyCg.getTypes().isEmpty());
  }
 @Override
 protected Volume.VolumeAccessState getVolumeAccessStateForSuccess(Volume v) {
   if (!NullColumnValueGetter.isNullValue(v.getSrdfCopyMode())
       && Mode.ACTIVE.equals(Mode.valueOf(v.getSrdfCopyMode()))
       && v.getPersonality().equals(Volume.PersonalityTypes.TARGET.toString())) {
     // For Active mode target access state is always updated from the provider
     // after each operation so just use that.
     return Volume.VolumeAccessState.getVolumeAccessState(v.getAccessState());
   } else {
     return Volume.VolumeAccessState.READWRITE;
   }
 }
  public static void markInActiveUnManagedExportMask(
      URI storageSystemUri,
      Set<URI> discoveredUnManagedExportMasks,
      DbClient dbClient,
      PartitionManager partitionManager) {

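    // Find all unmanaged export masks currently persisted for this storage system.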
    URIQueryResultList result = new URIQueryResultList();
    dbClient.queryByConstraint(
        ContainmentConstraint.Factory.getStorageSystemUnManagedExportMaskConstraint(
            storageSystemUri),
        result);
    Set<URI> allMasksInDatabase = new HashSet<URI>();
    Iterator<URI> it = result.iterator();
    while (it.hasNext()) {
      allMasksInDatabase.add(it.next());
    }

    SetView<URI> onlyAvailableinDB =
        Sets.difference(allMasksInDatabase, discoveredUnManagedExportMasks);

    if (!onlyAvailableinDB.isEmpty()) {
      _log.info(
          "these UnManagedExportMasks are orphaned and will be cleaned up:"
              + Joiner.on("\t").join(onlyAvailableinDB));

      List<UnManagedExportMask> unManagedExportMasksToBeDeleted =
          new ArrayList<UnManagedExportMask>();
      Iterator<UnManagedExportMask> unManagedExportMasks =
          dbClient.queryIterativeObjects(
              UnManagedExportMask.class, new ArrayList<URI>(onlyAvailableinDB));

      while (unManagedExportMasks.hasNext()) {

        UnManagedExportMask uem = unManagedExportMasks.next();
        if (null == uem || uem.getInactive()) {
          continue;
        }

        _log.info("Setting UnManagedExportMask {} inactive", uem.getMaskingViewPath());
        uem.setStorageSystemUri(NullColumnValueGetter.getNullURI());
        uem.setInactive(true);
        unManagedExportMasksToBeDeleted.add(uem);
      }
      if (!unManagedExportMasksToBeDeleted.isEmpty()) {
        partitionManager.updateAndReIndexInBatches(
            unManagedExportMasksToBeDeleted,
            Constants.DEFAULT_PARTITION_SIZE,
            dbClient,
            UNMANAGED_EXPORT_MASK);
      }
    }
  }
  private static void renderTenantOptions() {
    if (TenantUtils.canReadAllTenantsForVcenters() && VCenterUtils.canUpdateACLs()) {
      List<StringOption> tenantOptions = dataObjectOptions(await(new TenantsCall().asPromise()));
      renderArgs.put("tenantOptions", tenantOptions);

      List<StringOption> tenantOptionsWithNone = new ArrayList<StringOption>();

      tenantOptionsWithNone.add(
          new StringOption(NullColumnValueGetter.getNullStr().toString(), "None"));
      tenantOptionsWithNone.addAll(tenantOptions);
      renderArgs.put("tenantOptionsWithNone", tenantOptionsWithNone);
    }
  }
  /**
   * Generates a workflow step to remove volumes from an ExportMask.
   *
   * @param workflow The workflow to add the step to
   * @param previousStep The previous workflow step to wait for, or null
   * @param exportGroup The export group being operated on
   * @param exportMask The export mask to remove the volumes from
   * @param volumesToRemove The URIs of the volumes to remove
   * @param completer The task completer to use, or null to create a default one
   * @return The id of the generated workflow step
   */
  public String generateWorkflowStepToToRemoveVolumesFromExportMask(
      Workflow workflow,
      String previousStep,
      ExportGroup exportGroup,
      ExportMask exportMask,
      List<URI> volumesToRemove,
      ExportTaskCompleter completer) {
    URI exportGroupURI = exportGroup.getId();

    String stepId = workflow.createStepId();
    ExportTaskCompleter exportTaskCompleter;
    if (completer != null) {
      exportTaskCompleter = completer;
      exportTaskCompleter.setOpId(stepId);
    } else {
      exportTaskCompleter =
          new ExportMaskRemoveVolumeCompleter(
              exportGroupURI, exportMask.getId(), volumesToRemove, stepId);
    }

    Workflow.Method removeVolumesFromExportMaskExecuteMethod =
        new Workflow.Method(
            "doExportGroupToCleanVolumesInExportMask",
            exportGroupURI,
            exportMask.getId(),
            volumesToRemove,
            exportTaskCompleter);

    stepId =
        workflow.createStep(
            EXPORT_MASK_CLEANUP_TASK,
            String.format(
                "ExportMask to removeVolumes %s (%s)",
                exportMask.getMaskName(), exportMask.getId().toString()),
            previousStep,
            NullColumnValueGetter.getNullURI(),
            "storage-system",
            MaskingWorkflowEntryPoints.class,
            removeVolumesFromExportMaskExecuteMethod,
            null,
            stepId);

    return stepId;
  }
  /**
   * Generates a workflow step to mark the ExportMask inactive.
   *
   * @param workflow The workflow to add the step to
   * @param previousStep The previous workflow step to wait for, or null
   * @param exportGroup The export group being operated on
   * @param exportMask The export mask to mark inactive
   * @param completer The task completer to use, or null to create a default one
   * @return The id of the generated workflow step
   */
  public String generateWorkflowStepToMarkExportMaskInActive(
      Workflow workflow,
      String previousStep,
      ExportGroup exportGroup,
      ExportMask exportMask,
      ExportTaskCompleter completer) {
    URI exportGroupURI = exportGroup.getId();

    String stepId = workflow.createStepId();
    ExportTaskCompleter exportTaskCompleter;
    if (completer != null) {
      exportTaskCompleter = completer;
      exportTaskCompleter.setOpId(stepId);
    } else {
      exportTaskCompleter =
          new ExportMaskDeleteCompleter(exportGroupURI, exportMask.getId(), stepId);
    }

    Workflow.Method markExportMaskInActiveExecuteMethod =
        new Workflow.Method(
            "doExportGroupToCleanExportMask",
            exportGroupURI,
            exportMask.getId(),
            exportTaskCompleter);

    stepId =
        workflow.createStep(
            EXPORT_MASK_CLEANUP_TASK,
            String.format(
                "Marking exportmasks to inactive %s (%s)",
                exportMask.getMaskName(), exportMask.getId().toString()),
            previousStep,
            NullColumnValueGetter.getNullURI(),
            "storage-system",
            MaskingWorkflowEntryPoints.class,
            markExportMaskInActiveExecuteMethod,
            null,
            stepId);

    return stepId;
  }
    public ACLAssignmentChanges getAclAssignmentChanges() {
      Set<String> tenantIds = Sets.newHashSet();

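      // With cascade tenancy, only the single owning tenant is included in the ACL update;
      // otherwise all explicitly selected tenants are.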
      if (this.cascadeTenancy) {
        if (StringUtils.isNotBlank(this.tenant)
            && !this.tenant.equalsIgnoreCase(NullColumnValueGetter.getNullStr().toString())) {
          tenantIds.add(this.tenant);
        }
      } else if (!CollectionUtils.isEmpty(this.tenants)) {
        tenantIds.addAll(this.tenants);
      }

      List<ACLEntry> existingAcls = new ArrayList<ACLEntry>();
      if (StringUtils.isNotBlank(this.id)) {
        existingAcls = VCenterUtils.getAcl(URI.create(this.id));
      }
      ACLUpdateBuilder builder = new ACLUpdateBuilder(existingAcls);
      builder.setTenants(tenantIds);

      return builder.getACLUpdate();
    }
  public static void editVcenterDataCenter(String vcenterDataCenterId, String tenant) {
    VcenterDataCenterRestRep vcenterDataCenter =
        VcenterDataCenterUtils.getDataCenter(uri(vcenterDataCenterId));
    if (vcenterDataCenter != null) {
      try {
        URI tenantId = NullColumnValueGetter.getNullURI();
        if (StringUtils.isNotBlank(tenant)) {
          tenantId = uri(tenant);
        }

        VcenterDataCenterUtils.updateDataCenter(uri(vcenterDataCenterId), tenantId);
        list();
      } catch (Exception e) {
        flash.error(MessagesUtils.get("validation.vcenter.messageAndError", e.getMessage()));
        Common.handleError();
      }
    } else {
      flash.error(MessagesUtils.get(UNKNOWN, vcenterDataCenterId));
      list();
    }
  }
  /**
   * Updates the tenant information in the Task data object and TaskResourceRep (the response object
   * to the API request). Both the Task and the TaskResourceRep are updated with the user's tenant
   * information if they don't already contain any tenant information.
   *
   * @param taskResourceRep api response to be updated.
   */
  private void updateTaskTenant(TaskResourceRep taskResourceRep) {
    Task task = _dbClient.queryObject(Task.class, taskResourceRep.getId());
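    // Only fill in the tenant when the task doesn't already carry one.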
    if (areEqual(task.getTenant(), NullColumnValueGetter.getNullURI())) {
      StorageOSUser user = getUserFromContext();
      URI userTenantUri = URI.create(user.getTenantId());
      task.setTenant(userTenantUri);

      RelatedResourceRep tenant = new RelatedResourceRep();
      tenant.setId(userTenantUri);
      tenant.setLink(new RestLinkRep("self", URI.create("/tenants/" + userTenantUri.toString())));

      taskResourceRep.setTenant(tenant);
      _dbClient.persistObject(task);

      List<String> traceParams = new ArrayList<String>();
      traceParams.add(task.getId().toString());
      traceParams.add(user.getName());
      traceParams.add(user.getTenantId());

      _log.info("Update the task {} with the user's {} tenant {}", traceParams);
    }
  }
  public static URI newLink(ResourceTypeEnum res, URI resource_id) {
    try {
      if (resource_id == null) {
        return new URI("/");
      }

      if (res == null) {
        return new URI("/" + resource_id);
      }

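      // For these resource types the link is resolved through a cache backed by a DB lookup.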
      if (res == ResourceTypeEnum.STORAGE_POOL
          || res == ResourceTypeEnum.STORAGE_PORT
          || res == ResourceTypeEnum.BLOCK_MIRROR
          || res == ResourceTypeEnum.RDF_GROUP
          || res == ResourceTypeEnum.VPLEX_MIRROR) {
        URI link = _linkCache.get(resource_id);
        if (link == null) {
          DataObject resource =
              _dbClient.queryObject(ResourceTypeMapping.getDataObjectClass(res), resource_id);
          if (resource != null) {
            link = newLink(resource);
            _linkCache.put(resource_id, link);
          } else {
            link = NullColumnValueGetter.getNullURI();
          }
        }
        return link;
      } else {
        return simpleServiceLink(res, resource_id);
      }
    } catch (URISyntaxException ex) {
      return null; // impossible;
    } catch (DatabaseException ex) {
      return null;
    }
  }
  /**
   * Filters vpools from the qualified list. For an RP source volume, filter out anything other
   * than RP source vpools; otherwise, filter out RP and SRDF source vpools.
   *
   * @param dbClient dbclient
   * @param unManagedVolume unmanaged volume
   * @param personality SOURCE, TARGET, or METADATA
   */
  private void filterProtectedVpools(
      DbClient dbClient, UnManagedVolume unManagedVolume, String personality) {

    if (unManagedVolume.getSupportedVpoolUris() != null
        && !unManagedVolume.getSupportedVpoolUris().isEmpty()) {
      Iterator<VirtualPool> vpoolItr =
          dbClient.queryIterativeObjects(
              VirtualPool.class, URIUtil.toURIList(unManagedVolume.getSupportedVpoolUris()));
      while (vpoolItr.hasNext()) {
        boolean remove = false;
        VirtualPool vpool = vpoolItr.next();

        // If this is an SRDF source vpool, we can filter it out since we're dealing with an RP volume
        if (vpool.getProtectionRemoteCopySettings() != null) {
          remove = true;
        }

        // If this is not an RP source, the vpool should be filtered out if:
        // The vpool is an RP vpool (has settings) and target vpools are non-null
        if (vpool.getProtectionVarraySettings() != null
            && ((Volume.PersonalityTypes.TARGET.name().equalsIgnoreCase(personality))
                || Volume.PersonalityTypes.METADATA.name().equalsIgnoreCase(personality))) {
          boolean foundEmptyTargetVpool = false;
          Map<URI, VpoolProtectionVarraySettings> settings =
              VirtualPool.getProtectionSettings(vpool, dbClient);
          for (Map.Entry<URI, VpoolProtectionVarraySettings> setting : settings.entrySet()) {
            if (NullColumnValueGetter.isNullURI(setting.getValue().getVirtualPool())) {
              foundEmptyTargetVpool = true;
              break;
            }
          }

          // If this is a journal volume, also check the journal vpools. If they're not set, we
          // cannot filter out this vpool.
          if (Volume.PersonalityTypes.METADATA.name().equalsIgnoreCase(personality)
              && (NullColumnValueGetter.isNullValue(vpool.getJournalVpool())
                  || NullColumnValueGetter.isNullValue(vpool.getStandbyJournalVpool()))) {
            foundEmptyTargetVpool = true;
          }

          // If every relevant target (and journal for journal volumes) vpool is filled-in, then
          // you would never assign your target volume to this source vpool, so filter it out.
          if (!foundEmptyTargetVpool) {
            remove = true;
          }
        }

        if (Volume.PersonalityTypes.SOURCE.name().equalsIgnoreCase(personality)) {
          if (!VirtualPool.vPoolSpecifiesProtection(vpool)) {
            // If this an RP source, the vpool must be an RP vpool
            remove = true;
          } else if (unManagedVolume
                  .getVolumeInformation()
                  .containsKey(SupportedVolumeInformation.RP_STANDBY_INTERNAL_SITENAME.toString())
              && !VirtualPool.vPoolSpecifiesMetroPoint(vpool)) {
            // Since this is a Source volume and RP_STANDBY_INTERNAL_SITENAME is present, this
            // volume is MetroPoint; if we get here, this vpool must be filtered out since it's
            // not MetroPoint.
            remove = true;
          }
        }

        if (remove) {
          log.info(
              "Removing virtual pool "
                  + vpool.getLabel()
                  + " from supported vpools for unmanaged volume: "
                  + unManagedVolume.getLabel());
          unManagedVolume.getSupportedVpoolUris().remove(vpool.getId().toString());
        }
      }
    }
  }
  /**
   * Routine contains logic to create an export mask on the array
   *
   * @param workflow - Workflow object to create steps against
   * @param previousStep - [optional] Identifier of workflow step to wait for
   * @param device - BlockStorageDevice implementation
   * @param storage - StorageSystem object representing the underlying array
   * @param exportGroup - ExportGroup object representing Bourne-level masking
   * @param initiatorURIs - List of Initiator URIs
   * @param volumeMap - Map of Volume URIs to requested Integer HLUs
   * @param zoningStepNeeded - Not required for HDS
   * @param token - Identifier for the operation
   * @throws Exception
   */
  public boolean determineExportGroupCreateSteps(
      Workflow workflow,
      String previousStep,
      BlockStorageDevice device,
      StorageSystem storage,
      ExportGroup exportGroup,
      List<URI> initiatorURIs,
      Map<URI, Integer> volumeMap,
      boolean zoningStepNeeded,
      String token)
      throws Exception {
    Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
    List<URI> volumeURIs = new ArrayList<URI>();
    volumeURIs.addAll(volumeMap.keySet());
    Map<URI, URI> hostToExistingExportMaskMap = new HashMap<URI, URI>();
    List<URI> hostURIs = new ArrayList<URI>();
    List<String> portNames = new ArrayList<String>();
    List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
    // Populate the port WWN/IQNs (portNames) and the
    // mapping of the WWN/IQNs to Initiator URIs
    processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);

    // We always want to have the full list of initiators for the hosts involved in
    // this export. This will allow the export operation to always find any
    // existing exports for a given host.
    queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);

    // Find the export masks that are associated with any or all the ports in
    // portNames. We will have to do the processing differently based on whether
    // or not there are existing ExportMasks.
    Map<String, Set<URI>> matchingExportMaskURIs =
        device.findExportMasks(storage, portNames, false);
    if (matchingExportMaskURIs.isEmpty()) {

      _log.info(
          String.format(
              "No existing mask found w/ initiators { %s }", Joiner.on(",").join(portNames)));

      createNewExportMaskWorkflowForInitiators(
          initiatorURIs, exportGroup, workflow, volumeMap, storage, token, previousStep);
    } else {
      _log.info(
          String.format(
              "Mask(s) found w/ initiators {%s}. "
                  + "MatchingExportMaskURIs {%s}, portNameToInitiators {%s}",
              Joiner.on(",").join(portNames),
              Joiner.on(",").join(matchingExportMaskURIs.values()),
              Joiner.on(",").join(portNameToInitiatorURI.entrySet())));
      // There are some initiators that already exist. We need to create a
      // workflow that creates new masking containers or updates masking
      // containers as necessary.

      // These data structures will be used to track new initiators - ones
      // that don't already exist on the array
      List<URI> initiatorURIsCopy = new ArrayList<URI>();
      initiatorURIsCopy.addAll(initiatorURIs);

      // This loop will determine a list of volumes to update per export mask
      Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes =
          new HashMap<URI, Map<URI, Integer>>();
      Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators =
          new HashMap<URI, Set<Initiator>>();
      for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
        URI initiatorURI = portNameToInitiatorURI.get(entry.getKey());
        Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
        // Keep track of those initiators that have been found to exist already
        // in some export mask on the array
        initiatorURIsCopy.remove(initiatorURI);
        // Get a list of the ExportMasks that were matched to the initiator
        List<URI> exportMaskURIs = new ArrayList<URI>();
        exportMaskURIs.addAll(entry.getValue());
        List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
        _log.info(
            String.format(
                "initiator %s masks {%s}",
                initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
        for (ExportMask mask : masks) {
          // The ExportMask was created outside of ViPR. Set the mask name if it doesn't have one.
          if (null == mask.getMaskName()) {
            String maskName =
                ExportMaskUtils.getMaskName(_dbClient, initiators, exportGroup, storage);
            _log.info("Generated mask name: {}", maskName);
            mask.setMaskName(maskName);
          }

          // Check for NO_VIPR.  If found, avoid this mask.
          if (mask.getMaskName() != null
              && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
            _log.info(
                String.format(
                    "ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it",
                    mask.getMaskName(), ExportUtils.NO_VIPR));
            continue;
          }

          _log.info(
              String.format(
                  "mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
          if (mask.getCreatedBySystem()) {
            _log.info(
                String.format(
                    "initiator %s is in persisted mask %s",
                    initiator.getInitiatorPort(), mask.getMaskName()));

            // We're still OK if the mask contains ONLY initiators that can be found
            // in our export group, because we would simply add to them.
            if (mask.getInitiators() != null) {
              for (String existingMaskInitiatorStr : mask.getInitiators()) {

                // Now look at it from a different angle: which of our export group initiators
                // are NOT in the current mask? If such an initiator belongs to the same host as
                // an existing one, we should add it to this mask.
                Iterator<URI> initiatorIter = initiatorURIsCopy.iterator();
                while (initiatorIter.hasNext()) {
                  Initiator initiatorCopy =
                      _dbClient.queryObject(Initiator.class, initiatorIter.next());

                  if (initiatorCopy != null
                      && initiatorCopy.getId() != null
                      && !mask.hasInitiator(initiatorCopy.getId().toString())) {
                    Initiator existingMaskInitiator =
                        _dbClient.queryObject(
                            Initiator.class, URI.create(existingMaskInitiatorStr));
                    if (existingMaskInitiator != null
                        && initiatorCopy.getHost() != null
                        && initiatorCopy.getHost().equals(existingMaskInitiator.getHost())) {
                      // Add to the list of initiators we need to add to this mask
                      Set<Initiator> existingMaskInitiators =
                          existingMasksToUpdateWithNewInitiators.get(mask.getId());
                      if (existingMaskInitiators == null) {
                        existingMaskInitiators = new HashSet<Initiator>();
                        existingMasksToUpdateWithNewInitiators.put(
                            mask.getId(), existingMaskInitiators);
                      }
                      existingMaskInitiators.add(initiatorCopy);
                      // Remove this initiator from the list of initiators we'll make a new mask from.
                      initiatorIter.remove();
                    }
                  }
                }
              }
            }
          } else {
            // Insert this initiator into the mask's list of initiators managed by the system.
            // This will get persisted below.
            mask.addInitiator(initiator);
            if (!NullColumnValueGetter.isNullURI(initiator.getHost())) {
              hostToExistingExportMaskMap.put(initiator.getHost(), mask.getId());
            }
          }

          // We need to see if the volume also exists the mask,
          // if it doesn't then we'll add it to the list of volumes to add.
          for (URI boURI : volumeURIs) {
            BlockObject bo = BlockObject.fetch(_dbClient, boURI);
            if (!mask.hasExistingVolume(bo)) {
              _log.info(
                  String.format(
                      "volume %s is not in mask %s", bo.getNativeGuid(), mask.getMaskName()));
              // The volume doesn't exist, so we have to add it to
              // the masking container.
              Map<URI, Integer> newVolumes = existingMasksToUpdateWithNewVolumes.get(mask.getId());
              if (newVolumes == null) {
                newVolumes = new HashMap<URI, Integer>();
                existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumes);
              }
              // Check if the requested HLU for the volume is
              // already taken by a pre-existing volume.
              Integer requestedHLU = volumeMap.get(bo.getId());
              StringMap existingVolumesInMask = mask.getExistingVolumes();
              if (existingVolumesInMask != null
                  && existingVolumesInMask.containsValue(requestedHLU.toString())) {
                ExportOrchestrationTask completer =
                    new ExportOrchestrationTask(exportGroup.getId(), token);
                ServiceError serviceError =
                    DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(
                        boURI.toString(), requestedHLU.toString());
                completer.error(_dbClient, serviceError);
                return false;
              }
              newVolumes.put(bo.getId(), requestedHLU);
              mask.addToUserCreatedVolumes(bo);
            }
          }

          // Update the list of volumes and initiators for the mask
          Map<URI, Integer> volumeMapForExistingMask =
              existingMasksToUpdateWithNewVolumes.get(mask.getId());
          if (volumeMapForExistingMask != null && !volumeMapForExistingMask.isEmpty()) {
            mask.addVolumes(volumeMapForExistingMask);
          }

          Set<Initiator> initiatorSetForExistingMask =
              existingMasksToUpdateWithNewInitiators.get(mask.getId());
          if (initiatorSetForExistingMask != null && !initiatorSetForExistingMask.isEmpty()) {
            mask.addInitiators(initiatorSetForExistingMask);
          }

          updateZoningMap(exportGroup, mask);
          _dbClient.updateAndReindexObject(mask);
          // TODO: All export group modifications should be moved to completers
          exportGroup.addExportMask(mask.getId());
          _dbClient.updateAndReindexObject(exportGroup);
        }
      }

      // The initiatorURIsCopy was used in the foreach initiator loop to see
      // which initiators already exist in a mask. If it is non-empty,
      // then it means there are initiators that are new,
      // so let's add them to the main tracker
      Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
      if (!initiatorURIsCopy.isEmpty()) {
        for (URI newExportMaskInitiator : initiatorURIsCopy) {

          Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
          List<URI> initiatorSet = hostInitiatorMap.get(initiator.getHost());
          if (initiatorSet == null) {
            initiatorSet = new ArrayList<URI>();
            hostInitiatorMap.put(initiator.getHost(), initiatorSet);
          }
          initiatorSet.add(initiator.getId());

          _log.info(
              String.format(
                  "host = %s, "
                      + "initiators to add: %d, "
                      + "existingMasksToUpdateWithNewVolumes.size = %d",
                  initiator.getHost(),
                  hostInitiatorMap.get(initiator.getHost()).size(),
                  existingMasksToUpdateWithNewVolumes.size()));
        }
      }

      _log.info(
          String.format(
              "existingMasksToUpdateWithNewVolumes.size = %d",
              existingMasksToUpdateWithNewVolumes.size()));

      // At this point we have the necessary data structures populated to
      // determine the workflow steps. We are going to create new masks
      // and/or add volumes to existing masks.
      if (!hostInitiatorMap.isEmpty()) {
        for (URI hostID : hostInitiatorMap.keySet()) {
          // Check if there is an existing mask (created outside of ViPR) for
          // the host. If there is, we will need to add the initiators
          // associated with that host to the list.
          if (hostToExistingExportMaskMap.containsKey(hostID)) {
            URI existingExportMaskURI = hostToExistingExportMaskMap.get(hostID);
            Set<Initiator> toAddInits = new HashSet<Initiator>();
            List<URI> hostInitaitorList = hostInitiatorMap.get(hostID);
            for (URI initURI : hostInitaitorList) {
              Initiator initiator = _dbClient.queryObject(Initiator.class, initURI);
              if (!initiator.getInactive()) {
                toAddInits.add(initiator);
              }
            }
            _log.info(
                String.format(
                    "Need to add new initiators to existing mask %s, %s",
                    existingExportMaskURI.toString(), Joiner.on(',').join(hostInitaitorList)));
            existingMasksToUpdateWithNewInitiators.put(existingExportMaskURI, toAddInits);
            continue;
          }
          // We have some brand new initiators, let's add them to new masks
          _log.info(
              String.format(
                  "new export masks %s", Joiner.on(",").join(hostInitiatorMap.get(hostID))));

          generateExportMaskCreateWorkflow(
              workflow,
              previousStep,
              storage,
              exportGroup,
              hostInitiatorMap.get(hostID),
              volumeMap,
              token);
        }
      }

      Map<URI, String> stepMap = new HashMap<URI, String>();
      for (Map.Entry<URI, Map<URI, Integer>> entry :
          existingMasksToUpdateWithNewVolumes.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
        Map<URI, Integer> volumesToAdd = entry.getValue();
        _log.info(
            String.format(
                "adding these volumes %s to mask %s",
                Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
        stepMap.put(
            entry.getKey(),
            generateExportMaskAddVolumesWorkflow(
                workflow, null, storage, exportGroup, mask, volumesToAdd));
      }

      for (Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
        Set<Initiator> initiatorsToAdd = entry.getValue();
        List<URI> initiatorsURIs = new ArrayList<URI>();
        for (Initiator initiator : initiatorsToAdd) {
          initiatorsURIs.add(initiator.getId());
        }
        _log.info(
            String.format(
                "adding these initiators %s to mask %s",
                Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
        previousStep =
            stepMap.get(entry.getKey()) == null ? previousStep : stepMap.get(entry.getKey());
        generateExportMaskAddInitiatorsWorkflow(
            workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, null, token);
      }
    }
    return true;
  }
  @Override
  public void deleteGroupMirrors(
      StorageSystem storage, List<URI> mirrorList, TaskCompleter taskCompleter)
      throws DeviceControllerException {
    _log.info("deleteGroupMirrors operation START");
    if (!((storage.getUsingSmis80() && storage.deviceIsType(Type.vmax))
        || storage.deviceIsType(Type.vnxblock))) {
      throw DeviceControllerException.exceptions.blockDeviceOperationNotSupported();
    }

    try {
      String[] deviceIds = null;
      BlockMirror firstMirror = _dbClient.queryObject(BlockMirror.class, mirrorList.get(0));
      String repGroupName = firstMirror.getReplicationGroupInstance();
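      // If the mirrors belong to a replication group, capture their device IDs from the group
      // before the group itself is deleted.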
      if (NullColumnValueGetter.isNotNullValue(repGroupName)) {
        CIMObjectPath repGroupPath = _cimPath.getReplicationGroupPath(storage, repGroupName);
        Set<String> deviceIdsSet =
            _helper.getVolumeDeviceIdsFromStorageGroup(storage, repGroupPath);
        deviceIds = deviceIdsSet.toArray(new String[deviceIdsSet.size()]);

        // Delete replication group
        ReplicationUtils.deleteReplicationGroup(
            storage, repGroupName, _dbClient, _helper, _cimPath);
        // Set mirrors replication group to null
        List<BlockMirror> mirrors = _dbClient.queryObject(BlockMirror.class, mirrorList);
        for (BlockMirror mirror : mirrors) {
          mirror.setConsistencyGroup(NullColumnValueGetter.getNullURI());
          mirror.setReplicationGroupInstance(NullColumnValueGetter.getNullStr());
        }

        _dbClient.persistObject(mirrors);
      } else {
        deviceIds = _helper.getBlockObjectNativeIds(mirrorList);
      }

      if (storage.checkIfVmax3()) {
        for (String deviceId : deviceIds) {
          _helper.removeVolumeFromParkingSLOStorageGroup(storage, deviceId, false);
          _log.info("Done invoking remove volume {} from parking SLO storage group", deviceId);
        }
      }

      CIMObjectPath[] mirrorPaths = _cimPath.getVolumePaths(storage, deviceIds);
      CIMObjectPath configSvcPath = _cimPath.getConfigSvcPath(storage);
      CIMArgument[] inArgs = null;
      if (storage.deviceIsType(Type.vnxblock)) {
        inArgs = _helper.getReturnElementsToStoragePoolArguments(mirrorPaths);
      } else {
        inArgs =
            _helper.getReturnElementsToStoragePoolArguments(
                mirrorPaths, SmisConstants.CONTINUE_ON_NONEXISTENT_ELEMENT);
      }
      CIMArgument[] outArgs = new CIMArgument[5];
      _helper.invokeMethod(
          storage, configSvcPath, SmisConstants.RETURN_ELEMENTS_TO_STORAGE_POOL, inArgs, outArgs);
      CIMObjectPath job = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
      ControllerServiceImpl.enqueueJob(
          new QueueJob(new SmisBlockDeleteCGMirrorJob(job, storage.getId(), taskCompleter)));
    } catch (Exception e) {
      _log.error("Problem making SMI-S call: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
      taskCompleter.error(_dbClient, serviceError);
    }
  }
  /**
   * Prepares the VPLEX volume copies.
   *
   * @param name The base name for the volume.
   * @param copyCount The total number of copies.
   * @param copyIndex The index for this copy.
   * @param size The size for the HA volume.
   * @param srcVPlexVolume The VPLEX volume being copied.
   * @param srcProject The project for the VPLEX volume being copied.
   * @param srcVarray The virtual array for the VPLEX volume being copied.
   * @param srcVpool The virtual pool for the VPLEX volume being copied.
   * @param srcSystemURI The VPLEX system URI.
   * @param primaryVolume The primary volume for the copy.
   * @param haVolume The HA volume for the copy, or null.
   * @param taskId The task identifier.
   * @param volumeDescriptors The list of descriptors.
   * @return A reference to the prepared VPLEX volume copy.
   */
  private Volume prepareFullCopyVPlexVolume(
      String name,
      int copyCount,
      int copyIndex,
      long size,
      Volume srcVPlexVolume,
      Project srcProject,
      VirtualArray srcVarray,
      VirtualPool srcVpool,
      URI srcSystemURI,
      Volume primaryVolume,
      Volume haVolume,
      String taskId,
      List<VolumeDescriptor> volumeDescriptors) {

    // Determine the VPLEX volume copy name.
    StringBuilder nameBuilder = new StringBuilder(name);
    if (copyCount > 1) {
      nameBuilder.append("-");
      nameBuilder.append(copyIndex + 1);
    }

    // Prepare the VPLEX volume copy.
    Volume vplexCopyVolume =
        VPlexBlockServiceApiImpl.prepareVolumeForRequest(
            size,
            srcProject,
            srcVarray,
            srcVpool,
            srcSystemURI,
            NullColumnValueGetter.getNullURI(),
            nameBuilder.toString(),
            ResourceOperationTypeEnum.CREATE_VOLUME_FULL_COPY,
            taskId,
            _dbClient);

    // Create a volume descriptor and add it to the passed list.
    VolumeDescriptor vplexCopyVolumeDescr =
        new VolumeDescriptor(
            VolumeDescriptor.Type.VPLEX_VIRT_VOLUME,
            srcSystemURI,
            vplexCopyVolume.getId(),
            null,
            null);
    volumeDescriptors.add(vplexCopyVolumeDescr);

    // Set the associated volumes for this new VPLEX volume copy to
    // the copy of the backend primary and the newly prepared HA
    // volume if the VPLEX volume being copied is distributed.
    vplexCopyVolume.setAssociatedVolumes(new StringSet());
    StringSet assocVolumes = vplexCopyVolume.getAssociatedVolumes();
    assocVolumes.add(primaryVolume.getId().toString());
    if (haVolume != null) {
      assocVolumes.add(haVolume.getId().toString());
    }

    // Set the VPLEX source volume for the copy.
    vplexCopyVolume.setAssociatedSourceVolume(srcVPlexVolume.getId());

    // Copies are always created active.
    vplexCopyVolume.setSyncActive(Boolean.TRUE);

    // Persist the copy.
    _dbClient.persistObject(vplexCopyVolume);

    return vplexCopyVolume;
  }