/**
 * Detaches volumes from initiators.
 *
 * @param storage the storage
 * @param volumes the volumes
 * @param initiators the initiators
 * @throws Exception the exception
 */
private void detachVolumesFromInitiators(
    StorageSystem storage, List<Volume> volumes, List<Initiator> initiators) throws Exception {
  CinderEndPointInfo ep =
      CinderUtils.getCinderEndPoint(storage.getActiveProviderURI(), dbClient);
  log.debug(
      "Getting the cinder API for the provider with id {}", storage.getActiveProviderURI());
  CinderApi cinderApi = cinderApiFactory.getApi(storage.getActiveProviderURI(), ep);

  List<Initiator> iSCSIInitiators = new ArrayList<Initiator>();
  List<Initiator> fcInitiators = new ArrayList<Initiator>();
  splitInitiatorsByProtocol(initiators, iSCSIInitiators, fcInitiators);
  String host = getHostNameFromInitiators(initiators);

  Map<String, String[]> mapSettingVsValues = getFCInitiatorsArray(fcInitiators);
  String[] fcInitiatorsWwpns = mapSettingVsValues.get(WWPNS);
  String[] fcInitiatorsWwnns = mapSettingVsValues.get(WWNNS);

  for (Volume volume : volumes) {
    // cinder generated volume ID
    String volumeId = volume.getNativeId();

    // for iSCSI
    for (Initiator initiator : iSCSIInitiators) {
      String initiatorPort = initiator.getInitiatorPort();
      log.debug(
          String.format(
              "Detaching volume %s ( %s ) from initiator %s on Openstack cinder node",
              volumeId, volume.getId(), initiatorPort));
      cinderApi.detachVolume(volumeId, initiatorPort, null, null, host);

      // TODO : Do not use Job to poll status till we figure out how
      // to get detach status.
      /*
       * CinderJob detachJob = new CinderDetachVolumeJob(volumeId,
       * volume.getLabel(), storage.getId(),
       * CinderConstants.ComponentType.volume.name(), ep,
       * taskCompleter); ControllerServiceImpl.enqueueJob(new
       * QueueJob(detachJob));
       */
    }

    // for FC
    if (fcInitiatorsWwpns.length > 0) {
      log.debug(
          String.format(
              "Detaching volume %s ( %s ) from initiators %s on Openstack cinder node",
              volumeId, volume.getId(), Arrays.toString(fcInitiatorsWwpns)));
      cinderApi.detachVolume(volumeId, null, fcInitiatorsWwpns, fcInitiatorsWwnns, host);
    }

    // If ITLs are added, remove them
    removeITLsFromVolume(volume);
  }
}
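A minimal, self-contained sketch of the two-bucket protocol split that splitInitiatorsByProtocol performs above. The SimpleInitiator type and the "iSCSI"/"FC" protocol literals are illustrative assumptions, not the ViPR Initiator API.

import java.util.List;

class InitiatorSplitSketch {

  static final class SimpleInitiator {
    final String port;
    final String protocol;

    SimpleInitiator(String port, String protocol) {
      this.port = port;
      this.protocol = protocol;
    }
  }

  // Two-bucket split mirroring splitInitiatorsByProtocol(initiators, iSCSI, fc).
  static void splitByProtocol(
      List<SimpleInitiator> all, List<SimpleInitiator> iscsi, List<SimpleInitiator> fc) {
    for (SimpleInitiator initiator : all) {
      if ("iSCSI".equalsIgnoreCase(initiator.protocol)) {
        iscsi.add(initiator);
      } else if ("FC".equalsIgnoreCase(initiator.protocol)) {
        fc.add(initiator);
      }
      // Initiators with any other protocol fall out of both buckets.
    }
  }
}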
/**
 * Creates the BlockObject BlockSnapshot data.
 *
 * @param name base label for the snapshots
 * @param numSnapshots number of snapshots to create
 * @throws Exception
 */
private void prepareBlockSnapshotData(String name, int numSnapshots) throws Exception {
  // Create the volume for the snapshots
  Volume volume = new Volume();
  URI volumeURI = URIUtil.createId(Volume.class);
  StorageSystem storageSystem = createStorageSystem(false);
  volume.setId(volumeURI);
  volume.setStorageController(storageSystem.getId());
  String volName = "blockSnapshotVolume";
  volume.setLabel(volName);
  BlockConsistencyGroup cg =
      createBlockConsistencyGroup(
          "blockSnapshotConsistencyGroup", storageSystem.getId(), Types.LOCAL.name(), true);
  volume.setConsistencyGroup(cg.getId());
  _dbClient.createObject(volume);

  for (int i = 1; i <= numSnapshots; i++) {
    BlockSnapshot blockSnapshot = new BlockSnapshot();
    URI blockSnapshotURI = URIUtil.createId(BlockSnapshot.class);
    blockSnapshotURIs.add(blockSnapshotURI);
    blockSnapshot.setId(blockSnapshotURI);
    blockSnapshot.setLabel(name + i);
    blockSnapshot.setSnapsetLabel(name + i);
    blockSnapshot.setParent(new NamedURI(volume.getId(), name + i));
    blockSnapshot.addConsistencyGroup(cg.getId().toString());
    _dbClient.createObject(blockSnapshot);
  }
}
@Override
protected void complete(DbClient dbClient, Operation.Status status, ServiceCoded coded)
    throws DeviceControllerException {
  log.info("START ApplicationCompleter complete");

  super.setStatus(dbClient, status, coded);
  updateWorkflowStatus(status, coded);

  if (addVolumes != null) {
    for (URI voluri : addVolumes) {
      Volume volume = getVolume(voluri, dbClient);
      switch (status) {
        case error:
          setErrorOnDataObject(dbClient, Volume.class, volume.getId(), coded);
          break;
        default:
          setReadyOnDataObject(dbClient, Volume.class, volume.getId());
          addApplicationToVolume(volume, dbClient);
      }
    }
  }

  if (removeVolumes != null) {
    for (URI voluri : removeVolumes) {
      Volume volume = getVolume(voluri, dbClient);
      switch (status) {
        case error:
          setErrorOnDataObject(dbClient, Volume.class, volume.getId(), coded);
          break;
        default:
          setReadyOnDataObject(dbClient, Volume.class, volume.getId());
          removeApplicationFromVolume(volume.getId(), dbClient);
      }
    }
  }

  if (consistencyGroups != null && !consistencyGroups.isEmpty()) {
    for (URI cguri : consistencyGroups) {
      switch (status) {
        case error:
          setErrorOnDataObject(dbClient, BlockConsistencyGroup.class, cguri, coded);
          break;
        default:
          setReadyOnDataObject(dbClient, BlockConsistencyGroup.class, cguri);
      }
    }
  }

  log.info("END ApplicationCompleter complete");
}
/**
 * Convenience method that adds volumes to a protection set.
 *
 * @param protectionSetURI the protection set to update
 * @param volumes the volumes to add
 */
private void addVolumesToProtectionSet(URI protectionSetURI, List<Volume> volumes) {
  ProtectionSet protectionSet = _dbClient.queryObject(ProtectionSet.class, protectionSetURI);
  StringSet vols = new StringSet();
  for (Volume volume : volumes) {
    vols.add(volume.getId().toString());
  }
  // Note: setVolumes replaces any volume ids already recorded on the protection set.
  protectionSet.setVolumes(vols);
  _dbClient.persistObject(protectionSet);
}
/**
 * Places and prepares the primary copy volumes when copying a VPLEX virtual volume.
 *
 * @param name The base name for the volume.
 * @param copyCount The number of copies to be made.
 * @param srcPrimaryVolume The primary volume of the VPLEX volume being copied.
 * @param srcCapabilities The capabilities of the primary volume.
 * @param volumeDescriptors The list of descriptors.
 * @return A list of the prepared primary volumes for the VPLEX volume copy.
 */
private List<Volume> prepareFullCopyPrimaryVolumes(
    String name,
    int copyCount,
    Volume srcPrimaryVolume,
    VirtualPoolCapabilityValuesWrapper srcCapabilities,
    List<VolumeDescriptor> volumeDescriptors) {
  List<Volume> copyPrimaryVolumes = new ArrayList<Volume>();

  // Get the placement recommendations for the primary volume copies.
  // Use the same method as is done for native volume copy.
  VirtualArray vArray =
      _dbClient.queryObject(VirtualArray.class, srcPrimaryVolume.getVirtualArray());
  VirtualPool vPool = _dbClient.queryObject(VirtualPool.class, srcPrimaryVolume.getVirtualPool());
  List<VolumeRecommendation> recommendations =
      ((VPlexScheduler) _scheduler)
          .getBlockScheduler()
          .getRecommendationsForVolumeClones(vArray, vPool, srcPrimaryVolume, srcCapabilities);
  if (recommendations.isEmpty()) {
    throw APIException.badRequests.noStorageForPrimaryVolumesForVplexVolumeCopies();
  }

  // Prepare the copy volumes for each recommendation. Again,
  // use the same manner as is done for native volume copy.
  StringBuilder nameBuilder = new StringBuilder(name);
  nameBuilder.append("-0");
  int copyIndex = (copyCount > 1) ? 1 : 0;
  for (VolumeRecommendation recommendation : recommendations) {
    Volume volume =
        StorageScheduler.prepareFullCopyVolume(
            _dbClient,
            nameBuilder.toString(),
            srcPrimaryVolume,
            recommendation,
            copyIndex++,
            srcCapabilities);
    volume.addInternalFlags(Flag.INTERNAL_OBJECT);
    _dbClient.persistObject(volume);
    copyPrimaryVolumes.add(volume);

    // Create the volume descriptor and add it to the passed list.
    VolumeDescriptor volumeDescriptor =
        new VolumeDescriptor(
            VolumeDescriptor.Type.VPLEX_IMPORT_VOLUME,
            volume.getStorageController(),
            volume.getId(),
            volume.getPool(),
            srcCapabilities);
    volumeDescriptors.add(volumeDescriptor);
  }
  return copyPrimaryVolumes;
}
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExpandAsMetaVolume(com.emc.storageos.db.client.model.StorageSystem,
 * com.emc.storageos.db.client.model.StoragePool, com.emc.storageos.db.client.model.Volume, long,
 * com.emc.storageos.volumecontroller.impl.smis.MetaVolumeRecommendation, com.emc.storageos.volumecontroller.TaskCompleter)
 */
@Override
public void doExpandAsMetaVolume(
    StorageSystem storageSystem,
    StoragePool storagePool,
    Volume metaHead,
    long size,
    MetaVolumeRecommendation recommendation,
    VolumeExpandCompleter volumeCompleter)
    throws DeviceControllerException {
  StringBuilder logMsgBuilder =
      new StringBuilder(
          String.format(
              "Expand Meta Volume Start - Array:%s, Pool:%s %n Volume: %s, id: %s",
              storageSystem.getSerialNumber(),
              storagePool.getNativeId(),
              metaHead.getLabel(),
              metaHead.getId()));
  log.info(logMsgBuilder.toString());

  long metaMemberCapacity = recommendation.getMetaMemberSize();
  int metaMemberCount = (int) recommendation.getMetaMemberCount();
  MetaVolumeTaskCompleter metaVolumeTaskCompleter = new MetaVolumeTaskCompleter(volumeCompleter);
  try {
    // Step 1: create meta members.
    List<String> newMetaMembers =
        metaVolumeOperations.createMetaVolumeMembers(
            storageSystem,
            storagePool,
            metaHead,
            metaMemberCount,
            metaMemberCapacity,
            metaVolumeTaskCompleter);
    log.info("ldevMetaMembers created successfully: {}", newMetaMembers);

    // Step 2: expand the meta head with the new members, but only if
    // member creation succeeded.
    if (metaVolumeTaskCompleter.getLastStepStatus() == Job.JobStatus.SUCCESS) {
      metaVolumeOperations.expandMetaVolume(
          storageSystem, storagePool, metaHead, newMetaMembers, metaVolumeTaskCompleter);
    } else {
      ServiceError serviceError =
          DeviceControllerErrors.hds.jobFailed("LDEV Meta Member creation failed");
      volumeCompleter.error(dbClient, serviceError);
    }
  } catch (final InternalException e) {
    log.error("Problem in doExpandAsMetaVolume: ", e);
    volumeCompleter.error(dbClient, e);
  } catch (final Exception e) {
    log.error("Problem in doExpandAsMetaVolume: ", e);
    ServiceError serviceError =
        DeviceControllerErrors.hds.methodFailed("doExpandAsMetaVolume", e.getMessage());
    volumeCompleter.error(dbClient, serviceError);
  }
}
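The member count and member size consumed above come from a MetaVolumeRecommendation. As a hedged illustration of the arithmetic such a recommendation typically encodes, the sketch below computes how many fixed-size members are needed to reach a requested capacity; the names are illustrative, not the ViPR API.

class MetaExpandMath {
  // How many extra members of a fixed member size are needed to grow a
  // meta head from currentCapacity to requestedCapacity (all in bytes).
  static long membersNeeded(long currentCapacity, long requestedCapacity, long memberSize) {
    long shortfall = requestedCapacity - currentCapacity;
    if (shortfall <= 0) {
      return 0; // already large enough
    }
    // Round up so the expanded volume is at least the requested size.
    return (shortfall + memberSize - 1) / memberSize;
  }
}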
/**
 * Creates snapshot objects.
 *
 * @param numSnapshots number of snapshots to create
 * @param volume parent volume of the snapshots
 * @param cg consistency group the snapshots belong to
 * @param ps protection set the snapshots belong to
 * @param name snapshot name
 */
public void addSnapshots(
    int numSnapshots, Volume volume, BlockConsistencyGroup cg, ProtectionSet ps, String name) {
  for (int i = 1; i <= numSnapshots; i++) {
    BlockSnapshot blockSnapshot = new BlockSnapshot();
    URI blockSnapshotURI = URIUtil.createId(BlockSnapshot.class);
    blockSnapshotURIs.add(blockSnapshotURI);
    blockSnapshot.setId(blockSnapshotURI);
    blockSnapshot.setLabel(name + i);
    blockSnapshot.setSnapsetLabel(name + i);
    blockSnapshot.setParent(new NamedURI(volume.getId(), name + i));
    blockSnapshot.addConsistencyGroup(cg.getId().toString());
    blockSnapshot.setProtectionSet(ps.getId());
    _dbClient.createObject(blockSnapshot);
  }
}
private void updateStorageVolume(
    CIMInstance volumeInstance, Volume storageVolume, Map<String, Object> keyMap)
    throws IOException {
  storageVolume.setAllocatedCapacity(
      Long.parseLong(getCIMPropertyValue(volumeInstance, EMC_ALLOCATED_CAPACITY)));
  storageVolume.setProvisionedCapacity(returnProvisionedCapacity(volumeInstance, keyMap));

  // If a meta volume was ingested prior to the upgrade to 2.2, it won't have
  // 'isComposite' set. Check the CIM instance here to see if the volume is a
  // meta volume and, if so, set the flag on the volume instance.
  if (isComposite(volumeInstance) && !storageVolume.getIsComposite()) {
    storageVolume.setIsComposite(true);
    _logger.info("Set volume {} to composite (meta volume)", storageVolume.getId());
  }
  _updateVolumes.add(storageVolume);
}
/** Create RP BlockConsistencyGroup objects for each ProtectionSet. */
private void createRpBlockConsistencyGroups() {
  DbClient dbClient = this.getDbClient();
  List<URI> protectionSetURIs = dbClient.queryByType(ProtectionSet.class, false);
  Iterator<ProtectionSet> protectionSets =
      dbClient.queryIterativeObjects(ProtectionSet.class, protectionSetURIs);

  while (protectionSets.hasNext()) {
    ProtectionSet ps = protectionSets.next();
    Project project = dbClient.queryObject(Project.class, ps.getProject());

    BlockConsistencyGroup cg = new BlockConsistencyGroup();
    cg.setId(URIUtil.createId(BlockConsistencyGroup.class));
    cg.setLabel(ps.getLabel());
    cg.setDeviceName(ps.getLabel());
    cg.setType(BlockConsistencyGroup.Types.RP.toString());
    cg.setProject(new NamedURI(project.getId(), ps.getLabel()));
    cg.setTenant(new NamedURI(project.getTenantOrg().getURI(), ps.getLabel()));
    dbClient.createObject(cg);
    log.debug(
        "Created ConsistencyGroup (id={}) based on ProtectionSet (id={})",
        cg.getId().toString(),
        ps.getId().toString());

    // Organize the volumes by replication set
    for (String protectionVolumeID : ps.getVolumes()) {
      URI uri = URI.create(protectionVolumeID);
      Volume protectionVolume = dbClient.queryObject(Volume.class, uri);
      protectionVolume.addConsistencyGroup(cg.getId().toString());
      dbClient.persistObject(protectionVolume);
      log.debug(
          "Volume (id={}) added to ConsistencyGroup (id={})",
          protectionVolume.getId().toString(),
          cg.getId().toString());
    }
  }
}
/** {@inheritDoc} */
@Override
protected void verifyFullCopyRequestCount(BlockObject fcSourceObj, int count) {
  // Verify the requested copy count. You can only
  // have as many as is allowed by the source backend
  // volume.
  Volume fcSourceVolume = (Volume) fcSourceObj;
  Volume srcBackendVolume =
      VPlexUtil.getVPLEXBackendVolume(fcSourceVolume, true, _dbClient, true);

  // Verify if the source backend volume supports full copy
  URI systemURI = fcSourceObj.getStorageController();
  StorageSystem system = _dbClient.queryObject(StorageSystem.class, systemURI);
  int maxCount = Integer.MAX_VALUE;
  if (system != null) {
    maxCount = BlockFullCopyManager.getMaxFullCopiesForSystemType(system.getSystemType());
  }

  // If max count is 0, then the operation is not supported
  if (maxCount == 0) {
    throw APIException.badRequests.fullCopyNotSupportedByBackendSystem(fcSourceVolume.getId());
  }

  BlockFullCopyUtils.validateActiveFullCopyCount(srcBackendVolume, count, _dbClient);
}
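A condensed sketch of the guard pattern above: a zero limit means full copy is unsupported, and otherwise the number of active plus requested copies must stay within the limit. The exception type and the limit parameter are stand-ins for APIException and BlockFullCopyManager.getMaxFullCopiesForSystemType.

class FullCopyLimitSketch {
  // Illustrative stand-in for verifyFullCopyRequestCount's limit check.
  static void verify(int activeFullCopies, int requestedCopies, int maxCount) {
    if (maxCount == 0) {
      // Mirrors fullCopyNotSupportedByBackendSystem above.
      throw new IllegalArgumentException("Full copy not supported by backend system");
    }
    if (activeFullCopies + requestedCopies > maxCount) {
      throw new IllegalArgumentException(
          "Requested " + requestedCopies + " copies; only "
              + (maxCount - activeFullCopies) + " remaining");
    }
  }
}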
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doCreateVolumes(com.emc.storageos.db.client.model.StorageSystem,
 * com.emc.storageos.db.client.model.StoragePool, java.lang.String, java.util.List,
 * com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper, com.emc.storageos.volumecontroller.TaskCompleter)
 */
@Override
public void doCreateVolumes(
    StorageSystem storageSystem,
    StoragePool storagePool,
    String opId,
    List<Volume> volumes,
    VirtualPoolCapabilityValuesWrapper capabilities,
    TaskCompleter taskCompleter)
    throws DeviceControllerException {
  String label = null;
  Long capacity = null;
  boolean isThinVolume = false;
  boolean opCreationFailed = false;
  StringBuilder logMsgBuilder =
      new StringBuilder(
          String.format(
              "Create Volume Start - Array:%s, Pool:%s",
              storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
  for (Volume volume : volumes) {
    logMsgBuilder.append(
        String.format(
            "%nVolume:%s , IsThinlyProvisioned: %s",
            volume.getLabel(), volume.getThinlyProvisioned()));
    if ((label == null) && (volumes.size() == 1)) {
      String tenantName = "";
      try {
        TenantOrg tenant = dbClient.queryObject(TenantOrg.class, volume.getTenant().getURI());
        tenantName = tenant.getLabel();
      } catch (DatabaseException e) {
        log.error("Error looking up TenantOrg object", e);
      }
      label =
          nameGenerator.generate(
              tenantName,
              volume.getLabel(),
              volume.getId().toString(),
              '-',
              HDSConstants.MAX_VOLUME_NAME_LENGTH);
    }
    if (capacity == null) {
      capacity = volume.getCapacity();
    }
    isThinVolume = volume.getThinlyProvisioned();
  }
  log.info(logMsgBuilder.toString());

  try {
    multiVolumeCheckForHitachiModel(volumes, storageSystem);

    HDSApiClient hdsApiClient =
        hdsApiFactory.getClient(
            HDSUtils.getHDSServerManagementServerInfo(storageSystem),
            storageSystem.getSmisUserName(),
            storageSystem.getSmisPassword());
    String systemObjectID = HDSUtils.getSystemObjectID(storageSystem);
    String poolObjectID = HDSUtils.getPoolObjectID(storagePool);
    String asyncTaskMessageId = null;

    // isThinVolume = true creates VirtualVolumes;
    // isThinVolume = false creates LogicalUnits.
    if (isThinVolume) {
      asyncTaskMessageId =
          hdsApiClient.createThinVolumes(
              systemObjectID,
              storagePool.getNativeId(),
              capacity,
              volumes.size(),
              label,
              QUICK_FORMAT_TYPE,
              storageSystem.getModel());
    } else {
      asyncTaskMessageId =
          hdsApiClient.createThickVolumes(
              systemObjectID,
              poolObjectID,
              capacity,
              volumes.size(),
              label,
              null,
              storageSystem.getModel(),
              null);
    }

    if (asyncTaskMessageId != null) {
      HDSJob createHDSJob =
          (volumes.size() > 1)
              ? new HDSCreateMultiVolumeJob(
                  asyncTaskMessageId,
                  volumes.get(0).getStorageController(),
                  storagePool.getId(),
                  volumes.size(),
                  taskCompleter)
              : new HDSCreateVolumeJob(
                  asyncTaskMessageId,
                  volumes.get(0).getStorageController(),
                  storagePool.getId(),
                  taskCompleter);
      ControllerServiceImpl.enqueueJob(new QueueJob(createHDSJob));
    }
  } catch (final InternalException e) {
    log.error("Problem in doCreateVolumes: ", e);
    opCreationFailed = true;
    taskCompleter.error(dbClient, e);
  } catch (final Exception e) {
    log.error("Problem in doCreateVolumes: ", e);
    opCreationFailed = true;
    ServiceError serviceError =
        DeviceControllerErrors.hds.methodFailed("doCreateVolumes", e.getMessage());
    taskCompleter.error(dbClient, serviceError);
  }

  if (opCreationFailed) {
    for (Volume vol : volumes) {
      vol.setInactive(true);
      dbClient.persistObject(vol);
    }
  }

  logMsgBuilder =
      new StringBuilder(
          String.format(
              "Create Volumes End - Array:%s, Pool:%s",
              storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
  for (Volume volume : volumes) {
    logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
  }
  log.info(logMsgBuilder.toString());
}
@Override
public void doModifyVolumes(
    StorageSystem storage,
    StoragePool storagePool,
    String opId,
    List<Volume> volumes,
    TaskCompleter taskCompleter)
    throws DeviceControllerException {
  StringBuilder logMsgBuilder =
      new StringBuilder(
          String.format(
              "Modify Volume Start - Array:%s, Pool:%s",
              storage.getSerialNumber(), storagePool.getNativeGuid()));
  String systemObjectID = HDSUtils.getSystemObjectID(storage);
  for (Volume volume : volumes) {
    try {
      HDSApiClient hdsApiClient =
          hdsApiFactory.getClient(
              HDSUtils.getHDSServerManagementServerInfo(storage),
              storage.getSmisUserName(),
              storage.getSmisPassword());
      logMsgBuilder.append(
          String.format(
              "%nVolume:%s , IsThinlyProvisioned: %s, tieringPolicy: %s",
              volume.getLabel(),
              volume.getThinlyProvisioned(),
              volume.getAutoTieringPolicyUri()));
      LogicalUnit logicalUnit =
          hdsApiClient.getLogicalUnitInfo(
              systemObjectID, HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storage));
      String policyName = ControllerUtils.getAutoTieringPolicyName(volume.getId(), dbClient);
      String autoTierPolicyName = null;
      if (policyName.equals(Constants.NONE)) {
        autoTierPolicyName = null;
      } else {
        autoTierPolicyName =
            HitachiTieringPolicy.getPolicy(
                    policyName.replaceAll(
                        HDSConstants.SLASH_OPERATOR, HDSConstants.UNDERSCORE_OPERATOR))
                .getKey();
      }
      if (null != logicalUnit
          && null != logicalUnit.getLdevList()
          && !logicalUnit.getLdevList().isEmpty()) {
        Iterator<LDEV> ldevItr = logicalUnit.getLdevList().iterator();
        if (ldevItr.hasNext()) {
          LDEV ldev = ldevItr.next();
          String asyncMessageId =
              hdsApiClient.modifyThinVolumeTieringPolicy(
                  systemObjectID,
                  logicalUnit.getObjectID(),
                  ldev.getObjectID(),
                  autoTierPolicyName);
          if (null != asyncMessageId) {
            HDSJob modifyHDSJob =
                new HDSModifyVolumeJob(
                    asyncMessageId,
                    volume.getStorageController(),
                    taskCompleter,
                    HDSModifyVolumeJob.VOLUME_MODIFY_JOB);
            ControllerServiceImpl.enqueueJob(new QueueJob(modifyHDSJob));
          }
        }
      } else {
        String errorMsg = String.format("No LDEVs found for volume: %s", volume.getId());
        log.info(errorMsg);
        ServiceError serviceError =
            DeviceControllerErrors.hds.methodFailed("doModifyVolumes", errorMsg);
        taskCompleter.error(dbClient, serviceError);
      }
    } catch (final InternalException e) {
      log.error("Problem in doModifyVolumes: ", e);
      taskCompleter.error(dbClient, e);
    } catch (final Exception e) {
      log.error("Problem in doModifyVolumes: ", e);
      ServiceError serviceError =
          DeviceControllerErrors.hds.methodFailed("doModifyVolumes", e.getMessage());
      taskCompleter.error(dbClient, serviceError);
    }
  }
}
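Before the HitachiTieringPolicy lookup above, the ViPR policy name is normalized by replacing slashes with underscores. A tiny illustration, assuming HDSConstants.SLASH_OPERATOR is "/" and UNDERSCORE_OPERATOR is "_" (an assumption about those constants):

class PolicyNameSketch {
  // e.g. "Gold/Tier1" -> "Gold_Tier1"; the literal separators are assumed values.
  static String normalize(String policyName) {
    return policyName.replaceAll("/", "_");
  }
}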
/**
 * Prepares the VPLEX volume copies.
 *
 * @param name The base name for the volume.
 * @param copyCount The total number of copies.
 * @param copyIndex The index for this copy.
 * @param size The size for the HA volume.
 * @param srcVPlexVolume The VPLEX volume being copied.
 * @param srcProject The project for the VPLEX volume being copied.
 * @param srcVarray The virtual array for the VPLEX volume being copied.
 * @param srcVpool The virtual pool for the VPLEX volume being copied.
 * @param srcSystemURI The VPLEX system URI.
 * @param primaryVolume The primary volume for the copy.
 * @param haVolume The HA volume for the copy, or null.
 * @param taskId The task identifier.
 * @param volumeDescriptors The list of descriptors.
 * @return A reference to the prepared VPLEX volume copy.
 */
private Volume prepareFullCopyVPlexVolume(
    String name,
    int copyCount,
    int copyIndex,
    long size,
    Volume srcVPlexVolume,
    Project srcProject,
    VirtualArray srcVarray,
    VirtualPool srcVpool,
    URI srcSystemURI,
    Volume primaryVolume,
    Volume haVolume,
    String taskId,
    List<VolumeDescriptor> volumeDescriptors) {
  // Determine the VPLEX volume copy name.
  StringBuilder nameBuilder = new StringBuilder(name);
  if (copyCount > 1) {
    nameBuilder.append("-");
    nameBuilder.append(copyIndex + 1);
  }

  // Prepare the VPLEX volume copy.
  Volume vplexCopyVolume =
      VPlexBlockServiceApiImpl.prepareVolumeForRequest(
          size,
          srcProject,
          srcVarray,
          srcVpool,
          srcSystemURI,
          NullColumnValueGetter.getNullURI(),
          nameBuilder.toString(),
          ResourceOperationTypeEnum.CREATE_VOLUME_FULL_COPY,
          taskId,
          _dbClient);

  // Create a volume descriptor and add it to the passed list.
  VolumeDescriptor vplexCopyVolumeDescr =
      new VolumeDescriptor(
          VolumeDescriptor.Type.VPLEX_VIRT_VOLUME,
          srcSystemURI,
          vplexCopyVolume.getId(),
          null,
          null);
  volumeDescriptors.add(vplexCopyVolumeDescr);

  // Set the associated volumes for this new VPLEX volume copy to
  // the copy of the backend primary and the newly prepared HA
  // volume if the VPLEX volume being copied is distributed.
  vplexCopyVolume.setAssociatedVolumes(new StringSet());
  StringSet assocVolumes = vplexCopyVolume.getAssociatedVolumes();
  assocVolumes.add(primaryVolume.getId().toString());
  if (haVolume != null) {
    assocVolumes.add(haVolume.getId().toString());
  }

  // Set the VPLEX source volume for the copy.
  vplexCopyVolume.setAssociatedSourceVolume(srcVPlexVolume.getId());

  // Copies always created active.
  vplexCopyVolume.setSyncActive(Boolean.TRUE);

  // Persist the copy.
  _dbClient.persistObject(vplexCopyVolume);

  return vplexCopyVolume;
}
/** {@inheritDoc} */
@Override
public TaskList detach(BlockObject fcSourceObj, Volume fullCopyVolume) {
  // If the full copy volume is already detached or was never activated,
  // the detach action completes successfully, as done in the base class.
  // Otherwise, send a detach full copy request to the controller.
  TaskList taskList = new TaskList();
  String taskId = UUID.randomUUID().toString();
  if ((BlockFullCopyUtils.isFullCopyDetached(fullCopyVolume, _dbClient))
      || (BlockFullCopyUtils.isFullCopyInactive(fullCopyVolume, _dbClient))) {
    super.detach(fcSourceObj, fullCopyVolume);
  } else {
    // You cannot create a full copy of a VPLEX snapshot, so
    // the source will be a volume.
    Volume sourceVolume = (Volume) fcSourceObj;

    // If the source is in a CG, then we will detach the corresponding
    // full copies for all the volumes in the CG. Since we did not allow
    // full copies for volumes or snaps in CGs prior to Jedi, there should
    // be a full copy for all volumes in the CG.
    Map<URI, Volume> fullCopyMap = getFullCopySetMap(sourceVolume, fullCopyVolume);
    Set<URI> fullCopyURIs = fullCopyMap.keySet();

    // Get the storage system for the source volume.
    StorageSystem sourceSystem =
        _dbClient.queryObject(StorageSystem.class, sourceVolume.getStorageController());
    URI sourceSystemURI = sourceSystem.getId();

    // Create the detach task on the full copy volumes.
    for (URI fullCopyURI : fullCopyURIs) {
      Operation op =
          _dbClient.createTaskOpStatus(
              Volume.class,
              fullCopyURI,
              taskId,
              ResourceOperationTypeEnum.DETACH_VOLUME_FULL_COPY);
      fullCopyMap.get(fullCopyURI).getOpStatus().put(taskId, op);
      TaskResourceRep fullCopyVolumeTask =
          TaskMapper.toTask(fullCopyMap.get(fullCopyURI), taskId, op);
      taskList.getTaskList().add(fullCopyVolumeTask);
    }

    // Invoke the controller.
    try {
      VPlexController controller =
          getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString());
      controller.detachFullCopy(sourceSystemURI, new ArrayList<URI>(fullCopyURIs), taskId);
    } catch (InternalException ie) {
      s_logger.error("Controller error", ie);

      // Update the status for the VPLEX volume copies and their
      // corresponding tasks.
      for (Volume vplexFullCopy : fullCopyMap.values()) {
        Operation op = vplexFullCopy.getOpStatus().get(taskId);
        if (op != null) {
          op.error(ie);
          vplexFullCopy.getOpStatus().updateTaskStatus(taskId, op);
          _dbClient.persistObject(vplexFullCopy);
          for (TaskResourceRep task : taskList.getTaskList()) {
            if (task.getResource().getId().equals(vplexFullCopy.getId())) {
              task.setState(op.getStatus());
              task.setMessage(op.getMessage());
              break;
            }
          }
        }
      }
    }
  }
  return taskList;
}
/** {@inheritDoc} */
@Override
public TaskList resynchronizeCopy(Volume sourceVolume, Volume fullCopyVolume) {
  // Create the task list.
  TaskList taskList = new TaskList();

  // Create a unique task id.
  String taskId = UUID.randomUUID().toString();

  // If the source is in a CG, then we will resynchronize the corresponding
  // full copies for all the volumes in the CG. Since we did not allow
  // full copies for volumes or snaps in CGs prior to Jedi, there should
  // be a full copy for all volumes in the CG.
  Map<URI, Volume> fullCopyMap = getFullCopySetMap(sourceVolume, fullCopyVolume);
  Set<URI> fullCopyURIs = fullCopyMap.keySet();

  // Get the storage system for the source volume.
  StorageSystem sourceSystem =
      _dbClient.queryObject(StorageSystem.class, sourceVolume.getStorageController());
  URI sourceSystemURI = sourceSystem.getId();

  // Create the resynchronize task on the full copy volumes.
  for (URI fullCopyURI : fullCopyURIs) {
    Operation op =
        _dbClient.createTaskOpStatus(
            Volume.class,
            fullCopyURI,
            taskId,
            ResourceOperationTypeEnum.RESYNCHRONIZE_VOLUME_FULL_COPY);
    fullCopyMap.get(fullCopyURI).getOpStatus().put(taskId, op);
    TaskResourceRep fullCopyVolumeTask =
        TaskMapper.toTask(fullCopyMap.get(fullCopyURI), taskId, op);
    taskList.getTaskList().add(fullCopyVolumeTask);
  }

  // Invoke the controller.
  try {
    VPlexController controller =
        getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString());
    controller.resyncFullCopy(sourceSystemURI, new ArrayList<URI>(fullCopyURIs), taskId);
  } catch (InternalException ie) {
    s_logger.error("Controller error", ie);

    // Update the status for the VPLEX volume copies and their
    // corresponding tasks.
    for (Volume vplexFullCopy : fullCopyMap.values()) {
      Operation op = vplexFullCopy.getOpStatus().get(taskId);
      if (op != null) {
        op.error(ie);
        vplexFullCopy.getOpStatus().updateTaskStatus(taskId, op);
        _dbClient.persistObject(vplexFullCopy);
        for (TaskResourceRep task : taskList.getTaskList()) {
          if (task.getResource().getId().equals(vplexFullCopy.getId())) {
            task.setState(op.getStatus());
            task.setMessage(op.getMessage());
            break;
          }
        }
      }
    }
  }
  return taskList;
}
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.CloneOperations#createSingleClone(
 * com.emc.storageos.db.client.model.StorageSystem, java.net.URI, java.net.URI,
 * java.lang.Boolean,
 * com.emc.storageos.volumecontroller.TaskCompleter)
 */
@Override
public void createSingleClone(
    StorageSystem storageSystem,
    URI sourceObject,
    URI cloneVolume,
    Boolean createInactive,
    TaskCompleter taskCompleter) {
  log.info("START createSingleClone operation");
  boolean isVolumeClone = true;
  try {
    BlockObject sourceObj = BlockObject.fetch(dbClient, sourceObject);
    URI tenantUri = null;

    if (sourceObj instanceof BlockSnapshot) {
      // In case of snapshot, get the tenant from its parent volume
      NamedURI parentVolUri = ((BlockSnapshot) sourceObj).getParent();
      Volume parentVolume = dbClient.queryObject(Volume.class, parentVolUri);
      tenantUri = parentVolume.getTenant().getURI();
      isVolumeClone = false;
    } else {
      // This is the default flow
      tenantUri = ((Volume) sourceObj).getTenant().getURI();
      isVolumeClone = true;
    }

    Volume cloneObj = dbClient.queryObject(Volume.class, cloneVolume);
    StoragePool targetPool = dbClient.queryObject(StoragePool.class, cloneObj.getPool());
    TenantOrg tenantOrg = dbClient.queryObject(TenantOrg.class, tenantUri);
    // String cloneLabel = generateLabel(tenantOrg, cloneObj);

    CinderEndPointInfo ep =
        CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
    log.info(
        "Getting the cinder API for the provider with id "
            + storageSystem.getActiveProviderURI());
    CinderApi cinderApi = cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);

    String volumeId = "";
    if (isVolumeClone) {
      volumeId =
          cinderApi.cloneVolume(
              cloneObj.getLabel(),
              (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
              targetPool.getNativeId(),
              sourceObj.getNativeId());
    } else {
      volumeId =
          cinderApi.createVolumeFromSnapshot(
              cloneObj.getLabel(),
              (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
              targetPool.getNativeId(),
              sourceObj.getNativeId());
    }

    log.debug("Creating volume with the id " + volumeId + " on Openstack cinder node");
    if (volumeId != null) {
      Map<String, URI> volumeIds = new HashMap<String, URI>();
      volumeIds.put(volumeId, cloneObj.getId());
      ControllerServiceImpl.enqueueJob(
          new QueueJob(
              new CinderSingleVolumeCreateJob(
                  volumeId,
                  cloneObj.getLabel(),
                  storageSystem.getId(),
                  CinderConstants.ComponentType.volume.name(),
                  ep,
                  taskCompleter,
                  targetPool.getId(),
                  volumeIds)));
    }
  } catch (InternalException e) {
    String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
    log.error(errorMsg, e);
    taskCompleter.error(dbClient, e);
  } catch (Exception e) {
    String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
    log.error(errorMsg, e);
    ServiceError serviceError =
        DeviceControllerErrors.cinder.operationFailed("createSingleClone", e.getMessage());
    taskCompleter.error(dbClient, serviceError);
  }
}
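The clone size passed to cloneVolume and createVolumeFromSnapshot above converts a byte capacity to gigabytes with integer division, which truncates any partial GiB. A hedged sketch of a rounding-up variant (not the ViPR code):

class CapacitySketch {
  static final long GIB = 1024L * 1024L * 1024L;

  // Rounds up so a capacity of one byte over N GiB yields N + 1 GiB.
  static long toGibRoundedUp(long capacityInBytes) {
    return (capacityInBytes + GIB - 1) / GIB;
  }
}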
/**
 * Update (if it exists) the journal UnManagedVolume objects with RP information needed for
 * ingestion
 *
 * @param unManagedProtectionSet unmanaged protection set
 * @param cg CG response got back from RP system
 * @param rpCopyAccessStateMap Map to hold the access state of the replication sets.
 * @param rpWwnToNativeWwn Map of RP volume WWN to native volume WWN - required for XIO but
 *     harmless otherwise
 * @param storageNativeIdPrefixes List of XIO systems discovered in ViPR
 * @param dbClient DB client instance
 */
private void mapCgJournals(
    UnManagedProtectionSet unManagedProtectionSet,
    GetCGsResponse cg,
    Map<String, String> rpCopyAccessStateMap,
    Map<String, String> rpWwnToNativeWwn,
    List<String> storageNativeIdPrefixes,
    DbClient dbClient) {
  for (GetCopyResponse copy : cg.getCopies()) {
    String accessState = copy.getAccessState();
    for (GetVolumeResponse volume : copy.getJournals()) {
      // Find this volume in UnManagedVolumes based on wwn
      UnManagedVolume unManagedVolume =
          findUnManagedVolumeForWwn(volume.getWwn(), dbClient, storageNativeIdPrefixes);

      // Check if this volume is already managed, which would indicate it has already been
      // partially ingested
      Volume managedVolume =
          DiscoveryUtils.checkManagedVolumeExistsInDBByWwn(dbClient, volume.getWwn());

      // Add the WWN to the unmanaged protection set, regardless of whether this volume is
      // unmanaged or not.
      unManagedProtectionSet.getVolumeWwns().add(volume.getWwn());

      if (null == unManagedVolume && null == managedVolume) {
        log.info(
            "Protection Set {} contains unknown Journal volume: {}. Skipping.",
            unManagedProtectionSet.getNativeGuid(),
            volume.getWwn());
        continue;
      }

      if (null != managedVolume) {
        log.info(
            "Protection Set {} contains volume {} that is already managed",
            unManagedProtectionSet.getNativeGuid(),
            volume.getWwn());
        // make sure it's in the UnManagedProtectionSet's ManagedVolume ids
        if (!unManagedProtectionSet
            .getManagedVolumeIds()
            .contains(managedVolume.getId().toString())) {
          unManagedProtectionSet.getManagedVolumeIds().add(managedVolume.getId().toString());
        }

        if (null != unManagedVolume) {
          log.info(
              "Protection Set {} also has an orphaned UnManagedVolume {} that will be removed",
              unManagedProtectionSet.getNativeGuid(),
              unManagedVolume.getLabel());
          // remove the unManagedVolume from the UnManagedProtectionSet's UnManagedVolume ids
          unManagedProtectionSet
              .getUnManagedVolumeIds()
              .remove(unManagedVolume.getId().toString());
          unManagedVolumesToDelete.add(unManagedVolume);
        }

        // because this volume is already managed, we can just continue to the next
        continue;
      }

      // at this point, we have a legitimate UnManagedVolume whose RP properties should be
      // updated
      log.info("Processing Journal UnManagedVolume {}", unManagedVolume.forDisplay());

      // Capture the access state
      rpCopyAccessStateMap.put(volume.getRpCopyName(), accessState);

      // Add the unmanaged volume to the list (if it's not there already)
      if (!unManagedProtectionSet
          .getUnManagedVolumeIds()
          .contains(unManagedVolume.getId().toString())) {
        unManagedProtectionSet.getUnManagedVolumeIds().add(unManagedVolume.getId().toString());
      }

      updateCommonRPProperties(
          unManagedProtectionSet,
          unManagedVolume,
          Volume.PersonalityTypes.METADATA.name(),
          volume,
          dbClient);

      rpWwnToNativeWwn.put(volume.getWwn(), unManagedVolume.getWwn());
      unManagedVolumesToUpdateByWwn.put(unManagedVolume.getWwn(), unManagedVolume);
    }
  }
}
/**
 * Places and prepares the HA volumes when copying a distributed VPLEX volume.
 *
 * @param name The base name for the volume.
 * @param copyCount The number of copies to be made.
 * @param size The size for the HA volume.
 * @param vplexSystem A reference to the VPLEX storage system.
 * @param vplexSystemProject A reference to the VPLEX system project.
 * @param srcVarray The virtual array for the VPLEX volume being copied.
 * @param srcHAVolume The HA volume of the VPLEX volume being copied.
 * @param taskId The task identifier.
 * @param volumeDescriptors The list of descriptors.
 * @return A list of the prepared HA volumes for the VPLEX volume copy.
 */
private List<Volume> prepareFullCopyHAVolumes(
    String name,
    int copyCount,
    Long size,
    StorageSystem vplexSystem,
    Project vplexSystemProject,
    VirtualArray srcVarray,
    Volume srcHAVolume,
    String taskId,
    List<VolumeDescriptor> volumeDescriptors) {
  List<Volume> copyHAVolumes = new ArrayList<Volume>();

  // Get the storage placement recommendations for the volumes.
  // Placement must occur on the same VPLEX system
  Set<URI> vplexSystemURIS = new HashSet<URI>();
  vplexSystemURIS.add(vplexSystem.getId());
  VirtualArray haVarray =
      _dbClient.queryObject(VirtualArray.class, srcHAVolume.getVirtualArray());
  VirtualPool haVpool = _dbClient.queryObject(VirtualPool.class, srcHAVolume.getVirtualPool());
  VirtualPoolCapabilityValuesWrapper haCapabilities = new VirtualPoolCapabilityValuesWrapper();
  haCapabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, size);
  haCapabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, copyCount);
  VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(srcHAVolume, _dbClient);
  if (VirtualPool.ProvisioningType.Thin.toString()
      .equalsIgnoreCase(vpool.getSupportedProvisioningType())) {
    haCapabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.TRUE);
    // To guarantee that storage pool for a copy has enough physical
    // space to contain current allocated capacity of thin source volume
    haCapabilities.put(
        VirtualPoolCapabilityValuesWrapper.THIN_VOLUME_PRE_ALLOCATE_SIZE,
        BlockFullCopyUtils.getAllocatedCapacityForFullCopySource(srcHAVolume, _dbClient));
  }
  List<Recommendation> recommendations =
      ((VPlexScheduler) _scheduler)
          .scheduleStorageForImport(
              srcVarray, vplexSystemURIS, haVarray, haVpool, haCapabilities);
  if (recommendations.isEmpty()) {
    throw APIException.badRequests.noStorageForHaVolumesForVplexVolumeCopies();
  }

  // Prepare the HA volumes for the VPLEX volume copy.
  int copyIndex = 1;
  for (Recommendation recommendation : recommendations) {
    VPlexRecommendation haRecommendation = (VPlexRecommendation) recommendation;
    for (int i = 0; i < haRecommendation.getResourceCount(); i++) {
      // Determine the name for the HA volume copy.
      StringBuilder nameBuilder = new StringBuilder(name);
      nameBuilder.append("-1");
      if (copyCount > 1) {
        nameBuilder.append("-");
        nameBuilder.append(copyIndex++);
      }

      // Prepare the volume.
      Volume volume =
          VPlexBlockServiceApiImpl.prepareVolumeForRequest(
              size,
              vplexSystemProject,
              haVarray,
              haVpool,
              haRecommendation.getSourceDevice(),
              haRecommendation.getSourcePool(),
              nameBuilder.toString(),
              null,
              taskId,
              _dbClient);
      volume.addInternalFlags(Flag.INTERNAL_OBJECT);
      _dbClient.persistObject(volume);
      copyHAVolumes.add(volume);

      // Create the volume descriptor and add it to the passed list.
      VolumeDescriptor volumeDescriptor =
          new VolumeDescriptor(
              VolumeDescriptor.Type.BLOCK_DATA,
              volume.getStorageController(),
              volume.getId(),
              volume.getPool(),
              haCapabilities);
      volumeDescriptors.add(volumeDescriptor);
    }
  }
  return copyHAVolumes;
}
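The HA-side copy names built above follow a fixed convention: the base name gets a "-1" suffix (the primary side uses "-0" in prepareFullCopyPrimaryVolumes), and a per-copy ordinal is appended only when more than one copy is requested. A small sketch of that convention:

class CopyNameSketch {
  // ("vol", 3, 2) -> "vol-1-2"; ("vol", 1, 1) -> "vol-1".
  static String haCopyName(String base, int copyCount, int copyIndex) {
    StringBuilder nameBuilder = new StringBuilder(base).append("-1");
    if (copyCount > 1) {
      nameBuilder.append("-").append(copyIndex);
    }
    return nameBuilder.toString();
  }
}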
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doCleanupMetaMembers(com.emc.storageos.db.client.model.StorageSystem,
 * com.emc.storageos.db.client.model.Volume,
 * com.emc.storageos.volumecontroller.impl.block.taskcompleter.CleanupMetaVolumeMembersCompleter)
 */
@Override
public void doCleanupMetaMembers(
    StorageSystem storageSystem, Volume volume, CleanupMetaVolumeMembersCompleter cleanupCompleter)
    throws DeviceControllerException {
  // Remove meta member volumes from storage device
  try {
    log.info(
        String.format(
            "doCleanupMetaMembers Start - Array: %s, Volume: %s",
            storageSystem.getSerialNumber(), volume.getLabel()));
    // Load meta volume members from WF data
    String sourceStepId = cleanupCompleter.getSourceStepId();
    HDSApiClient hdsApiClient =
        hdsApiFactory.getClient(
            HDSUtils.getHDSServerManagementServerInfo(storageSystem),
            storageSystem.getUsername(),
            storageSystem.getSmisPassword());
    List<String> metaMembers =
        (ArrayList<String>) WorkflowService.getInstance().loadStepData(sourceStepId);
    if (metaMembers != null && !metaMembers.isEmpty()) {
      log.info(
          String.format(
              "doCleanupMetaMembers: Members stored for meta volume: %n %s", metaMembers));
      // Check if volumes still exist in array and if it is not composite member (already
      // added to the meta volume)
      Set<String> volumeIds = new HashSet<String>();
      for (String logicalUnitObjectId : metaMembers) {
        LogicalUnit logicalUnit =
            hdsApiClient.getLogicalUnitInfo(
                HDSUtils.getSystemObjectID(storageSystem), logicalUnitObjectId);
        if (logicalUnit != null) {
          log.debug(
              "doCleanupMetaMembers: Volume: "
                  + logicalUnitObjectId
                  + ", Usage of volume: "
                  + logicalUnit.getComposite());
          if (logicalUnit.getComposite() != HDSConstants.COMPOSITE_ELEMENT_MEMBER) {
            volumeIds.add(logicalUnitObjectId);
          }
        }
      }
      if (volumeIds.isEmpty()) {
        cleanupCompleter.setSuccess(true);
        log.info("doCleanupMetaMembers: No meta members to cleanup in array.");
      } else {
        log.info(
            String.format(
                "doCleanupMetaMembers: Members to cleanup in array: %n %s", volumeIds));
        // Prepare parameters and call method to delete meta members from array
        HDSCleanupMetaVolumeMembersJob hdsJobCompleter = null;
        // When "cleanup" is separate workflow step, call async (for example rollback
        // step in volume expand)
        // Otherwise, call synchronously (for example when cleanup is part of meta
        // volume create rollback)
        String asyncMessageId =
            hdsApiClient.deleteThickLogicalUnits(
                HDSUtils.getSystemObjectID(storageSystem), volumeIds);
        if (cleanupCompleter.isWFStep()) {
          if (asyncMessageId != null) {
            ControllerServiceImpl.enqueueJob(
                new QueueJob(
                    new HDSCleanupMetaVolumeMembersJob(
                        asyncMessageId,
                        storageSystem.getId(),
                        volume.getId(),
                        cleanupCompleter)));
          }
        } else {
          // invoke synchronously
          hdsJobCompleter =
              new HDSCleanupMetaVolumeMembersJob(
                  asyncMessageId, storageSystem.getId(), volume.getId(), cleanupCompleter);
          ((HDSMetaVolumeOperations) metaVolumeOperations)
              .invokeMethodSynchronously(hdsApiFactory, asyncMessageId, hdsJobCompleter);
        }
      }
    } else {
      log.info(
          "doCleanupMetaMembers: No meta members stored for meta volume. Nothing to cleanup in array.");
      cleanupCompleter.setSuccess(true);
    }
  } catch (Exception e) {
    log.error("Problem in doCleanupMetaMembers: ", e);
    ServiceError error =
        DeviceControllerErrors.smis.methodFailed("doCleanupMetaMembers", e.getMessage());
    cleanupCompleter.setError(error);
    cleanupCompleter.setSuccess(false);
  }
  log.info(
      String.format(
          "doCleanupMetaMembers End - Array: %s, Volume: %s",
          storageSystem.getSerialNumber(), volume.getLabel()));
}
/**
 * Attaches volumes to initiators.
 *
 * @param storage the storage
 * @param volumes the volumes
 * @param initiators the initiators
 * @param volumeToTargetLunMap the volume to target lun map
 * @throws Exception the exception
 */
private void attachVolumesToInitiators(
    StorageSystem storage,
    List<Volume> volumes,
    List<Initiator> initiators,
    Map<URI, Integer> volumeToTargetLunMap,
    Map<Volume, Map<String, List<String>>> volumeToInitiatorTargetMap,
    ExportMask exportMask)
    throws Exception {
  CinderEndPointInfo ep =
      CinderUtils.getCinderEndPoint(storage.getActiveProviderURI(), dbClient);
  log.debug(
      "Getting the cinder API for the provider with id {}", storage.getActiveProviderURI());
  CinderApi cinderApi = cinderApiFactory.getApi(storage.getActiveProviderURI(), ep);

  List<Initiator> iSCSIInitiators = new ArrayList<Initiator>();
  List<Initiator> fcInitiators = new ArrayList<Initiator>();
  splitInitiatorsByProtocol(initiators, iSCSIInitiators, fcInitiators);
  String host = getHostNameFromInitiators(initiators);

  Map<String, String[]> mapSettingVsValues = getFCInitiatorsArray(fcInitiators);
  String[] fcInitiatorsWwpns = mapSettingVsValues.get(WWPNS);
  String[] fcInitiatorsWwnns = mapSettingVsValues.get(WWNNS);

  for (Volume volume : volumes) {
    // cinder generated volume ID
    String volumeId = volume.getNativeId();
    int targetLunId = -1;
    VolumeAttachResponse attachResponse = null;

    // for iSCSI
    for (Initiator initiator : iSCSIInitiators) {
      String initiatorPort = initiator.getInitiatorPort();
      log.debug(
          String.format(
              "Attaching volume %s ( %s ) to initiator %s on Openstack cinder node",
              volumeId, volume.getId(), initiatorPort));
      attachResponse = cinderApi.attachVolume(volumeId, initiatorPort, null, null, host);
      log.info("Got response : {}", attachResponse.connection_info.toString());
      targetLunId = attachResponse.connection_info.data.target_lun;
    }

    // for FC
    if (fcInitiatorsWwpns.length > 0) {
      log.debug(
          String.format(
              "Attaching volume %s ( %s ) to initiators %s on Openstack cinder node",
              volumeId, volume.getId(), Arrays.toString(fcInitiatorsWwpns)));
      attachResponse =
          cinderApi.attachVolume(volumeId, null, fcInitiatorsWwpns, fcInitiatorsWwnns, host);
      log.info("Got response : {}", attachResponse.connection_info.toString());
      targetLunId = attachResponse.connection_info.data.target_lun;

      Map<String, List<String>> initTargetMap =
          attachResponse.connection_info.data.initiator_target_map;
      if (null != initTargetMap && !initTargetMap.isEmpty()) {
        volumeToInitiatorTargetMap.put(
            volume, attachResponse.connection_info.data.initiator_target_map);
      }
    }

    volumeToTargetLunMap.put(volume.getId(), targetLunId);

    // After the successful export, create or modify the storage ports
    CinderStoragePortOperations storagePortOperationsInstance =
        CinderStoragePortOperations.getInstance(storage, dbClient);
    storagePortOperationsInstance.invoke(attachResponse);
  }

  // Add ITLs to volume objects
  storeITLMappingInVolume(volumeToTargetLunMap, exportMask);
}
/**
 * Updates the initiator to target list map in the export mask.
 *
 * @param storage the storage system
 * @param volume a volume in the export, used to determine the varray and vpool
 * @param volumeToInitiatorTargetMapFromAttachResponse per-volume initiator-to-target map from
 *     the attach response
 * @param fcInitiatorList the FC initiators of the export
 * @param exportMask the export mask to update
 * @throws Exception
 */
private void updateTargetsInExportMask(
    StorageSystem storage,
    Volume volume,
    Map<Volume, Map<String, List<String>>> volumeToInitiatorTargetMapFromAttachResponse,
    List<Initiator> fcInitiatorList,
    ExportMask exportMask)
    throws Exception {
  log.debug("START - updateTargetsInExportMask");
  // ITLs for initiator URIs vs Target port URIs - This will be the final
  // filtered list to send for the zoning map update
  Map<URI, List<URI>> mapFilteredInitiatorURIVsTargetURIList = new HashMap<URI, List<URI>>();

  // From the initiators list, construct the map of initiator WWNs to
  // their URIs
  Map<String, URI> initiatorsWWNVsURI = getWWNvsURIFCInitiatorsMap(fcInitiatorList);

  URI varrayURI = volume.getVirtualArray();

  /*
   * Get the list of storage ports from the storage system which are
   * associated with the varray. This will be a map of storage port WWNs
   * to their URIs.
   */
  Map<String, URI> mapVarrayTaggedPortWWNVsURI =
      getVarrayTaggedStoragePortWWNs(storage, varrayURI);

  // List of WWN entries, used below for filtering the target port list
  // from attach response
  Set<String> varrayTaggedPortWWNs = mapVarrayTaggedPortWWNVsURI.keySet();

  URI vpoolURI = volume.getVirtualPool();
  VirtualPool vpool = dbClient.queryObject(VirtualPool.class, vpoolURI);
  int pathsPerInitiator = vpool.getPathsPerInitiator();

  // Process the attach response output
  Set<Volume> volumeKeysSet = volumeToInitiatorTargetMapFromAttachResponse.keySet();
  for (Volume volumeRes : volumeKeysSet) {
    log.info(
        String.format(
            "Processing attach response for the volume with URI %s and name %s",
            volumeRes.getId(), volumeRes.getLabel()));
    Map<String, List<String>> initiatorTargetMap =
        volumeToInitiatorTargetMapFromAttachResponse.get(volumeRes);
    Set<String> initiatorKeysSet = initiatorTargetMap.keySet();
    for (String initiatorKey : initiatorKeysSet) {
      // The list of filtered target ports ( which are varray tagged )
      // from the attach response
      List<String> filteredTargetList =
          filterTargetsFromResponse(varrayTaggedPortWWNs, initiatorTargetMap, initiatorKey);
      log.info(
          String.format(
              "For initiator %s accessible storage ports are %s ",
              initiatorKey, filteredTargetList.toString()));

      List<String> tmpTargetList = null;
      if (!isVplex(volumeRes)) {
        // Path validations are required only for host exports;
        // for VPLEX there are no path validations.
        tmpTargetList = checkPathsPerInitiator(pathsPerInitiator, filteredTargetList);
        if (null == tmpTargetList) {
          // Rollback case - throw the exception
          throw new Exception(
              String.format(
                  "Paths per initiator criteria is not met for the initiator : %s "
                      + " Target count is: %s Expected paths per initiator is: %s",
                  initiatorKey,
                  String.valueOf(filteredTargetList.size()),
                  String.valueOf(pathsPerInitiator)));
        }
      } else {
        tmpTargetList = filteredTargetList;
      }

      // Now populate URIs for the map to be returned - convert WWNs
      // to URIs
      populateInitiatorTargetURIMap(
          mapFilteredInitiatorURIVsTargetURIList,
          initiatorsWWNVsURI,
          mapVarrayTaggedPortWWNVsURI,
          initiatorKey,
          tmpTargetList);
    } // End initiator iteration
  } // End volume iteration

  // Clean all existing targets in the export mask and add new targets
  List<URI> storagePortListFromMask =
      StringSetUtil.stringSetToUriList(exportMask.getStoragePorts());
  for (URI removeUri : storagePortListFromMask) {
    exportMask.removeTarget(removeUri);
  }
  exportMask.setStoragePorts(null);

  // Now add new target ports and populate the zoning map
  Set<URI> initiatorURIKeys = mapFilteredInitiatorURIVsTargetURIList.keySet();
  for (URI initiatorURI : initiatorURIKeys) {
    List<URI> storagePortURIList = mapFilteredInitiatorURIVsTargetURIList.get(initiatorURI);
    for (URI portURI : storagePortURIList) {
      exportMask.addTarget(portURI);
    }
  }
  log.debug("END - updateTargetsInExportMask");
}
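A minimal sketch of the contract that checkPathsPerInitiator appears to have in the method above: trim the filtered target list to pathsPerInitiator entries, and signal failure with null when too few targets survive filtering. This is an assumption about the helper's behavior, not the ViPR source.

import java.util.List;

class PathCheckSketch {
  static List<String> checkPathsPerInitiator(int pathsPerInitiator, List<String> targets) {
    if (targets.size() < pathsPerInitiator) {
      return null; // the caller treats null as a rollback condition
    }
    return targets.subList(0, pathsPerInitiator);
  }
}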
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doDeleteVolumes(com.emc.storageos.db.client.model.StorageSystem,
 * java.lang.String, java.util.List, com.emc.storageos.volumecontroller.TaskCompleter)
 */
@Override
public void doDeleteVolumes(
    StorageSystem storageSystem, String opId, List<Volume> volumes, TaskCompleter taskCompleter)
    throws DeviceControllerException {
  try {
    StringBuilder logMsgBuilder =
        new StringBuilder(
            String.format("Delete Volume Start - Array:%s", storageSystem.getSerialNumber()));
    MultiVolumeTaskCompleter multiVolumeTaskCompleter =
        (MultiVolumeTaskCompleter) taskCompleter;
    Set<String> thickLogicalUnitIdList = new HashSet<String>();
    Set<String> thinLogicalUnitIdList = new HashSet<String>();
    HDSApiClient hdsApiClient =
        hdsApiFactory.getClient(
            HDSUtils.getHDSServerManagementServerInfo(storageSystem),
            storageSystem.getSmisUserName(),
            storageSystem.getSmisPassword());
    String systemObjectId = HDSUtils.getSystemObjectID(storageSystem);
    log.info("volumes size: {}", volumes.size());
    for (Volume volume : volumes) {
      logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
      String logicalUnitObjectId =
          HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storageSystem);
      LogicalUnit logicalUnit =
          hdsApiClient.getLogicalUnitInfo(systemObjectId, logicalUnitObjectId);
      if (logicalUnit == null) {
        // The related volume state (if any) has been deleted; skip
        // processing since it is already deleted from the array.
        log.info(String.format("Volume %s already deleted: ", volume.getNativeId()));
        volume.setInactive(true);
        dbClient.persistObject(volume);
        VolumeTaskCompleter deleteTaskCompleter =
            multiVolumeTaskCompleter.skipTaskCompleter(volume.getId());
        deleteTaskCompleter.ready(dbClient);
        continue;
      }
      if (volume.getThinlyProvisioned()) {
        thinLogicalUnitIdList.add(logicalUnitObjectId);
      } else {
        thickLogicalUnitIdList.add(logicalUnitObjectId);
      }
    }
    log.info(logMsgBuilder.toString());
    if (!multiVolumeTaskCompleter.isVolumeTaskCompletersEmpty()) {
      if (null != thickLogicalUnitIdList && !thickLogicalUnitIdList.isEmpty()) {
        String asyncThickLUsJobId =
            hdsApiClient.deleteThickLogicalUnits(systemObjectId, thickLogicalUnitIdList);
        if (null != asyncThickLUsJobId) {
          ControllerServiceImpl.enqueueJob(
              new QueueJob(
                  new HDSDeleteVolumeJob(
                      asyncThickLUsJobId,
                      volumes.get(0).getStorageController(),
                      taskCompleter)));
        }
      }
      if (null != thinLogicalUnitIdList && !thinLogicalUnitIdList.isEmpty()) {
        String asyncThinHDSJobId =
            hdsApiClient.deleteThinLogicalUnits(systemObjectId, thinLogicalUnitIdList);
        // Not sure whether this really works as tracking two jobs
        // in single operation.
        if (null != asyncThinHDSJobId) {
          ControllerServiceImpl.enqueueJob(
              new QueueJob(
                  new HDSDeleteVolumeJob(
                      asyncThinHDSJobId,
                      volumes.get(0).getStorageController(),
                      taskCompleter)));
        }
      }
    } else {
      // If we are here, there are no volumes to delete, we have
      // invoked ready() for the VolumeDeleteCompleter, and told
      // the multiVolumeTaskCompleter to skip these completers.
      // In this case, the multiVolumeTaskCompleter complete()
      // method will not be invoked and the result is that the
      // workflow that initiated this delete request will never
      // be updated. So, here we just call complete() on the
      // multiVolumeTaskCompleter to ensure the workflow status is
      // updated.
      multiVolumeTaskCompleter.ready(dbClient);
    }
  } catch (Exception e) {
    log.error("Problem in doDeleteVolume: ", e);
    ServiceError error =
        DeviceControllerErrors.hds.methodFailed("doDeleteVolume", e.getMessage());
    taskCompleter.error(dbClient, error);
  }
  StringBuilder logMsgBuilder =
      new StringBuilder(
          String.format("Delete Volume End - Array: %s", storageSystem.getSerialNumber()));
  for (Volume volume : volumes) {
    logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
  }
  log.info(logMsgBuilder.toString());
}
/**
 * Update (if it exists) the source and target UnManagedVolume objects with RP information needed
 * for ingestion
 *
 * @param unManagedProtectionSet unmanaged protection set
 * @param cg CG response got back from RP system
 * @param rpCopyAccessStateMap Map to hold the access state of the replication sets
 * @param rpWwnToNativeWwn Map of RP volume WWN to native volume WWN - required for XIO but
 *     harmless otherwise
 * @param storageNativeIdPrefixes List of XIO systems discovered in ViPR
 * @param dbClient DB client instance
 */
private void mapCgSourceAndTargets(
    UnManagedProtectionSet unManagedProtectionSet,
    GetCGsResponse cg,
    Map<String, String> rpCopyAccessStateMap,
    Map<String, String> rpWwnToNativeWwn,
    List<String> storageNativeIdPrefixes,
    DbClient dbClient) {
  for (GetRSetResponse rset : cg.getRsets()) {
    for (GetVolumeResponse volume : rset.getVolumes()) {
      // Find this volume in UnManagedVolumes based on wwn
      UnManagedVolume unManagedVolume =
          findUnManagedVolumeForWwn(volume.getWwn(), dbClient, storageNativeIdPrefixes);

      // Check if this volume is already managed, which would indicate it has already been
      // partially ingested
      Volume managedVolume =
          DiscoveryUtils.checkManagedVolumeExistsInDBByWwn(dbClient, volume.getWwn());

      // Add the WWN to the unmanaged protection set, regardless of whether this volume is
      // unmanaged or not.
      unManagedProtectionSet.getVolumeWwns().add(volume.getWwn());

      if (null == unManagedVolume && null == managedVolume) {
        log.info(
            "Protection Set {} contains unknown Replication Set volume: {}. Skipping.",
            unManagedProtectionSet.getNativeGuid(),
            volume.getWwn());
        continue;
      }

      if (null != managedVolume) {
        log.info(
            "Protection Set {} contains volume {} that is already managed",
            unManagedProtectionSet.getNativeGuid(),
            volume.getWwn());
        // make sure it's in the UnManagedProtectionSet's ManagedVolume ids
        if (!unManagedProtectionSet
            .getManagedVolumeIds()
            .contains(managedVolume.getId().toString())) {
          unManagedProtectionSet.getManagedVolumeIds().add(managedVolume.getId().toString());
        }

        if (!managedVolume.checkInternalFlags(Flag.INTERNAL_OBJECT) && null != unManagedVolume) {
          log.info(
              "Protection Set {} also has an orphaned UnManagedVolume {} that will be removed",
              unManagedProtectionSet.getNativeGuid(),
              unManagedVolume.getLabel());
          // remove the unManagedVolume from the UnManagedProtectionSet's UnManagedVolume ids
          unManagedProtectionSet
              .getUnManagedVolumeIds()
              .remove(unManagedVolume.getId().toString());
          unManagedVolumesToDelete.add(unManagedVolume);
          // because this volume is already managed, we can just continue to the next
          continue;
        }
      }

      // at this point, we have a legitimate UnManagedVolume whose RP properties should be
      // updated
      log.info("Processing Replication Set UnManagedVolume {}", unManagedVolume.forDisplay());

      // Add the unmanaged volume to the list (if it's not there already)
      if (!unManagedProtectionSet
          .getUnManagedVolumeIds()
          .contains(unManagedVolume.getId().toString())) {
        unManagedProtectionSet.getUnManagedVolumeIds().add(unManagedVolume.getId().toString());
      }

      // Update the fields in the UnManagedVolume to reflect RP characteristics
      String personality = Volume.PersonalityTypes.SOURCE.name();
      if (!volume.isProduction()) {
        personality = Volume.PersonalityTypes.TARGET.name();
      }

      updateCommonRPProperties(
          unManagedProtectionSet, unManagedVolume, personality, volume, dbClient);

      // Update other RP properties for source/targets:
      // which Replication Set this volume belongs to (so we can associate
      // sources to targets) and what the access state is.
      StringSet rpAccessState = new StringSet();
      rpAccessState.add(rpCopyAccessStateMap.get(volume.getRpCopyName()));
      unManagedVolume.putVolumeInfo(
          SupportedVolumeInformation.RP_ACCESS_STATE.toString(), rpAccessState);

      StringSet rsetName = new StringSet();
      rsetName.add(rset.getName());
      unManagedVolume.putVolumeInfo(SupportedVolumeInformation.RP_RSET_NAME.toString(), rsetName);

      rpWwnToNativeWwn.put(volume.getWwn(), unManagedVolume.getWwn());
      unManagedVolumesToUpdateByWwn.put(unManagedVolume.getWwn(), unManagedVolume);
    }

    // Now that we've processed all of the sources and targets, we can mark all of the target
    // devices in the source devices.
    for (GetVolumeResponse volume : rset.getVolumes()) {
      // Only process source volumes here.
      if (!volume.isProduction()) {
        continue;
      }

      // Find this volume in UnManagedVolumes based on wwn.
      // See if the unmanaged volume is in the list of volumes to update
      // (it should be, unless the backing array has not been discovered)
      UnManagedVolume unManagedVolume = null;
      String wwn = rpWwnToNativeWwn.get(volume.getWwn());
      if (wwn != null) {
        unManagedVolume = findUnManagedVolumeForWwn(wwn, dbClient, storageNativeIdPrefixes);
      }

      if (null == unManagedVolume) {
        log.info(
            "Protection Set {} contains unknown volume: {}. Skipping.",
            unManagedProtectionSet.getNativeGuid(),
            volume.getWwn());
        continue;
      }

      log.info("Linking target volumes to source volume {}", unManagedVolume.forDisplay());
      StringSet rpTargetVolumeIds =
          linkTargetVolumes(
              unManagedProtectionSet,
              unManagedVolume,
              rset,
              rpWwnToNativeWwn,
              storageNativeIdPrefixes,
              dbClient);

      // Add the unmanaged target IDs to the source unmanaged volume
      unManagedVolume.putVolumeInfo(
          SupportedVolumeInformation.RP_UNMANAGED_TARGET_VOLUMES.toString(), rpTargetVolumeIds);

      unManagedVolumesToUpdateByWwn.put(unManagedVolume.getWwn(), unManagedVolume);
    }
  }
}
private void createOrUpdateVcenterCluster( boolean createCluster, AsyncTask task, URI clusterUri, URI[] addHostUris, URI[] removeHostUris, URI[] volumeUris) throws InternalException { TaskCompleter completer = null; try { _log.info( "createOrUpdateVcenterCluster " + createCluster + " " + task + " " + clusterUri + " " + addHostUris + " " + removeHostUris); if (task == null) { _log.error("AsyncTask is null"); throw new Exception("AsyncTask is null"); } URI vcenterDataCenterId = task._id; VcenterDataCenter vcenterDataCenter = _dbClient.queryObject(VcenterDataCenter.class, vcenterDataCenterId); if (clusterUri == null) { _log.error("Cluster URI is null"); throw new Exception("Cluster URI is null"); } Cluster cluster = _dbClient.queryObject(Cluster.class, clusterUri); Vcenter vcenter = _dbClient.queryObject(Vcenter.class, vcenterDataCenter.getVcenter()); _log.info( "Request to create or update cluster " + vcenter.getIpAddress() + "/" + vcenterDataCenter.getLabel() + "/" + cluster.getLabel()); Collection<Host> addHosts = new ArrayList<Host>(); if (addHostUris == null || addHostUris.length == 0) { _log.info("Add host URIs is null or empty - Cluster will be created without hosts"); } else { for (URI hostUri : addHostUris) { _log.info("createOrUpdateVcenterCluster " + clusterUri + " with add host " + hostUri); } addHosts = _dbClient.queryObject(Host.class, addHostUris); } Collection<Host> removeHosts = new ArrayList<Host>(); if (removeHostUris == null || removeHostUris.length == 0) { _log.info("Remove host URIs is null or empty - Cluster will have no removed hosts"); } else { for (URI hostUri : removeHostUris) { _log.info("createOrUpdateVcenterCluster " + clusterUri + " with remove host " + hostUri); } removeHosts = _dbClient.queryObject(Host.class, removeHostUris); } Collection<Volume> volumes = new ArrayList<Volume>(); if (volumeUris == null || volumeUris.length == 0) { _log.info("Volume URIs is null or empty - Cluster will be created without datastores"); } else { for (URI volumeUri : volumeUris) { _log.info("createOrUpdateVcenterCluster " + clusterUri + " with volume " + volumeUri); } volumes = _dbClient.queryObject(Volume.class, volumeUris); } completer = new VcenterClusterCompleter( vcenterDataCenterId, task._opId, OperationTypeEnum.CREATE_UPDATE_VCENTER_CLUSTER, "VCENTER_CONTROLLER"); Workflow workflow = _workflowService.getNewWorkflow( this, "CREATE_UPDATE_VCENTER_CLUSTER_WORKFLOW", true, task._opId); String clusterStep = workflow.createStep( "CREATE_UPDATE_VCENTER_CLUSTER_STEP", String.format( "vCenter cluster operation in vCenter datacenter %s", vcenterDataCenterId), null, vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method( "createUpdateVcenterClusterOperation", createCluster, vcenter.getId(), vcenterDataCenter.getId(), cluster.getId()), null, null); String lastStep = clusterStep; if (removeHosts.size() > 0) { for (Host host : removeHosts) { String hostStep = workflow.createStep( "VCENTER_CLUSTER_REMOVE_HOST", String.format("vCenter cluster remove host operation %s", host.getId()), clusterStep, vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method( "vcenterClusterRemoveHostOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), host.getId()), null, null); lastStep = hostStep; // add host will wait on last of these } } if (addHosts.size() > 0) { for (Host host : addHosts) { String hostStep = workflow.createStep( "VCENTER_CLUSTER_ADD_HOST", String.format("vCenter cluster add host operation %s", 
host.getId()), lastStep, vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method( "vcenterClusterAddHostOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), host.getId()), null, null); } if (volumes.size() > 0) { // Once all hosts are in the cluster, select one host to use for the shared // storage operations. String selectHostForStorageOperationsStep = workflow.createStep( "VCENTER_CLUSTER_SELECT_HOST", String.format( "vCenter cluster select host for storage operations in vCenter datacenter %s", vcenterDataCenterId), "VCENTER_CLUSTER_ADD_HOST", vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method( "vcenterClusterSelectHostOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), addHostUris), null, null); // Do not run datastore creation in parallel: the first datastore step waits on // selectHostForStorageOperationsStep, and each subsequent step waits on the // previous datastore step. String volumeStep = null; for (Volume volume : volumes) { volumeStep = workflow.createStep( "VCENTER_CLUSTER_CREATE_DATASTORE", String.format("vCenter cluster create datastore operation %s", volume.getId()), volumeStep == null ? selectHostForStorageOperationsStep : volumeStep, vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method( "vcenterClusterCreateDatastoreOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), volume.getId(), selectHostForStorageOperationsStep), null, null); } } } workflow.executePlan(completer, "Success"); } catch (Exception e) { _log.error("createOrUpdateVcenterCluster caught an exception.", e); ServiceError serviceError = DeviceControllerException.errors.jobFailed(e); // The completer is still null if the failure occurred before it was constructed. if (completer != null) { completer.error(_dbClient, serviceError); } } }
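// The method above encodes its ordering entirely in the waitFor argument of
// workflow.createStep: a null waitFor starts immediately, a step id serializes on that
// step, and a step-group name (e.g. "VCENTER_CLUSTER_ADD_HOST") waits for every step in
// the group. The following is a simplified, hypothetical sketch of that chaining, using
// a stand-in Step type rather than the real Workflow API, to show why add-host steps may
// run in parallel while datastore steps run one at a time.
import java.util.ArrayList;
import java.util.List;

class StepChainingSketch {
    static class Step {
        final String id;
        final String waitFor; // null means the step has no predecessor
        Step(String id, String waitFor) {
            this.id = id;
            this.waitFor = waitFor;
        }
    }

    public static void main(String[] args) {
        List<Step> plan = new ArrayList<Step>();
        Step cluster = new Step("cluster", null); // runs first
        plan.add(cluster);
        // All add-host steps wait on the same predecessor, so they may run in parallel.
        for (int i = 1; i <= 3; i++) {
            plan.add(new Step("addHost-" + i, cluster.id));
        }
        // Datastore steps are deliberately serialized: each waits on the previous one,
        // mirroring the "volumeStep == null ? selectHostForStorageOperationsStep :
        // volumeStep" expression above.
        String previous = "selectHost";
        for (int i = 1; i <= 3; i++) {
            Step datastore = new Step("datastore-" + i, previous);
            plan.add(datastore);
            previous = datastore.id;
        }
        for (Step step : plan) {
            System.out.println(step.id + " waits on " + step.waitFor);
        }
    }
}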
/** {@inheritDoc} */ @Override public TaskList create( List<BlockObject> fcSourceObjList, VirtualArray varray, String name, boolean createInactive, int count, String taskId) { // Populate the descriptors list with all volumes required // to create the VPLEX volume copies. int sourceCounter = 0; URI vplexSrcSystemId = null; TaskList taskList = new TaskList(); List<Volume> vplexCopyVolumes = new ArrayList<Volume>(); List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>(); List<BlockObject> sortedSourceObjectList = sortFullCopySourceList(fcSourceObjList); for (BlockObject fcSourceObj : sortedSourceObjectList) { URI fcSourceURI = fcSourceObj.getId(); if (URIUtil.isType(fcSourceURI, BlockSnapshot.class)) { // Full copy of snapshots is not supported for VPLEX. return super.create(sortedSourceObjectList, varray, name, createInactive, count, taskId); } Volume vplexSrcVolume = (Volume) fcSourceObj; String copyName = name + (sortedSourceObjectList.size() > 1 ? "-" + ++sourceCounter : ""); // Create a volume descriptor for the source VPLEX volume being copied // and add it to the descriptors list. Be sure to identify this VPLEX // volume as the source volume being copied. vplexSrcSystemId = fcSourceObj.getStorageController(); VolumeDescriptor vplexSrcVolumeDescr = new VolumeDescriptor( VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexSrcSystemId, fcSourceURI, null, null); Map<String, Object> descrParams = new HashMap<String, Object>(); descrParams.put(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID, Boolean.TRUE); vplexSrcVolumeDescr.setParameters(descrParams); volumeDescriptors.add(vplexSrcVolumeDescr); // Get some info about the VPLEX volume being copied and its storage // system. Project vplexSrcProject = BlockFullCopyUtils.queryFullCopySourceProject(fcSourceObj, _dbClient); StorageSystem vplexSrcSystem = _dbClient.queryObject(StorageSystem.class, vplexSrcSystemId); Project vplexSystemProject = VPlexBlockServiceApiImpl.getVplexProject(vplexSrcSystem, _dbClient, _tenantsService); // For the VPLEX volume being copied, determine which of the associated // backend volumes is the primary and, for distributed volumes, which // is the HA volume. The primary volume will be natively copied, and // we need to place and prepare a volume to hold the copy. This copy // will be the primary backend volume for the VPLEX volume copy. For // a distributed virtual volume, we will also need to place and prepare // a volume to hold the HA volume of the VPLEX volume copy. Volume vplexSrcPrimaryVolume = null; Volume vplexSrcHAVolume = null; StringSet assocVolumeURIs = vplexSrcVolume.getAssociatedVolumes(); Iterator<String> assocVolumeURIsIter = assocVolumeURIs.iterator(); while (assocVolumeURIsIter.hasNext()) { URI assocVolumeURI = URI.create(assocVolumeURIsIter.next()); Volume assocVolume = _dbClient.queryObject(Volume.class, assocVolumeURI); if (assocVolume.getVirtualArray().toString().equals(varray.getId().toString())) { vplexSrcPrimaryVolume = assocVolume; } else { vplexSrcHAVolume = assocVolume; } } // Get the capabilities. VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(fcSourceObj, _dbClient); VirtualPoolCapabilityValuesWrapper capabilities = getCapabilitiesForFullCopyCreate(fcSourceObj, vpool, count); // Get the number of copies to create and the size of the volumes. // Note that for the size, we must use the actual provisioned size // of the source side backend volume. The size passed in the // capabilities will be the size of the VPLEX volume. 
When the // source side backend volume for the copy is provisioned, you // might not get exactly that size. On VMAX the size will be slightly // larger, while on VNX the size will be exactly what is requested. // So, if the source side is a VMAX, the source side for the copy // will be slightly larger than the size in the capabilities. If the HA // side is VNX and we use the size in the capabilities, we will get // exactly that size for the HA backend volume. As a result, the source // side backend volume for the copy would be slightly larger than the // HA side. A VPLEX copy is made by first using native full copy to // copy the source side backend volume and then provisioning the HA // side volume. The new source side backend copy is then imported into // VPLEX in the same way as is done for a vpool change that imports a // volume to VPLEX. The code in the VPLEX controller creates a local // VPLEX volume using the source side copy and, for a distributed // volume, then attaches the provisioned HA backend volume as a remote // mirror. If the HA volume is slightly smaller, this will fail on the // VPLEX. So, we must ensure the HA side volume is big enough by using // the provisioned capacity of the source side backend volume of the // VPLEX volume being copied. long size = vplexSrcPrimaryVolume.getProvisionedCapacity(); // Place and prepare a volume for each copy to serve as a native // copy of a VPLEX backend volume. The VPLEX backend volume that // is copied is the backend volume in the same virtual array as the // VPLEX volume, i.e., the primary backend volume. Create // descriptors for these prepared volumes and add them to the list. List<Volume> vplexCopyPrimaryVolumes = prepareFullCopyPrimaryVolumes( copyName, count, vplexSrcPrimaryVolume, capabilities, volumeDescriptors); // If the VPLEX volume being copied is distributed, then the VPLEX // HA volume should be non-null. We use the VPLEX scheduler to place // and then prepare volumes for the HA volumes of the VPLEX volume // copies. This should be done in the same manner as is done for the // import volume routine, because to form the VPLEX volume copy we // import the copy of the primary backend volume. List<Volume> vplexCopyHAVolumes = new ArrayList<Volume>(); if (vplexSrcHAVolume != null) { vplexCopyHAVolumes.addAll( prepareFullCopyHAVolumes( copyName, count, size, vplexSrcSystem, vplexSystemProject, varray, vplexSrcHAVolume, taskId, volumeDescriptors)); } // For each copy to be created, place and prepare a volume for the // primary backend volume copy. When copying a distributed VPLEX // volume, we also must place and prepare a volume for the HA // backend volume copy. Lastly, we must prepare a volume for the // VPLEX volume copy. Create descriptors for these prepared volumes // and add them to the volume descriptors list. for (int i = 0; i < count; i++) { // Prepare a new VPLEX volume for each copy. Volume vplexCopyPrimaryVolume = vplexCopyPrimaryVolumes.get(i); Volume vplexCopyHAVolume = null; if (!vplexCopyHAVolumes.isEmpty()) { vplexCopyHAVolume = vplexCopyHAVolumes.get(i); } Volume vplexCopyVolume = prepareFullCopyVPlexVolume( copyName, count, i, size, vplexSrcVolume, vplexSrcProject, varray, vpool, vplexSrcSystemId, vplexCopyPrimaryVolume, vplexCopyHAVolume, taskId, volumeDescriptors); vplexCopyVolumes.add(vplexCopyVolume); // Create a task for each copy. 
Operation op = vplexCopyVolume.getOpStatus().get(taskId); TaskResourceRep task = toTask(vplexCopyVolume, taskId, op); taskList.getTaskList().add(task); } } // Invoke the VPLEX controller to create the copies. try { s_logger.info("Getting VPlex controller for task {}.", taskId); VPlexController controller = getController(VPlexController.class, DiscoveredDataObject.Type.vplex.toString()); // TBD controller needs to be updated to handle CGs. controller.createFullCopy(vplexSrcSystemId, volumeDescriptors, taskId); s_logger.info("Successfully invoked controller."); } catch (InternalException e) { s_logger.error("Controller error", e); // Update the status for the VPLEX volume copies and their // corresponding tasks. for (Volume vplexCopyVolume : vplexCopyVolumes) { Operation op = vplexCopyVolume.getOpStatus().get(taskId); if (op != null) { op.error(e); vplexCopyVolume.getOpStatus().updateTaskStatus(taskId, op); _dbClient.persistObject(vplexCopyVolume); for (TaskResourceRep task : taskList.getTaskList()) { if (task.getResource().getId().equals(vplexCopyVolume.getId())) { task.setState(op.getStatus()); task.setMessage(op.getMessage()); break; } } } } // Mark all volumes inactive, except for the VPLEX volume // we were trying to copy. for (VolumeDescriptor descriptor : volumeDescriptors) { if (descriptor.getParameters().get(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID) == null) { Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI()); volume.setInactive(true); _dbClient.persistObject(volume); } } } return taskList; }
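// On controller failure, the create method above compensates in two moves: it fails the
// task on each copy volume, then marks every prepared volume inactive except the one
// flagged with PARAM_IS_COPY_SOURCE_ID. Below is a minimal, hypothetical sketch of that
// compensation pattern (PreparedObjectSketch is illustrative only, not the ViPR/CoprHD
// data model).
import java.util.ArrayList;
import java.util.List;

class FailureCompensationSketch {
    static class PreparedObjectSketch {
        final String id;
        final boolean isCopySource;
        boolean inactive;
        String taskState = "pending";
        PreparedObjectSketch(String id, boolean isCopySource) {
            this.id = id;
            this.isCopySource = isCopySource;
        }
    }

    static void onControllerFailure(List<PreparedObjectSketch> prepared, String error) {
        for (PreparedObjectSketch object : prepared) {
            if (object.isCopySource) {
                continue; // never retire the volume that was being copied
            }
            object.taskState = "error: " + error; // surface the failure on the task
            object.inactive = true; // retire the half-created object
        }
    }

    public static void main(String[] args) {
        List<PreparedObjectSketch> prepared = new ArrayList<PreparedObjectSketch>();
        prepared.add(new PreparedObjectSketch("vplexSrcVolume", true));
        prepared.add(new PreparedObjectSketch("copyPrimaryVolume", false));
        prepared.add(new PreparedObjectSketch("copyHAVolume", false));
        onControllerFailure(prepared, "controller unavailable");
        for (PreparedObjectSketch object : prepared) {
            System.out.println(object.id + " inactive=" + object.inactive + " task=" + object.taskState);
        }
    }
}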