/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doCreateVolumes(com.emc.storageos.db.client.model.StorageSystem,
 * com.emc.storageos.db.client.model.StoragePool, java.lang.String, java.util.List,
 * com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper, com.emc.storageos.volumecontroller.TaskCompleter)
 */
@Override
public void doCreateVolumes(StorageSystem storageSystem, StoragePool storagePool, String opId,
        List<Volume> volumes, VirtualPoolCapabilityValuesWrapper capabilities,
        TaskCompleter taskCompleter) throws DeviceControllerException {
    String label = null;
    Long capacity = null;
    boolean isThinVolume = false;
    boolean opCreationFailed = false;
    StringBuilder logMsgBuilder = new StringBuilder(String.format(
            "Create Volume Start - Array:%s, Pool:%s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s , IsThinlyProvisioned: %s",
                volume.getLabel(), volume.getThinlyProvisioned()));
        if ((label == null) && (volumes.size() == 1)) {
            String tenantName = "";
            try {
                TenantOrg tenant = dbClient.queryObject(TenantOrg.class, volume.getTenant().getURI());
                tenantName = tenant.getLabel();
            } catch (DatabaseException e) {
                log.error("Error looking up TenantOrg object", e);
            }
            label = nameGenerator.generate(tenantName, volume.getLabel(),
                    volume.getId().toString(), '-', HDSConstants.MAX_VOLUME_NAME_LENGTH);
        }
        if (capacity == null) {
            capacity = volume.getCapacity();
        }
        isThinVolume = volume.getThinlyProvisioned();
    }
    log.info(logMsgBuilder.toString());
    try {
        multiVolumeCheckForHitachiModel(volumes, storageSystem);

        HDSApiClient hdsApiClient = hdsApiFactory.getClient(
                HDSUtils.getHDSServerManagementServerInfo(storageSystem),
                storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        String systemObjectID = HDSUtils.getSystemObjectID(storageSystem);
        String poolObjectID = HDSUtils.getPoolObjectID(storagePool);
        String asyncTaskMessageId = null;

        // isThinVolume = true creates VirtualVolumes; isThinVolume = false creates LogicalUnits.
        if (isThinVolume) {
            asyncTaskMessageId = hdsApiClient.createThinVolumes(systemObjectID,
                    storagePool.getNativeId(), capacity, volumes.size(), label,
                    QUICK_FORMAT_TYPE, storageSystem.getModel());
        } else {
            asyncTaskMessageId = hdsApiClient.createThickVolumes(systemObjectID,
                    poolObjectID, capacity, volumes.size(), label, null,
                    storageSystem.getModel(), null);
        }

        if (asyncTaskMessageId != null) {
            HDSJob createHDSJob = (volumes.size() > 1)
                    ? new HDSCreateMultiVolumeJob(asyncTaskMessageId,
                            volumes.get(0).getStorageController(), storagePool.getId(),
                            volumes.size(), taskCompleter)
                    : new HDSCreateVolumeJob(asyncTaskMessageId,
                            volumes.get(0).getStorageController(), storagePool.getId(),
                            taskCompleter);
            ControllerServiceImpl.enqueueJob(new QueueJob(createHDSJob));
        }
    } catch (final InternalException e) {
        log.error("Problem in doCreateVolumes: ", e);
        opCreationFailed = true;
        taskCompleter.error(dbClient, e);
    } catch (final Exception e) {
        log.error("Problem in doCreateVolumes: ", e);
        opCreationFailed = true;
        ServiceError serviceError = DeviceControllerErrors.hds.methodFailed("doCreateVolumes", e.getMessage());
        taskCompleter.error(dbClient, serviceError);
    }
    if (opCreationFailed) {
        for (Volume vol : volumes) {
            vol.setInactive(true);
            dbClient.persistObject(vol);
        }
    }

    logMsgBuilder = new StringBuilder(String.format(
            "Create Volumes End - Array:%s, Pool:%s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
    }
    log.info(logMsgBuilder.toString());
}
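/*
 * Illustrative sketch only, not part of the original class: one possible way the
 * thin/thick dispatch in doCreateVolumes could be factored into a helper. The helper
 * name and signature are hypothetical; it relies only on the HDSApiClient and HDSUtils
 * calls, and the QUICK_FORMAT_TYPE constant, already used in the method above.
 */
private String requestVolumeCreation(HDSApiClient hdsApiClient, StorageSystem storageSystem,
        StoragePool storagePool, boolean isThinVolume, Long capacity, int volumeCount,
        String label) throws Exception {
    String systemObjectID = HDSUtils.getSystemObjectID(storageSystem);
    if (isThinVolume) {
        // Thin volumes are created as VirtualVolumes against the pool's native id.
        return hdsApiClient.createThinVolumes(systemObjectID, storagePool.getNativeId(),
                capacity, volumeCount, label, QUICK_FORMAT_TYPE, storageSystem.getModel());
    }
    // Thick volumes are created as LogicalUnits against the pool object id.
    return hdsApiClient.createThickVolumes(systemObjectID, HDSUtils.getPoolObjectID(storagePool),
            capacity, volumeCount, label, null, storageSystem.getModel(), null);
}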
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doDeleteVolumes(com.emc.storageos.db.client.model.StorageSystem,
 * java.lang.String, java.util.List, com.emc.storageos.volumecontroller.TaskCompleter)
 */
@Override
public void doDeleteVolumes(StorageSystem storageSystem, String opId, List<Volume> volumes,
        TaskCompleter taskCompleter) throws DeviceControllerException {
    try {
        StringBuilder logMsgBuilder = new StringBuilder(String.format(
                "Delete Volume Start - Array:%s", storageSystem.getSerialNumber()));
        MultiVolumeTaskCompleter multiVolumeTaskCompleter = (MultiVolumeTaskCompleter) taskCompleter;
        Set<String> thickLogicalUnitIdList = new HashSet<String>();
        Set<String> thinLogicalUnitIdList = new HashSet<String>();
        HDSApiClient hdsApiClient = hdsApiFactory.getClient(
                HDSUtils.getHDSServerManagementServerInfo(storageSystem),
                storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        String systemObjectId = HDSUtils.getSystemObjectID(storageSystem);
        log.info("volumes size: {}", volumes.size());
        for (Volume volume : volumes) {
            logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
            String logicalUnitObjectId = HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storageSystem);
            LogicalUnit logicalUnit = hdsApiClient.getLogicalUnitInfo(systemObjectId, logicalUnitObjectId);
            if (logicalUnit == null) {
                // The logical unit has already been deleted from the array, so skip
                // processing: mark the volume inactive and complete its task.
                log.info(String.format("Volume %s already deleted: ", volume.getNativeId()));
                volume.setInactive(true);
                dbClient.persistObject(volume);
                VolumeTaskCompleter deleteTaskCompleter = multiVolumeTaskCompleter.skipTaskCompleter(volume.getId());
                deleteTaskCompleter.ready(dbClient);
                continue;
            }
            if (volume.getThinlyProvisioned()) {
                thinLogicalUnitIdList.add(logicalUnitObjectId);
            } else {
                thickLogicalUnitIdList.add(logicalUnitObjectId);
            }
        }
        log.info(logMsgBuilder.toString());
        if (!multiVolumeTaskCompleter.isVolumeTaskCompletersEmpty()) {
            if (null != thickLogicalUnitIdList && !thickLogicalUnitIdList.isEmpty()) {
                String asyncThickLUsJobId = hdsApiClient.deleteThickLogicalUnits(systemObjectId, thickLogicalUnitIdList);
                if (null != asyncThickLUsJobId) {
                    ControllerServiceImpl.enqueueJob(new QueueJob(new HDSDeleteVolumeJob(
                            asyncThickLUsJobId, volumes.get(0).getStorageController(), taskCompleter)));
                }
            }
            if (null != thinLogicalUnitIdList && !thinLogicalUnitIdList.isEmpty()) {
                String asyncThinHDSJobId = hdsApiClient.deleteThinLogicalUnits(systemObjectId, thinLogicalUnitIdList);
                // Not sure whether this really works, as it tracks two jobs in a single operation.
                if (null != asyncThinHDSJobId) {
                    ControllerServiceImpl.enqueueJob(new QueueJob(new HDSDeleteVolumeJob(
                            asyncThinHDSJobId, volumes.get(0).getStorageController(), taskCompleter)));
                }
            }
        } else {
            // If we are here, there are no volumes to delete: ready() has been invoked
            // for each VolumeDeleteCompleter and the multiVolumeTaskCompleter was told
            // to skip those completers. In that case the multiVolumeTaskCompleter
            // complete() method will never be invoked, so the workflow that initiated
            // this delete request would never be updated. Call ready() here to ensure
            // the workflow status is updated.
            multiVolumeTaskCompleter.ready(dbClient);
        }
    } catch (Exception e) {
        log.error("Problem in doDeleteVolume: ", e);
        ServiceError error = DeviceControllerErrors.hds.methodFailed("doDeleteVolume", e.getMessage());
        taskCompleter.error(dbClient, error);
    }
    StringBuilder logMsgBuilder = new StringBuilder(String.format(
            "Delete Volume End - Array: %s", storageSystem.getSerialNumber()));
    for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
    }
    log.info(logMsgBuilder.toString());
}
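/*
 * Illustrative sketch only, not part of the original class: one way the per-volume loop
 * in doDeleteVolumes could be factored into a helper that sorts volumes into thin and
 * thick logical-unit id sets. The helper name is hypothetical; it uses only the HDSUtils
 * call and Volume accessors already used above.
 */
private void collectLogicalUnitIds(List<Volume> volumes, StorageSystem storageSystem,
        Set<String> thinLogicalUnitIdList, Set<String> thickLogicalUnitIdList) {
    for (Volume volume : volumes) {
        // Resolve the array-side logical unit id for this volume.
        String logicalUnitObjectId = HDSUtils.getLogicalUnitObjectId(volume.getNativeId(), storageSystem);
        if (volume.getThinlyProvisioned()) {
            thinLogicalUnitIdList.add(logicalUnitObjectId);
        } else {
            thickLogicalUnitIdList.add(logicalUnitObjectId);
        }
    }
}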
/** {@inheritDoc} */
@Override
public TaskList create(List<BlockObject> fcSourceObjList, VirtualArray varray, String name,
        boolean createInactive, int count, String taskId) {

    // Populate the descriptors list with all volumes required to create
    // the VPLEX volume copies.
    int sourceCounter = 0;
    URI vplexSrcSystemId = null;
    TaskList taskList = new TaskList();
    List<Volume> vplexCopyVolumes = new ArrayList<Volume>();
    List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
    List<BlockObject> sortedSourceObjectList = sortFullCopySourceList(fcSourceObjList);
    for (BlockObject fcSourceObj : sortedSourceObjectList) {
        URI fcSourceURI = fcSourceObj.getId();
        if (URIUtil.isType(fcSourceURI, BlockSnapshot.class)) {
            // Full copy of snapshots is not supported for VPLEX.
            return super.create(sortedSourceObjectList, varray, name, createInactive, count, taskId);
        }

        Volume vplexSrcVolume = (Volume) fcSourceObj;
        String copyName = name + (sortedSourceObjectList.size() > 1 ? "-" + ++sourceCounter : "");

        // Create a volume descriptor for the source VPLEX volume being copied
        // and add it to the descriptors list. Be sure to identify this VPLEX
        // volume as the source volume being copied.
        vplexSrcSystemId = fcSourceObj.getStorageController();
        VolumeDescriptor vplexSrcVolumeDescr = new VolumeDescriptor(
                VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexSrcSystemId, fcSourceURI, null, null);
        Map<String, Object> descrParams = new HashMap<String, Object>();
        descrParams.put(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID, Boolean.TRUE);
        vplexSrcVolumeDescr.setParameters(descrParams);
        volumeDescriptors.add(vplexSrcVolumeDescr);

        // Get some info about the VPLEX volume being copied and its storage system.
        Project vplexSrcProject = BlockFullCopyUtils.queryFullCopySourceProject(fcSourceObj, _dbClient);
        StorageSystem vplexSrcSystem = _dbClient.queryObject(StorageSystem.class, vplexSrcSystemId);
        Project vplexSystemProject = VPlexBlockServiceApiImpl.getVplexProject(vplexSrcSystem, _dbClient, _tenantsService);

        // For the VPLEX volume being copied, determine which of the associated
        // backend volumes is the primary and, for distributed volumes, which
        // is the HA volume. The primary volume will be natively copied, and we
        // need to place and prepare a volume to hold that copy. This copy
        // will be the primary backend volume for the VPLEX volume copy. For
        // a distributed virtual volume, we will also need to place and prepare
        // a volume to hold the HA volume of the VPLEX volume copy.
        Volume vplexSrcPrimaryVolume = null;
        Volume vplexSrcHAVolume = null;
        StringSet assocVolumeURIs = vplexSrcVolume.getAssociatedVolumes();
        Iterator<String> assocVolumeURIsIter = assocVolumeURIs.iterator();
        while (assocVolumeURIsIter.hasNext()) {
            URI assocVolumeURI = URI.create(assocVolumeURIsIter.next());
            Volume assocVolume = _dbClient.queryObject(Volume.class, assocVolumeURI);
            if (assocVolume.getVirtualArray().toString().equals(varray.getId().toString())) {
                vplexSrcPrimaryVolume = assocVolume;
            } else {
                vplexSrcHAVolume = assocVolume;
            }
        }

        // Get the capabilities.
        VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(fcSourceObj, _dbClient);
        VirtualPoolCapabilityValuesWrapper capabilities = getCapabilitiesForFullCopyCreate(fcSourceObj, vpool, count);

        // Get the number of copies to create and the size of the volumes.
        // Note that for the size, we must use the actual provisioned size
        // of the source side backend volume. The size passed in the
        // capabilities will be the size of the VPLEX volume. When the
        // source side backend volume for the copy is provisioned, you
        // might not get exactly that size. On VMAX the size will be slightly
        // larger, while on VNX the size will be exactly what is requested.
        // So, if the source side is a VMAX, the source side for the copy
        // will be slightly larger than the size in the capabilities. If the HA
        // side is VNX and we use the size in the capabilities, then you will
        // get exactly that size for the HA backend volume. As a result, the
        // source side backend volume for the copy will be slightly larger than
        // the HA side. The way a VPLEX copy is made is that native full copy
        // is used to create a copy of the source side backend volume, and the
        // HA side volume is then provisioned. The new source side backend copy
        // is then imported into VPLEX in the same way as is done for a vpool
        // change that imports a volume to VPLEX. That code in the VPLEX
        // controller creates a local VPLEX volume using the source side copy,
        // and for a distributed volume it then attaches the provisioned HA
        // backend volume as a remote mirror. If the HA volume is slightly
        // smaller, this will fail on the VPLEX. So, we must ensure that the HA
        // side volume is big enough by using the provisioned capacity of the
        // source side backend volume of the VPLEX volume being copied.
        long size = vplexSrcPrimaryVolume.getProvisionedCapacity();

        // Place and prepare a volume for each copy to serve as a native
        // copy of a VPLEX backend volume. The VPLEX backend volume that
        // is copied is the backend volume in the same virtual array as the
        // VPLEX volume, i.e., the primary backend volume. Create
        // descriptors for these prepared volumes and add them to the list.
        List<Volume> vplexCopyPrimaryVolumes = prepareFullCopyPrimaryVolumes(
                copyName, count, vplexSrcPrimaryVolume, capabilities, volumeDescriptors);

        // If the VPLEX volume being copied is distributed, then the VPLEX
        // HA volume should be non-null. We use the VPLEX scheduler to place
        // and then prepare volumes for the HA volumes of the VPLEX volume
        // copies. This should be done in the same manner as is done for the
        // import volume routine. This is because to form the VPLEX volume
        // copy we import the copy of the primary backend volume.
        List<Volume> vplexCopyHAVolumes = new ArrayList<Volume>();
        if (vplexSrcHAVolume != null) {
            vplexCopyHAVolumes.addAll(prepareFullCopyHAVolumes(copyName, count, size,
                    vplexSrcSystem, vplexSystemProject, varray, vplexSrcHAVolume, taskId,
                    volumeDescriptors));
        }

        // For each copy to be created, place and prepare a volume for the
        // primary backend volume copy. When copying a distributed VPLEX
        // volume, we also must place and prepare a volume for the HA
        // backend volume copy. Lastly, we must prepare a volume for the
        // VPLEX volume copy. Create descriptors for these prepared volumes
        // and add them to the volume descriptors list.
        for (int i = 0; i < count; i++) {
            // Prepare a new VPLEX volume for each copy.
            Volume vplexCopyPrimaryVolume = vplexCopyPrimaryVolumes.get(i);
            Volume vplexCopyHAVolume = null;
            if (!vplexCopyHAVolumes.isEmpty()) {
                vplexCopyHAVolume = vplexCopyHAVolumes.get(i);
            }
            Volume vplexCopyVolume = prepareFullCopyVPlexVolume(copyName, count, i, size,
                    vplexSrcVolume, vplexSrcProject, varray, vpool, vplexSrcSystemId,
                    vplexCopyPrimaryVolume, vplexCopyHAVolume, taskId, volumeDescriptors);
            vplexCopyVolumes.add(vplexCopyVolume);

            // Create a task for each copy.
            Operation op = vplexCopyVolume.getOpStatus().get(taskId);
            TaskResourceRep task = toTask(vplexCopyVolume, taskId, op);
            taskList.getTaskList().add(task);
        }
    }

    // Invoke the VPLEX controller to create the copies.
    try {
        s_logger.info("Getting VPlex controller {}.", taskId);
        VPlexController controller = getController(VPlexController.class,
                DiscoveredDataObject.Type.vplex.toString());
        // TBD: controller needs to be updated to handle CGs.
        controller.createFullCopy(vplexSrcSystemId, volumeDescriptors, taskId);
        s_logger.info("Successfully invoked controller.");
    } catch (InternalException e) {
        s_logger.error("Controller error", e);

        // Update the status for the VPLEX volume copies and their
        // corresponding tasks.
        for (Volume vplexCopyVolume : vplexCopyVolumes) {
            Operation op = vplexCopyVolume.getOpStatus().get(taskId);
            if (op != null) {
                op.error(e);
                vplexCopyVolume.getOpStatus().updateTaskStatus(taskId, op);
                _dbClient.persistObject(vplexCopyVolume);
                for (TaskResourceRep task : taskList.getTaskList()) {
                    if (task.getResource().getId().equals(vplexCopyVolume.getId())) {
                        task.setState(op.getStatus());
                        task.setMessage(op.getMessage());
                        break;
                    }
                }
            }
        }

        // Mark all volumes inactive, except for the VPLEX volume
        // we were trying to copy.
        for (VolumeDescriptor descriptor : volumeDescriptors) {
            if (descriptor.getParameters().get(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID) == null) {
                Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
                volume.setInactive(true);
                _dbClient.persistObject(volume);
            }
        }
    }

    return taskList;
}
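/*
 * Illustrative sketch only, not part of the original class: a possible helper that
 * builds the descriptor marking the source VPLEX virtual volume of a full copy, as done
 * inline in create() above. The helper name is hypothetical; the descriptor type and the
 * PARAM_IS_COPY_SOURCE_ID flag are taken directly from the code above.
 */
private VolumeDescriptor buildCopySourceDescriptor(URI vplexSrcSystemId, URI fcSourceURI) {
    VolumeDescriptor vplexSrcVolumeDescr = new VolumeDescriptor(
            VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexSrcSystemId, fcSourceURI, null, null);
    Map<String, Object> descrParams = new HashMap<String, Object>();
    // Flag this descriptor as the full-copy source so the error-handling path can skip
    // it when marking the prepared copy volumes inactive.
    descrParams.put(VolumeDescriptor.PARAM_IS_COPY_SOURCE_ID, Boolean.TRUE);
    vplexSrcVolumeDescr.setParameters(descrParams);
    return vplexSrcVolumeDescr;
}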