/** * Checks the UnManaged Volume's policy with vPool's policy. * * @param vPool the vPool * @param autoTierPolicyId the auto tier policy id on unmanaged volume * @param system the system * @return true, if matching, false otherwise */ public static boolean checkVPoolValidForUnManagedVolumeAutoTieringPolicy( VirtualPool vPool, String autoTierPolicyId, StorageSystem system) { _log.debug("Policy Id: {}, vPool: {}", autoTierPolicyId, vPool); boolean policyMatching = false; String policyIdfromVPool = vPool.getAutoTierPolicyName(); if (autoTierPolicyId != null) { if (policyIdfromVPool != null) { if (vPool.getUniquePolicyNames() || DiscoveredDataObject.Type.vnxblock.name().equalsIgnoreCase(system.getSystemType())) { // Unique Policy names field will not be set for VNX. vPool will have policy name, not the // policy's nativeGuid policyIdfromVPool = NativeGUIDGenerator.generateAutoTierPolicyNativeGuid( system.getNativeGuid(), policyIdfromVPool, NativeGUIDGenerator.getTieringPolicyKeyForSystem(system)); _log.debug("Policy Id generated: {}", policyIdfromVPool); } if (autoTierPolicyId.equalsIgnoreCase(policyIdfromVPool)) { policyMatching = true; } } } else if ((policyIdfromVPool == null) || (policyIdfromVPool.equalsIgnoreCase("none"))) { // if policy is not set in both unmanaged volume and vPool. Note // that the value in the vpool could be set to "none". 
policyMatching = true; } // Default policy for VNX - match volume with default policy to vPool with no policy as well if (!policyMatching && DiscoveredDataObject.Type.vnxblock.name().equalsIgnoreCase(system.getSystemType())) { if (autoTierPolicyId != null && autoTierPolicyId.contains(VnxFastPolicy.DEFAULT_START_HIGH_THEN_AUTOTIER.name()) && policyIdfromVPool == null) { policyMatching = true; } } // Default policy for HDS - match volume with default policy to vPool with no policy as well if (!policyMatching && DiscoveredDataObject.Type.hds.name().equalsIgnoreCase(system.getSystemType())) { if (autoTierPolicyId != null && autoTierPolicyId.contains(HitachiTieringPolicy.All.name()) && policyIdfromVPool == null) { policyMatching = true; } } return policyMatching; }
@Override public void process() { DbClient dbClient = getDbClient(); List<URI> volumeURIs = dbClient.queryByType(Volume.class, false); Iterator<Volume> volumesIter = dbClient.queryIterativeObjects(Volume.class, volumeURIs); while (volumesIter.hasNext()) { Volume volume = volumesIter.next(); URI systemURI = volume.getStorageController(); if (!NullColumnValueGetter.isNullURI(systemURI)) { StorageSystem system = dbClient.queryObject(StorageSystem.class, systemURI); if ((system != null) && (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType()))) { // This is a VPLEX volume. If not already set, // set the protocols to FC. StringSet protocols = volume.getProtocol(); if (protocols == null) { protocols = new StringSet(); protocols.add(StorageProtocol.Block.FC.name()); volume.setProtocol(protocols); dbClient.persistObject(volume); } } } } }
/** * Deactivate Quota directory of file system, this will move the Quota directory to a * "marked-for-delete" state * * <p>NOTE: This is an asynchronous operation. * * @param id the URN of the QuotaDirectory * @param param QuotaDirectory delete param for optional force delete * @brief Delete file system Quota Dir * @return Task resource representation * @throws com.emc.storageos.svcs.errorhandling.resources.InternalException */ @POST @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Path("/{id}/deactivate") @CheckPermission( roles = {Role.TENANT_ADMIN}, acls = {ACL.OWN, ACL.ALL}) public TaskResourceRep deactivateQuotaDirectory( @PathParam("id") URI id, QuotaDirectoryDeleteParam param) throws InternalException { _log.info("FileService::deactivateQtree Request recieved {}", id); String task = UUID.randomUUID().toString(); ArgValidator.checkFieldUriType(id, QuotaDirectory.class, "id"); QuotaDirectory quotaDirectory = queryResource(id); FileShare fs = queryFileShareResource(quotaDirectory.getParent().getURI()); ArgValidator.checkFieldNotNull(fs, "filesystem"); // <TODO> Implement Force delete option when shares and exports for Quota Directory are // supported Operation op = new Operation(); op.setResourceType(ResourceOperationTypeEnum.DELETE_FILE_SYSTEM_QUOTA_DIR); quotaDirectory.getOpStatus().createTaskStatus(task, op); fs.setOpStatus(new OpStatusMap()); fs.getOpStatus().createTaskStatus(task, op); _dbClient.persistObject(fs); _dbClient.persistObject(quotaDirectory); // Now get ready to make calls into the controller StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice()); FileController controller = getController(FileController.class, device.getSystemType()); try { controller.deleteQuotaDirectory(device.getId(), quotaDirectory.getId(), fs.getId(), task); // If delete operation is successful, then remove obj from ViPR db by setting inactive=true 
quotaDirectory.setInactive(true); _dbClient.persistObject(quotaDirectory); } catch (InternalException e) { // treating all controller exceptions as internal error for now. controller // should discriminate between validation problems vs. internal errors throw e; } auditOp( OperationTypeEnum.DELETE_FILE_SYSTEM_QUOTA_DIR, true, AuditLogManager.AUDITOP_BEGIN, quotaDirectory.getLabel(), quotaDirectory.getId().toString(), fs.getId().toString()); fs = _dbClient.queryObject(FileShare.class, fs.getId()); _log.debug( "FileService::Quota directory Before sending response, FS ID : {}, Taks : {} ; Status {}", fs.getOpStatus().get(task), fs.getOpStatus().get(task).getStatus()); return toTask(quotaDirectory, task, op); }
/**
 * {@inheritDoc}
 *
 * <p>VPLEX-specific validation for a full copy (clone) create request. Rejects
 * snapshots as full-copy sources, then applies the generic checks from the
 * superclass followed by VPLEX backend-pool and VNX-backend restrictions.
 */
@Override
public void validateFullCopyCreateRequest(List<BlockObject> fcSourceObjList, int count) {
  if (fcSourceObjList.size() > 0) {
    // NOTE(review): only the first element's type is inspected here — this
    // assumes the list is homogeneous (all snapshots or all volumes); confirm
    // against the callers that build fcSourceObjList.
    URI fcSourceObjURI = fcSourceObjList.get(0).getId();
    if (URIUtil.isType(fcSourceObjURI, BlockSnapshot.class)) {
      // Currently you cannot create a full copy of a VPLEX snapshot.
      throw APIException.badRequests.cantCreateFullCopyForVPlexSnapshot();
    } else {
      // Call super first.
      super.validateFullCopyCreateRequest(fcSourceObjList, count);

      // Platform specific checks.
      for (BlockObject fcSourceObj : fcSourceObjList) {
        Volume fcSourceVolume = (Volume) fcSourceObj;
        StorageSystem system =
            _dbClient.queryObject(StorageSystem.class, fcSourceVolume.getStorageController());
        if (DiscoveredDataObject.Type.vplex.name().equals(system.getSystemType())) {
          // If the volume is a VPLEX volume, then we need to be sure that
          // storage pool of the source backend volume of the VPLEX volume,
          // which is volume used to create the native full copy, supports
          // full copy.
          Volume srcBackendVolume =
              VPlexUtil.getVPLEXBackendVolume(fcSourceVolume, true, _dbClient, true);
          StoragePool storagePool =
              _dbClient.queryObject(StoragePool.class, srcBackendVolume.getPool());
          verifyFullCopySupportedForStoragePool(storagePool);

          // If the full copy source is itself a full copy, it is not
          // detached, and the native full copy i.e., the source side
          // backend volume, is VNX, then creating a full copy of the
          // volume will fail. As such, we prevent it.
          if ((BlockFullCopyUtils.isVolumeFullCopy(fcSourceVolume, _dbClient))
              && (!BlockFullCopyUtils.isFullCopyDetached(fcSourceVolume, _dbClient))) {
            URI backendSystemURI = srcBackendVolume.getStorageController();
            StorageSystem backendSystem =
                _dbClient.queryObject(StorageSystem.class, backendSystemURI);
            if (DiscoveredDataObject.Type.vnxblock.name().equals(backendSystem.getSystemType())) {
              throw APIException.badRequests.cantCreateFullCopyOfVPlexFullCopyUsingVNX();
            }
          }
        }
      }
    }
  }
}
/**
 * Adds the given volumes (with their requested HLUs) to an existing export
 * mask on the array, delegating the actual masking operation to the helper.
 *
 * @param storage the storage system hosting the export mask
 * @param exportMask the export mask to add volumes to
 * @param volumes map of volume URI to requested HLU
 * @param taskCompleter completer invoked when the operation finishes
 * @throws DeviceControllerException on device failure
 */
@Override
public void doExportAddVolumes(
    StorageSystem storage,
    ExportMask exportMask,
    Map<URI, Integer> volumes,
    TaskCompleter taskCompleter)
    throws DeviceControllerException {
  log.info("{} doExportAddVolume START ...", storage.getSerialNumber());
  // Translate the URI->HLU map into the array-specific pairing structure.
  VolumeURIHLU[] uriHluPairs =
      ControllerUtils.getVolumeURIHLUArray(storage.getSystemType(), volumes, dbClient);
  exportMaskOperationsHelper.addVolume(storage, exportMask.getId(), uriHluPairs, taskCompleter);
  log.info("{} doExportAddVolume END ...", storage.getSerialNumber());
}
/**
 * Creates a new export mask on the array for the given volumes, initiators,
 * and target ports.
 *
 * @see com.emc.storageos.volumecontroller.BlockStorageDevice#doExportGroupCreate
 * @param storage the storage system on which to create the mask
 * @param exportMask the ViPR export mask to realize on the array
 * @param volumeMap map of volume URI to requested HLU
 * @param initiators initiators to place in the mask
 * @param targets target port URIs for the mask
 * @param taskCompleter completer invoked when the operation finishes
 * @throws DeviceControllerException on device failure
 */
@Override
public void doExportGroupCreate(
    StorageSystem storage,
    ExportMask exportMask,
    Map<URI, Integer> volumeMap,
    List<Initiator> initiators,
    List<URI> targets,
    TaskCompleter taskCompleter)
    throws DeviceControllerException {
  log.info("{} doExportGroupCreate START ...", storage.getSerialNumber());
  // Build the array-specific volume/HLU pairing before handing off to the helper.
  VolumeURIHLU[] uriHluPairs =
      ControllerUtils.getVolumeURIHLUArray(storage.getSystemType(), volumeMap, dbClient);
  exportMaskOperationsHelper.createExportMask(
      storage, exportMask.getId(), uriHluPairs, targets, initiators, taskCompleter);
  log.info("{} doExportGroupCreate END ...", storage.getSerialNumber());
}
/** * get the volume to be updated after application add and remove operations could be the volume * passed in if it's a simple block volume or the vplex virtual volume if it's a backing volume * * @param voluri uri of volume operated on during add or remove volume from application operation * @param dbClient * @return the volume to update */ private Volume getVolume(URI voluri, DbClient dbClient) { // if this is a vplex volume, update the parent virtual volume List<Volume> vplexVolumes = CustomQueryUtility.queryActiveResourcesByConstraint( dbClient, Volume.class, getVolumesByAssociatedId(voluri.toString())); Volume volume = null; for (Volume vplexVolume : vplexVolumes) { URI storageURI = vplexVolume.getStorageController(); StorageSystem storage = dbClient.queryObject(StorageSystem.class, storageURI); if (DiscoveredDataObject.Type.vplex.name().equals(storage.getSystemType())) { volume = vplexVolume; } } if (volume == null) { volume = dbClient.queryObject(Volume.class, voluri); } return volume; }
/**
 * Deactivate Bucket, this will move the Bucket to a "marked-for-delete" state
 *
 * <p>NOTE: This is an asynchronous operation.
 *
 * @param id the URN of a ViPR Bucket
 * @param param Bucket delete param for optional force delete
 * @brief Delete Bucket
 * @return Task resource representation
 * @throws InternalException
 */
@POST
@Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
@Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
@Path("/{id}/deactivate")
@CheckPermission(
    roles = {Role.TENANT_ADMIN},
    acls = {ACL.OWN, ACL.ALL})
public TaskResourceRep deactivateBucket(@PathParam("id") URI id, BucketDeleteParam param)
    throws InternalException {
  String task = UUID.randomUUID().toString();
  _log.info(
      String.format(
          "BucketDelete --- Bucket id: %1$s, Task: %2$s, ForceDelete: %3$s",
          id, task, param.getForceDelete()));
  ArgValidator.checkFieldUriType(id, Bucket.class, "id");
  Bucket bucket = queryResource(id);
  // A force delete skips the dependency/reference check below.
  if (!param.getForceDelete()) {
    ArgValidator.checkReference(Bucket.class, id, checkForDelete(bucket));
  }
  StorageSystem device = _dbClient.queryObject(StorageSystem.class, bucket.getStorageDevice());
  // Create the task status before dispatching the asynchronous delete.
  Operation op =
      _dbClient.createTaskOpStatus(
          Bucket.class, bucket.getId(), task, ResourceOperationTypeEnum.DELETE_BUCKET);
  op.setDescription("Bucket deactivate");
  ObjectController controller = getController(ObjectController.class, device.getSystemType());
  // Asynchronous: the controller marks the bucket inactive when the delete completes.
  controller.deleteBucket(bucket.getStorageDevice(), id, task);
  // Audit records the *begin* of the operation; completion is audited elsewhere.
  auditOp(
      OperationTypeEnum.DELETE_BUCKET,
      true,
      AuditLogManager.AUDITOP_BEGIN,
      bucket.getId().toString(),
      device.getId().toString());
  return toTask(bucket, task, op);
}
/**
 * {@inheritDoc}
 *
 * <p>Verifies the requested full-copy count for a VPLEX volume: the limit is
 * determined by the system-type maximum, and the count of active full copies
 * is validated against the source-side backend volume.
 */
@Override
protected void verifyFullCopyRequestCount(BlockObject fcSourceObj, int count) {
  // Verify the requested copy count. You can only
  // have as many as is allowed by the source backend
  // volume.
  Volume fcSourceVolume = (Volume) fcSourceObj;
  Volume srcBackendVolume =
      VPlexUtil.getVPLEXBackendVolume(fcSourceVolume, true, _dbClient, true);
  // Verify if the source backend volume supports full copy
  // NOTE(review): the max-copies limit below is looked up via the source
  // object's own storage controller (the VPLEX system), not the backend
  // volume's system — confirm this is the intended system for the limit.
  URI systemURI = fcSourceObj.getStorageController();
  StorageSystem system = _dbClient.queryObject(StorageSystem.class, systemURI);
  // Default to "unlimited" when the system cannot be loaded.
  int maxCount = Integer.MAX_VALUE;
  if (system != null) {
    maxCount = BlockFullCopyManager.getMaxFullCopiesForSystemType(system.getSystemType());
  }
  // If max count is 0, then the operation is not supported
  if (maxCount == 0) {
    throw APIException.badRequests.fullCopyNotSupportedByBackendSystem(fcSourceVolume.getId());
  }
  // Validate the active full copy count against the backend volume.
  BlockFullCopyUtils.validateActiveFullCopyCount(srcBackendVolume, count, _dbClient);
}
/**
 * Called to update the job status when the volume expand job completes.
 *
 * <p>On any terminal status the storage pool capacity is refreshed and the
 * volume's capacity reservation is released. On success the volume's
 * requested, provisioned, and allocated capacities plus meta-volume
 * properties are updated from the completer and from the SMI-S provider.
 *
 * @param jobContext The job context.
 * @throws Exception on post-processing failure (also recorded via
 *     setPostProcessingErrorStatus)
 */
public void updateStatus(JobContext jobContext) throws Exception {
  CloseableIterator<CIMObjectPath> associatorIterator = null;
  CloseableIterator<CIMInstance> instanceIterator = null;
  JobStatus jobStatus = getJobStatus();
  try {
    // Nothing to do until the job reaches a terminal state.
    if (jobStatus == JobStatus.IN_PROGRESS) {
      return;
    }
    DbClient dbClient = jobContext.getDbClient();
    CIMConnectionFactory cimConnectionFactory = jobContext.getCimConnectionFactory();
    WBEMClient client = getWBEMClient(dbClient, cimConnectionFactory);
    // If terminal state update storage pool capacity and remove reservation for volume capacity
    // from pool's reserved capacity map.
    if (jobStatus == JobStatus.SUCCESS
        || jobStatus == JobStatus.FAILED
        || jobStatus == JobStatus.FATAL_ERROR) {
      SmisUtils.updateStoragePoolCapacity(dbClient, client, _storagePoolURI);
      StoragePool pool = dbClient.queryObject(StoragePool.class, _storagePoolURI);
      StringMap reservationMap = pool.getReservedCapacityMap();
      URI volumeId = getTaskCompleter().getId();
      // remove from reservation map
      reservationMap.remove(volumeId.toString());
      dbClient.persistObject(pool);
    }
    String opId = getTaskCompleter().getOpId();
    StringBuilder logMsgBuilder =
        new StringBuilder(
            String.format(
                "Updating status of job %s to %s, task: %s",
                this.getJobName(), jobStatus.name(), opId));
    if (jobStatus == JobStatus.SUCCESS) {
      VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
      Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
      // set requested capacity
      volume.setCapacity(taskCompleter.getSize());
      // set meta related properties
      volume.setTotalMetaMemberCapacity(taskCompleter.getTotalMetaMembersSize());
      volume.setMetaMemberCount(taskCompleter.getMetaMemberCount());
      volume.setMetaMemberSize(taskCompleter.getMetaMemberSize());
      volume.setIsComposite(taskCompleter.isComposite());
      volume.setCompositionType(taskCompleter.getMetaVolumeType());
      // set provisioned capacity: ask the provider for the storage volume
      // associated with this expand job.
      associatorIterator =
          client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
      if (associatorIterator.hasNext()) {
        CIMObjectPath volumePath = associatorIterator.next();
        CIMInstance volumeInstance = client.getInstance(volumePath, true, false, null);
        if (volumeInstance != null) {
          CIMProperty consumableBlocks =
              volumeInstance.getProperty(SmisConstants.CP_CONSUMABLE_BLOCKS);
          CIMProperty blockSize = volumeInstance.getProperty(SmisConstants.CP_BLOCK_SIZE);
          // calculate provisionedCapacity = consumableBlocks * block size
          Long provisionedCapacity =
              Long.valueOf(consumableBlocks.getValue().toString())
                  * Long.valueOf(blockSize.getValue().toString());
          volume.setProvisionedCapacity(provisionedCapacity);
        }
        // set allocated capacity: read SpaceConsumed from the
        // AllocatedFromStoragePool association.
        instanceIterator =
            client.referenceInstances(
                volumePath,
                SmisConstants.CIM_ALLOCATED_FROM_STORAGEPOOL,
                null,
                false,
                SmisConstants.PS_SPACE_CONSUMED);
        if (instanceIterator.hasNext()) {
          CIMInstance allocatedFromStoragePoolPath = instanceIterator.next();
          CIMProperty spaceConsumed =
              allocatedFromStoragePoolPath.getProperty(SmisConstants.CP_SPACE_CONSUMED);
          if (null != spaceConsumed) {
            volume.setAllocatedCapacity(Long.valueOf(spaceConsumed.getValue().toString()));
          }
        }
      }
      logMsgBuilder.append(
          String.format(
              "%n   Capacity: %s, Provisioned capacity: %s, Allocated Capacity: %s",
              volume.getCapacity(),
              volume.getProvisionedCapacity(),
              volume.getAllocatedCapacity()));
      if (volume.getIsComposite()) {
        logMsgBuilder.append(
            String.format(
                "%n   Is Meta: %s, Total meta member capacity: %s, Meta member count %s, Meta member size: %s",
                volume.getIsComposite(),
                volume.getTotalMetaMemberCapacity(),
                volume.getMetaMemberCount(),
                volume.getMetaMemberSize()));
      }
      _log.info(logMsgBuilder.toString());
      // Reset list of meta member volumes in the volume
      if (volume.getMetaVolumeMembers() != null) {
        volume.getMetaVolumeMembers().clear();
      }
      StorageSystem storageSystem =
          dbClient.queryObject(StorageSystem.class, volume.getStorageController());
      // set the RP tag on the volume if the volume is RP protected
      // (only applicable to VMAX arrays).
      if (volume.checkForRp()
          && storageSystem.getSystemType() != null
          && storageSystem
              .getSystemType()
              .equalsIgnoreCase(DiscoveredDataObject.Type.vmax.toString())) {
        SmisCommandHelper helper = jobContext.getSmisCommandHelper();
        List<CIMObjectPath> volumePathList = new ArrayList<CIMObjectPath>();
        volumePathList.add(helper.getVolumeMember(storageSystem, volume));
        helper.setRecoverPointTag(storageSystem, volumePathList, true);
      }
      dbClient.persistObject(volume);
      // Reset list of meta members native ids in WF data (when meta is created meta members are
      // removed from array)
      WorkflowService.getInstance().storeStepData(opId, new ArrayList<String>());
    }
  } catch (Exception e) {
    _log.error("Caught an exception while trying to updateStatus for SmisVolumeExpandJob", e);
    setPostProcessingErrorStatus(
        "Encountered an internal error during volume expand job status processing : "
            + e.getMessage());
  } finally {
    // Always record the step status and release the CIM iterators before
    // delegating to the superclass for common post-processing.
    _metaVolumeTaskCompleter.setLastStepStatus(jobStatus);
    if (associatorIterator != null) {
      associatorIterator.close();
    }
    if (instanceIterator != null) {
      instanceIterator.close();
    }
    super.updateStatus(jobContext);
  }
}
/**
 * Updates Bucket values like Quota and Retention.
 *
 * <p>A zero/absent value for soft quota, hard quota, or retention means "keep
 * the current value". Validation enforces hard quota &gt; soft quota and that
 * retention does not exceed the vPool's configured maximum.
 *
 * @param id Bucket ID
 * @param param Bucket update parameter
 * @return Task resource representation
 * @throws InternalException if update fails
 */
@PUT
@Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
@Path("/{id}")
@Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
@CheckPermission(
    roles = {Role.TENANT_ADMIN},
    acls = {ACL.OWN, ACL.ALL})
public TaskResourceRep updateBucket(@PathParam("id") URI id, BucketUpdateParam param)
    throws InternalException {
  Bucket bucket = null;
  ArgValidator.checkFieldUriType(id, Bucket.class, "id");
  bucket = _dbClient.queryObject(Bucket.class, id);
  ArgValidator.checkEntity(bucket, id, isIdEmbeddedInURL(id));

  // NOTE(review): the comparisons below autounbox softQuota/hardQuota — this
  // assumes SizeUtil.translateSize never returns null; confirm.
  Long softQuota = SizeUtil.translateSize(param.getSoftQuota());
  Long hardQuota = SizeUtil.translateSize(param.getHardQuota());
  Integer retention = null != param.getRetention() ? Integer.valueOf(param.getRetention()) : 0;

  // if no softquota is provided, use the old value
  if (softQuota == 0) {
    softQuota = bucket.getSoftQuota();
  }

  // if no hardquota is provided, use the old value
  if (hardQuota == 0) {
    hardQuota = bucket.getHardQuota();
  }

  // Hard Quota should be more than SoftQuota
  verifyQuotaValues(softQuota, hardQuota, bucket.getLabel());

  // if no retention is provided, use the old value
  if (retention == 0) {
    retention = bucket.getRetention();
  }
  VirtualPool cos = _dbClient.queryObject(VirtualPool.class, bucket.getVirtualPool());
  // verify retention. Its validated only if Retention is configured.
  if (retention != 0 && cos.getMaxRetention() != 0 && retention > cos.getMaxRetention()) {
    throw APIException.badRequests.insufficientRetentionForVirtualPool(cos.getLabel(), "bucket");
  }
  String task = UUID.randomUUID().toString();
  _log.info(String.format("BucketUpdate --- Bucket id: %1$s, Task: %2$s", id, task));
  StorageSystem storageSystem =
      _dbClient.queryObject(StorageSystem.class, bucket.getStorageDevice());
  // Record the update task before dispatching the asynchronous controller call.
  Operation op =
      _dbClient.createTaskOpStatus(
          Bucket.class, bucket.getId(), task, ResourceOperationTypeEnum.UPDATE_BUCKET);
  op.setDescription("Bucket update");
  ObjectController controller =
      getController(ObjectController.class, storageSystem.getSystemType());
  controller.updateBucket(bucket.getStorageDevice(), id, softQuota, hardQuota, retention, task);
  auditOp(
      OperationTypeEnum.UPDATE_BUCKET,
      true,
      AuditLogManager.AUDITOP_BEGIN,
      bucket.getId().toString(),
      bucket.getStorageDevice().toString());
  return toTask(bucket, task, op);
}
/**
 * Validates a bucket create request, places the bucket on a recommended
 * storage pool, persists the Bucket object, and dispatches the asynchronous
 * create to the object controller.
 *
 * @param param bucket creation parameters (vpool, varray, quotas, retention)
 * @param project project owning the bucket (may be null; see audit below)
 * @param tenant tenant org used for access checks
 * @param flags data-object flags to stamp on the new bucket
 * @return task representation for the asynchronous create
 * @throws InternalException on controller dispatch failure
 */
private TaskResourceRep initiateBucketCreation(
    BucketParam param, Project project, TenantOrg tenant, DataObject.Flag[] flags)
    throws InternalException {
  ArgValidator.checkFieldUriType(param.getVpool(), VirtualPool.class, "vpool");
  ArgValidator.checkFieldUriType(param.getVarray(), VirtualArray.class, "varray");

  Long softQuota = SizeUtil.translateSize(param.getSoftQuota());
  Long hardQuota = SizeUtil.translateSize(param.getHardQuota());
  Integer retention = Integer.valueOf(param.getRetention());

  // Hard Quota should be more than SoftQuota
  verifyQuotaValues(softQuota, hardQuota, param.getLabel());

  // check varray
  VirtualArray neighborhood = _dbClient.queryObject(VirtualArray.class, param.getVarray());
  ArgValidator.checkEntity(neighborhood, param.getVarray(), false);
  _permissionsHelper.checkTenantHasAccessToVirtualArray(tenant.getId(), neighborhood);

  // check vpool reference
  VirtualPool cos = _dbClient.queryObject(VirtualPool.class, param.getVpool());
  _permissionsHelper.checkTenantHasAccessToVirtualPool(tenant.getId(), cos);
  ArgValidator.checkEntity(cos, param.getVpool(), false);
  // Buckets require an object-type vPool.
  if (!VirtualPool.Type.object.name().equals(cos.getType())) {
    throw APIException.badRequests.virtualPoolNotForObjectStorage(VirtualPool.Type.object.name());
  }

  // verify retention. Its validated only if Retention is configured.
  if (retention != 0 && cos.getMaxRetention() != 0 && retention > cos.getMaxRetention()) {
    throw APIException.badRequests.insufficientRetentionForVirtualPool(cos.getLabel(), "bucket");
  }

  VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
  capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
  capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.FALSE);

  List<BucketRecommendation> placement =
      _bucketScheduler.placeBucket(neighborhood, cos, capabilities);
  if (placement.isEmpty()) {
    throw APIException.badRequests.noMatchingStoragePoolsForVpoolAndVarray(
        cos.getId(), neighborhood.getId());
  }

  // Randomly select a recommended pool
  Collections.shuffle(placement);
  BucketRecommendation recommendation = placement.get(0);

  String task = UUID.randomUUID().toString();
  Bucket bucket = prepareBucket(param, project, tenant, neighborhood, cos, flags, recommendation);

  _log.info(
      String.format(
          "createBucket --- Bucket: %1$s, StoragePool: %2$s, StorageSystem: %3$s",
          bucket.getId(),
          recommendation.getSourceStoragePool(),
          recommendation.getSourceStorageSystem()));

  Operation op =
      _dbClient.createTaskOpStatus(
          Bucket.class, bucket.getId(), task, ResourceOperationTypeEnum.CREATE_BUCKET);
  op.setDescription("Bucket Create");
  // Controller invocation
  StorageSystem system =
      _dbClient.queryObject(StorageSystem.class, recommendation.getSourceStorageSystem());
  ObjectController controller = getController(ObjectController.class, system.getSystemType());
  controller.createBucket(
      recommendation.getSourceStorageSystem(),
      recommendation.getSourceStoragePool(),
      bucket.getId(),
      bucket.getName(),
      bucket.getNamespace(),
      bucket.getRetention(),
      bucket.getHardQuota(),
      bucket.getSoftQuota(),
      bucket.getOwner(),
      task);
  // Audit the *begin* of the create; project may legitimately be null here.
  auditOp(
      OperationTypeEnum.CREATE_BUCKET,
      true,
      AuditLogManager.AUDITOP_BEGIN,
      param.getLabel(),
      param.getHardQuota(),
      neighborhood.getId().toString(),
      project == null ? null : project.getId().toString());
  return toTask(bucket, task, op);
}
/**
 * Workflow step: removes the given volumes from a backend export mask,
 * deleting the mask entirely when the last ViPR-managed volume is removed and
 * no externally-created volumes remain.
 *
 * @param arrayURI backend storage system URI
 * @param exportGroupURI export group URI (unused directly; part of step contract)
 * @param exportMaskURI export mask to operate on
 * @param volumes volumes requested for removal
 * @param initiatorURIs initiators associated with the request (may be null)
 * @param completer task completer; its opId is re-pointed at this step on rollback
 * @param stepId workflow step id
 */
@Override
public void deleteOrRemoveVolumesFromExportMask(
    URI arrayURI,
    URI exportGroupURI,
    URI exportMaskURI,
    List<URI> volumes,
    List<URI> initiatorURIs,
    TaskCompleter completer,
    String stepId) {
  try {
    StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
    BlockStorageDevice device = _blockController.getDevice(array.getSystemType());
    ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
    WorkflowStepCompleter.stepExecuting(stepId);

    // If the exportMask isn't found, or has been deleted, nothing to do.
    if (exportMask == null || exportMask.getInactive()) {
      _log.info(String.format("ExportMask %s inactive, returning success", exportMaskURI));
      WorkflowStepCompleter.stepSucceded(stepId);
      return;
    }

    // Protect concurrent operations by locking {host, array} dupple.
    // Lock will be released when workflow step completes.
    List<String> lockKeys =
        ControllerLockingUtil.getHostStorageLockKeys(
            _dbClient,
            ExportGroupType.Host,
            StringSetUtil.stringSetToUriList(exportMask.getInitiators()),
            arrayURI);
    getWorkflowService()
        .acquireWorkflowStepLocks(
            stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));

    // Make sure the completer will complete the workflow. This happens on rollback case.
    if (!completer.getOpId().equals(stepId)) {
      completer.setOpId(stepId);
    }

    // Refresh the ExportMask
    exportMask = refreshExportMask(array, device, exportMask);

    // Determine if we're deleting the last volume in the mask.
    StringMap maskVolumesMap = exportMask.getVolumes();
    Set<String> remainingVolumes = new HashSet<String>();
    List<URI> passedVolumesInMask = new ArrayList<>(volumes);
    if (maskVolumesMap != null) {
      remainingVolumes.addAll(maskVolumesMap.keySet());
    }
    for (URI volume : volumes) {
      remainingVolumes.remove(volume.toString());

      // Remove any volumes from the volume list that are no longer
      // in the export mask. When a failure occurs removing a backend
      // volume from a mask, the rollback method will try and remove it
      // again. However, in the case of a distributed volume, one side
      // may have succeeded, so we will try and remove it again. Previously,
      // this was not a problem. However, new validation exists at the
      // block level that checks to make sure the volume to remove is
      // actually in the mask, which now causes a failure when you remove
      // it a second time. So, we check here and remove any volumes that
      // are not in the mask to handle this condition.
      if ((maskVolumesMap != null) && (!maskVolumesMap.keySet().contains(volume.toString()))) {
        passedVolumesInMask.remove(volume);
      }
    }

    // None of the volumes is in the export mask, so we are done.
    if (passedVolumesInMask.isEmpty()) {
      _log.info(
          "None of these volumes {} are in export mask {}", volumes, exportMask.forDisplay());
      WorkflowStepCompleter.stepSucceded(stepId);
      return;
    }

    // If it is last volume and there are no existing volumes, delete the ExportMask.
    if (remainingVolumes.isEmpty() && !exportMask.hasAnyExistingVolumes()) {
      device.doExportDelete(array, exportMask, passedVolumesInMask, initiatorURIs, completer);
    } else {
      List<Initiator> initiators = null;
      if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
        initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
      }
      device.doExportRemoveVolumes(array, exportMask, passedVolumesInMask, initiators, completer);
    }
  } catch (Exception ex) {
    _log.error("Failed to delete or remove volumes to export mask for vnx: ", ex);
    VPlexApiException vplexex =
        DeviceControllerExceptions.vplex.addStepsForCreateVolumesFailed(ex);
    WorkflowStepCompleter.stepFailed(stepId, vplexex);
  }
}
/**
 * Workflow step: adds the given volumes to a backend export mask, creating
 * the mask on the array first when it has no volumes yet and was originally
 * created by ViPR.
 *
 * @param arrayURI backend storage system URI
 * @param exportGroupURI export group URI
 * @param exportMaskURI export mask to operate on
 * @param volumeMap map of volume URI to requested HLU
 * @param initiatorURIs2 unused; initiators are re-derived from the mask below
 * @param completer default (add-volume) completer; replaced with a create
 *     completer when the mask must be created
 * @param stepId workflow step id
 */
@Override
public void createOrAddVolumesToExportMask(
    URI arrayURI,
    URI exportGroupURI,
    URI exportMaskURI,
    Map<URI, Integer> volumeMap,
    List<URI> initiatorURIs2,
    TaskCompleter completer,
    String stepId) {
  try {
    StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
    ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
    WorkflowStepCompleter.stepExecuting(stepId);

    // If the exportMask isn't found, or has been deleted, fail, ask user to retry.
    if (exportMask == null || exportMask.getInactive()) {
      _log.info(String.format("ExportMask %s deleted or inactive, failing", exportMaskURI));
      ServiceError svcerr =
          VPlexApiException.errors.createBackendExportMaskDeleted(
              exportMaskURI.toString(), arrayURI.toString());
      WorkflowStepCompleter.stepFailed(stepId, svcerr);
      return;
    }

    // Protect concurrent operations by locking {host, array} dupple.
    // Lock will be released when workflow step completes.
    List<String> lockKeys =
        ControllerLockingUtil.getHostStorageLockKeys(
            _dbClient,
            ExportGroupType.Host,
            StringSetUtil.stringSetToUriList(exportMask.getInitiators()),
            arrayURI);
    getWorkflowService()
        .acquireWorkflowStepLocks(
            stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));

    // Fetch the Initiators (silently skipping any that no longer resolve).
    List<URI> initiatorURIs = new ArrayList<URI>();
    List<Initiator> initiators = new ArrayList<Initiator>();
    for (String initiatorId : exportMask.getInitiators()) {
      Initiator initiator = _dbClient.queryObject(Initiator.class, URI.create(initiatorId));
      if (initiator != null) {
        initiators.add(initiator);
        initiatorURIs.add(initiator.getId());
      }
    }

    // We do not refresh here, as the VNXExportOperations code will throw an exception
    // if the StorageGroup was not found.
    BlockStorageDevice device = _blockController.getDevice(array.getSystemType());
    if (!exportMask.hasAnyVolumes() && exportMask.getCreatedBySystem()) {
      // We are creating this ExportMask on the hardware! (Maybe not the first time though...)

      // Fetch the targets
      List<URI> targets = new ArrayList<URI>();
      for (String targetId : exportMask.getStoragePorts()) {
        targets.add(URI.create(targetId));
      }

      // Clear the export_mask nativeId; otherwise the VnxExportOps will attempt to look it
      // up and fail. An empty String will suffice as having no nativeId.
      if (exportMask.getNativeId() != null) {
        exportMask.setNativeId("");
        _dbClient.updateAndReindexObject(exportMask);
      }
      // The default completer passed in is for add volume, create correct one
      completer =
          new ExportMaskCreateCompleter(
              exportGroupURI, exportMaskURI, initiatorURIs, volumeMap, stepId);
      device.doExportCreate(array, exportMask, volumeMap, initiators, targets, completer);
    } else {
      device.doExportAddVolumes(array, exportMask, initiators, volumeMap, completer);
    }
  } catch (Exception ex) {
    _log.error("Failed to create or add volumes to export mask for vnx: ", ex);
    VPlexApiException vplexex =
        DeviceControllerExceptions.vplex.addStepsForCreateVolumesFailed(ex);
    WorkflowStepCompleter.stepFailed(stepId, vplexex);
  }
}
/**
 * Discovers the RP CGs and all the volumes therein. It updates/creates the UnManagedProtectionSet
 * objects and updates (if it exists) the UnManagedVolume objects with RP information needed for
 * ingestion
 *
 * <p>CGs already represented by a managed ProtectionSet in the ViPR database are skipped. CGs
 * without copies or without replication sets are recorded but not mapped. Failures on one CG are
 * logged and do not abort processing of the remaining CGs. Results are accumulated in instance
 * collections (insert/update/delete lists) and flushed at the end via {@code handlePersistence}.
 *
 * @param accessProfile access profile
 * @param dbClient db client
 * @param partitionManager partition manager
 * @throws Exception
 */
public void discoverUnManagedObjects(
    AccessProfile accessProfile, DbClient dbClient, PartitionManager partitionManager)
    throws Exception {
  this.partitionManager = partitionManager;
  log.info("Started discovery of UnManagedVolumes for system {}", accessProfile.getSystemId());
  ProtectionSystem protectionSystem =
      dbClient.queryObject(ProtectionSystem.class, accessProfile.getSystemId());
  if (protectionSystem == null) {
    log.error(
        "Discovery is not run! Protection System not found: " + accessProfile.getSystemId());
    return;
  }
  RecoverPointClient rp = RPHelper.getRecoverPointClient(protectionSystem);

  // Reset the per-run accumulators; this method is not safe for concurrent invocation
  // on the same instance since these are fields.
  unManagedCGsInsert = new ArrayList<UnManagedProtectionSet>();
  unManagedCGsUpdate = new ArrayList<UnManagedProtectionSet>();
  unManagedVolumesToDelete = new ArrayList<UnManagedVolume>();
  unManagedVolumesToUpdateByWwn = new HashMap<String, UnManagedVolume>();
  unManagedCGsReturnedFromProvider = new HashSet<URI>();

  // Get all of the consistency groups (and their volumes) from RP
  Set<GetCGsResponse> cgs = rp.getAllCGs();
  if (cgs == null) {
    log.warn("No CGs were found on protection system: " + protectionSystem.getLabel());
    return;
  }

  // This section of code allows us to cache XIO native GUID to workaround an issue
  // with RP's understanding of XIO volume WWNs (128-bit) and the rest of the world's
  // understanding of the XIO volume WWN once it's exported (64-bit)
  Map<String, String> rpWwnToNativeWwn = new HashMap<String, String>();
  List<URI> storageSystemIds = dbClient.queryByType(StorageSystem.class, true);
  List<String> storageNativeIdPrefixes = new ArrayList<String>();
  if (storageSystemIds != null) {
    Iterator<StorageSystem> storageSystemsItr =
        dbClient.queryIterativeObjects(StorageSystem.class, storageSystemIds);
    while (storageSystemsItr.hasNext()) {
      StorageSystem storageSystem = storageSystemsItr.next();
      // Only XtremIO systems need the native GUID prefix cache (see note above).
      if (storageSystem.getSystemType().equalsIgnoreCase(Type.xtremio.name())) {
        storageNativeIdPrefixes.add(storageSystem.getNativeGuid());
      }
    }
  }

  for (GetCGsResponse cg : cgs) {
    try {
      log.info("Processing returned CG: " + cg.getCgName());
      boolean newCG = false;

      // UnManagedProtectionSet native GUID is protection system GUID + consistency group ID
      String nativeGuid = protectionSystem.getNativeGuid() + Constants.PLUS + cg.getCgId();

      // First check to see if this protection set is already part of our managed DB
      if (null != DiscoveryUtils.checkProtectionSetExistsInDB(dbClient, nativeGuid)) {
        log.info(
            "Protection Set "
                + nativeGuid
                + " already is managed by ViPR, skipping unmanaged discovery");
        continue;
      }

      // Now check to see if the unmanaged CG exists in the database
      UnManagedProtectionSet unManagedProtectionSet =
          DiscoveryUtils.checkUnManagedProtectionSetExistsInDB(dbClient, nativeGuid);
      if (null == unManagedProtectionSet) {
        log.info("Creating new unmanaged protection set for CG: " + cg.getCgName());
        unManagedProtectionSet = new UnManagedProtectionSet();
        unManagedProtectionSet.setId(URIUtil.createId(UnManagedProtectionSet.class));
        unManagedProtectionSet.setNativeGuid(nativeGuid);
        unManagedProtectionSet.setProtectionSystemUri(protectionSystem.getId());
        StringSet protectionId = new StringSet();
        protectionId.add("" + cg.getCgId());
        unManagedProtectionSet.putCGInfo(
            SupportedCGInformation.PROTECTION_ID.toString(), protectionId);
        // Default MP to false until proven otherwise
        unManagedProtectionSet
            .getCGCharacteristics()
            .put(
                UnManagedProtectionSet.SupportedCGCharacteristics.IS_MP.name(),
                Boolean.FALSE.toString());
        newCG = true;
      } else {
        log.info(
            "Found existing unmanaged protection set for CG: "
                + cg.getCgName()
                + ", using "
                + unManagedProtectionSet.getId().toString());
      }

      unManagedCGsReturnedFromProvider.add(unManagedProtectionSet.getId());

      // Update the fields for the CG
      unManagedProtectionSet.setCgName(cg.getCgName());
      unManagedProtectionSet.setLabel(cg.getCgName());

      // Indicate whether the CG is in a healthy state or not to ingest.
      unManagedProtectionSet
          .getCGCharacteristics()
          .put(
              UnManagedProtectionSet.SupportedCGCharacteristics.IS_HEALTHY.name(),
              cg.getCgState().equals(GetCGStateResponse.HEALTHY)
                  ? Boolean.TRUE.toString()
                  : Boolean.FALSE.toString());

      // Indicate whether the CG is sync or async
      unManagedProtectionSet
          .getCGCharacteristics()
          .put(
              UnManagedProtectionSet.SupportedCGCharacteristics.IS_SYNC.name(),
              cg.getCgPolicy().synchronous
                  ? Boolean.TRUE.toString()
                  : Boolean.FALSE.toString());

      // Fill in RPO type and value information
      StringSet rpoType = new StringSet();
      rpoType.add(cg.getCgPolicy().rpoType);
      unManagedProtectionSet.putCGInfo(SupportedCGInformation.RPO_TYPE.toString(), rpoType);

      StringSet rpoValue = new StringSet();
      rpoValue.add(cg.getCgPolicy().rpoValue.toString());
      unManagedProtectionSet.putCGInfo(SupportedCGInformation.RPO_VALUE.toString(), rpoValue);

      // A CG with no copies or no replication sets cannot be mapped; note that the
      // protection set created/updated above is still persisted at the end of the run.
      if (null == cg.getCopies()) {
        log.info("Protection Set " + nativeGuid + " does not contain any copies. Skipping...");
        continue;
      }
      if (null == cg.getRsets()) {
        log.info(
            "Protection Set "
                + nativeGuid
                + " does not contain any replication sets. Skipping...");
        continue;
      }

      // clean up the existing journal and replicationsets info in the unmanaged protection set,
      // so that updated info is populated
      if (!newCG) {
        cleanUpUnManagedResources(
            unManagedProtectionSet, unManagedVolumesToUpdateByWwn, dbClient);
      }

      // Now map UnManagedVolume objects to the journal and rset (sources/targets) and put RP
      // fields in them
      Map<String, String> rpCopyAccessStateMap = new HashMap<String, String>();
      mapCgJournals(
          unManagedProtectionSet,
          cg,
          rpCopyAccessStateMap,
          rpWwnToNativeWwn,
          storageNativeIdPrefixes,
          dbClient);
      mapCgSourceAndTargets(
          unManagedProtectionSet,
          cg,
          rpCopyAccessStateMap,
          rpWwnToNativeWwn,
          storageNativeIdPrefixes,
          dbClient);

      if (newCG) {
        unManagedCGsInsert.add(unManagedProtectionSet);
      } else {
        unManagedCGsUpdate.add(unManagedProtectionSet);
      }
    } catch (Exception ex) {
      // Deliberate best-effort: one bad CG should not stop discovery of the others.
      log.error("Error processing RP CG {}", cg.getCgName(), ex);
    }
  }

  // Flush accumulated inserts/updates/deletes and reconcile stale unmanaged objects.
  handlePersistence(dbClient, false);
  cleanUp(protectionSystem, dbClient);
}
/** * Update Quota Directory for a file share * * <p>NOTE: This is an asynchronous operation. * * @param id the URN of a ViPR Quota directory * @param param File system Quota directory update parameters * @brief Update file system Quota directory * @return Task resource representation * @throws com.emc.storageos.svcs.errorhandling.resources.InternalException */ @POST @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Produces({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON}) @Path("/{id}") @CheckPermission( roles = {Role.TENANT_ADMIN}, acls = {ACL.OWN, ACL.ALL}) public TaskResourceRep updateQuotaDirectory( @PathParam("id") URI id, QuotaDirectoryUpdateParam param) throws InternalException { _log.info("FileService::Update Quota directory Request recieved {}", id); QuotaDirectory quotaDir = queryResource(id); String task = UUID.randomUUID().toString(); if (param.getSecurityStyle() != null) { ArgValidator.checkFieldValueFromEnum( param.getSecurityStyle(), "security_style", EnumSet.allOf(QuotaDirectory.SecurityStyles.class)); } // Get the FileSystem object FileShare fs = queryFileShareResource(quotaDir.getParent().getURI()); ArgValidator.checkFieldNotNull(fs, "filesystem"); // Update the quota directory object to store in ViPR database quotaDir.setOpStatus(new OpStatusMap()); // Set all other optional parameters too. 
if (param.getOpLock() != null) { quotaDir.setOpLock(param.getOpLock()); } if (param.getSecurityStyle() != null) { quotaDir.setSecurityStyle(param.getSecurityStyle()); } if (param.getSize() != null) { Long quotaSize = SizeUtil.translateSize(param.getSize()); if (quotaSize > 0) { ArgValidator.checkFieldMaximum(quotaSize, fs.getCapacity(), " Bytes", "size"); quotaDir.setSize(quotaSize); } } Operation op = new Operation(); op.setResourceType(ResourceOperationTypeEnum.UPDATE_FILE_SYSTEM_QUOTA_DIR); quotaDir.getOpStatus().createTaskStatus(task, op); fs.setOpStatus(new OpStatusMap()); fs.getOpStatus().createTaskStatus(task, op); _dbClient.persistObject(fs); _dbClient.persistObject(quotaDir); // Create an object of type "FileShareQtree" to be passed into the south-bound layers. FileShareQuotaDirectory qt = new FileShareQuotaDirectory(quotaDir); // Now get ready to make calls into the controller StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice()); FileController controller = getController(FileController.class, device.getSystemType()); try { controller.updateQuotaDirectory(device.getId(), qt, fs.getId(), task); } catch (InternalException e) { _log.error("Error during update of Quota Directory {}", e); // treating all controller exceptions as internal error for now. controller // should discriminate between validation problems vs. internal errors throw e; } auditOp( OperationTypeEnum.UPDATE_FILE_SYSTEM_QUOTA_DIR, true, AuditLogManager.AUDITOP_BEGIN, quotaDir.getLabel(), quotaDir.getId().toString(), fs.getId().toString()); fs = _dbClient.queryObject(FileShare.class, fs.getId()); _log.debug( "FileService::Quota directory Before sending response, FS ID : {}, Taks : {} ; Status {}", fs.getOpStatus().get(task), fs.getOpStatus().get(task).getStatus()); return toTask(quotaDir, task, op); }
/**
 * Workflow step: removes volumes from a backend ExportMask on a Cinder array, deleting the whole
 * export when the last ViPR-managed volume is removed and no externally-created volumes remain.
 *
 * <p>If the mask is missing or inactive there is nothing to do and the step succeeds. The
 * {host, array} lock is taken for the duration of the step, and the completer's op id is
 * re-pointed at this step so rollback invocations complete the right workflow step.
 *
 * @param arrayURI URI of the backend StorageSystem
 * @param exportGroupURI URI of the ExportGroup (not used directly in this step)
 * @param exportMaskURI URI of the ExportMask to remove volumes from
 * @param volumes URIs of the volumes to remove
 * @param completer TaskCompleter for the step (op id may be rewritten for rollback)
 * @param stepId workflow step id to report success/failure against
 */
@Override
public void deleteOrRemoveVolumesFromExportMask(
    URI arrayURI,
    URI exportGroupURI,
    URI exportMaskURI,
    List<URI> volumes,
    TaskCompleter completer,
    String stepId) {
  try {
    WorkflowStepCompleter.stepExecuting(stepId);
    StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
    BlockStorageDevice device = _blockController.getDevice(array.getSystemType());
    ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);

    // If the exportMask isn't found, or has been deleted, nothing to do.
    if (exportMask == null || exportMask.getInactive()) {
      _log.info(String.format("ExportMask %s inactive, returning success", exportMaskURI));
      WorkflowStepCompleter.stepSucceded(stepId);
      return;
    }

    // Protect concurrent operations by locking {host, array} dupple.
    // Lock will be released when work flow step completes.
    List<String> lockKeys =
        ControllerLockingUtil.getHostStorageLockKeys(
            _dbClient,
            ExportGroupType.Host,
            StringSetUtil.stringSetToUriList(exportMask.getInitiators()),
            arrayURI);
    getWorkflowService()
        .acquireWorkflowStepLocks(
            stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));

    // Make sure the completer will complete the work flow. This happens on roll back case.
    if (!completer.getOpId().equals(stepId)) {
      completer.setOpId(stepId);
    }

    // Refresh the ExportMask so our view matches the actual state on the array.
    exportMask = refreshExportMask(array, device, exportMask);

    // Determine if we're deleting the last volume: start from what the mask currently
    // holds and subtract the volumes being removed.
    Set<String> remainingVolumes = new HashSet<String>();
    if (exportMask.getVolumes() != null) {
      remainingVolumes.addAll(exportMask.getVolumes().keySet());
    }
    for (URI volume : volumes) {
      remainingVolumes.remove(volume.toString());
    }

    // If it is last volume, delete the ExportMask — but only when there are no
    // "existing" volumes (volumes in the mask that ViPR does not manage).
    if (remainingVolumes.isEmpty()
        && (exportMask.getExistingVolumes() == null
            || exportMask.getExistingVolumes().isEmpty())) {
      _log.debug(
          String.format(
              "Calling doExportGroupDelete on the device %s", array.getId().toString()));
      device.doExportGroupDelete(array, exportMask, completer);
    } else {
      _log.debug(
          String.format(
              "Calling doExportRemoveVolumes on the device %s", array.getId().toString()));
      device.doExportRemoveVolumes(array, exportMask, volumes, completer);
    }
  } catch (Exception ex) {
    _log.error("Failed to delete or remove volumes to export mask for cinder: ", ex);
    VPlexApiException vplexex =
        DeviceControllerExceptions.vplex.addStepsForDeleteVolumesFailed(ex);
    WorkflowStepCompleter.stepFailed(stepId, vplexex);
  }
}
/**
 * Workflow step: creates the backend ExportMask on a Cinder array, or adds volumes to it when the
 * mask already has volumes.
 *
 * <p>On the create path the mask's volume map is updated and persisted in the database before the
 * export is created on the hardware. If the mask is missing or inactive the step fails and the
 * user is asked to retry.
 *
 * @param arrayURI URI of the backend StorageSystem
 * @param exportGroupURI URI of the ExportGroup (not used directly in this step)
 * @param exportMaskURI URI of the ExportMask to create or add to
 * @param volumeMap map of volume URI to requested HLU
 * @param completer TaskCompleter for the step
 * @param stepId workflow step id to report success/failure against
 */
@Override
public void createOrAddVolumesToExportMask(
    URI arrayURI,
    URI exportGroupURI,
    URI exportMaskURI,
    Map<URI, Integer> volumeMap,
    TaskCompleter completer,
    String stepId) {
  _log.debug("START - createOrAddVolumesToExportMask");
  try {
    WorkflowStepCompleter.stepExecuting(stepId);
    StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
    ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);

    // If the exportMask isn't found, or has been deleted, fail, ask user to retry.
    if (exportMask == null || exportMask.getInactive()) {
      _log.info(String.format("ExportMask %s deleted or inactive, failing", exportMaskURI));
      ServiceError svcerr =
          VPlexApiException.errors.createBackendExportMaskDeleted(
              exportMaskURI.toString(), arrayURI.toString());
      WorkflowStepCompleter.stepFailed(stepId, svcerr);
      return;
    }

    // Protect concurrent operations by locking {host, array} dupple.
    // Lock will be released when work flow step completes.
    List<String> lockKeys =
        ControllerLockingUtil.getHostStorageLockKeys(
            _dbClient,
            ExportGroupType.Host,
            StringSetUtil.stringSetToUriList(exportMask.getInitiators()),
            arrayURI);
    getWorkflowService()
        .acquireWorkflowStepLocks(
            stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));

    // Refresh the ExportMask
    BlockStorageDevice device = _blockController.getDevice(array.getSystemType());

    if (!exportMask.hasAnyVolumes()) {
      /*
       * We are creating this ExportMask on the hardware! (Maybe not
       * the first time though...)
       *
       * Fetch the Initiators
       */
      List<Initiator> initiators = new ArrayList<Initiator>();
      for (String initiatorId : exportMask.getInitiators()) {
        Initiator initiator = _dbClient.queryObject(Initiator.class, URI.create(initiatorId));
        if (initiator != null) {
          initiators.add(initiator);
        }
      }

      // Fetch the targets
      List<URI> targets = new ArrayList<URI>();
      for (String targetId : exportMask.getStoragePorts()) {
        targets.add(URI.create(targetId));
      }

      // Record the requested volumes/HLUs on the mask and persist before touching
      // the hardware, so the DB reflects the intended state.
      if (volumeMap != null) {
        for (URI volume : volumeMap.keySet()) {
          exportMask.addVolume(volume, volumeMap.get(volume));
        }
      }
      _dbClient.persistObject(exportMask);

      _log.debug(
          String.format(
              "Calling doExportGroupCreate on the device %s", array.getId().toString()));
      device.doExportGroupCreate(array, exportMask, volumeMap, initiators, targets, completer);
    } else {
      _log.debug(
          String.format("Calling doExportAddVolumes on the device %s", array.getId().toString()));
      device.doExportAddVolumes(array, exportMask, volumeMap, completer);
    }
  } catch (Exception ex) {
    _log.error("Failed to create or add volumes to export mask for cinder: ", ex);
    VPlexApiException vplexex =
        DeviceControllerExceptions.vplex.addStepsForCreateVolumesFailed(ex);
    WorkflowStepCompleter.stepFailed(stepId, vplexex);
  }

  _log.debug("END - createOrAddVolumesToExportMask");
}
/** * create StorageVolume Info Object * * @param unManagedVolume * @param volumeInstance * @param unManagedVolumeNativeGuid * @param pool * @return */ private UnManagedVolume createUnManagedVolume( UnManagedVolume unManagedVolume, CIMInstance volumeInstance, String unManagedVolumeNativeGuid, StoragePool pool, StorageSystem system, String volumeNativeGuid, // to make the code uniform, passed in all the below sets as // arguments Map<String, VolHostIOObject> exportedVolumes, Set<String> existingVolumesInCG, Map<String, RemoteMirrorObject> volumeToRAGroupMap, Map<String, LocalReplicaObject> volumeToLocalReplicaMap, Set<String> poolSupportedSLONames, Map<String, Object> keyMap) { _logger.info("Process UnManagedVolume {}", unManagedVolumeNativeGuid); try { boolean created = false; if (null == unManagedVolume) { unManagedVolume = new UnManagedVolume(); unManagedVolume.setId(URIUtil.createId(UnManagedVolume.class)); unManagedVolume.setNativeGuid(unManagedVolumeNativeGuid); unManagedVolume.setStorageSystemUri(system.getId()); created = true; } // reset the auto-tiering info for unmanaged volumes already present // in db // so that the tiering info is updated correctly later if (!created) { unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.AUTO_TIERING_POLICIES.toString(), ""); unManagedVolume.putVolumeCharacterstics( SupportedVolumeCharacterstics.IS_AUTO_TIERING_ENABLED.toString(), "false"); // reset local replica info unManagedVolume.putVolumeCharacterstics( SupportedVolumeCharacterstics.IS_FULL_COPY.name(), FALSE); unManagedVolume.putVolumeCharacterstics( SupportedVolumeCharacterstics.IS_LOCAL_MIRROR.name(), FALSE); unManagedVolume.putVolumeCharacterstics( SupportedVolumeCharacterstics.IS_SNAP_SHOT.name(), FALSE); unManagedVolume.putVolumeCharacterstics( SupportedVolumeCharacterstics.HAS_REPLICAS.name(), FALSE); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.REPLICA_STATE.name(), new StringSet()); unManagedVolume 
.getVolumeInformation() .put(SupportedVolumeInformation.LOCAL_REPLICA_SOURCE_VOLUME.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.SYNC_STATE.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.SYNC_TYPE.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.SYNCHRONIZED_INSTANCE.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.IS_SYNC_ACTIVE.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.NEEDS_COPY_TO_TARGET.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.TECHNOLOGY_TYPE.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.SETTINGS_INSTANCE.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.FULL_COPIES.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.MIRRORS.name(), new StringSet()); unManagedVolume .getVolumeInformation() .put(SupportedVolumeInformation.SNAPSHOTS.name(), new StringSet()); } Map<String, StringSet> unManagedVolumeInformation = new HashMap<String, StringSet>(); Map<String, String> unManagedVolumeCharacteristics = new HashMap<String, String>(); if (null != system) { StringSet systemTypes = new StringSet(); systemTypes.add(system.getSystemType()); unManagedVolumeInformation.put( SupportedVolumeInformation.SYSTEM_TYPE.toString(), systemTypes); } if (exportedVolumes != null && exportedVolumes.containsKey(volumeNativeGuid)) { VolHostIOObject obj = exportedVolumes.get(volumeNativeGuid); if (null != obj) { StringSet bwValues = new StringSet(); bwValues.add(obj.getHostIoBw()); if (unManagedVolumeInformation.get( SupportedVolumeInformation.EMC_MAXIMUM_IO_BANDWIDTH.toString()) == null) { unManagedVolumeInformation.put( 
SupportedVolumeInformation.EMC_MAXIMUM_IO_BANDWIDTH.toString(), bwValues); } else { unManagedVolumeInformation .get(SupportedVolumeInformation.EMC_MAXIMUM_IO_BANDWIDTH.toString()) .replace(bwValues); } StringSet iopsVal = new StringSet(); iopsVal.add(obj.getHostIops()); if (unManagedVolumeInformation.get(SupportedVolumeInformation.EMC_MAXIMUM_IOPS.toString()) == null) { unManagedVolumeInformation.put( SupportedVolumeInformation.EMC_MAXIMUM_IOPS.toString(), iopsVal); } else { unManagedVolumeInformation .get(SupportedVolumeInformation.EMC_MAXIMUM_IOPS.toString()) .replace(iopsVal); } } unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_VOLUME_EXPORTED.toString(), TRUE); } else { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_VOLUME_EXPORTED.toString(), FALSE); StringSet bwValues = new StringSet(); bwValues.add("0"); if (unManagedVolumeInformation.get( SupportedVolumeInformation.EMC_MAXIMUM_IO_BANDWIDTH.toString()) == null) { unManagedVolumeInformation.put( SupportedVolumeInformation.EMC_MAXIMUM_IO_BANDWIDTH.toString(), bwValues); } else { unManagedVolumeInformation .get(SupportedVolumeInformation.EMC_MAXIMUM_IO_BANDWIDTH.toString()) .replace(bwValues); } StringSet iopsVal = new StringSet(); iopsVal.add("0"); if (unManagedVolumeInformation.get(SupportedVolumeInformation.EMC_MAXIMUM_IOPS.toString()) == null) { unManagedVolumeInformation.put( SupportedVolumeInformation.EMC_MAXIMUM_IOPS.toString(), iopsVal); } else { unManagedVolumeInformation .get(SupportedVolumeInformation.EMC_MAXIMUM_IOPS.toString()) .replace(iopsVal); } } // Set SLOName only for VMAX3 exported volumes if (system.checkIfVmax3()) { // If there are no slonames defined for a pool or no slo // set for a volume, update the tiering_enabled to false. 
if (poolSupportedSLONames.isEmpty() || !keyMap.containsKey(Constants.VOLUMES_WITH_SLOS)) { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_AUTO_TIERING_ENABLED.toString(), Boolean.FALSE.toString()); } else { Map<String, String> volumesWithSLO = (Map<String, String>) keyMap.get(Constants.VOLUMES_WITH_SLOS); if (volumesWithSLO.containsKey(volumeNativeGuid)) { String sloName = volumesWithSLO.get(volumeNativeGuid); _logger.debug("formattedSLOName: {}", sloName); updateSLOPolicies( poolSupportedSLONames, unManagedVolumeInformation, unManagedVolumeCharacteristics, sloName); } else { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_AUTO_TIERING_ENABLED.toString(), Boolean.FALSE.toString()); } } } if (existingVolumesInCG != null && existingVolumesInCG.contains(volumeNativeGuid)) { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_VOLUME_ADDED_TO_CONSISTENCYGROUP.toString(), TRUE); } else { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_VOLUME_ADDED_TO_CONSISTENCYGROUP.toString(), FALSE); } Object raidLevelObj; boolean isIngestable; String isBound; String isThinlyProvisioned; String isMetaVolume; String allocCapacity; // Set the attributes for new smis version. 
if (keyMap.containsKey(Constants.IS_NEW_SMIS_PROVIDER) && Boolean.valueOf(keyMap.get(Constants.IS_NEW_SMIS_PROVIDER).toString())) { unManagedVolume.setLabel(getCIMPropertyValue(volumeInstance, NAME)); raidLevelObj = volumeInstance.getPropertyValue( SupportedVolumeInformation.RAID_LEVEL.getAlternateKey()); isBound = getCIMPropertyValue( volumeInstance, SupportedVolumeCharacterstics.IS_BOUND.getAlterCharacterstic()); isIngestable = isVolumeIngestable(volumeInstance, isBound, USAGE); isThinlyProvisioned = getCIMPropertyValue(volumeInstance, THINLY_PROVISIONED); isMetaVolume = getCIMPropertyValue( volumeInstance, SupportedVolumeCharacterstics.IS_METAVOLUME.getAlterCharacterstic()); allocCapacity = getAllocatedCapacity(volumeInstance, _volumeToSpaceConsumedMap, system.checkIfVmax3()); } else { unManagedVolume.setLabel(getCIMPropertyValue(volumeInstance, SVELEMENT_NAME)); isBound = getCIMPropertyValue( volumeInstance, SupportedVolumeCharacterstics.IS_BOUND.getCharacterstic()); raidLevelObj = volumeInstance.getPropertyValue(SupportedVolumeInformation.RAID_LEVEL.getInfoKey()); isIngestable = isVolumeIngestable(volumeInstance, isBound, SVUSAGE); isThinlyProvisioned = getCIMPropertyValue(volumeInstance, EMC_THINLY_PROVISIONED); isMetaVolume = getCIMPropertyValue( volumeInstance, SupportedVolumeCharacterstics.IS_METAVOLUME.getCharacterstic()); allocCapacity = getCIMPropertyValue(volumeInstance, EMC_ALLOCATED_CAPACITY); } if (null != raidLevelObj) { StringSet raidLevels = new StringSet(); raidLevels.add(raidLevelObj.toString()); unManagedVolumeInformation.put( SupportedVolumeInformation.RAID_LEVEL.toString(), raidLevels); } if (null != isBound) { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_BOUND.toString(), isBound); } if (null != isThinlyProvisioned) { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_THINLY_PROVISIONED.toString(), isThinlyProvisioned); } if (null != isMetaVolume) { unManagedVolumeCharacteristics.put( 
SupportedVolumeCharacterstics.IS_METAVOLUME.toString(), isMetaVolume); } // only Volumes with Usage 2 can be ingestable, other volumes // [SAVE,VAULT...] apart from replicas have usage other than 2 // Volumes which are set EMCIsBound as false cannot be ingested if (isIngestable) { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_INGESTABLE.toString(), TRUE); } else { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_INGESTABLE.toString(), FALSE); } if (volumeToRAGroupMap.containsKey(unManagedVolume.getNativeGuid())) { RemoteMirrorObject rmObj = volumeToRAGroupMap.get(unManagedVolume.getNativeGuid()); _logger.info("Found RA Object {}", rmObj.toString()); if (RemoteMirrorObject.Types.SOURCE.toString().equalsIgnoreCase(rmObj.getType())) { _logger.info("Found Source, updating targets {}", rmObj.getTargetVolumenativeGuids()); // setting target Volumes if (unManagedVolumeInformation.get(SupportedVolumeInformation.REMOTE_MIRRORS.toString()) == null) { unManagedVolumeInformation.put( SupportedVolumeInformation.REMOTE_MIRRORS.toString(), rmObj.getTargetVolumenativeGuids()); } else { if (null == rmObj.getTargetVolumenativeGuids() || rmObj.getTargetVolumenativeGuids().size() == 0) { unManagedVolumeInformation .get(SupportedVolumeInformation.REMOTE_MIRRORS.toString()) .clear(); } else { unManagedVolumeInformation .get(SupportedVolumeInformation.REMOTE_MIRRORS.toString()) .replace(rmObj.getTargetVolumenativeGuids()); } } } else if (RemoteMirrorObject.Types.TARGET.toString().equalsIgnoreCase(rmObj.getType())) { _logger.info( "Found Target {}, updating copyMode {}, RA Group", unManagedVolume.getNativeGuid(), rmObj.getCopyMode()); // setting srdfParent StringSet parentVolume = new StringSet(); parentVolume.add(rmObj.getSourceVolumeNativeGuid()); unManagedVolumeInformation.put( SupportedVolumeInformation.REMOTE_MIRROR_SOURCE_VOLUME.toString(), parentVolume); // setting RAGroup StringSet raGroup = new StringSet(); 
raGroup.add(rmObj.getRaGroupUri().toString()); unManagedVolumeInformation.put( SupportedVolumeInformation.REMOTE_MIRROR_RDF_GROUP.toString(), raGroup); } // setting Copy Modes StringSet copyModes = new StringSet(); copyModes.add(rmObj.getCopyMode()); if (unManagedVolumeInformation.get(SupportedVolumeInformation.REMOTE_COPY_MODE.toString()) == null) { unManagedVolumeInformation.put( SupportedVolumeInformation.REMOTE_COPY_MODE.toString(), copyModes); } else { unManagedVolumeInformation .get(SupportedVolumeInformation.REMOTE_COPY_MODE.toString()) .replace(copyModes); } // setting Volume Type StringSet volumeType = new StringSet(); volumeType.add(rmObj.getType()); unManagedVolumeInformation.put( SupportedVolumeInformation.REMOTE_VOLUME_TYPE.toString(), volumeType); unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.REMOTE_MIRRORING.toString(), TRUE); } else { unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.REMOTE_MIRRORING.toString(), FALSE); } // handle clones, local mirrors and snapshots boolean isLocalReplica = false; if (volumeToLocalReplicaMap.containsKey(unManagedVolume.getNativeGuid())) { _logger.info("Found in localReplicaMap {}", unManagedVolume.getNativeGuid()); LocalReplicaObject lrObj = volumeToLocalReplicaMap.get(unManagedVolume.getNativeGuid()); isLocalReplica = lrObj.isReplica(); // setting targets StringSet fullCopies = lrObj.getFullCopies(); if (fullCopies != null && !fullCopies.isEmpty()) { unManagedVolumeInformation.put(SupportedVolumeInformation.FULL_COPIES.name(), fullCopies); } StringSet mirrors = lrObj.getMirrors(); if (mirrors != null && !mirrors.isEmpty()) { unManagedVolumeInformation.put(SupportedVolumeInformation.MIRRORS.name(), mirrors); } StringSet snapshots = lrObj.getSnapshots(); if (snapshots != null && !snapshots.isEmpty()) { unManagedVolumeInformation.put(SupportedVolumeInformation.SNAPSHOTS.name(), snapshots); } if (lrObj.hasReplica()) { // set the HAS_REPLICAS property 
unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.HAS_REPLICAS.name(), TRUE); } if (LocalReplicaObject.Types.FullCopy.equals(lrObj.getType())) { _logger.info("Found Clone {}", unManagedVolume.getNativeGuid()); // setting clone specific info unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_FULL_COPY.name(), TRUE); StringSet sourceVolume = new StringSet(); sourceVolume.add(lrObj.getSourceNativeGuid()); unManagedVolumeInformation.put( SupportedVolumeInformation.LOCAL_REPLICA_SOURCE_VOLUME.name(), sourceVolume); StringSet isSyncActive = new StringSet(); isSyncActive.add(new Boolean(lrObj.isSyncActive()).toString()); unManagedVolumeInformation.put( SupportedVolumeInformation.IS_SYNC_ACTIVE.name(), isSyncActive); StringSet replicaState = new StringSet(); replicaState.add(lrObj.getReplicaState()); unManagedVolumeInformation.put( SupportedVolumeInformation.REPLICA_STATE.name(), replicaState); } else if (LocalReplicaObject.Types.BlockMirror.equals(lrObj.getType())) { _logger.info("Found Local Mirror {}", unManagedVolume.getNativeGuid()); // setting local mirror specific info unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_LOCAL_MIRROR.name(), TRUE); StringSet sourceVolume = new StringSet(); sourceVolume.add(lrObj.getSourceNativeGuid()); unManagedVolumeInformation.put( SupportedVolumeInformation.LOCAL_REPLICA_SOURCE_VOLUME.name(), sourceVolume); StringSet syncState = new StringSet(); syncState.add(lrObj.getSyncState()); unManagedVolumeInformation.put(SupportedVolumeInformation.SYNC_STATE.name(), syncState); StringSet syncType = new StringSet(); syncType.add(lrObj.getSyncType()); unManagedVolumeInformation.put(SupportedVolumeInformation.SYNC_TYPE.name(), syncType); String syncedInst = lrObj.getSynchronizedInstance(); if (syncedInst != null) { StringSet synchronizedInstance = new StringSet(); synchronizedInstance.add(syncedInst); unManagedVolumeInformation.put( SupportedVolumeInformation.SYNCHRONIZED_INSTANCE.name(), 
synchronizedInstance); } } else if (LocalReplicaObject.Types.BlockSnapshot.equals(lrObj.getType())) { _logger.info("Found Snapshot {}", unManagedVolume.getNativeGuid()); // setting snapshot specific info unManagedVolumeCharacteristics.put( SupportedVolumeCharacterstics.IS_SNAP_SHOT.name(), TRUE); StringSet sourceVolume = new StringSet(); sourceVolume.add(lrObj.getSourceNativeGuid()); unManagedVolumeInformation.put( SupportedVolumeInformation.LOCAL_REPLICA_SOURCE_VOLUME.name(), sourceVolume); StringSet isSyncActive = new StringSet(); isSyncActive.add(new Boolean(lrObj.isSyncActive()).toString()); unManagedVolumeInformation.put( SupportedVolumeInformation.IS_SYNC_ACTIVE.name(), isSyncActive); StringSet needsCopyToTarget = new StringSet(); needsCopyToTarget.add(new Boolean(lrObj.isNeedsCopyToTarget()).toString()); unManagedVolumeInformation.put( SupportedVolumeInformation.NEEDS_COPY_TO_TARGET.name(), needsCopyToTarget); StringSet technologyType = new StringSet(); technologyType.add(lrObj.getTechnologyType()); unManagedVolumeInformation.put( SupportedVolumeInformation.TECHNOLOGY_TYPE.name(), technologyType); String settingsInst = lrObj.getSettingsInstance(); if (settingsInst != null) { StringSet settingsInstance = new StringSet(); settingsInstance.add(settingsInst); unManagedVolumeInformation.put( SupportedVolumeInformation.SETTINGS_INSTANCE.name(), settingsInstance); } } } // set volume's isSyncActive if (!isLocalReplica) { StringSet isSyncActive = new StringSet(); isSyncActive.add(TRUE); unManagedVolumeInformation.put( SupportedVolumeInformation.IS_SYNC_ACTIVE.name(), isSyncActive); } if (null != pool) { unManagedVolume.setStoragePoolUri(pool.getId()); StringSet pools = new StringSet(); pools.add(pool.getId().toString()); unManagedVolumeInformation.put(SupportedVolumeInformation.STORAGE_POOL.toString(), pools); StringSet driveTypes = pool.getSupportedDriveTypes(); if (null != driveTypes) { unManagedVolumeInformation.put( 
SupportedVolumeInformation.DISK_TECHNOLOGY.toString(), driveTypes); } StringSet matchedVPools = DiscoveryUtils.getMatchedVirtualPoolsForPool( _dbClient, pool.getId(), unManagedVolumeCharacteristics.get( SupportedVolumeCharacterstics.IS_THINLY_PROVISIONED.toString())); if (unManagedVolumeInformation.containsKey( SupportedVolumeInformation.SUPPORTED_VPOOL_LIST.toString())) { _logger.debug("Matched Pools :" + Joiner.on("\t").join(matchedVPools)); if (null != matchedVPools && matchedVPools.size() == 0) { // replace with empty string set doesn't work, hence // added explicit code to remove all unManagedVolumeInformation .get(SupportedVolumeInformation.SUPPORTED_VPOOL_LIST.toString()) .clear(); } else { // replace with new StringSet unManagedVolumeInformation .get(SupportedVolumeInformation.SUPPORTED_VPOOL_LIST.toString()) .replace(matchedVPools); _logger.info( "Replaced Pools :" + Joiner.on("\t") .join( unManagedVolumeInformation.get( SupportedVolumeInformation.SUPPORTED_VPOOL_LIST.toString()))); } } else { unManagedVolumeInformation.put( SupportedVolumeInformation.SUPPORTED_VPOOL_LIST.toString(), matchedVPools); } } // set allocated capacity if (allocCapacity != null) { StringSet allocCapacitySet = new StringSet(); allocCapacitySet.add(allocCapacity); unManagedVolumeInformation.put( SupportedVolumeInformation.ALLOCATED_CAPACITY.toString(), allocCapacitySet); } StringSet provCapacity = new StringSet(); provCapacity.add(String.valueOf(returnProvisionedCapacity(volumeInstance, keyMap))); unManagedVolumeInformation.put( SupportedVolumeInformation.PROVISIONED_CAPACITY.toString(), provCapacity); injectVolumeInformation(unManagedVolume, volumeInstance, unManagedVolumeInformation); injectVolumeCharacterstics(unManagedVolume, volumeInstance, unManagedVolumeCharacteristics); unManagedVolume.getUnmanagedExportMasks().clear(); unManagedVolume.getInitiatorUris().clear(); unManagedVolume.getInitiatorNetworkIds().clear(); if (created) { _unManagedVolumesInsert.add(unManagedVolume); } 
else { _unManagedVolumesUpdate.add(unManagedVolume); } } catch (Exception e) { _logger.error("Exception: ", e); } return unManagedVolume; }
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException { final Iterator<CIMInstance> it = (Iterator<CIMInstance>) resultObj; profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE); try { _newPoolList = new ArrayList<StoragePool>(); _updatePoolList = new ArrayList<StoragePool>(); _dbClient = (DbClient) keyMap.get(Constants.dbClient); _cimClient = (WBEMClient) keyMap.get(Constants._cimClient); _coordinator = (CoordinatorClient) keyMap.get(Constants.COORDINATOR_CLIENT); _eventManager = (RecordableEventManager) keyMap.get(Constants.EVENT_MANAGER); _logger.info("StoragePoolProcessor --- event manager: " + _eventManager); StorageSystem device = getStorageSystem(_dbClient, profile.getSystemId()); if (SupportedProvisioningTypes.NONE .toString() .equalsIgnoreCase(device.getSupportedProvisioningType())) { _logger.info( "Storage System doesn't support volume creations :" + device.getSerialNumber()); return; } Set<String> protocols = (Set<String>) keyMap.get(Constants.PROTOCOLS); Map<URI, StoragePool> poolsToMatchWithVpool = (Map<URI, StoragePool>) keyMap.get(Constants.MODIFIED_STORAGEPOOLS); while (it.hasNext()) { CIMInstance poolInstance = null; try { poolInstance = it.next(); // Supporting both thick and thin pools String[] poolClassNameAndSupportedVolumeTypes = determinePoolClassNameAndSupportedVolumeTypes(poolInstance, device); if (null != poolClassNameAndSupportedVolumeTypes) { String instanceID = getCIMPropertyValue(poolInstance, Constants.INSTANCEID); addPath(keyMap, operation.getResult(), poolInstance.getObjectPath()); StoragePool pool = checkStoragePoolExistsInDB(getNativeIDFromInstance(instanceID), _dbClient, device); createStoragePool( pool, poolInstance, profile, poolClassNameAndSupportedVolumeTypes[0], poolClassNameAndSupportedVolumeTypes[1], protocols, poolsToMatchWithVpool, device); if (DiscoveredDataObject.Type.vnxblock 
.toString() .equalsIgnoreCase(device.getSystemType())) { addPath(keyMap, Constants.VNXPOOLS, poolInstance.getObjectPath()); } if (DiscoveredDataObject.Type.vmax .toString() .equalsIgnoreCase(device.getSystemType())) { addPath(keyMap, Constants.VMAXPOOLS, poolInstance.getObjectPath()); if (!device.checkIfVmax3()) { addPath(keyMap, Constants.VMAX2POOLS, poolInstance.getObjectPath()); } } // This approach deviates from the existing built plugin framework for plugin // Discovery // To follow the existing pattern, we need to have different SMI-S calls // 1st to get Device StoragePools alone ,and 2nd to get Thin Pools. // Its a tradeoff between whether to go with the current plugin design or // reduce the number of calls to SMI Provider. // I chose the 2nd option. if (!poolClassNameAndSupportedVolumeTypes[0].contains(DEVICE_STORAGE_POOL)) { addPath(keyMap, Constants.THINPOOLS, poolInstance.getObjectPath()); } addPath(keyMap, Constants.DEVICEANDTHINPOOLS, poolInstance.getObjectPath()); } else { _logger.debug( "Skipping Pools other than Unified & Virtual & Device : {}", poolInstance.getObjectPath().toString()); } } catch (Exception e) { _logger.warn( "StoragePool Discovery failed for {}", getCIMPropertyValue(poolInstance, Constants.INSTANCEID), e); } } _dbClient.createObject(_newPoolList); _dbClient.updateAndReindexObject(_updatePoolList); // find the pools not visible in this discovery List<StoragePool> discoveredPools = new ArrayList<StoragePool>(_newPoolList); discoveredPools.addAll(_updatePoolList); List<StoragePool> notVisiblePools = DiscoveryUtils.checkStoragePoolsNotVisible(discoveredPools, _dbClient, device.getId()); for (StoragePool notVisiblePool : notVisiblePools) { poolsToMatchWithVpool.put(notVisiblePool.getId(), notVisiblePool); } // If any storage ports on the storage system are in a transport // zone, there is an implicit connection to the transport zone // varray. We need to add these implicit varray // connections for the new storage pool. 
StoragePoolAssociationHelper.setStoragePoolVarrays(device.getId(), _newPoolList, _dbClient); } catch (Exception e) { _logger.error("StoragePool Discovery failed --> {}", getMessage(e)); } finally { _newPoolList = null; _updatePoolList = null; } }