/**
 * Returns a Map of networkURI => [set of endpoints connected].
 *
 * @param dbClient a reference to the database client
 * @param initiators the initiators to group by network
 * @return a map of network URI to the set of initiator endpoints in that network
 */
public static Map<URI, Set<String>> getNetworkToInitiators(
        DbClient dbClient, List<Initiator> initiators) {
    Map<URI, Set<String>> networkToEndPoints = new HashMap<URI, Set<String>>();
    for (Initiator initiator : initiators) {
        Set<NetworkLite> networkLites =
                getEndpointAllNetworksLite(initiator.getInitiatorPort(), dbClient);
        if (null == networkLites || networkLites.isEmpty()) {
            _log.info(
                    String.format(
                            "getNetworkToInitiators(%s) -- Initiator is not associated with any network",
                            initiator.getInitiatorPort()));
        } else {
            for (NetworkLite networkLite : networkLites) {
                URI networkUri = networkLite.getId();
                _log.info(
                        String.format(
                                "Adding initiator, network (%s, %s) to map",
                                initiator.getInitiatorPort(), networkLite.getLabel()));
                Set<String> endPoints = networkToEndPoints.get(networkUri);
                if (null == endPoints) {
                    endPoints = new HashSet<String>();
                }
                endPoints.add(initiator.getInitiatorPort());
                networkToEndPoints.put(networkUri, endPoints);
            }
        }
    }
    return networkToEndPoints;
}
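// Illustrative usage of getNetworkToInitiators (a minimal sketch added for documentation;
// "logInitiatorDistribution" is a hypothetical helper, not part of the original source):
private static void logInitiatorDistribution(DbClient dbClient, List<Initiator> initiators) {
    Map<URI, Set<String>> netToEndpoints = getNetworkToInitiators(dbClient, initiators);
    for (Map.Entry<URI, Set<String>> entry : netToEndpoints.entrySet()) {
        // Each value holds the initiator ports (WWPNs/IQNs) registered in that network.
        _log.info(String.format("Network %s -> endpoints %s", entry.getKey(), entry.getValue()));
    }
}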
private String getHostNameFromInitiators(List<Initiator> initiators) {
    String host = null;
    for (Initiator initiator : initiators) {
        host = initiator.getHostName();
        break; // all Initiators given belong to the same host
    }
    return host;
}
private Map<String, URI> getWWNvsURIFCInitiatorsMap(List<Initiator> fcInitiatorList) {
    log.debug("START - getWWNvsURIFCInitiatorsMap");
    Map<String, URI> initiatorsWWNVsURI = new HashMap<String, URI>();
    for (Initiator init : fcInitiatorList) {
        String wwnNoColon = Initiator.normalizePort(init.getInitiatorPort());
        URI uri = init.getId();
        initiatorsWWNVsURI.put(wwnNoColon, uri);
    }
    log.debug("END - getWWNvsURIFCInitiatorsMap");
    return initiatorsWWNVsURI;
}
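// Worked example (illustrative, based on the variable name "wwnNoColon" and how the map is
// consumed elsewhere in this class; assumes Initiator.normalizePort strips the colons):
// an initiator port of "10:00:00:00:C9:2F:11:22" would be stored under the key
// "10000000C92F1122", mapped to that initiator's URI.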
private void splitInitiatorsByProtocol(
        List<Initiator> initiatorList,
        List<Initiator> iSCSIInitiators,
        List<Initiator> fcInitiators) {
    for (Initiator initiator : initiatorList) {
        if (Protocol.iSCSI.name().equalsIgnoreCase(initiator.getProtocol())) {
            iSCSIInitiators.add(initiator);
        } else if (Protocol.FC.name().equalsIgnoreCase(initiator.getProtocol())) {
            fcInitiators.add(initiator);
        }
    }
}
/**
 * Detaches volumes from initiators.
 *
 * @param storage the storage
 * @param volumes the volumes
 * @param initiators the initiators
 * @throws Exception the exception
 */
private void detachVolumesFromInitiators(
        StorageSystem storage, List<Volume> volumes, List<Initiator> initiators)
        throws Exception {
    CinderEndPointInfo ep =
            CinderUtils.getCinderEndPoint(storage.getActiveProviderURI(), dbClient);
    log.debug(
            "Getting the cinder API for the provider with id {}",
            storage.getActiveProviderURI());
    CinderApi cinderApi = cinderApiFactory.getApi(storage.getActiveProviderURI(), ep);

    List<Initiator> iSCSIInitiators = new ArrayList<Initiator>();
    List<Initiator> fcInitiators = new ArrayList<Initiator>();
    splitInitiatorsByProtocol(initiators, iSCSIInitiators, fcInitiators);
    String host = getHostNameFromInitiators(initiators);

    Map<String, String[]> mapSettingVsValues = getFCInitiatorsArray(fcInitiators);
    String[] fcInitiatorsWwpns = mapSettingVsValues.get(WWPNS);
    String[] fcInitiatorsWwnns = mapSettingVsValues.get(WWNNS);

    for (Volume volume : volumes) {
        // cinder generated volume ID
        String volumeId = volume.getNativeId();

        // for iSCSI
        for (Initiator initiator : iSCSIInitiators) {
            String initiatorPort = initiator.getInitiatorPort();
            log.debug(
                    String.format(
                            "Detaching volume %s ( %s ) from initiator %s on Openstack cinder node",
                            volumeId, volume.getId(), initiatorPort));
            cinderApi.detachVolume(volumeId, initiatorPort, null, null, host);

            // TODO : Do not use Job to poll status till we figure out how
            // to get detach status.
            /*
             * CinderJob detachJob = new CinderDetachVolumeJob(volumeId,
             * volume.getLabel(), storage.getId(),
             * CinderConstants.ComponentType.volume.name(), ep,
             * taskCompleter); ControllerServiceImpl.enqueueJob(new
             * QueueJob(detachJob));
             */
        }

        // for FC
        if (fcInitiatorsWwpns.length > 0) {
            log.debug(
                    String.format(
                            "Detaching volume %s ( %s ) from initiators %s on Openstack cinder node",
                            volumeId, volume.getId(), Arrays.toString(fcInitiatorsWwpns)));
            cinderApi.detachVolume(volumeId, null, fcInitiatorsWwpns, fcInitiatorsWwnns, host);
        }

        // If ITLs are added, remove them
        removeITLsFromVolume(volume);
    }
}
/**
 * Stores the Initiator-Target-LUN (ITL) mappings as extensions in the volume objects.
 *
 * @param volumeToTargetLunMap map of volume URI to the host LUN id on which the volume is accessible
 * @param exportMask the export mask whose zoning map supplies the initiator/target pairs
 */
private void storeITLMappingInVolume(
        Map<URI, Integer> volumeToTargetLunMap, ExportMask exportMask) {
    log.debug("START - storeITLMappingInVolume");
    for (URI volumeURI : volumeToTargetLunMap.keySet()) {
        Integer targetLunId = volumeToTargetLunMap.get(volumeURI);
        Volume volume = dbClient.queryObject(Volume.class, volumeURI);
        StringSetMap zoningMap = exportMask.getZoningMap();
        Set<String> zoningMapKeys = zoningMap.keySet();
        int initiatorIndex = 0;
        for (String initiator : zoningMapKeys) {
            Initiator initiatorObj =
                    dbClient.queryObject(Initiator.class, URI.create(initiator));
            String initiatorWWPN =
                    initiatorObj.getInitiatorPort().replaceAll(CinderConstants.COLON, "");
            StringSet targetPorts = zoningMap.get(initiator);
            int targetIndex = 0;
            for (String target : targetPorts) {
                StoragePort targetPort =
                        dbClient.queryObject(StoragePort.class, URI.create(target));
                String targetPortWWN =
                        targetPort.getPortNetworkId().replaceAll(CinderConstants.COLON, "");

                // Format is <InitiatorWWPN>-<TargetWWPN>-<LunId>
                String itl = initiatorWWPN + "-" + targetPortWWN + "-" + String.valueOf(targetLunId);

                // ITL keys are formed as ITL-00, ITL-01, ITL-10, ITL-11 and so on
                String itlKey =
                        CinderConstants.PREFIX_ITL
                                + String.valueOf(initiatorIndex)
                                + String.valueOf(targetIndex);
                log.info(String.format("Adding ITL %s with key %s", itl, itlKey));
                StringMap extensionsMap = volume.getExtensions();
                if (null == extensionsMap) {
                    extensionsMap = new StringMap();
                    extensionsMap.put(itlKey, itl);
                    volume.setExtensions(extensionsMap);
                } else {
                    volume.getExtensions().put(itlKey, itl);
                }
                targetIndex++;
            }
            initiatorIndex++;
        }
        dbClient.updateAndReindexObject(volume);
    }
    log.debug("END - storeITLMappingInVolume");
}
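// Worked example of the entry stored above (illustrative values): for the first initiator
// in the zoning map with WWPN 10:00:00:00:C9:2F:11:22, its first target port
// 50:06:01:60:3E:A0:12:34, and a target LUN id of 5, the volume's extensions map would gain:
//   key   "ITL-00"
//   value "10000000C92F1122-500601603EA01234-5"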
/**
 * Creates a map of initiators grouped and keyed by their network. Initiators which are not in
 * any network are not returned.
 *
 * @param initiators the initiators
 * @param dbClient a reference to the database client
 * @return a map of network-to-initiators
 */
public static Map<NetworkLite, List<Initiator>> getInitiatorsByNetwork(
        Collection<Initiator> initiators, DbClient dbClient) {
    Map<NetworkLite, List<Initiator>> map = new HashMap<NetworkLite, List<Initiator>>();
    NetworkLite network = null;
    List<Initiator> netInitiators = null;
    for (Initiator initiator : initiators) {
        network = NetworkUtil.getEndpointNetworkLite(initiator.getInitiatorPort(), dbClient);
        if (network != null) {
            netInitiators = map.get(network);
            if (netInitiators == null) {
                netInitiators = new ArrayList<Initiator>();
                map.put(network, netInitiators);
            }
            netInitiators.add(initiator);
        }
    }
    return map;
}
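// Illustrative usage (a sketch; "countInitiatorsPerNetwork" is a hypothetical helper, not
// part of the original source):
private static Map<String, Integer> countInitiatorsPerNetwork(
        Collection<Initiator> initiators, DbClient dbClient) {
    Map<String, Integer> counts = new HashMap<String, Integer>();
    for (Map.Entry<NetworkLite, List<Initiator>> entry :
            getInitiatorsByNetwork(initiators, dbClient).entrySet()) {
        // Key by the network label; initiators outside any network are already excluded.
        counts.put(entry.getKey().getLabel(), entry.getValue().size());
    }
    return counts;
}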
/**
 * Get an initiator as specified by the initiator's network port.
 *
 * @param networkPort The initiator's port WWN or IQN.
 * @param dbClient a reference to the database client
 * @return A reference to an initiator.
 */
public static Initiator getInitiator(String networkPort, DbClient dbClient) {
    Initiator initiator = null;
    URIQueryResultList resultsList = new URIQueryResultList();

    // find the initiator
    dbClient.queryByConstraint(
            AlternateIdConstraint.Factory.getInitiatorPortInitiatorConstraint(networkPort),
            resultsList);
    Iterator<URI> resultsIter = resultsList.iterator();
    while (resultsIter.hasNext()) {
        initiator = dbClient.queryObject(Initiator.class, resultsIter.next());
        // there should be one active initiator, so return as soon as it is found
        if (initiator != null && !initiator.getInactive()) {
            return initiator;
        }
    }
    return null;
}
/**
 * If the endpoint is used in an active Export Group, throws an exception.
 * Assumes endpoint formats have been validated.
 *
 * @param endpoint endpoint being added
 * @param dbClient a reference to the database client
 */
public static void checkNotUsedByActiveExportGroup(String endpoint, DbClient dbClient) {
    if (endpoint != null && !"".equals(endpoint)) {
        Initiator initiator = getInitiator(endpoint, dbClient);
        if (initiator != null && initiator.getId() != null) {
            if (NetworkUtil.isInitiatorInUse(initiator.getId(), dbClient)) {
                throw APIException.badRequests.endpointsCannotBeUpdatedActiveExport(endpoint);
            }
        }
        List<StoragePort> ports = findStoragePortsInDB(endpoint, dbClient);
        for (StoragePort port : ports) {
            if (port != null && port.getId() != null) {
                if (NetworkUtil.isBlockStoragePortInUse(port.getId(), dbClient)) {
                    throw APIException.badRequests.endpointsCannotBeUpdatedActiveExport(endpoint);
                }
            }
        }
    }
}
private Map<String, String[]> getFCInitiatorsArray(List<Initiator> fcInitiators) {
    Map<String, String[]> mapSettingVsValues = new HashMap<>();
    // form an array with all FC initiator wwpns
    // to put into attach request body
    String[] fcInitiatorsWwpns = new String[fcInitiators.size()];
    String[] fcInitiatorsWwnns = new String[fcInitiators.size()];
    int index = 0;
    for (Initiator fcInitiator : fcInitiators) {
        // remove colons in initiator port
        fcInitiatorsWwpns[index] =
                fcInitiator.getInitiatorPort().replaceAll(CinderConstants.COLON, "");
        String wwnn = fcInitiator.getInitiatorNode();
        if (null != wwnn && wwnn.length() > 0) {
            fcInitiatorsWwnns[index] = wwnn.replaceAll(CinderConstants.COLON, "");
        }
        index++;
    }
    mapSettingVsValues.put(WWPNS, fcInitiatorsWwpns);
    mapSettingVsValues.put(WWNNS, fcInitiatorsWwnns);
    return mapSettingVsValues;
}
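// Worked example (illustrative values): for one FC initiator with port
// 10:00:00:00:C9:2F:11:22 and node 20:00:00:00:C9:2F:11:22, the returned map holds
//   WWPNS -> { "10000000C92F1122" }
//   WWNNS -> { "20000000C92F1122" }
// Note that an initiator without a node WWN leaves a null slot in the WWNNS array.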
@Override
public void createOrAddVolumesToExportMask(
        URI arrayURI,
        URI exportGroupURI,
        URI exportMaskURI,
        Map<URI, Integer> volumeMap,
        List<URI> initiatorURIs2,
        TaskCompleter completer,
        String stepId) {
    try {
        StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
        ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
        WorkflowStepCompleter.stepExecuting(stepId);

        // If the exportMask isn't found, or has been deleted, fail and ask the user to retry.
        if (exportMask == null || exportMask.getInactive()) {
            _log.info(String.format("ExportMask %s deleted or inactive, failing", exportMaskURI));
            ServiceError svcerr =
                    VPlexApiException.errors.createBackendExportMaskDeleted(
                            exportMaskURI.toString(), arrayURI.toString());
            WorkflowStepCompleter.stepFailed(stepId, svcerr);
            return;
        }

        // Protect concurrent operations by locking on the {host, array} tuple.
        // The lock will be released when the workflow step completes.
        List<String> lockKeys =
                ControllerLockingUtil.getHostStorageLockKeys(
                        _dbClient,
                        ExportGroupType.Host,
                        StringSetUtil.stringSetToUriList(exportMask.getInitiators()),
                        arrayURI);
        getWorkflowService()
                .acquireWorkflowStepLocks(
                        stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));

        // Fetch the Initiators
        List<URI> initiatorURIs = new ArrayList<URI>();
        List<Initiator> initiators = new ArrayList<Initiator>();
        for (String initiatorId : exportMask.getInitiators()) {
            Initiator initiator = _dbClient.queryObject(Initiator.class, URI.create(initiatorId));
            if (initiator != null) {
                initiators.add(initiator);
                initiatorURIs.add(initiator.getId());
            }
        }

        // We do not refresh here, as the VNXExportOperations code will throw an exception
        // if the StorageGroup was not found.
        BlockStorageDevice device = _blockController.getDevice(array.getSystemType());
        if (!exportMask.hasAnyVolumes() && exportMask.getCreatedBySystem()) {
            // We are creating this ExportMask on the hardware! (Maybe not the first time though...)
            // Fetch the targets
            List<URI> targets = new ArrayList<URI>();
            for (String targetId : exportMask.getStoragePorts()) {
                targets.add(URI.create(targetId));
            }

            // Clear the export_mask nativeId; otherwise the VnxExportOps will attempt to look it
            // up and fail. An empty String will suffice as having no nativeId.
            if (exportMask.getNativeId() != null) {
                exportMask.setNativeId("");
                _dbClient.updateAndReindexObject(exportMask);
            }

            // The default completer passed in is for add volume, so create the correct one
            completer =
                    new ExportMaskCreateCompleter(
                            exportGroupURI, exportMaskURI, initiatorURIs, volumeMap, stepId);
            device.doExportCreate(array, exportMask, volumeMap, initiators, targets, completer);
        } else {
            device.doExportAddVolumes(array, exportMask, initiators, volumeMap, completer);
        }
    } catch (Exception ex) {
        _log.error("Failed to create or add volumes to export mask for vnx: ", ex);
        VPlexApiException vplexex =
                DeviceControllerExceptions.vplex.addStepsForCreateVolumesFailed(ex);
        WorkflowStepCompleter.stepFailed(stepId, vplexex);
    }
}
@Override
public StringSetMap configureZoning(
        Map<URI, List<List<StoragePort>>> portGroup,
        Map<String, Map<URI, Set<Initiator>>> initiatorGroup,
        Map<URI, NetworkLite> networkMap,
        StoragePortsAssigner assigner) {
    StringSetMap zoningMap = new StringSetMap();

    // Set up a map to track port usage so that we can use all ports more or less equally.
    Map<StoragePort, Integer> portAUsage = new HashMap<StoragePort, Integer>();
    Map<StoragePort, Integer> portBUsage = new HashMap<StoragePort, Integer>();

    // Iterate through each of the directors, matching each of its initiators
    // with one port.
    for (String director : initiatorGroup.keySet()) {
        for (URI networkURI : initiatorGroup.get(director).keySet()) {
            NetworkLite net = networkMap.get(networkURI);
            for (Initiator initiator : initiatorGroup.get(director).get(networkURI)) {
                // If there are no ports on the initiator's network, skip it.
                if (portGroup.get(networkURI) == null) {
                    _log.info(
                            String.format("%s -> no ports in network", initiator.getInitiatorPort()));
                    continue;
                }
                StringSet ports = new StringSet();

                // Get an A Port
                String aPortName = " ", bPortName = " ";
                StoragePort portA =
                        VPlexBackEndOrchestratorUtil.assignPortToInitiator(
                                assigner,
                                portGroup.get(networkURI).iterator().next(),
                                net,
                                initiator,
                                portAUsage,
                                "SP_A");
                if (portA != null) {
                    aPortName = portA.getPortName();
                    ports.add(portA.getId().toString());
                }

                // Get a B Port
                StoragePort portB =
                        VPlexBackEndOrchestratorUtil.assignPortToInitiator(
                                assigner,
                                portGroup.get(networkURI).iterator().next(),
                                net,
                                initiator,
                                portBUsage,
                                "SP_B");
                if (portB != null) {
                    bPortName = portB.getPortName();
                    ports.add(portB.getId().toString());
                }
                _log.info(
                        String.format(
                                "%s %s %s -> %s %s",
                                director,
                                net.getLabel(),
                                initiator.getInitiatorPort(),
                                aPortName,
                                bPortName));
                zoningMap.put(initiator.getId().toString(), ports);
            }
        }
    }
    return zoningMap;
}
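// Illustrative shape of the returned zoning map (example values): each initiator URI maps
// to a StringSet holding at most two port URIs, one assigned against SP_A usage tracking
// and one against SP_B, e.g.
//   "urn:storageos:Initiator:1..." -> { "urn:storageos:StoragePort:A...",
//                                       "urn:storageos:StoragePort:B..." }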
/**
 * Update the zoning map entries from the updated target list in the mask.
 *
 * <p>1. Clean the existing zoning map entries.
 * 2. From the target storage ports in the mask, generate a map of networkURI string vs list
 * of target storage ports.
 * 3. From the initiator ports in the mask, generate a map of initiator URI vs initiator WWN.
 * 4. From the initiatorPortMap, generate a map of initiator WWN vs networkURI string.
 * 5. Based on the networkURI matching, generate zoning map entries adhering to VPLEX best
 * practices.
 * 6. Persist the updated mask.
 *
 * @param initiatorPortMap map of network URI to the VPLEX back-end ports in that network
 * @param directorToInitiatorIds map of VPLEX director name to its initiator IDs
 * @param exportMask the export mask to update
 */
private void updateZoningMap(
        Map<URI, List<StoragePort>> initiatorPortMap,
        Map<String, Set<String>> directorToInitiatorIds,
        ExportMask exportMask) {

    // STEP 1 - Clean the existing zoning map
    for (String initiatorURIStr : exportMask.getZoningMap().keySet()) {
        exportMask.removeZoningMapEntry(initiatorURIStr);
    }
    exportMask.setZoningMap(null);

    // STEP 2 - From the back-end storage system ports, which are used as target storage
    // ports for VPLEX, generate a map of networkURI string vs list of target storage ports.
    Map<String, List<StoragePort>> nwUriVsTargetPortsFromMask = new HashMap<>();
    StringSet targetPorts = exportMask.getStoragePorts();
    for (String targetPortUri : targetPorts) {
        StoragePort targetPort = _dbClient.queryObject(StoragePort.class, URI.create(targetPortUri));
        String networkUri = targetPort.getNetwork().toString();
        if (nwUriVsTargetPortsFromMask.containsKey(networkUri)) {
            nwUriVsTargetPortsFromMask.get(networkUri).add(targetPort);
        } else {
            nwUriVsTargetPortsFromMask.put(networkUri, new ArrayList<StoragePort>());
            nwUriVsTargetPortsFromMask.get(networkUri).add(targetPort);
        }
    }

    // STEP 3 - From the initiator ports in the mask, generate a map of initiator URI vs
    // initiator WWN.
    Map<String, String> initiatorUrivsWWNFromMask = new HashMap<>();
    StringSet initiatorPorts = exportMask.getInitiators();
    for (String initiatorUri : initiatorPorts) {
        Initiator initiator = _dbClient.queryObject(Initiator.class, URI.create(initiatorUri));
        String initiatorWWN = initiator.getInitiatorPort();
        initiatorUrivsWWNFromMask.put(initiator.getId().toString(), initiatorWWN);
    }

    // STEP 4 - Convert networkURI vs StoragePort to initiator port WWN vs networkURI.
    Map<String, String> initiatorWWNvsNetworkURI = new HashMap<>();
    Set<URI> networkURIs = initiatorPortMap.keySet();
    for (URI networkURI : networkURIs) {
        List<StoragePort> initiatorPortList = initiatorPortMap.get(networkURI);
        for (StoragePort initPort : initiatorPortList) {
            initiatorWWNvsNetworkURI.put(initPort.getPortNetworkId(), networkURI.toString());
        }
    }

    // STEP 5 - Consider directors, to restrict each director to no more than 4 paths,
    // and add the zoning map entries to adhere to the VPLEX best practices.
    Map<StoragePort, Integer> portUsage = new HashMap<>();
    Set<String> directorKeySet = directorToInitiatorIds.keySet();
    for (String director : directorKeySet) {
        Set<String> initiatorIds = directorToInitiatorIds.get(director);
        int directorPaths = 0;
        for (String initiatorId : initiatorIds) {
            if (4 == directorPaths) {
                break;
            }
            String initWWN = initiatorUrivsWWNFromMask.get(initiatorId);
            String initiatorNetworkURI = initiatorWWNvsNetworkURI.get(initWWN);
            List<StoragePort> matchingTargetPorts =
                    nwUriVsTargetPortsFromMask.get(initiatorNetworkURI);
            if (null != matchingTargetPorts && !matchingTargetPorts.isEmpty()) {
                StoragePort assignedPort = assignPortBasedOnUsage(matchingTargetPorts, portUsage);
                StringSet targetPortURIs = new StringSet();
                targetPortURIs.add(assignedPort.getId().toString());
                _log.info(
                        String.format(
                                "Adding zoning map entry - Initiator is %s and its targetPorts %s",
                                initiatorId, targetPortURIs.toString()));
                exportMask.addZoningMapEntry(initiatorId, targetPortURIs);
                directorPaths++;
            }
        }
    }

    // STEP 6 - Persist the mask.
    _dbClient.updateAndReindexObject(exportMask);
}
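// "assignPortBasedOnUsage" is referenced above but not shown in this section. A minimal
// sketch of what such a helper could look like (an assumption for illustration, not the
// original implementation): choose the matching target port with the lowest usage count,
// then bump its counter so ports are spread evenly.
private StoragePort assignPortBasedOnUsageSketch(
        List<StoragePort> candidatePorts, Map<StoragePort, Integer> portUsage) {
    StoragePort selected = null;
    for (StoragePort port : candidatePorts) {
        if (!portUsage.containsKey(port)) {
            portUsage.put(port, 0);
        }
        if (selected == null || portUsage.get(port) < portUsage.get(selected)) {
            selected = port;
        }
    }
    if (selected != null) {
        // Record the assignment so the next call favors other ports.
        portUsage.put(selected, portUsage.get(selected) + 1);
    }
    return selected;
}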
/**
 * Routine contains logic to create an export mask on the array.
 *
 * @param workflow - Workflow object to create steps against
 * @param previousStep - [optional] Identifier of workflow step to wait for
 * @param device - BlockStorageDevice implementation
 * @param storage - StorageSystem object representing the underlying array
 * @param exportGroup - ExportGroup object representing Bourne-level masking
 * @param initiatorURIs - List of Initiator URIs
 * @param volumeMap - Map of Volume URIs to requested Integer HLUs
 * @param zoningStepNeeded - Not required for HDS
 * @param token - Identifier for the operation
 * @throws Exception
 */
public boolean determineExportGroupCreateSteps(
        Workflow workflow,
        String previousStep,
        BlockStorageDevice device,
        StorageSystem storage,
        ExportGroup exportGroup,
        List<URI> initiatorURIs,
        Map<URI, Integer> volumeMap,
        boolean zoningStepNeeded,
        String token)
        throws Exception {
    Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
    List<URI> volumeURIs = new ArrayList<URI>();
    volumeURIs.addAll(volumeMap.keySet());
    Map<URI, URI> hostToExistingExportMaskMap = new HashMap<URI, URI>();
    List<URI> hostURIs = new ArrayList<URI>();
    List<String> portNames = new ArrayList<String>();
    List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);

    // Populate the port WWN/IQNs (portNames) and the
    // mapping of the WWN/IQNs to Initiator URIs
    processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);

    // We always want to have the full list of initiators for the hosts involved in
    // this export. This will allow the export operation to always find any
    // existing exports for a given host.
    queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);

    // Find the export masks that are associated with any or all the ports in
    // portNames. We will have to do processing differently based on whether
    // or not there are existing ExportMasks.
    Map<String, Set<URI>> matchingExportMaskURIs =
            device.findExportMasks(storage, portNames, false);
    if (matchingExportMaskURIs.isEmpty()) {
        _log.info(
                String.format(
                        "No existing mask found w/ initiators { %s }",
                        Joiner.on(",").join(portNames)));
        createNewExportMaskWorkflowForInitiators(
                initiatorURIs, exportGroup, workflow, volumeMap, storage, token, previousStep);
    } else {
        _log.info(
                String.format(
                        "Mask(s) found w/ initiators {%s}. "
                                + "MatchingExportMaskURIs {%s}, portNameToInitiators {%s}",
                        Joiner.on(",").join(portNames),
                        Joiner.on(",").join(matchingExportMaskURIs.values()),
                        Joiner.on(",").join(portNameToInitiatorURI.entrySet())));
        // There are some initiators that already exist. We need to create a
        // workflow that creates new masking containers or updates existing
        // ones as necessary.

        // These data structures will be used to track new initiators - ones
        // that don't already exist on the array
        List<URI> initiatorURIsCopy = new ArrayList<URI>();
        initiatorURIsCopy.addAll(initiatorURIs);

        // This loop will determine a list of volumes to update per export mask
        Map<URI, Map<URI, Integer>> existingMasksToUpdateWithNewVolumes =
                new HashMap<URI, Map<URI, Integer>>();
        Map<URI, Set<Initiator>> existingMasksToUpdateWithNewInitiators =
                new HashMap<URI, Set<Initiator>>();
        for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
            URI initiatorURI = portNameToInitiatorURI.get(entry.getKey());
            Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);

            // Keep track of those initiators that have been found to exist already
            // in some export mask on the array
            initiatorURIsCopy.remove(initiatorURI);

            // Get a list of the ExportMasks that were matched to the initiator
            List<URI> exportMaskURIs = new ArrayList<URI>();
            exportMaskURIs.addAll(entry.getValue());
            List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
            _log.info(
                    String.format(
                            "initiator %s masks {%s}",
                            initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
            for (ExportMask mask : masks) {
                // The ExportMask was created outside of ViPR. Set the mask name if it
                // doesn't have one.
                if (null == mask.getMaskName()) {
                    String maskName =
                            ExportMaskUtils.getMaskName(_dbClient, initiators, exportGroup, storage);
                    _log.info("Generated mask name: {}", maskName);
                    mask.setMaskName(maskName);
                }

                // Check for NO_VIPR. If found, avoid this mask.
                if (mask.getMaskName() != null
                        && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
                    _log.info(
                            String.format(
                                    "ExportMask %s disqualified because its name contains %s (in upper or lower case), excluding it",
                                    mask.getMaskName(), ExportUtils.NO_VIPR));
                    continue;
                }

                _log.info(
                        String.format(
                                "mask %s has initiator %s",
                                mask.getMaskName(), initiator.getInitiatorPort()));
                if (mask.getCreatedBySystem()) {
                    _log.info(
                            String.format(
                                    "initiator %s is in persisted mask %s",
                                    initiator.getInitiatorPort(), mask.getMaskName()));

                    // We're still OK if the mask contains ONLY initiators that can be found
                    // in our export group, because we would simply add to them.
                    if (mask.getInitiators() != null) {
                        for (String existingMaskInitiatorStr : mask.getInitiators()) {

                            // Now look at it from a different angle. Which one of our export
                            // group initiators are NOT in the current mask? And if any belong
                            // to the same host as an existing one, we should add them to
                            // this mask.
                            Iterator<URI> initiatorIter = initiatorURIsCopy.iterator();
                            while (initiatorIter.hasNext()) {
                                Initiator initiatorCopy =
                                        _dbClient.queryObject(Initiator.class, initiatorIter.next());
                                if (initiatorCopy != null
                                        && initiatorCopy.getId() != null
                                        && !mask.hasInitiator(initiatorCopy.getId().toString())) {
                                    Initiator existingMaskInitiator =
                                            _dbClient.queryObject(
                                                    Initiator.class,
                                                    URI.create(existingMaskInitiatorStr));
                                    if (existingMaskInitiator != null
                                            && initiatorCopy.getHost() != null
                                            && initiatorCopy
                                                    .getHost()
                                                    .equals(existingMaskInitiator.getHost())) {
                                        // Add to the list of initiators we need to add to this mask
                                        Set<Initiator> existingMaskInitiators =
                                                existingMasksToUpdateWithNewInitiators.get(
                                                        mask.getId());
                                        if (existingMaskInitiators == null) {
                                            existingMaskInitiators = new HashSet<Initiator>();
                                            existingMasksToUpdateWithNewInitiators.put(
                                                    mask.getId(), existingMaskInitiators);
                                        }
                                        existingMaskInitiators.add(initiatorCopy);
                                        // remove this from the list of initiators we'll make a
                                        // new mask from
                                        initiatorIter.remove();
                                    }
                                }
                            }
                        }
                    }
                } else {
                    // Insert this initiator into the mask's list of initiators managed by
                    // the system. This will get persisted below.
                    mask.addInitiator(initiator);
                    if (!NullColumnValueGetter.isNullURI(initiator.getHost())) {
                        hostToExistingExportMaskMap.put(initiator.getHost(), mask.getId());
                    }
                }

                // We need to see if the volume also exists in the mask;
                // if it doesn't, then we'll add it to the list of volumes to add.
                for (URI boURI : volumeURIs) {
                    BlockObject bo = BlockObject.fetch(_dbClient, boURI);
                    if (!mask.hasExistingVolume(bo)) {
                        _log.info(
                                String.format(
                                        "volume %s is not in mask %s",
                                        bo.getNativeGuid(), mask.getMaskName()));
                        // The volume doesn't exist, so we have to add it to
                        // the masking container.
                        Map<URI, Integer> newVolumes =
                                existingMasksToUpdateWithNewVolumes.get(mask.getId());
                        if (newVolumes == null) {
                            newVolumes = new HashMap<URI, Integer>();
                            existingMasksToUpdateWithNewVolumes.put(mask.getId(), newVolumes);
                        }
                        // Check if the requested HLU for the volume is
                        // already taken by a pre-existing volume.
                        Integer requestedHLU = volumeMap.get(bo.getId());
                        StringMap existingVolumesInMask = mask.getExistingVolumes();
                        if (existingVolumesInMask != null
                                && existingVolumesInMask.containsValue(requestedHLU.toString())) {
                            ExportOrchestrationTask completer =
                                    new ExportOrchestrationTask(exportGroup.getId(), token);
                            ServiceError serviceError =
                                    DeviceControllerException.errors
                                            .exportHasExistingVolumeWithRequestedHLU(
                                                    boURI.toString(), requestedHLU.toString());
                            completer.error(_dbClient, serviceError);
                            return false;
                        }
                        newVolumes.put(bo.getId(), requestedHLU);
                        mask.addToUserCreatedVolumes(bo);
                    }
                }

                // Update the list of volumes and initiators for the mask
                Map<URI, Integer> volumeMapForExistingMask =
                        existingMasksToUpdateWithNewVolumes.get(mask.getId());
                if (volumeMapForExistingMask != null && !volumeMapForExistingMask.isEmpty()) {
                    mask.addVolumes(volumeMapForExistingMask);
                }
                Set<Initiator> initiatorSetForExistingMask =
                        existingMasksToUpdateWithNewInitiators.get(mask.getId());
                // Note: add the initiators only when the set is non-empty (the original code
                // tested isEmpty(), which skipped every populated set).
                if (initiatorSetForExistingMask != null && !initiatorSetForExistingMask.isEmpty()) {
                    mask.addInitiators(initiatorSetForExistingMask);
                }
                updateZoningMap(exportGroup, mask);
                _dbClient.updateAndReindexObject(mask);
                // TODO: All export group modifications should be moved to completers
                exportGroup.addExportMask(mask.getId());
                _dbClient.updateAndReindexObject(exportGroup);
            }
        }

        // The initiatorURIsCopy was used in the foreach initiator loop to see
        // which initiators already exist in a mask. If it is non-empty,
        // then it means there are initiators that are new,
        // so let's add them to the main tracker
        Map<URI, List<URI>> hostInitiatorMap = new HashMap<URI, List<URI>>();
        if (!initiatorURIsCopy.isEmpty()) {
            for (URI newExportMaskInitiator : initiatorURIsCopy) {
                Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
                List<URI> initiatorSet = hostInitiatorMap.get(initiator.getHost());
                if (initiatorSet == null) {
                    initiatorSet = new ArrayList<URI>();
                    hostInitiatorMap.put(initiator.getHost(), initiatorSet);
                }
                initiatorSet.add(initiator.getId());
                _log.info(
                        String.format(
                                "host = %s, "
                                        + "initiators to add: %d, "
                                        + "existingMasksToUpdateWithNewVolumes.size = %d",
                                initiator.getHost(),
                                hostInitiatorMap.get(initiator.getHost()).size(),
                                existingMasksToUpdateWithNewVolumes.size()));
            }
        }
        _log.info(
                String.format(
                        "existingMasksToUpdateWithNewVolumes.size = %d",
                        existingMasksToUpdateWithNewVolumes.size()));

        // At this point we have the necessary data structures populated to
        // determine the workflow steps. We are going to create new masks
        // and/or add volumes to existing masks.
        if (!hostInitiatorMap.isEmpty()) {
            for (URI hostID : hostInitiatorMap.keySet()) {
                // Check if there is an existing mask (created outside of ViPR) for
                // the host. If there is, we will need to add the initiators
                // associated with that host to the list.
                if (hostToExistingExportMaskMap.containsKey(hostID)) {
                    URI existingExportMaskURI = hostToExistingExportMaskMap.get(hostID);
                    Set<Initiator> toAddInits = new HashSet<Initiator>();
                    List<URI> hostInitiatorList = hostInitiatorMap.get(hostID);
                    for (URI initURI : hostInitiatorList) {
                        Initiator initiator = _dbClient.queryObject(Initiator.class, initURI);
                        if (!initiator.getInactive()) {
                            toAddInits.add(initiator);
                        }
                    }
                    _log.info(
                            String.format(
                                    "Need to add new initiators to existing mask %s, %s",
                                    existingExportMaskURI.toString(),
                                    Joiner.on(',').join(hostInitiatorList)));
                    existingMasksToUpdateWithNewInitiators.put(existingExportMaskURI, toAddInits);
                    continue;
                }
                // We have some brand new initiators, let's add them to new masks
                _log.info(
                        String.format(
                                "new export masks %s",
                                Joiner.on(",").join(hostInitiatorMap.get(hostID))));
                generateExportMaskCreateWorkflow(
                        workflow,
                        previousStep,
                        storage,
                        exportGroup,
                        hostInitiatorMap.get(hostID),
                        volumeMap,
                        token);
            }
        }

        Map<URI, String> stepMap = new HashMap<URI, String>();
        for (Map.Entry<URI, Map<URI, Integer>> entry :
                existingMasksToUpdateWithNewVolumes.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            Map<URI, Integer> volumesToAdd = entry.getValue();
            _log.info(
                    String.format(
                            "adding these volumes %s to mask %s",
                            Joiner.on(",").join(volumesToAdd.keySet()), mask.getMaskName()));
            stepMap.put(
                    entry.getKey(),
                    generateExportMaskAddVolumesWorkflow(
                            workflow, null, storage, exportGroup, mask, volumesToAdd));
        }

        for (Entry<URI, Set<Initiator>> entry : existingMasksToUpdateWithNewInitiators.entrySet()) {
            ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
            Set<Initiator> initiatorsToAdd = entry.getValue();
            List<URI> initiatorsURIs = new ArrayList<URI>();
            for (Initiator initiator : initiatorsToAdd) {
                initiatorsURIs.add(initiator.getId());
            }
            _log.info(
                    String.format(
                            "adding these initiators %s to mask %s",
                            Joiner.on(",").join(initiatorsURIs), mask.getMaskName()));
            previousStep =
                    stepMap.get(entry.getKey()) == null ? previousStep : stepMap.get(entry.getKey());
            generateExportMaskAddInitiatorsWorkflow(
                    workflow, previousStep, storage, exportGroup, mask, initiatorsURIs, null, token);
        }
    }
    return true;
}
/**
 * Process the volumes to find the unmanaged volumes and populate the volume supported
 * information.
 *
 * @param it iterator over the volume CIM instances returned by the provider
 * @param keyMap the discovery context map
 * @param operation the discovery operation
 * @param pool the storage pool being processed
 * @param system the storage system being discovered
 * @param exportedVolumes map of exported volume native GUIDs to host IO objects
 * @param existingVolumesInCG set of volumes already in consistency groups
 * @param volumeToRAGroupMap map of volume native GUIDs to remote mirror objects
 * @param volumeToLocalReplicaMap map of volume native GUIDs to local replica objects
 * @param poolSupportedSLONames SLO names supported by the pool
 * @param boundVolumes device IDs of the volumes bound to this thin pool
 */
private void processVolumes(
        Iterator<CIMInstance> it,
        Map<String, Object> keyMap,
        Operation operation,
        StoragePool pool,
        StorageSystem system,
        Map<String, VolHostIOObject> exportedVolumes,
        Set<String> existingVolumesInCG,
        Map<String, RemoteMirrorObject> volumeToRAGroupMap,
        Map<String, LocalReplicaObject> volumeToLocalReplicaMap,
        Set<String> poolSupportedSLONames,
        Set<String> boundVolumes) {

    List<CIMObjectPath> metaVolumes = new ArrayList<CIMObjectPath>();
    List<CIMObjectPath> metaVolumeViews = new ArrayList<CIMObjectPath>();
    while (it.hasNext()) {
        CIMInstance volumeViewInstance = null;
        try {
            volumeViewInstance = it.next();
            String volumeNativeGuid =
                    getVolumeViewNativeGuid(volumeViewInstance.getObjectPath(), keyMap);
            Volume volume = checkStorageVolumeExistsInDB(volumeNativeGuid, _dbClient);
            if (null != volume) {
                _logger.debug(
                        "Skipping discovery, as this Volume {} is already being managed by ViPR.",
                        volumeNativeGuid);
                continue;
            }

            // skip non-bound volumes for this pool
            if (boundVolumes != null) {
                String deviceId = null;
                if (system.getUsingSmis80()) {
                    deviceId =
                            volumeViewInstance.getObjectPath().getKey(DEVICE_ID).getValue().toString();
                } else {
                    deviceId =
                            volumeViewInstance.getObjectPath().getKey(SVDEVICEID).getValue().toString();
                }
                if (!boundVolumes.contains(deviceId)) {
                    _logger.info(
                            "Skipping volume, as this Volume {} is not bound to this Thin Storage Pool {}",
                            volumeNativeGuid,
                            pool.getLabel());
                    continue;
                }
            }
            addPath(keyMap, operation.get_result(), volumeViewInstance.getObjectPath());
            String unManagedVolumeNativeGuid =
                    getUnManagedVolumeNativeGuid(volumeViewInstance.getObjectPath(), keyMap);
            UnManagedVolume unManagedVolume =
                    checkUnManagedVolumeExistsInDB(unManagedVolumeNativeGuid, _dbClient);
            unManagedVolume =
                    createUnManagedVolume(
                            unManagedVolume,
                            volumeViewInstance,
                            unManagedVolumeNativeGuid,
                            pool,
                            system,
                            volumeNativeGuid,
                            exportedVolumes,
                            existingVolumesInCG,
                            volumeToRAGroupMap,
                            volumeToLocalReplicaMap,
                            poolSupportedSLONames,
                            keyMap);

            // set up UnManagedExportMask information
            @SuppressWarnings("unchecked")
            Map<String, Set<UnManagedExportMask>> masksMap =
                    (Map<String, Set<UnManagedExportMask>>)
                            keyMap.get(Constants.UNMANAGED_EXPORT_MASKS_MAP);
            if (masksMap != null) {
                Set<UnManagedExportMask> uems = masksMap.get(unManagedVolume.getNativeGuid());
                if (uems != null) {
                    _logger.info(
                            "{} UnManagedExportMasks found in the keyMap for volume {}",
                            uems.size(),
                            unManagedVolume.getNativeGuid());
                    for (UnManagedExportMask uem : uems) {
                        _logger.info(
                                "   adding UnManagedExportMask {} to UnManagedVolume",
                                uem.getMaskingViewPath());
                        unManagedVolume.getUnmanagedExportMasks().add(uem.getId().toString());
                        uem.getUnmanagedVolumeUris().add(unManagedVolume.getId().toString());
                        _unManagedExportMasksUpdate.add(uem);

                        // add the known initiators, too
                        for (String initUri : uem.getKnownInitiatorUris()) {
                            _logger.info("   adding known Initiator URI {} to UnManagedVolume", initUri);
                            unManagedVolume.getInitiatorUris().add(initUri);
                            Initiator init =
                                    _dbClient.queryObject(Initiator.class, URI.create(initUri));
                            unManagedVolume.getInitiatorNetworkIds().add(init.getInitiatorPort());
                        }

                        // log this info for debugging
                        for (String path : uem.getUnmanagedInitiatorNetworkIds()) {
                            _logger.info(
                                    "   UnManagedExportMask has this initiator unknown to ViPR: {}",
                                    path);
                        }

                        // check if this volume is in a vplex backend mask
                        // and mark it as such if it is
                        Object o = keyMap.get(Constants.UNMANAGED_VPLEX_BACKEND_MASKS_SET);
                        if (o != null) {
                            Set<String> unmanagedVplexBackendMasks = (Set<String>) o;
                            if (unmanagedVplexBackendMasks.size() > 0) {
                                if (unmanagedVplexBackendMasks.contains(uem.getId().toString())) {
                                    _logger.info(
                                            "unmanaged volume {} is a vplex backend volume",
                                            unManagedVolume.getLabel());
                                    unManagedVolume.putVolumeCharacterstics(
                                            SupportedVolumeCharacterstics.IS_VPLEX_BACKEND_VOLUME
                                                    .toString(),
                                            "true");
                                }
                            }
                        }
                    }
                }
            }

            _logger.debug(
                    "Going to check if the volume is meta: {}, volume meta property: {}",
                    volumeViewInstance.getObjectPath(),
                    unManagedVolume
                            .getVolumeCharacterstics()
                            .get(SupportedVolumeCharacterstics.IS_METAVOLUME.toString()));

            // Check if the volume is a meta volume and add it to the meta
            // volume list
            String isMetaVolume =
                    unManagedVolume
                            .getVolumeCharacterstics()
                            .get(SupportedVolumeCharacterstics.IS_METAVOLUME.toString());
            if (null != isMetaVolume && Boolean.valueOf(isMetaVolume)) {
                if (keyMap.containsKey(Constants.IS_NEW_SMIS_PROVIDER)
                        && Boolean.valueOf(keyMap.get(Constants.IS_NEW_SMIS_PROVIDER).toString())) {
                    metaVolumes.add(volumeViewInstance.getObjectPath());
                } else {
                    metaVolumeViews.add(volumeViewInstance.getObjectPath());
                }
                _logger.info(
                        "Found meta volume: {}, name: {}",
                        volumeViewInstance.getObjectPath(),
                        unManagedVolume.getLabel());
            }

            // If the pending insert/update lists exceed BATCH_SIZE, persist them to the DB.
            if (_unManagedVolumesInsert.size() > BATCH_SIZE) {
                _partitionManager.insertInBatches(
                        _unManagedVolumesInsert, getPartitionSize(keyMap), _dbClient, UNMANAGED_VOLUME);
                _unManagedVolumesInsert.clear();
            }
            if (_unManagedVolumesUpdate.size() > BATCH_SIZE) {
                _partitionManager.updateInBatches(
                        _unManagedVolumesUpdate, getPartitionSize(keyMap), _dbClient, UNMANAGED_VOLUME);
                _unManagedVolumesUpdate.clear();
            }
            if (_unManagedExportMasksUpdate.size() > BATCH_SIZE) {
                _partitionManager.updateInBatches(
                        _unManagedExportMasksUpdate,
                        getPartitionSize(keyMap),
                        _dbClient,
                        UNMANAGED_EXPORT_MASK);
                _unManagedExportMasksUpdate.clear();
            }
            unManagedVolumesReturnedFromProvider.add(unManagedVolume.getId());
        } catch (Exception ex) {
            _logger.error(
                    "Processing UnManaged Storage Volume {} ", volumeViewInstance.getObjectPath(), ex);
        }
    }

    // Add meta volumes to the keyMap
    try {
        if (metaVolumes != null && !metaVolumes.isEmpty()) {
            _metaVolumePaths.addAll(metaVolumes);
            _logger.info("Added {} meta volumes.", metaVolumes.size());
        }
        if (metaVolumeViews != null && !metaVolumeViews.isEmpty()) {
            _metaVolumeViewPaths.addAll(metaVolumeViews);
            _logger.info("Added {} meta volume views.", metaVolumeViews.size());
        }
    } catch (Exception ex) {
        _logger.error("Processing UnManaged meta volumes.", ex);
    }
}
@Override
public void exportGroupCreate(
        URI storageURI,
        URI exportGroupURI,
        List<URI> initiatorURIs,
        Map<URI, Integer> volumeMap,
        String token)
        throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            // Set up workflow steps.
            Workflow workflow =
                    _workflowService.getNewWorkflow(
                            MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);

            // Create a mapping of ExportMasks to add volumes to, or
            // add to a list of new Exports to create
            Map<URI, Map<URI, Integer>> exportMaskToVolumesToAdd = new HashMap<>();
            List<URI> newInitiators = new ArrayList<>();
            List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
            for (Initiator initiator : initiators) {
                List<ExportMask> exportMasks =
                        ExportUtils.getInitiatorExportMasks(initiator, _dbClient);
                if (exportMasks == null || exportMasks.isEmpty()) {
                    newInitiators.add(initiator.getId());
                } else {
                    for (ExportMask exportMask : exportMasks) {
                        exportMaskToVolumesToAdd.put(exportMask.getId(), volumeMap);
                    }
                }
            }
            Map<String, List<URI>> computeResourceToInitiators =
                    mapInitiatorsToComputeResource(exportGroup, newInitiators);
            log.info(
                    String.format(
                            "Need to create ExportMasks for these compute resources %s",
                            Joiner.on(',').join(computeResourceToInitiators.entrySet())));
            // ExportMasks that need to be newly created. That is, the initiators in
            // this ExportGroup create request do not already exist on the system,
            // hence there aren't any existing ExportMasks for them.
            for (Map.Entry<String, List<URI>> toCreate : computeResourceToInitiators.entrySet()) {
                generateExportMaskCreateWorkflow(
                        workflow, null, storage, exportGroup, toCreate.getValue(), volumeMap, token);
            }
            log.info(
                    String.format(
                            "Need to add volumes for these ExportMasks %s",
                            exportMaskToVolumesToAdd.entrySet()));
            // There are some existing ExportMasks for the initiators in the request.
            // For these, we want to reuse the ExportMask and add volumes to them.
            // These ExportMasks would be created by the system. ScaleIO has no
            // concept of ExportMasks.
            for (Map.Entry<URI, Map<URI, Integer>> toAddVolumes :
                    exportMaskToVolumesToAdd.entrySet()) {
                ExportMask exportMask =
                        _dbClient.queryObject(ExportMask.class, toAddVolumes.getKey());
                generateExportMaskAddVolumesWorkflow(
                        workflow, null, storage, exportGroup, exportMask, toAddVolumes.getValue());
            }
            String successMessage =
                    String.format(
                            "ExportGroup successfully applied for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(
                _dbClient,
                DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
                        "exportGroupCreate", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(
                _dbClient,
                DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
                        "exportGroupCreate", ex.getMessage()));
    }
}
/**
 * Attaches volumes to initiators.
 *
 * @param storage the storage
 * @param volumes the volumes
 * @param initiators the initiators
 * @param volumeToTargetLunMap the volume to target lun map
 * @param volumeToInitiatorTargetMap the volume to initiator-target map, populated for FC
 * @param exportMask the export mask
 * @throws Exception the exception
 */
private void attachVolumesToInitiators(
        StorageSystem storage,
        List<Volume> volumes,
        List<Initiator> initiators,
        Map<URI, Integer> volumeToTargetLunMap,
        Map<Volume, Map<String, List<String>>> volumeToInitiatorTargetMap,
        ExportMask exportMask)
        throws Exception {
    CinderEndPointInfo ep =
            CinderUtils.getCinderEndPoint(storage.getActiveProviderURI(), dbClient);
    log.debug(
            "Getting the cinder API for the provider with id {}", storage.getActiveProviderURI());
    CinderApi cinderApi = cinderApiFactory.getApi(storage.getActiveProviderURI(), ep);

    List<Initiator> iSCSIInitiators = new ArrayList<Initiator>();
    List<Initiator> fcInitiators = new ArrayList<Initiator>();
    splitInitiatorsByProtocol(initiators, iSCSIInitiators, fcInitiators);
    String host = getHostNameFromInitiators(initiators);

    Map<String, String[]> mapSettingVsValues = getFCInitiatorsArray(fcInitiators);
    String[] fcInitiatorsWwpns = mapSettingVsValues.get(WWPNS);
    String[] fcInitiatorsWwnns = mapSettingVsValues.get(WWNNS);

    for (Volume volume : volumes) {
        // cinder generated volume ID
        String volumeId = volume.getNativeId();
        int targetLunId = -1;
        VolumeAttachResponse attachResponse = null;

        // for iSCSI
        for (Initiator initiator : iSCSIInitiators) {
            String initiatorPort = initiator.getInitiatorPort();
            log.debug(
                    String.format(
                            "Attaching volume %s ( %s ) to initiator %s on Openstack cinder node",
                            volumeId, volume.getId(), initiatorPort));
            attachResponse = cinderApi.attachVolume(volumeId, initiatorPort, null, null, host);
            log.info("Got response : {}", attachResponse.connection_info.toString());
            targetLunId = attachResponse.connection_info.data.target_lun;
        }

        // for FC
        if (fcInitiatorsWwpns.length > 0) {
            log.debug(
                    String.format(
                            "Attaching volume %s ( %s ) to initiators %s on Openstack cinder node",
                            volumeId, volume.getId(), Arrays.toString(fcInitiatorsWwpns)));
            attachResponse =
                    cinderApi.attachVolume(volumeId, null, fcInitiatorsWwpns, fcInitiatorsWwnns, host);
            log.info("Got response : {}", attachResponse.connection_info.toString());
            targetLunId = attachResponse.connection_info.data.target_lun;

            Map<String, List<String>> initTargetMap =
                    attachResponse.connection_info.data.initiator_target_map;
            if (null != initTargetMap && !initTargetMap.isEmpty()) {
                volumeToInitiatorTargetMap.put(
                        volume, attachResponse.connection_info.data.initiator_target_map);
            }
        }

        volumeToTargetLunMap.put(volume.getId(), targetLunId);

        // After the successful export, create or modify the storage ports
        CinderStoragePortOperations storagePortOperationsInstance =
                CinderStoragePortOperations.getInstance(storage, dbClient);
        storagePortOperationsInstance.invoke(attachResponse);
    }

    // Add ITLs to volume objects
    storeITLMappingInVolume(volumeToTargetLunMap, exportMask);
}