/**
 * Cleans up UnManagedVolumes in the DB which have been deleted manually from the array.
 *
 * <p>1. Get all UnManagedVolumes for this array from the DB. 2. Compute the set difference
 * between the DB URIs and the URIs discovered from the provider. 3. Any unmanaged volume found
 * only in the DB (i.e. no longer on the array) is set inactive and its pool/system references
 * are cleared.
 *
 * <p>DB | Provider
 *
 * <p>x,y,z | y,z,a [a --> new entry has been added but indexes didn't get added yet into DB]
 *
 * <p>x --> will be set to inactive
 *
 * @param storageSystem the array whose unmanaged volumes are being reconciled
 * @param discoveredUnManagedVolumes URIs of the unmanaged volumes just discovered on the array
 * @param dbClient DB client used to query and update UnManagedVolume records
 * @param partitionManager persists the updated volumes in batches
 */
  public static void markInActiveUnManagedVolumes(
      StorageSystem storageSystem,
      Set<URI> discoveredUnManagedVolumes,
      DbClient dbClient,
      PartitionManager partitionManager) {

    _log.info(
        " -- Processing {} discovered UnManaged Volumes Objects from -- {}",
        discoveredUnManagedVolumes.size(),
        storageSystem.getLabel());
    // An empty discovery result could be a transient provider problem; do not
    // deactivate every DB record based on it.
    if (discoveredUnManagedVolumes.isEmpty()) {
      return;
    }
    // Get all available existing unmanaged Volume URIs for this array from DB
    URIQueryResultList allAvailableUnManagedVolumesInDB = new URIQueryResultList();
    dbClient.queryByConstraint(
        ContainmentConstraint.Factory.getStorageDeviceUnManagedVolumeConstraint(
            storageSystem.getId()),
        allAvailableUnManagedVolumesInDB);

    Set<URI> unManagedVolumesInDBSet = new HashSet<>();
    Iterator<URI> allAvailableUnManagedVolumesItr = allAvailableUnManagedVolumesInDB.iterator();
    while (allAvailableUnManagedVolumesItr.hasNext()) {
      unManagedVolumesInDBSet.add(allAvailableUnManagedVolumesItr.next());
    }

    // Volumes present in the DB but no longer reported by the provider.
    SetView<URI> onlyAvailableinDB =
        Sets.difference(unManagedVolumesInDBSet, discoveredUnManagedVolumes);

    // Parameterized logging instead of eager string concatenation.
    _log.info("Diff :{}", Joiner.on("\t").join(onlyAvailableinDB));
    if (!onlyAvailableinDB.isEmpty()) {
      List<UnManagedVolume> unManagedVolumeTobeDeleted = new ArrayList<>();
      Iterator<UnManagedVolume> unManagedVolumes =
          dbClient.queryIterativeObjects(
              UnManagedVolume.class, new ArrayList<URI>(onlyAvailableinDB));

      while (unManagedVolumes.hasNext()) {
        UnManagedVolume volume = unManagedVolumes.next();
        // Skip records already gone or already inactive.
        if (null == volume || volume.getInactive()) {
          continue;
        }

        _log.info("Setting unManagedVolume {} inactive", volume.getId());
        volume.setStoragePoolUri(NullColumnValueGetter.getNullURI());
        volume.setStorageSystemUri(NullColumnValueGetter.getNullURI());
        volume.setInactive(true);
        unManagedVolumeTobeDeleted.add(volume);
      }
      // Persist in batches of 1000 to avoid one oversized DB update.
      if (!unManagedVolumeTobeDeleted.isEmpty()) {
        partitionManager.updateAndReIndexInBatches(
            unManagedVolumeTobeDeleted, 1000, dbClient, UNMANAGED_VOLUME);
      }
    }
  }
  @Override
  public void exportGroupCreate(
      URI storageURI,
      URI exportGroupURI,
      List<URI> initiatorURIs,
      Map<URI, Integer> volumeMap,
      String token)
      throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);

      if (initiatorURIs == null || initiatorURIs.isEmpty()) {
        // No initiators to export to; the task is trivially complete.
        taskCompleter.ready(_dbClient);
        return;
      }

      // Set up workflow steps.
      Workflow workflow =
          _workflowService.getNewWorkflow(
              MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);

      // Split the initiators into two buckets: those with an existing ExportMask
      // (reuse the mask and add the volumes to it) and those without (create one).
      Map<URI, Map<URI, Integer>> volumesByExistingMask = new HashMap<>();
      List<URI> unmaskedInitiators = new ArrayList<>();
      List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
      for (Initiator initiator : initiators) {
        List<ExportMask> masksForInitiator =
            ExportUtils.getInitiatorExportMasks(initiator, _dbClient);
        if (masksForInitiator == null || masksForInitiator.isEmpty()) {
          unmaskedInitiators.add(initiator.getId());
        } else {
          for (ExportMask mask : masksForInitiator) {
            volumesByExistingMask.put(mask.getId(), volumeMap);
          }
        }
      }

      Map<String, List<URI>> computeResourceToInitiators =
          mapInitiatorsToComputeResource(exportGroup, unmaskedInitiators);
      log.info(
          String.format(
              "Need to create ExportMasks for these compute resources %s",
              Joiner.on(',').join(computeResourceToInitiators.entrySet())));
      // These initiators do not yet exist on the system, so there is no existing
      // ExportMask for them -- create one per compute resource.
      for (Map.Entry<String, List<URI>> creation : computeResourceToInitiators.entrySet()) {
        generateExportMaskCreateWorkflow(
            workflow, null, storage, exportGroup, creation.getValue(), volumeMap, token);
      }

      log.info(
          String.format(
              "Need to add volumes for these ExportMasks %s", volumesByExistingMask.entrySet()));
      // Reuse the system-created ExportMasks that already contain the requested
      // initiators and just add volumes to them (ScaleIO itself has no mask concept).
      for (Map.Entry<URI, Map<URI, Integer>> addition : volumesByExistingMask.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, addition.getKey());
        generateExportMaskAddVolumesWorkflow(
            workflow, null, storage, exportGroup, mask, addition.getValue());
      }

      String successMessage =
          String.format(
              "ExportGroup successfully applied for StorageArray %s", storage.getLabel());
      workflow.executePlan(taskCompleter, successMessage);
    } catch (DeviceControllerException dex) {
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupCreate", dex.getMessage()));
    } catch (Exception ex) {
      _log.error("ExportGroup Orchestration failed.", ex);
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupCreate", ex.getMessage()));
    }
  }
  /**
   * Deletes this ExportGroup's export on the given array. A mask used only by this ExportGroup
   * is deleted outright; a shared mask only has this group's unshared volumes removed from it.
   */
  @Override
  public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);

      List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
      if (masks != null && !masks.isEmpty()) {
        Workflow workflow =
            _workflowService.getNewWorkflow(
                MaskingWorkflowEntryPoints.getInstance(), "exportGroupDelete", true, token);

        Map<URI, Integer> volumeMap =
            ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
        List<URI> volumeURIs = new ArrayList<>(volumeMap.keySet());
        List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
        // Per mask: how many ExportGroups each volume is associated with.
        Map<URI, Map<URI, Integer>> exportMaskToVolumeCount =
            ExportMaskUtils.mapExportMaskToVolumeShareCount(_dbClient, volumeURIs, initiatorURIs);

        for (ExportMask exportMask : masks) {
          List<URI> exportGroupURIs = new ArrayList<>();
          if (!ExportUtils.isExportMaskShared(_dbClient, exportMask.getId(), exportGroupURIs)) {
            // Mask belongs exclusively to this ExportGroup -- safe to delete whole.
            log.info(
                String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
            generateExportMaskDeleteWorkflow(
                workflow, null, storage, exportGroup, exportMask, null);
          } else {
            Map<URI, Integer> volumeToExportGroupCount =
                exportMaskToVolumeCount.get(exportMask.getId());
            List<URI> volumesToRemove = new ArrayList<>();
            // Hoisted the null check out of the loop; it was re-evaluated on every
            // iteration and, when null, the loop did nothing anyway.
            if (volumeToExportGroupCount != null) {
              for (URI uri : volumeMap.keySet()) {
                // Remove the volume only if it is not shared with
                // more than 1 ExportGroup
                Integer numExportGroupsVolumeIsIn = volumeToExportGroupCount.get(uri);
                if (numExportGroupsVolumeIsIn != null && numExportGroupsVolumeIsIn == 1) {
                  volumesToRemove.add(uri);
                }
              }
            }
            if (!volumesToRemove.isEmpty()) {
              log.info(
                  String.format(
                      "Adding step to remove volumes %s from ExportMask %s",
                      Joiner.on(',').join(volumesToRemove), exportMask.getMaskName()));
              generateExportMaskRemoveVolumesWorkflow(
                  workflow, null, storage, exportGroup, exportMask, volumesToRemove, null);
            }
          }
        }

        String successMessage =
            String.format(
                "ExportGroup delete successfully completed for StorageArray %s",
                storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
      } else {
        // No masks on this array for the group; nothing to tear down.
        taskCompleter.ready(_dbClient);
      }
    } catch (DeviceControllerException dex) {
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupDelete", dex.getMessage()));
    } catch (Exception ex) {
      _log.error("ExportGroup Orchestration failed.", ex);
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupDelete", ex.getMessage()));
    }
  }
  @Override
  public void exportGroupRemoveVolumes(
      URI storageURI, URI exportGroupURI, List<URI> volumeURIs, String token) throws Exception {
    // For each volume in the list, unmap it from each ExportGroup initiator
    // (scli unmap --volume volid --sdc initiator.sdcid) unless the volume is
    // still used by another ExportGroup with the same initiator.
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);

      List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
      if (masks == null || masks.isEmpty()) {
        // No masks on this array for the group; nothing to remove.
        taskCompleter.ready(_dbClient);
        return;
      }

      // Set up workflow steps.
      Workflow workflow =
          _workflowService.getNewWorkflow(
              MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveVolumes", true, token);

      List<URI> groupInitiators = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
      // Per mask: the number of ExportGroups each requested volume is associated with.
      Map<URI, Map<URI, Integer>> shareCountsByMask =
          ExportMaskUtils.mapExportMaskToVolumeShareCount(_dbClient, volumeURIs, groupInitiators);

      // Collect, per ExportMask, the volumes that belong to this group alone.
      Map<URI, List<URI>> removalsByMask = new HashMap<>();
      for (ExportMask mask : masks) {
        Map<URI, Integer> shareCounts = shareCountsByMask.get(mask.getId());
        if (shareCounts == null) {
          continue;
        }
        for (Map.Entry<URI, Integer> shareEntry : shareCounts.entrySet()) {
          // Only volumes referenced by exactly one ExportGroup may be removed.
          if (shareEntry.getValue() == 1) {
            List<URI> pending = removalsByMask.get(mask.getId());
            if (pending == null) {
              pending = new ArrayList<>();
              removalsByMask.put(mask.getId(), pending);
            }
            pending.add(shareEntry.getKey());
          }
        }
      }

      // One remove-volumes step per affected ExportMask.
      for (Map.Entry<URI, List<URI>> removal : removalsByMask.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, removal.getKey());
        log.info(
            String.format(
                "Adding step to remove volumes %s from ExportMask %s",
                Joiner.on(',').join(removal.getValue()), mask.getMaskName()));
        generateExportMaskRemoveVolumesWorkflow(
            workflow, null, storage, exportGroup, mask, removal.getValue(), null);
      }

      String successMessage =
          String.format(
              "ExportGroup remove volumes successfully applied for StorageArray %s",
              storage.getLabel());
      workflow.executePlan(taskCompleter, successMessage);
    } catch (DeviceControllerException dex) {
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupRemoveVolumes", dex.getMessage()));
    } catch (Exception ex) {
      _log.error("ExportGroup Orchestration failed.", ex);
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupRemoveVolumes", ex.getMessage()));
    }
  }
  @Override
  public void exportGroupAddVolumes(
      URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token)
      throws Exception {
    // For each volume in the map, map it to every initiator in the ExportGroup
    // (scli map --volume volid --sdc initiator.sdcid).
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);

      List<URI> groupInitiators = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
      if (groupInitiators == null || groupInitiators.isEmpty()) {
        // No initiators in the group; nothing to map.
        taskCompleter.ready(_dbClient);
        return;
      }

      // Set up workflow steps.
      Workflow workflow =
          _workflowService.getNewWorkflow(
              MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token);

      // Masks that already have initiators receive the new volumes; any initiator
      // still unplaced afterwards needs a brand-new ExportMask.
      Map<URI, Map<URI, Integer>> volumesByExistingMask = new HashMap<>();
      List<URI> unplacedInitiators = new ArrayList<>(groupInitiators);
      for (ExportMask mask : ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI)) {
        if (!mask.hasAnyInitiators()) {
          continue;
        }
        volumesByExistingMask.put(mask.getId(), volumeMap);
        for (String initiatorIdStr : mask.getInitiators()) {
          unplacedInitiators.remove(URI.create(initiatorIdStr));
        }
      }

      Map<String, List<URI>> computeResourceToInitiators =
          mapInitiatorsToComputeResource(exportGroup, unplacedInitiators);
      log.info(
          String.format(
              "Need to create ExportMasks for these compute resources %s",
              Joiner.on(',').join(computeResourceToInitiators.entrySet())));
      // New ExportMasks are needed because we just added volumes from the 'storage'
      // StorageSystem to this ExportGroup.
      for (Map.Entry<String, List<URI>> creation : computeResourceToInitiators.entrySet()) {
        generateExportMaskCreateWorkflow(
            workflow, null, storage, exportGroup, creation.getValue(), volumeMap, token);
      }

      log.info(
          String.format(
              "Need to add volumes for these ExportMasks %s", volumesByExistingMask.entrySet()));
      // Already-known ExportMasks: simply add the volumes to them.
      for (Map.Entry<URI, Map<URI, Integer>> addition : volumesByExistingMask.entrySet()) {
        ExportMask mask = _dbClient.queryObject(ExportMask.class, addition.getKey());
        generateExportMaskAddVolumesWorkflow(
            workflow, null, storage, exportGroup, mask, addition.getValue());
      }

      String successMessage =
          String.format(
              "ExportGroup add volumes successfully applied for StorageArray %s",
              storage.getLabel());
      workflow.executePlan(taskCompleter, successMessage);
    } catch (DeviceControllerException dex) {
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupAddVolumes", dex.getMessage()));
    } catch (Exception ex) {
      _log.error("ExportGroup Orchestration failed.", ex);
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupAddVolumes", ex.getMessage()));
    }
  }
  /**
   * Removes the given initiators from this ExportGroup's ExportMasks.
   *
   * <p>An initiator referenced only by this ExportGroup is removed from its mask (and the mask
   * is deleted when no initiators would remain). An initiator shared with other ExportGroups
   * instead has this group's unshared volumes removed from the mask (and the mask is deleted
   * when no volumes would remain).
   */
  @Override
  public void exportGroupRemoveInitiators(
      URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
    /*
     * foreach ScaleOI volume in ExportGroup
     * foreach initiator in list
     * if volume not used in another ExportGroup with same initiator
     * scli unmap --volume volid --sdc initiator.sdcid
     */
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);

      if (initiatorURIs != null
          && !initiatorURIs.isEmpty()
          && exportGroup.getExportMasks() != null) {
        // Set up workflow steps.
        Workflow workflow =
            _workflowService.getNewWorkflow(
                MaskingWorkflowEntryPoints.getInstance(),
                "exportGroupRemoveInitiators",
                true,
                token);

        // Create a mapping of ExportMask URI to initiators to remove
        Map<URI, List<URI>> exportToInitiatorsToRemove = new HashMap<>();
        Map<URI, List<URI>> exportToVolumesToRemove = new HashMap<>();
        Map<URI, Integer> volumeMap = null; // lazily fetched only when a volume removal is needed
        for (String exportMaskURIStr : exportGroup.getExportMasks()) {
          URI exportMaskURI = URI.create(exportMaskURIStr);
          ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
          if (exportMask == null) {
            continue;
          }
          for (URI initiatorURI : initiatorURIs) {
            Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
            if (initiator == null || !exportMask.hasInitiator(initiatorURI.toString())) {
              continue;
            }
            if (ExportUtils.getInitiatorExportGroups(initiator, _dbClient).size() == 1) {
              // BUGFIX: look the list up with exportMaskURI -- the same key used by the
              // put() below. The previous code queried with exportGroupURI, which never
              // matched, so a fresh list was created for every initiator and earlier
              // entries for the same mask were silently overwritten.
              List<URI> initiators = exportToInitiatorsToRemove.get(exportMaskURI);
              if (initiators == null) {
                initiators = new ArrayList<>();
                exportToInitiatorsToRemove.put(exportMaskURI, initiators);
              }
              initiators.add(initiatorURI);
            } else {
              if (volumeMap == null) {
                volumeMap = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
              }
              // BUGFIX: same wrong-key lookup as above, fixed to use exportMaskURI.
              List<URI> volumeURIs = exportToVolumesToRemove.get(exportMaskURI);
              if (volumeURIs == null) {
                volumeURIs = new ArrayList<>();
                exportToVolumesToRemove.put(exportMaskURI, volumeURIs);
              }
              for (URI volumeURI : volumeMap.keySet()) {
                // Only add to the remove list for the ExportMask if
                // the EM is not being shared with another ExportGroup
                Integer count =
                    ExportUtils.getNumberOfExportGroupsWithVolume(initiator, volumeURI, _dbClient);
                if (count == 1) {
                  volumeURIs.add(volumeURI);
                }
              }
            }
          }
        }

        // Generate the remove initiators steps for the entries that were determined above
        for (Map.Entry<URI, List<URI>> toRemoveInits : exportToInitiatorsToRemove.entrySet()) {
          ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toRemoveInits.getKey());
          if (exportMask != null) {
            List<URI> removeInitURIs = toRemoveInits.getValue();
            List<String> exportMaskInitiatorURIs = new ArrayList<>(exportMask.getInitiators());
            for (URI uri : removeInitURIs) {
              exportMaskInitiatorURIs.remove(uri.toString());
            }
            if (exportMaskInitiatorURIs.isEmpty()) {
              // Removing these initiators would empty the mask -- delete it instead.
              log.info(
                  String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
              generateExportMaskDeleteWorkflow(
                  workflow, null, storage, exportGroup, exportMask, null);
            } else {
              log.info(
                  String.format(
                      "Adding step to remove initiators %s from ExportMask %s",
                      Joiner.on(',').join(removeInitURIs), exportMask.getMaskName()));
              generateExportMaskRemoveInitiatorsWorkflow(
                  workflow, null, storage, exportGroup, exportMask, removeInitURIs, true);
            }
          }
        }

        // Generate the remove volume for those cases where we remove initiators
        // from an ExportGroup that contains more than one host/initiator
        for (Map.Entry<URI, List<URI>> toRemoveVols : exportToVolumesToRemove.entrySet()) {
          ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toRemoveVols.getKey());
          List<URI> removeVolumeURIs = toRemoveVols.getValue();
          if (exportMask != null && !removeVolumeURIs.isEmpty()) {
            List<String> exportMaskVolumeURIs = new ArrayList<>(exportMask.getVolumes().keySet());
            for (URI uri : removeVolumeURIs) {
              exportMaskVolumeURIs.remove(uri.toString());
            }
            if (exportMaskVolumeURIs.isEmpty()) {
              // Removing these volumes would empty the mask -- delete it instead.
              log.info(
                  String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
              generateExportMaskDeleteWorkflow(
                  workflow, null, storage, exportGroup, exportMask, null);
            } else {
              log.info(
                  String.format(
                      "Adding step to remove volumes %s from ExportMask %s",
                      Joiner.on(',').join(removeVolumeURIs), exportMask.getMaskName()));
              generateExportMaskRemoveVolumesWorkflow(
                  workflow, null, storage, exportGroup, exportMask, removeVolumeURIs, null);
            }
          }
        }

        String successMessage =
            String.format(
                "ExportGroup remove initiators successfully applied for StorageArray %s",
                storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
      } else {
        taskCompleter.ready(_dbClient);
      }
    } catch (DeviceControllerException dex) {
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupRemoveInitiators", dex.getMessage()));
    } catch (Exception ex) {
      _log.error("ExportGroup Orchestration failed.", ex);
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupRemoveInitiators", ex.getMessage()));
    }
  }
  // Adds the given initiators to this ExportGroup's export: initiators that fit an existing
  // ExportMask are added to it (or the group's volumes are added when the initiator is already
  // present); leftover initiators get a new ExportMask per compute resource.
  @Override
  public void exportGroupAddInitiators(
      URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
    /*
     * foreach ExportGroup.volume
     * if ScaleIO volume
     * foreach initiator
     * scli map --volume volid --sdc initiator.sdcid
     */
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);

      if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
        // Set up workflow steps.
        Workflow workflow =
            _workflowService.getNewWorkflow(
                MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddInitiators", true, token);

        Map<String, URI> portNameToInitiatorURI = new HashMap<>();
        List<URI> hostURIs = new ArrayList<>();
        List<String> portNames = new ArrayList<>();
        // Populate the portNames and the mapping of the portNames to Initiator URIs
        processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);

        // Volumes currently in the ExportGroup; any reused mask should expose these.
        Map<URI, Integer> volumesToAdd =
            ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
        // Initiators still needing a home; entries are removed as masks are found for them.
        List<URI> initiatorURIsToPlace = new ArrayList<>(initiatorURIs);
        Map<String, List<URI>> computeResourceToInitiators =
            mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
        Set<URI> partialMasks = new HashSet<>();
        // Decide which existing ExportMasks each initiator should be placed into.
        // NOTE(review): Collections.EMPTY_MAP (raw type) is passed for the device-side mask
        // matches, so placement here presumably relies on DB state only -- confirm.
        Map<String, Set<URI>> initiatorToExport =
            determineInitiatorToExportMaskPlacements(
                exportGroup,
                storageURI,
                computeResourceToInitiators,
                Collections.EMPTY_MAP,
                portNameToInitiatorURI,
                partialMasks);
        Map<URI, List<URI>> exportToInitiators =
            toExportMaskToInitiatorURIs(initiatorToExport, portNameToInitiatorURI);
        for (Map.Entry<URI, List<URI>> toAddInitiators : exportToInitiators.entrySet()) {
          ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toAddInitiators.getKey());
          // Skip masks that have been deleted since placement was computed.
          if (exportMask == null || exportMask.getInactive()) {
            continue;
          }
          for (URI toAddInitiator : toAddInitiators.getValue()) {
            if (!exportMask.hasInitiator(toAddInitiator.toString())) {
              log.info(
                  String.format(
                      "Add step to add initiator %s to ExportMask %s",
                      toAddInitiator.toString(), exportMask.getMaskName()));
              // NOTE(review): the whole toAddInitiators.getValue() list is passed while
              // iterating it per-initiator, so a mask missing several initiators appears
              // to get multiple identical add-initiators steps -- confirm intended.
              generateExportMaskAddInitiatorsWorkflow(
                  workflow,
                  null,
                  storage,
                  exportGroup,
                  exportMask,
                  toAddInitiators.getValue(),
                  null,
                  null);
            } else if (volumesToAdd != null && volumesToAdd.size() > 0) {
              // Initiator already in the mask: ensure the group's volumes are exposed.
              log.info(
                  String.format(
                      "Add step to add volumes %s to ExportMask %s",
                      Joiner.on(',').join(volumesToAdd.entrySet()), exportMask.getMaskName()));
              generateExportMaskAddVolumesWorkflow(
                  workflow, null, storage, exportGroup, exportMask, volumesToAdd);
            }
            initiatorURIsToPlace.remove(toAddInitiator);
          }
        }

        // If there are any new initiators that weren't already known to the system
        // previously, add them now.
        if (!initiatorURIsToPlace.isEmpty() && volumesToAdd != null) {
          Map<String, List<URI>> newComputeResources =
              mapInitiatorsToComputeResource(exportGroup, initiatorURIsToPlace);
          log.info(
              String.format(
                  "Need to create ExportMasks for these compute resources %s",
                  Joiner.on(',').join(newComputeResources.entrySet())));
          for (Map.Entry<String, List<URI>> toCreate : newComputeResources.entrySet()) {
            generateExportMaskCreateWorkflow(
                workflow, null, storage, exportGroup, toCreate.getValue(), volumesToAdd, null);
          }
        }

        String successMessage =
            String.format(
                "ExportGroup add initiators successfully applied for StorageArray %s",
                storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
      } else {
        // Nothing to add; complete the task immediately.
        taskCompleter.ready(_dbClient);
      }
    } catch (DeviceControllerException dex) {
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupAddInitiators", dex.getMessage()));
    } catch (Exception ex) {
      _log.error("ExportGroup Orchestration failed.", ex);
      taskCompleter.error(
          _dbClient,
          DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
              "exportGroupAddInitiators", ex.getMessage()));
    }
  }
  /**
   * Adds the requested volumes to the export on the given storage array.
   *
   * <p>If the ExportGroup already has masks, each candidate mask (discovered on the array via
   * {@code findExportMasks} merged with the group's own masks) is refreshed and checked; volumes
   * not yet present are added via a per-mask workflow step. If no volumes could be scheduled onto
   * existing masks, new masks are created per compute resource. If the ExportGroup has no masks at
   * all, the work is delegated to {@link #exportGroupCreate}.
   *
   * @param storageURI URI of the underlying storage system
   * @param exportGroupURI URI of the ExportGroup being modified
   * @param volumeMap map of volume URI to requested Integer value (presumably the HLU —
   *     NOTE(review): confirm against callers)
   * @param token task identifier used for workflow and task-completer tracking
   * @throws Exception on orchestration failure; failures are also reported through the task
   *     completer when it has been constructed
   */
  @Override
  public void exportGroupAddVolumes(
      URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token)
      throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
      BlockStorageDevice device = getDevice();
      taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      boolean anyVolumesAdded = false;
      boolean createdNewMask = false;
      if (exportGroup.getExportMasks() != null) {
        // Set up workflow steps.
        Workflow workflow =
            _workflowService.getNewWorkflow(
                MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token);
        // Masks/volumes that need zoning updates for the add-volumes path.
        List<ExportMask> exportMasksToZoneAddVolumes = new ArrayList<ExportMask>();
        List<URI> volumesToZoneAddVolumes = new ArrayList<URI>();
        // Masks/volumes that need zoning created for the new-mask path.
        List<URI> exportMasksToZoneCreate = new ArrayList<URI>();
        Map<URI, Integer> volumesToZoneCreate = new HashMap<URI, Integer>();
        // Add the volume to all the ExportMasks that are contained in the
        // ExportGroup. The volumes should be added only if they don't
        // already exist for the ExportMask.
        Collection<URI> initiatorURIs =
            Collections2.transform(
                exportGroup.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
        List<URI> hostURIs = new ArrayList<URI>();
        Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
        List<String> portNames = new ArrayList<String>();
        processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);
        // We always want to have the full list of initiators for the hosts involved in
        // this export. This will allow the export operation to always find any
        // existing exports for a given host.
        queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);
        // Masks discovered on the array for these port names, merged with the
        // masks the ExportGroup already knows about.
        Map<String, Set<URI>> foundMatches = device.findExportMasks(storage, portNames, false);
        Set<String> checkMasks = mergeWithExportGroupMaskURIs(exportGroup, foundMatches.values());
        for (String maskURIStr : checkMasks) {
          ExportMask exportMask = _dbClient.queryObject(ExportMask.class, URI.create(maskURIStr));
          _log.info(String.format("Checking mask %s", exportMask.getMaskName()));
          // Only consider active masks that belong to the target array.
          if (!exportMask.getInactive() && exportMask.getStorageDevice().equals(storageURI)) {
            exportMask = device.refreshExportMask(storage, exportMask);
            // BlockStorageDevice level, so that it has up-to-date
            // info from the array
            Map<URI, Integer> volumesToAdd =
                getVolumesToAdd(volumeMap, exportMask, exportGroup, token);
            // Not able to get VolumesToAdd due to error condition so, return
            // NOTE(review): assumes getVolumesToAdd() already marked the task as
            // failed before returning null — confirm, otherwise the task hangs.
            if (null == volumesToAdd) {
              return;
            }
            _log.info(
                String.format(
                    "Mask %s, adding volumes %s",
                    exportMask.getMaskName(), Joiner.on(',').join(volumesToAdd.entrySet())));
            if (volumesToAdd.size() > 0) {
              exportMasksToZoneAddVolumes.add(exportMask);
              volumesToZoneAddVolumes.addAll(volumesToAdd.keySet());

              // Make sure the zoning map is getting updated for user-created masks
              updateZoningMap(exportGroup, exportMask, true);
              generateExportMaskAddVolumesWorkflow(
                  workflow,
                  EXPORT_GROUP_ZONING_TASK,
                  storage,
                  exportGroup,
                  exportMask,
                  volumesToAdd);
              anyVolumesAdded = true;
              // Need to check if the mask is not already associated with
              // ExportGroup. This is case when we are adding volume to
              // the export and there is an existing export on the array.
              // We have to reuse that existing export, but we need also
              // associated it with the ExportGroup.
              if (!exportGroup.hasMask(exportMask.getId())) {
                exportGroup.addExportMask(exportMask.getId());
                _dbClient.updateAndReindexObject(exportGroup);
              }
            }
          }
        }
        if (!anyVolumesAdded) {
          // This is the case where we were requested to add volumes to the
          // export for this storage array, but none were scheduled to be
          // added. This could be either because there are existing masks,
          // but the volumes are already in the export mask or there are no
          // masks for the storage array. We are checking if there are any
          // masks and if there are initiators for the export.
          if (!ExportMaskUtils.hasExportMaskForStorage(_dbClient, exportGroup, storageURI)
              && exportGroup.hasInitiators()) {
            _log.info(
                "No existing masks to which the requested volumes can be added. Creating a new mask");
            List<URI> initiators = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());

            // Group initiators by compute resource (host/cluster) so each
            // resource gets its own new mask.
            Map<String, List<URI>> hostInitiatorMap =
                mapInitiatorsToComputeResource(exportGroup, initiators);

            if (!hostInitiatorMap.isEmpty()) {
              for (Map.Entry<String, List<URI>> resourceEntry : hostInitiatorMap.entrySet()) {
                String computeKey = resourceEntry.getKey();
                List<URI> computeInitiatorURIs = resourceEntry.getValue();
                _log.info(String.format("New export masks for %s", computeKey));
                GenExportMaskCreateWorkflowResult result =
                    generateExportMaskCreateWorkflow(
                        workflow,
                        EXPORT_GROUP_ZONING_TASK,
                        storage,
                        exportGroup,
                        computeInitiatorURIs,
                        volumeMap,
                        token);
                exportMasksToZoneCreate.add(result.getMaskURI());
                volumesToZoneCreate.putAll(volumeMap);
              }
              createdNewMask = true;
            }
          }
        }

        // Generate the zoning steps that the masking steps above wait on.
        if (!exportMasksToZoneAddVolumes.isEmpty()) {
          generateZoningAddVolumesWorkflow(
              workflow, null, exportGroup, exportMasksToZoneAddVolumes, volumesToZoneAddVolumes);
        }

        if (!exportMasksToZoneCreate.isEmpty()) {
          generateZoningCreateWorkflow(
              workflow, null, exportGroup, exportMasksToZoneCreate, volumesToZoneCreate);
        }

        String successMessage =
            String.format(
                "Successfully added volumes to export on StorageArray %s", storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
      } else {
        // No masks exist on the ExportGroup yet: creating the export from
        // scratch is handled by exportGroupCreate.
        if (exportGroup.hasInitiators()) {
          _log.info("There are no masks for this export. Need to create anew.");
          List<URI> initiatorURIs = new ArrayList<URI>();
          for (String initiatorURIStr : exportGroup.getInitiators()) {
            initiatorURIs.add(URI.create(initiatorURIStr));
          }
          // Invoke the export group create operation,
          // which should in turn create a workflow operations to
          // create the export for the newly added volume(s).
          exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, token);
          anyVolumesAdded = true;
        } else {
          _log.warn("There are no initiator for export group: " + exportGroup.getLabel());
        }
      }
      // Nothing was scheduled at all: complete the task as a no-op success.
      if (!anyVolumesAdded && !createdNewMask) {
        taskCompleter.ready(_dbClient);
        _log.info(
            "No volumes pushed to array because either they already exist "
                + "or there were no initiators added to the export yet.");
      }
    } catch (Exception ex) {
      // Report the failure through the task completer when it exists; the
      // exception is logged either way.
      _log.error("ExportGroup Orchestration failed.", ex);
      // TODO add service code here
      if (taskCompleter != null) {
        ServiceError serviceError =
            DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
        taskCompleter.error(_dbClient, serviceError);
      }
    }
  }
  /**
   * Create storage level masking components to support the requested ExportGroup object. This
   * operation is flexible enough to take into account initiators that already exist in some
   * StorageGroup. In such a case, the underlying masking component will be "adopted" by the
   * ExportGroup. Further operations against the "adopted" mask will only allow for addition and
   * removal of those initiators/volumes that were added by a Bourne request. Existing
   * initiators/volumes will be maintained.
   *
   * @param storageURI - URI referencing underlying storage array
   * @param exportGroupURI - URI referencing Bourne-level masking, ExportGroup
   * @param initiatorURIs - List of Initiator URIs
   * @param volumeMap - Map of Volume URIs to requested Integer values (presumably the HLU)
   * @param token - Identifier for operation
   * @throws Exception on orchestration failure; failures are also reported via the task completer
   */
  @Override
  public void exportGroupCreate(
      URI storageURI,
      URI exportGroupURI,
      List<URI> initiatorURIs,
      Map<URI, Integer> volumeMap,
      String token)
      throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
      BlockStorageDevice device = getDevice();
      ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
      StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
      taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);

      if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
        _log.info("export_create: initiator list non-empty");

        // Set up workflow steps.
        Workflow workflow =
            _workflowService.getNewWorkflow(
                MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);

        // Create two steps, one for Zoning, one for the ExportGroup actions.
        // This step is for zoning. It is not specific to a single
        // NetworkSystem, as it will look at all the initiators and targets and compute
        // the zones required (which might be on multiple NetworkSystems.)

        boolean createdSteps =
            determineExportGroupCreateSteps(
                workflow,
                null,
                device,
                storage,
                exportGroup,
                initiatorURIs,
                volumeMap,
                false,
                token);

        String zoningStep =
            generateDeviceSpecificZoningCreateWorkflow(
                workflow, EXPORT_GROUP_MASKING_TASK, exportGroup, null, volumeMap);

        if (createdSteps && null != zoningStep) {
          // Execute the plan and allow the WorkflowExecutor to fire the
          // taskCompleter.
          String successMessage =
              String.format(
                  "ExportGroup successfully applied for StorageArray %s", storage.getLabel());
          workflow.executePlan(taskCompleter, successMessage);
        }
        // NOTE(review): when no steps were created (or no zoning step was
        // generated) the completer is never fired on this path — confirm the
        // step-generation helpers complete the task with an error themselves,
        // otherwise the task is left hanging.
      } else {
        // No initiators: nothing to mask, complete the task as a no-op success.
        _log.info("export_create: initiator list is empty");
        taskCompleter.ready(_dbClient);
      }
    } catch (DeviceControllerException dex) {
      // Log before reporting so the stack trace is preserved even when the
      // completer has not been constructed yet (previously this catch was
      // silent in that case).
      _log.error("ExportGroup create orchestration failed.", dex);
      if (taskCompleter != null) {
        taskCompleter.error(
            _dbClient,
            DeviceControllerException.errors.vmaxExportGroupCreateError(dex.getMessage()));
      }
    } catch (Exception ex) {
      _log.error("ExportGroup Orchestration failed.", ex);
      // TODO add service code here
      if (taskCompleter != null) {
        ServiceError serviceError =
            DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
        taskCompleter.error(_dbClient, serviceError);
      }
    }
  }