@Override
  public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap)
      throws BaseCollectionException {
    EnumerateResponse<CIMInstance> volumeInstanceChunks =
        (EnumerateResponse<CIMInstance>) resultObj;
    WBEMClient client = SMICommunicationInterface.getCIMClient(keyMap);
    _dbClient = (DbClient) keyMap.get(Constants.dbClient);
    _updateVolumes = new ArrayList<Volume>();
    _updateSnapShots = new ArrayList<BlockSnapshot>();
    _updateMirrors = new ArrayList<BlockMirror>();
    CloseableIterator<CIMInstance> volumeInstances = null;
    try {
      _metaVolumeViewPaths = (List<CIMObjectPath>) keyMap.get(Constants.META_VOLUMES_VIEWS);
      if (_metaVolumeViewPaths == null) {
        _metaVolumeViewPaths = new ArrayList<CIMObjectPath>();
        keyMap.put(Constants.META_VOLUMES_VIEWS, _metaVolumeViewPaths);
      }
      // Create an empty placeholder list for meta volume paths (cannot be defined in the XML).
      List<CIMObjectPath> metaVolumePaths =
          (List<CIMObjectPath>) keyMap.get(Constants.META_VOLUMES);
      if (metaVolumePaths == null) {
        keyMap.put(Constants.META_VOLUMES, new ArrayList<CIMObjectPath>());
      }

      CIMObjectPath storagePoolPath = getObjectPathfromCIMArgument(_args);
      volumeInstances = volumeInstanceChunks.getResponses();
      processVolumes(volumeInstances, keyMap);
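      // The provider returns volumes in chunks; keep pulling from the open enumeration context
      // until the provider signals the end of the sequence.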
      while (!volumeInstanceChunks.isEnd()) {
        _logger.info("Processing Next Volume Chunk of size {}", BATCH_SIZE);
        volumeInstanceChunks =
            client.getInstancesWithPath(
                storagePoolPath,
                volumeInstanceChunks.getContext(),
                new UnsignedInteger32(BATCH_SIZE));
        processVolumes(volumeInstanceChunks.getResponses(), keyMap);
      }

      // If the list is empty, updateInBatches returns immediately.
      // Partition size may not matter in this context, since the batch size is smaller than the
      // partition size.
      // TODO: metering might need extra work to push volumes in batches; the method signature is
      // left unchanged for now.
      _partitionManager.updateInBatches(
          _updateVolumes, getPartitionSize(keyMap), _dbClient, VOLUME);
      _partitionManager.updateInBatches(
          _updateSnapShots, getPartitionSize(keyMap), _dbClient, BLOCK_SNAPSHOT);
      _partitionManager.updateInBatches(
          _updateMirrors, getPartitionSize(keyMap), _dbClient, BLOCK_MIRROR);

    } catch (Exception e) {
      _logger.error("Processing Volumes and Snapshots failed", e);
    } finally {
      _updateVolumes = null;
      _updateSnapShots = null;
      _updateMirrors = null;
      if (null != volumeInstances) {
        volumeInstances.close();
      }
    }
  }
 /**
  * Retrieves the DefaultReplicationSettingData instance from the replication service
  * capabilities of the given storage system. Currently used for VMAX V3 only.
  *
  * @param storage the storage system to query
  * @return the default replication setting data instance, or null if none was found
  * @throws WBEMException if the SMI-S query fails
  */
 public CIMInstance getDefaultReplicationSettingData(StorageSystem storage) throws WBEMException {
   CIMInstance defaultInstance = null;
   CloseableIterator<CIMInstance> repSvcCapIter = null;
   try {
     repSvcCapIter =
         _helper.getAssociatorInstances(
             storage,
             _cimPath.getControllerReplicationSvcPath(storage),
             null,
             _cimPath.prefixWithParamName(SmisConstants.REPLICATION_SERVICE_CAPABILTIES),
             null,
             null,
             null);
     if (repSvcCapIter != null && repSvcCapIter.hasNext()) {
       CIMInstance instance = repSvcCapIter.next();
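       // Invoke GetDefaultReplicationSettingData on the ReplicationServiceCapabilities instance;
       // the resulting setting data is returned in the DefaultInstance output argument.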
       CIMArgument[] in = _helper.getDefaultReplicationSettingDataInputArgumentsForLocalMirror();
       CIMArgument[] out = new CIMArgument[5];
       _helper.invokeMethod(
           storage,
           instance.getObjectPath(),
           SmisConstants.GET_DEFAULT_REPLICATION_SETTING_DATA,
           in,
           out);
       defaultInstance =
           (CIMInstance) _cimPath.getFromOutputArgs(out, SmisConstants.DEFAULT_INSTANCE);
     }
   } finally {
     if (repSvcCapIter != null) {
       repSvcCapIter.close();
     }
   }
   return defaultInstance;
 }
 @Override
 public void fractureSingleVolumeMirror(
     StorageSystem storage, URI mirror, Boolean sync, TaskCompleter taskCompleter)
     throws DeviceControllerException {
   _log.info("fractureSingleVolumeMirror operation START");
   CloseableIterator<CIMObjectPath> storageSyncRefs = null;
   try {
     BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
     CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
     // Get reference to the CIM_StorageSynchronized instance
     storageSyncRefs =
         _helper.getReference(storage, mirrorPath, SmisConstants.CIM_STORAGE_SYNCHRONIZED, null);
     boolean isVmax3 = storage.checkIfVmax3();
     while (storageSyncRefs.hasNext()) {
       CIMObjectPath storageSync = storageSyncRefs.next();
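        // For VMAX3, build the fracture arguments with an explicit copy state; otherwise use the
        // plain fracture arguments.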
       CIMArgument[] inArgs =
           isVmax3
               ? _helper.getFractureMirrorInputArgumentsWithCopyState(storageSync, sync)
               : _helper.getFractureMirrorInputArguments(storageSync, sync);
       CIMArgument[] outArgs = new CIMArgument[5];
       // Invoke method to fracture the synchronization
       _helper.callModifyReplica(storage, inArgs, outArgs);
       taskCompleter.ready(_dbClient);
     }
   } catch (Exception e) {
     _log.info("Problem making SMI-S call", e);
     ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
     taskCompleter.error(_dbClient, serviceError);
   } finally {
     if (storageSyncRefs != null) {
       storageSyncRefs.close();
     }
   }
 }
  @Override
  public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap)
      throws BaseCollectionException {
    CloseableIterator<CIMInstance> volumeInstances = null;
    try {
      _dbClient = (DbClient) keyMap.get(Constants.dbClient);
      WBEMClient client = (WBEMClient) keyMap.get(Constants._cimClient);
      _unManagedVolumesUpdate = new ArrayList<UnManagedVolume>();
      CIMObjectPath storagePoolPath = getObjectPathfromCIMArgument(_args);
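      // Derive the pool's ViPR native GUID from its CIM object path; if the pool isn't already
      // discovered in ViPR, there is nothing to update.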
      String poolNativeGuid = NativeGUIDGenerator.generateNativeGuidForPool(storagePoolPath);
      StoragePool pool = checkStoragePoolExistsInDB(poolNativeGuid, _dbClient);
      if (pool == null) {
        _logger.error(
            "Skipping unmanaged volume discovery of access states as the storage pool with path {} doesn't exist in ViPR",
            storagePoolPath.toString());
        return;
      }
      EnumerateResponse<CIMInstance> volumeInstanceChunks =
          (EnumerateResponse<CIMInstance>) resultObj;
      volumeInstances = volumeInstanceChunks.getResponses();

      processVolumes(volumeInstances, keyMap, operation);
      while (!volumeInstanceChunks.isEnd()) {
        _logger.debug("Processing Next Volume Chunk of size {}", BATCH_SIZE);
        volumeInstanceChunks =
            client.getInstancesWithPath(
                storagePoolPath,
                volumeInstanceChunks.getContext(),
                new UnsignedInteger32(BATCH_SIZE));
        processVolumes(volumeInstanceChunks.getResponses(), keyMap, operation);
      }
      if (null != _unManagedVolumesUpdate && _unManagedVolumesUpdate.size() > 0) {
        _partitionManager.updateInBatches(
            _unManagedVolumesUpdate, getPartitionSize(keyMap), _dbClient, "UnManagedVolume");
      }

    } catch (Exception e) {
      _logger.error("Discovering Access States of unManaged Volumes failed", e);
    } finally {
      if (null != volumeInstances) {
        volumeInstances.close();
      }
    }
  }
  private String[] getVMAX3PoolDriveTypes(StorageSystem storageDevice, CIMInstance poolInstance) {
    Set<String> driveTypes = new HashSet<String>();
    CloseableIterator<CIMInstance> virtualProvisioningPoolItr = null;

    _logger.info(
        "Trying to get all VirtualProvisioningPools for storage pool {}",
        poolInstance.getProperty(SmisConstants.CP_INSTANCE_ID).toString());
    CIMObjectPath poolPath = poolInstance.getObjectPath();
    try {
      virtualProvisioningPoolItr =
          getAssociatorInstances(
              poolPath,
              null,
              SmisConstants.SYMM_VIRTUAL_PROVISIONING_POOL,
              null,
              null,
              SmisConstants.PS_V3_VIRTUAL_PROVISIONING_POOL_PROPERTIES);
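      // Collect the distinct DiskDriveType values reported by the associated
      // Symm_VirtualProvisioningPool instances.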
      while (virtualProvisioningPoolItr != null && virtualProvisioningPoolItr.hasNext()) {
        CIMInstance virtualProvisioningPoolInstance = virtualProvisioningPoolItr.next();
        String diskDriveType =
            CIMPropertyFactory.getPropertyValue(
                virtualProvisioningPoolInstance, SmisConstants.CP_DISK_DRIVE_TYPE);
        if (diskDriveType != null) {
          driveTypes.add(diskDriveType);
        }
      }
    } catch (WBEMException e) {
      _logger.error("Error getting VirtualProvisioningPools", e);
    } finally {
      if (virtualProvisioningPoolItr != null) {
        virtualProvisioningPoolItr.close();
      }
    }
    String[] driveTypesArr = driveTypes.toArray(new String[driveTypes.size()]);
    return driveTypesArr;
  }
 @Override
 public void resumeSingleVolumeMirror(
     StorageSystem storage, URI mirror, TaskCompleter taskCompleter)
     throws DeviceControllerException {
   _log.info("resumeSingleVolumeMirror operation START");
   CloseableIterator<CIMObjectPath> storageSyncRefs = null;
   try {
     BlockMirror mirrorObj = _dbClient.queryObject(BlockMirror.class, mirror);
     CIMObjectPath mirrorPath = _cimPath.getBlockObjectPath(storage, mirrorObj);
     // Get reference to the CIM_StorageSynchronized instance
     storageSyncRefs =
         _helper.getReference(storage, mirrorPath, SmisConstants.CIM_STORAGE_SYNCHRONIZED, null);
     if (!storageSyncRefs.hasNext()) {
       _log.error("No synchronization instance found for {}", mirror);
       taskCompleter.error(
           _dbClient, DeviceControllerException.exceptions.resumeVolumeMirrorFailed(mirror));
       return;
     }
     boolean isVmax3 = storage.checkIfVmax3();
     while (storageSyncRefs.hasNext()) {
       CIMObjectPath storageSync = storageSyncRefs.next();
       _log.debug(storageSync.toString());
        /**
         * JIRA CTRL-11855: A user created a mirror and paused it using SMI-S 4.6.2, then upgraded
         * to SMI-S 8.0.3. Resuming the mirror failed with an exception from SMI-S because the
         * existing mirrorObj.getSynchronizedInstance() contains
         * SystemName="SYMMETRIX+000195701573", which is invalid for 8.0.3, where it must be
         * SystemName="SYMMETRIX-+-000195701573". To resolve this, the value is refreshed here
         * with the instance collected from the current SMI-S provider.
         */
       mirrorObj.setSynchronizedInstance(storageSync.toString());
       _dbClient.persistObject(mirrorObj);
       CIMArgument[] inArgs =
           isVmax3
               ? _helper.getResumeSynchronizationInputArgumentsWithCopyState(storageSync)
               : _helper.getResumeSynchronizationInputArguments(storageSync);
       CIMArgument[] outArgs = new CIMArgument[5];
       _helper.callModifyReplica(storage, inArgs, outArgs);
       CIMObjectPath job = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
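        // If the provider returned a job reference, track the resume asynchronously; otherwise
        // read the sync state directly and complete the task now.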
       if (job != null) {
         ControllerServiceImpl.enqueueJob(
             new QueueJob(new SmisBlockResumeMirrorJob(job, storage.getId(), taskCompleter)));
       } else {
         CIMInstance syncObject =
             _helper.getInstance(
                 storage, storageSync, false, false, new String[] {SmisConstants.CP_SYNC_STATE});
         mirrorObj.setSyncState(
             CIMPropertyFactory.getPropertyValue(syncObject, SmisConstants.CP_SYNC_STATE));
         _dbClient.persistObject(mirrorObj);
         taskCompleter.ready(_dbClient);
       }
     }
   } catch (Exception e) {
     _log.error("Failed to resume single volume mirror: {}", mirror);
     ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
     taskCompleter.error(_dbClient, serviceError);
   } finally {
     if (storageSyncRefs != null) {
       storageSyncRefs.close();
     }
   }
 }
  @Override
  public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap)
      throws BaseCollectionException {
    CloseableIterator<CIMInstance> volumeInstances = null;
    EnumerateResponse<CIMInstance> volumeInstanceChunks = null;
    CIMObjectPath storagePoolPath = null;
    WBEMClient client = null;
    try {
      _dbClient = (DbClient) keyMap.get(Constants.dbClient);
      client = (WBEMClient) keyMap.get(Constants._cimClient);
      _profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
      Map<String, VolHostIOObject> exportedVolumes =
          (Map<String, VolHostIOObject>) keyMap.get(Constants.EXPORTED_VOLUMES);
      Set<String> existingVolumesInCG = (Set<String>) keyMap.get(Constants.VOLUMES_PART_OF_CG);
      @SuppressWarnings("unchecked")
      Map<String, RemoteMirrorObject> volumeToRAGroupMap =
          (Map<String, RemoteMirrorObject>) keyMap.get(Constants.UN_VOLUME_RAGROUP_MAP);
      @SuppressWarnings("unchecked")
      Map<String, LocalReplicaObject> volumeToLocalReplicaMap =
          (Map<String, LocalReplicaObject>) keyMap.get(Constants.UN_VOLUME_LOCAL_REPLICA_MAP);
      @SuppressWarnings("unchecked")
      Map<String, Set<String>> vmax2ThinPoolToBoundVolumesMap =
          (Map<String, Set<String>>) keyMap.get(Constants.VMAX2_THIN_POOL_TO_BOUND_VOLUMES);
      Set<String> boundVolumes = null;

      storagePoolPath = getObjectPathfromCIMArgument(_args);
      String poolNativeGuid = NativeGUIDGenerator.generateNativeGuidForPool(storagePoolPath);
      StoragePool pool = checkStoragePoolExistsInDB(poolNativeGuid, _dbClient);
      if (pool == null) {
        _logger.error(
            "Skipping unmanaged volume discovery as the storage pool with path {} doesn't exist in ViPR",
            storagePoolPath.toString());
        return;
      }
      StorageSystem system = _dbClient.queryObject(StorageSystem.class, _profile.getSystemId());
      _unManagedVolumesInsert = new ArrayList<UnManagedVolume>();
      _unManagedVolumesUpdate = new ArrayList<UnManagedVolume>();
      _unManagedExportMasksUpdate = new ArrayList<UnManagedExportMask>();

      // get bound volumes list for VMAX2 Thin pools
      boundVolumes = vmax2ThinPoolToBoundVolumesMap.get(storagePoolPath.toString());

      Set<String> poolSupportedSLONames = (Set<String>) keyMap.get(poolNativeGuid);
      _logger.debug("Pool Supporting SLO Names:{}", poolSupportedSLONames);
      _metaVolumeViewPaths = (List<CIMObjectPath>) keyMap.get(Constants.META_VOLUMES_VIEWS);
      if (_metaVolumeViewPaths == null) {
        _metaVolumeViewPaths = new ArrayList<CIMObjectPath>();
        keyMap.put(Constants.META_VOLUMES_VIEWS, _metaVolumeViewPaths);
      }
      // Create an empty placeholder list for meta volume paths (cannot be defined in the XML).
      _metaVolumePaths = (List<CIMObjectPath>) keyMap.get(Constants.META_VOLUMES);
      if (_metaVolumePaths == null) {
        _metaVolumePaths = new ArrayList<CIMObjectPath>();
        keyMap.put(Constants.META_VOLUMES, _metaVolumePaths);
      }

      _volumeToSpaceConsumedMap =
          (Map<String, String>) keyMap.get(Constants.VOLUME_SPACE_CONSUMED_MAP);

      // get VolumeInfo Object and inject Fast Policy Name.

      volumeInstanceChunks = (EnumerateResponse<CIMInstance>) resultObj;
      volumeInstances = volumeInstanceChunks.getResponses();

      processVolumes(
          volumeInstances,
          keyMap,
          operation,
          pool,
          system,
          exportedVolumes,
          existingVolumesInCG,
          volumeToRAGroupMap,
          volumeToLocalReplicaMap,
          poolSupportedSLONames,
          boundVolumes);
      while (!volumeInstanceChunks.isEnd()) {
        _logger.info("Processing Next Volume Chunk of size {}", BATCH_SIZE);
        volumeInstanceChunks =
            client.getInstancesWithPath(
                storagePoolPath,
                volumeInstanceChunks.getContext(),
                new UnsignedInteger32(BATCH_SIZE));
        processVolumes(
            volumeInstanceChunks.getResponses(),
            keyMap,
            operation,
            pool,
            system,
            exportedVolumes,
            existingVolumesInCG,
            volumeToRAGroupMap,
            volumeToLocalReplicaMap,
            poolSupportedSLONames,
            boundVolumes);
      }
      if (null != _unManagedVolumesUpdate && _unManagedVolumesUpdate.size() > 0) {
        _partitionManager.updateInBatches(
            _unManagedVolumesUpdate, getPartitionSize(keyMap), _dbClient, UNMANAGED_VOLUME);
      }

      if (null != _unManagedVolumesInsert && _unManagedVolumesInsert.size() > 0) {
        _partitionManager.insertInBatches(
            _unManagedVolumesInsert, getPartitionSize(keyMap), _dbClient, UNMANAGED_VOLUME);
      }

      if (null != _unManagedExportMasksUpdate && _unManagedExportMasksUpdate.size() > 0) {
        _partitionManager.updateInBatches(
            _unManagedExportMasksUpdate,
            getPartitionSize(keyMap),
            _dbClient,
            UNMANAGED_EXPORT_MASK);
      }

      performStorageUnManagedVolumeBookKeeping(pool.getId());

    } catch (Exception e) {
      _logger.error("Processing Storage Volume Information failed :", e);
    } finally {
      _unManagedVolumesInsert = null;
      _unManagedVolumesUpdate = null;
      if (null != volumeInstances) {
        volumeInstances.close();
      }
      if (null != volumeInstanceChunks) {
        try {
          client.closeEnumeration(storagePoolPath, volumeInstanceChunks.getContext());
        } catch (Exception e) {
          _logger.debug("Exception occurred while closing volume enumeration", e);
        }
      }
    }
  }
  /**
   * Called to update the job status when the volume expand job completes.
   *
   * @param jobContext The job context.
   */
  public void updateStatus(JobContext jobContext) throws Exception {
    CloseableIterator<CIMObjectPath> associatorIterator = null;
    CloseableIterator<CIMInstance> instanceIterator = null;
    JobStatus jobStatus = getJobStatus();

    try {
      if (jobStatus == JobStatus.IN_PROGRESS) {
        return;
      }

      DbClient dbClient = jobContext.getDbClient();
      CIMConnectionFactory cimConnectionFactory = jobContext.getCimConnectionFactory();
      WBEMClient client = getWBEMClient(dbClient, cimConnectionFactory);

      // If terminal state update storage pool capacity and remove reservation for volume capacity
      // from pool's reserved capacity map.
      if (jobStatus == JobStatus.SUCCESS
          || jobStatus == JobStatus.FAILED
          || jobStatus == JobStatus.FATAL_ERROR) {
        SmisUtils.updateStoragePoolCapacity(dbClient, client, _storagePoolURI);

        StoragePool pool = dbClient.queryObject(StoragePool.class, _storagePoolURI);
        StringMap reservationMap = pool.getReservedCapacityMap();
        URI volumeId = getTaskCompleter().getId();
        // remove from reservation map
        reservationMap.remove(volumeId.toString());
        dbClient.persistObject(pool);
      }

      String opId = getTaskCompleter().getOpId();
      StringBuilder logMsgBuilder =
          new StringBuilder(
              String.format(
                  "Updating status of job %s to %s, task: %s",
                  this.getJobName(), jobStatus.name(), opId));

      if (jobStatus == JobStatus.SUCCESS) {
        VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
        Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
        // set requested capacity
        volume.setCapacity(taskCompleter.getSize());
        // set meta related properties
        volume.setTotalMetaMemberCapacity(taskCompleter.getTotalMetaMembersSize());
        volume.setMetaMemberCount(taskCompleter.getMetaMemberCount());
        volume.setMetaMemberSize(taskCompleter.getMetaMemberSize());
        volume.setIsComposite(taskCompleter.isComposite());
        volume.setCompositionType(taskCompleter.getMetaVolumeType());

        // set provisioned capacity
        associatorIterator =
            client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
        if (associatorIterator.hasNext()) {
          CIMObjectPath volumePath = associatorIterator.next();
          CIMInstance volumeInstance = client.getInstance(volumePath, true, false, null);
          if (volumeInstance != null) {
            CIMProperty consumableBlocks =
                volumeInstance.getProperty(SmisConstants.CP_CONSUMABLE_BLOCKS);
            CIMProperty blockSize = volumeInstance.getProperty(SmisConstants.CP_BLOCK_SIZE);
            // calculate provisionedCapacity = consumableBlocks * block size
            Long provisionedCapacity =
                Long.valueOf(consumableBlocks.getValue().toString())
                    * Long.valueOf(blockSize.getValue().toString());
            volume.setProvisionedCapacity(provisionedCapacity);
          }

          // set allocated capacity
          instanceIterator =
              client.referenceInstances(
                  volumePath,
                  SmisConstants.CIM_ALLOCATED_FROM_STORAGEPOOL,
                  null,
                  false,
                  SmisConstants.PS_SPACE_CONSUMED);
          if (instanceIterator.hasNext()) {
            CIMInstance allocatedFromStoragePoolPath = instanceIterator.next();
            CIMProperty spaceConsumed =
                allocatedFromStoragePoolPath.getProperty(SmisConstants.CP_SPACE_CONSUMED);
            if (null != spaceConsumed) {
              volume.setAllocatedCapacity(Long.valueOf(spaceConsumed.getValue().toString()));
            }
          }
        }
        logMsgBuilder.append(
            String.format(
                "%n   Capacity: %s, Provisioned capacity: %s, Allocated Capacity: %s",
                volume.getCapacity(),
                volume.getProvisionedCapacity(),
                volume.getAllocatedCapacity()));
        if (volume.getIsComposite()) {
          logMsgBuilder.append(
              String.format(
                  "%n   Is Meta: %s, Total meta member capacity: %s, Meta member count %s, Meta member size: %s",
                  volume.getIsComposite(),
                  volume.getTotalMetaMemberCapacity(),
                  volume.getMetaMemberCount(),
                  volume.getMetaMemberSize()));
        }

        _log.info(logMsgBuilder.toString());

        // Reset list of meta member volumes in the volume
        if (volume.getMetaVolumeMembers() != null) {
          volume.getMetaVolumeMembers().clear();
        }

        StorageSystem storageSystem =
            dbClient.queryObject(StorageSystem.class, volume.getStorageController());
        // set the RP tag on the volume if the volume is RP protected
        if (volume.checkForRp()
            && storageSystem.getSystemType() != null
            && storageSystem
                .getSystemType()
                .equalsIgnoreCase(DiscoveredDataObject.Type.vmax.toString())) {
          SmisCommandHelper helper = jobContext.getSmisCommandHelper();
          List<CIMObjectPath> volumePathList = new ArrayList<CIMObjectPath>();
          volumePathList.add(helper.getVolumeMember(storageSystem, volume));
          helper.setRecoverPointTag(storageSystem, volumePathList, true);
        }

        dbClient.persistObject(volume);
        // Reset list of meta members native ids in WF data (when meta is created meta members are
        // removed from array)
        WorkflowService.getInstance().storeStepData(opId, new ArrayList<String>());
      }
    } catch (Exception e) {
      _log.error("Caught an exception while trying to updateStatus for SmisVolumeExpandJob", e);
      setPostProcessingErrorStatus(
          "Encountered an internal error during volume expand job status processing : "
              + e.getMessage());
    } finally {
      _metaVolumeTaskCompleter.setLastStepStatus(jobStatus);
      if (associatorIterator != null) {
        associatorIterator.close();
      }
      if (instanceIterator != null) {
        instanceIterator.close();
      }
      super.updateStatus(jobContext);
    }
  }
  /** {@inheritDoc} */
  @Override
  public void updateStatus(JobContext jobContext) throws Exception {
    JobStatus jobStatus = getJobStatus();
    CloseableIterator<CIMObjectPath> volumeIter = null;
    try {
      DbClient dbClient = jobContext.getDbClient();
      TaskCompleter completer = getTaskCompleter();
      BlockSnapshot snapshot = dbClient.queryObject(BlockSnapshot.class, _snapshotURI);
      if (jobStatus == JobStatus.IN_PROGRESS) {
        return;
      }
      if (jobStatus == JobStatus.SUCCESS) {
        s_logger.info(
            "Post-processing successful link snapshot session target {} for task {}",
            snapshot.getId(),
            completer.getOpId());
        // Get the snapshot session to which the target is being linked.
        BlockSnapshotSession snapSession =
            dbClient.queryObject(BlockSnapshotSession.class, completer.getId());

        // Get the snapshot device ID and set it against the BlockSnapshot object.
        BlockObject sourceObj = BlockObject.fetch(dbClient, snapshot.getParent().getURI());
        CIMConnectionFactory cimConnectionFactory = jobContext.getCimConnectionFactory();
        WBEMClient client = getWBEMClient(dbClient, cimConnectionFactory);
        volumeIter =
            client.associatorNames(getCimJob(), null, SmisConstants.CIM_STORAGE_VOLUME, null, null);
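        // The job's associated StorageVolume paths include both the source and the newly linked
        // target; skip the source and populate the BlockSnapshot from the target volume.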
        while (volumeIter.hasNext()) {
          // Get the sync volume native device id
          CIMObjectPath volumePath = volumeIter.next();
          s_logger.info("volumePath: {}", volumePath.toString());
          CIMInstance volume = client.getInstance(volumePath, false, false, null);
          String volumeDeviceId =
              volumePath.getKey(SmisConstants.CP_DEVICE_ID).getValue().toString();
          s_logger.info("volumeDeviceId: {}", volumeDeviceId);
          if (volumeDeviceId.equals(sourceObj.getNativeId())) {
            // Don't want the source, we want the linked target.
            continue;
          }
          String volumeElementName =
              CIMPropertyFactory.getPropertyValue(volume, SmisConstants.CP_ELEMENT_NAME);
          s_logger.info("volumeElementName: {}", volumeElementName);
          String volumeWWN = CIMPropertyFactory.getPropertyValue(volume, SmisConstants.CP_WWN_NAME);
          s_logger.info("volumeWWN: {}", volumeWWN);
          String volumeAltName = CIMPropertyFactory.getPropertyValue(volume, SmisConstants.CP_NAME);
          s_logger.info("volumeAltName: {}", volumeAltName);
          StorageSystem system = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
          snapshot.setNativeId(volumeDeviceId);
          snapshot.setNativeGuid(NativeGUIDGenerator.generateNativeGuid(system, snapshot));
          snapshot.setDeviceLabel(volumeElementName);
          snapshot.setInactive(false);
          snapshot.setIsSyncActive(Boolean.TRUE);
          snapshot.setCreationTime(Calendar.getInstance());
          snapshot.setWWN(volumeWWN.toUpperCase());
          snapshot.setAlternateName(volumeAltName);
          snapshot.setSettingsInstance(snapSession.getSessionInstance());
          commonSnapshotUpdate(
              snapshot,
              volume,
              client,
              system,
              sourceObj.getNativeId(),
              volumeDeviceId,
              false,
              dbClient);
          s_logger.info(
              String.format(
                  "For target volume path %1$s, going to set blocksnapshot %2$s nativeId to %3$s (%4$s). Associated volume is %5$s (%6$s)",
                  volumePath.toString(),
                  snapshot.getId().toString(),
                  volumeDeviceId,
                  volumeElementName,
                  sourceObj.getNativeId(),
                  sourceObj.getDeviceLabel()));
          dbClient.updateObject(snapshot);
        }
      } else if (jobStatus == JobStatus.FAILED || jobStatus == JobStatus.FATAL_ERROR) {
        s_logger.info(
            "Failed to link snapshot session target {} for task {}",
            snapshot.getId(),
            completer.getOpId());
        snapshot.setInactive(true);
        dbClient.updateObject(snapshot);
      }
    } catch (Exception e) {
      setPostProcessingErrorStatus(
          "Encountered an internal error in link snapshot session target job status processing: "
              + e.getMessage());
      s_logger.error(
          "Encountered an internal error in link snapshot session target job status processing", e);
    } finally {
      if (volumeIter != null) {
        volumeIter.close();
      }
      super.updateStatus(jobContext);
    }
  }