  /** Sends update requests to remote backup nodes. */
  public void map() {
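    // Send one batched update request to each mapped backup node; topology failures are
    // treated as implicit responses so the future can still complete.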
    if (!mappings.isEmpty()) {
      for (GridDhtAtomicUpdateRequest req : mappings.values()) {
        try {
          if (log.isDebugEnabled())
            log.debug(
                "Sending DHT atomic update request [nodeId=" + req.nodeId() + ", req=" + req + ']');

          cctx.io().send(req.nodeId(), req, cctx.ioPolicy());
        } catch (ClusterTopologyCheckedException ignored) {
          U.warn(
              log,
              "Failed to send update request to backup node because it left the grid: "
                  + req.nodeId());

          registerResponse(req.nodeId());
        } catch (IgniteCheckedException e) {
          U.error(
              log,
              "Failed to send update request to backup node (did node leave the grid?): "
                  + req.nodeId(),
              e);

          registerResponse(req.nodeId());
        }
      }
    } else onDone();

    // Send the response right away if no ACKs from backups are required.
    // Backups will send ACKs anyway; the future will be completed after all backups have replied.
    if (updateReq.writeSynchronizationMode() != FULL_SYNC) completionCb.apply(updateReq, updateRes);
  }
  /**
   * @param readers Entry readers.
   * @param entry Entry.
   * @param val Value.
   * @param entryProcessor Entry processor.
   * @param ttl TTL for near cache update (optional).
   * @param expireTime Expire time for near cache update (optional).
   */
  public void addNearWriteEntries(
      Iterable<UUID> readers,
      GridDhtCacheEntry entry,
      @Nullable CacheObject val,
      EntryProcessor<Object, Object, Object> entryProcessor,
      long ttl,
      long expireTime) {
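    // Map the near-cache update onto every reader node, skipping readers that have left the grid.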
    CacheWriteSynchronizationMode syncMode = updateReq.writeSynchronizationMode();

    keys.add(entry.key());

    AffinityTopologyVersion topVer = updateReq.topologyVersion();

    for (UUID nodeId : readers) {
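      // Look up the batched request for this reader node, creating it lazily on first write.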
      GridDhtAtomicUpdateRequest updateReq = mappings.get(nodeId);

      if (updateReq == null) {
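        // No request yet for this node: verify it is still alive before creating one.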
        ClusterNode node = cctx.discovery().node(nodeId);

        // Node left the grid.
        if (node == null) continue;

        updateReq =
            new GridDhtAtomicUpdateRequest(
                cctx.cacheId(),
                nodeId,
                futVer,
                writeVer,
                syncMode,
                topVer,
                forceTransformBackups,
                this.updateReq.subjectId(),
                this.updateReq.taskNameHash(),
                forceTransformBackups ? this.updateReq.invokeArguments() : null,
                cctx.deploymentEnabled(),
                this.updateReq.keepBinary());

        mappings.put(nodeId, updateReq);
      }

      if (nearReadersEntries == null) nearReadersEntries = new HashMap<>();

      nearReadersEntries.put(entry.key(), entry);

      updateReq.addNearWriteValue(entry.key(), val, entryProcessor, ttl, expireTime);
    }
  }
  /**
   * @param cctx Cache context.
   * @param completionCb Callback to invoke when future is completed.
   * @param writeVer Write version.
   * @param updateReq Update request.
   * @param updateRes Update response.
   */
  public GridDhtAtomicUpdateFuture(
      GridCacheContext cctx,
      CI2<GridNearAtomicUpdateRequest, GridNearAtomicUpdateResponse> completionCb,
      GridCacheVersion writeVer,
      GridNearAtomicUpdateRequest updateReq,
      GridNearAtomicUpdateResponse updateRes) {
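    // Capture the originating near request/response pair and generate a new future version
    // based on the request's topology version.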
    this.cctx = cctx;
    this.writeVer = writeVer;

    futVer = cctx.versions().next(updateReq.topologyVersion());
    this.updateReq = updateReq;
    this.completionCb = completionCb;
    this.updateRes = updateRes;

    if (log == null) log = U.logger(cctx.kernalContext(), logRef, GridDhtAtomicUpdateFuture.class);

    keys = new ArrayList<>(updateReq.keys().size());
    mappings = U.newHashMap(updateReq.keys().size());

    boolean topLocked =
        updateReq.topologyLocked() || (updateReq.fastMap() && !updateReq.clientRequest());

    waitForExchange = !topLocked;
  }
  /**
   * @param req Update request.
   * @param res Update response.
   */
  public void processNearAtomicUpdateResponse(
      GridNearAtomicUpdateRequest req, GridNearAtomicUpdateResponse res) {
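    // All keys failed on the primary node, so there is nothing to store in the near cache.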
    if (F.size(res.failedKeys()) == req.keys().size()) return;

    /*
     * Choose the value to be stored in the near cache: first check that the key is neither in the
     * failed nor in the skipped list, then check whether the value was generated on the primary
     * node; if it was not, use the value sent in the request.
     */

    Collection<KeyCacheObject> failed = res.failedKeys();
    List<Integer> nearValsIdxs = res.nearValuesIndexes();
    List<Integer> skipped = res.skippedIndexes();

    GridCacheVersion ver = req.updateVersion();

    if (ver == null) ver = res.nearVersion();

    assert ver != null : "Failed to find version [req=" + req + ", res=" + res + ']';

    int nearValIdx = 0;

    String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());

    for (int i = 0; i < req.keys().size(); i++) {
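      // Ignore keys that the primary node reported as skipped or failed.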
      if (F.contains(skipped, i)) continue;

      KeyCacheObject key = req.keys().get(i);

      if (F.contains(failed, key)) continue;

      if (ctx.affinity()
          .belongs(
              ctx.localNode(),
              ctx.affinity().partition(key),
              req.topologyVersion())) { // Reader became backup.
        GridCacheEntryEx entry = peekEx(key);

        if (entry != null && entry.markObsolete(ver)) removeEntry(entry);

        continue;
      }

      CacheObject val = null;

      if (F.contains(nearValsIdxs, i)) {
        val = res.nearValue(nearValIdx);

        nearValIdx++;
      } else {
        assert req.operation() != TRANSFORM;

        if (req.operation() != DELETE) val = req.value(i);
      }

      long ttl = res.nearTtl(i);
      long expireTime = res.nearExpireTime(i);

      if (ttl != CU.TTL_NOT_CHANGED && expireTime == CU.EXPIRE_TIME_CALCULATE)
        expireTime = CU.toExpireTime(ttl);

      try {
        processNearAtomicUpdateResponse(
            ver,
            key,
            val,
            null,
            ttl,
            expireTime,
            req.keepBinary(),
            req.nodeId(),
            req.subjectId(),
            taskName);
      } catch (IgniteCheckedException e) {
        res.addFailedKey(
            key, new IgniteCheckedException("Failed to update key in near cache: " + key, e));
      }
    }
  }
  /** {@inheritDoc} */
  @Override
  public boolean onDone(@Nullable Void res, @Nullable Throwable err) {
    if (super.onDone(res, err)) {
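      // First (and only) completion: deregister the future and notify continuous queries per key.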
      cctx.mvcc().removeAtomicFuture(version());

      if (err != null) {
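        // Update failed: mark every mapped key as failed and suppress its update event.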
        if (!mappings.isEmpty()) {
          Collection<KeyCacheObject> hndKeys = new ArrayList<>(keys.size());

          exit:
          for (GridDhtAtomicUpdateRequest req : mappings.values()) {
            for (int i = 0; i < req.size(); i++) {
              KeyCacheObject key = req.key(i);

              if (!hndKeys.contains(key)) {
                updateRes.addFailedKey(key, err);

                cctx.continuousQueries()
                    .skipUpdateEvent(
                        key, req.partitionId(i), req.updateCounter(i), updateReq.topologyVersion());

                hndKeys.add(key);

                if (hndKeys.size() == keys.size()) break exit;
              }
            }
          }
        } else for (KeyCacheObject key : keys) updateRes.addFailedKey(key, err);
      } else {
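        // Update succeeded: fire a continuous query notification once per distinct key.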
        Collection<KeyCacheObject> hndKeys = new ArrayList<>(keys.size());

        exit:
        for (GridDhtAtomicUpdateRequest req : mappings.values()) {
          for (int i = 0; i < req.size(); i++) {
            KeyCacheObject key = req.key(i);

            if (!hndKeys.contains(key)) {
              try {
                cctx.continuousQueries()
                    .onEntryUpdated(
                        key,
                        req.value(i),
                        req.localPreviousValue(i),
                        key.internal() || !cctx.userCache(),
                        req.partitionId(i),
                        true,
                        false,
                        req.updateCounter(i),
                        updateReq.topologyVersion());
              } catch (IgniteCheckedException e) {
                U.warn(
                    log,
                    "Failed to send continuous query message. [key="
                        + key
                        + ", newVal="
                        + req.value(i)
                        + ", err="
                        + e
                        + "]");
              }

              hndKeys.add(key);

              if (hndKeys.size() == keys.size()) break exit;
            }
          }
        }
      }

      if (updateReq.writeSynchronizationMode() == FULL_SYNC)
        completionCb.apply(updateReq, updateRes);

      return true;
    }

    return false;
  }
  /**
   * @param entry Entry to map.
   * @param val Value to write.
   * @param entryProcessor Entry processor.
   * @param ttl TTL (optional).
   * @param conflictExpireTime Conflict expire time (optional).
   * @param conflictVer Conflict version (optional).
   * @param addPrevVal Whether the previous value should be sent to backups.
   * @param prevVal Previous value (optional).
   * @param updateCntr Partition update counter.
   */
  public void addWriteEntry(
      GridDhtCacheEntry entry,
      @Nullable CacheObject val,
      EntryProcessor<Object, Object, Object> entryProcessor,
      long ttl,
      long conflictExpireTime,
      @Nullable GridCacheVersion conflictVer,
      boolean addPrevVal,
      @Nullable CacheObject prevVal,
      long updateCntr) {
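    // Map the entry to all DHT nodes that own its partition: remote nodes get batched update
    // requests, while a single local owner only needs a continuous query notification.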
    AffinityTopologyVersion topVer = updateReq.topologyVersion();

    Collection<ClusterNode> dhtNodes = cctx.dht().topology().nodes(entry.partition(), topVer);

    if (log.isDebugEnabled())
      log.debug(
          "Mapping entry to DHT nodes [nodes=" + U.nodeIds(dhtNodes) + ", entry=" + entry + ']');

    CacheWriteSynchronizationMode syncMode = updateReq.writeSynchronizationMode();

    keys.add(entry.key());

    for (ClusterNode node : dhtNodes) {
      UUID nodeId = node.id();

      if (!nodeId.equals(cctx.localNodeId())) {
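        // Remote node: append the entry to its batched request, creating the request on first use.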
        GridDhtAtomicUpdateRequest updateReq = mappings.get(nodeId);

        if (updateReq == null) {
          updateReq =
              new GridDhtAtomicUpdateRequest(
                  cctx.cacheId(),
                  nodeId,
                  futVer,
                  writeVer,
                  syncMode,
                  topVer,
                  forceTransformBackups,
                  this.updateReq.subjectId(),
                  this.updateReq.taskNameHash(),
                  forceTransformBackups ? this.updateReq.invokeArguments() : null,
                  cctx.deploymentEnabled(),
                  this.updateReq.keepBinary());

          mappings.put(nodeId, updateReq);
        }

        updateReq.addWriteValue(
            entry.key(),
            val,
            entryProcessor,
            ttl,
            conflictExpireTime,
            conflictVer,
            addPrevVal,
            entry.partition(),
            prevVal,
            updateCntr);
      } else if (dhtNodes.size() == 1) {
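        // Local node is the only owner, so no backup update is needed; notify continuous
        // queries directly.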
        try {
          cctx.continuousQueries()
              .onEntryUpdated(
                  entry.key(),
                  val,
                  prevVal,
                  entry.key().internal() || !cctx.userCache(),
                  entry.partition(),
                  true,
                  false,
                  updateCntr,
                  updateReq.topologyVersion());
        } catch (IgniteCheckedException e) {
          U.warn(
              log,
              "Failed to send continuous query message. [key="
                  + entry.key()
                  + ", newVal="
                  + val
                  + ", err="
                  + e
                  + "]");
        }
      }
    }
  }
  /** {@inheritDoc} */
  @Override
  public IgniteInternalFuture<Void> completeFuture(AffinityTopologyVersion topVer) {
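    // If the update started on an older topology and must wait for exchange, keep the exchange
    // future waiting on this one; otherwise nothing blocks the exchange.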
    if (waitForExchange && updateReq.topologyVersion().compareTo(topVer) < 0) return this;

    return null;
  }