/** @param m Mapping. */
  private void finish(GridDistributedTxMapping m) {
    ClusterNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest req =
        new GridNearTxFinishRequest(
            futId,
            tx.xidVersion(),
            tx.threadId(),
            commit,
            tx.isInvalidate(),
            tx.system(),
            tx.ioPolicy(),
            tx.syncCommit(),
            tx.syncRollback(),
            m.explicitLock(),
            tx.storeEnabled(),
            tx.topologyVersion(),
            null,
            null,
            null,
            tx.size(),
            tx.subjectId(),
            tx.taskNameHash(),
            tx.activeCachesDeploymentEnabled());

    // If this is the primary node for the keys.
    if (n.isLocal()) {
      req.miniId(IgniteUuid.randomUuid());

      IgniteInternalFuture<IgniteInternalTx> fut = cctx.tm().txHandler().finish(n.id(), tx, req);

      // Add new future.
      if (fut != null) add(fut);
    } else {
      FinishMiniFuture fut = new FinishMiniFuture(m);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      if (tx.pessimistic()) cctx.tm().beforeFinishRemote(n.id(), tx.threadId());

      try {
        cctx.io().send(n, req, tx.ioPolicy());

        // If we don't wait for result, then mark future as done.
        if (!isSync() && !m.explicitLock()) fut.onDone();
      } catch (ClusterTopologyCheckedException e) {
        // Remove previous mapping.
        mappings.remove(m.node().id());

        fut.onNodeLeft(n.id());
      } catch (IgniteCheckedException e) {
        // Fail the whole thing.
        fut.onDone(e);
      }
    }
  }
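import java.util.UUID;
import java.util.concurrent.CompletableFuture;

// ---------------------------------------------------------------------
// Illustrative sketch (NOT Ignite API) of the dispatch pattern used by
// finish(GridDistributedTxMapping) above: the local node is invoked
// directly through the handler with no network round-trip, while a remote
// node gets a tracked mini future that is completed by the reply listener
// or failed on a send error. Transport and handleLocally are assumptions.
// ---------------------------------------------------------------------
class LocalRemoteDispatchSketch {
  interface Transport {
    void send(UUID nodeId, Object msg) throws Exception;
  }

  private final UUID localNodeId = UUID.randomUUID();

  CompletableFuture<Void> dispatch(UUID target, Object msg, Transport io) {
    if (target.equals(localNodeId))
      // Local node: call the handler directly, skipping serialization.
      return CompletableFuture.runAsync(() -> handleLocally(msg));

    CompletableFuture<Void> mini = new CompletableFuture<>(); // Completed by the reply listener.

    try {
      io.send(target, msg);
    } catch (Exception e) {
      mini.completeExceptionally(e); // Fail the whole thing.
    }

    return mini;
  }

  private void handleLocally(Object msg) {
    // Apply the message to local state here.
  }
}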
  /**
   * @param nodes Nodes.
   * @param msg Message.
   * @param partsMap Partitions.
   * @return {@code true} if all messages were sent successfully.
   */
  private boolean send(
      Collection<ClusterNode> nodes, Message msg, Map<ClusterNode, IntArray> partsMap) {
    boolean locNodeFound = false;

    boolean ok = true;

    for (ClusterNode node : nodes) {
      if (node.isLocal()) {
        locNodeFound = true;

        continue;
      }

      try {
        ctx.io().send(node, GridTopic.TOPIC_QUERY, copy(msg, node, partsMap), QUERY_POOL);
      } catch (IgniteCheckedException e) {
        ok = false;

        U.warn(log, e.getMessage());
      }
    }

    // Local node goes last to allow parallel execution.
    if (locNodeFound)
      h2.mapQueryExecutor()
          .onMessage(ctx.localNodeId(), copy(msg, ctx.discovery().localNode(), partsMap));

    return ok;
  }
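import java.util.List;
import java.util.UUID;
import java.util.function.Consumer;

// Minimal sketch (names are illustrative, not Ignite API) of the ordering
// used by send(...) above: notify all remote nodes first, then execute the
// local part, so remote nodes process the message in parallel with the local
// execution. Send failures are recorded but do not stop the broadcast.
class BroadcastLocalLastSketch {
  static boolean broadcast(
      List<UUID> nodes, UUID localId, Consumer<UUID> sendRemote, Runnable runLocal) {
    boolean ok = true;
    boolean locNodeFound = false;

    for (UUID node : nodes) {
      if (node.equals(localId)) {
        locNodeFound = true; // Defer local execution.

        continue;
      }

      try {
        sendRemote.accept(node);
      } catch (RuntimeException e) {
        ok = false; // Remember the failure, keep notifying the remaining nodes.
      }
    }

    if (locNodeFound)
      runLocal.run(); // Local node goes last to allow parallel execution.

    return ok;
  }
}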
    /**
     * @param nodeId Failed node ID.
     * @return {@code true} if this mini future was mapped to the failed node and completed.
     */
    boolean onNodeLeft(UUID nodeId) {
      if (nodeId.equals(m.node().id())) {
        if (log.isDebugEnabled())
          log.debug("Remote node left grid while sending or waiting for reply: " + this);

        if (isSync()) {
          Map<UUID, Collection<UUID>> txNodes = tx.transactionNodes();

          if (txNodes != null) {
            Collection<UUID> backups = txNodes.get(nodeId);

            if (!F.isEmpty(backups)) {
              final CheckRemoteTxMiniFuture mini =
                  new CheckRemoteTxMiniFuture(new HashSet<>(backups));

              add(mini);

              GridDhtTxFinishRequest req = checkCommittedRequest(mini.futureId());

              req.waitRemoteTransactions(true);

              for (UUID backupId : backups) {
                ClusterNode backup = cctx.discovery().node(backupId);

                if (backup != null && WAIT_REMOTE_TXS_SINCE.compareTo(backup.version()) <= 0) {
                  if (backup.isLocal()) {
                    IgniteInternalFuture<?> fut =
                        cctx.tm().remoteTxFinishFuture(tx.nearXidVersion());

                    fut.listen(
                        new CI1<IgniteInternalFuture<?>>() {
                          @Override
                          public void apply(IgniteInternalFuture<?> fut) {
                            mini.onDhtFinishResponse(cctx.localNodeId());
                          }
                        });
                  } else {
                    try {
                      cctx.io().send(backup, req, tx.ioPolicy());
                    } catch (ClusterTopologyCheckedException e) {
                      mini.onNodeLeft(backupId);
                    } catch (IgniteCheckedException e) {
                      mini.onDone(e);
                    }
                  }
                } else mini.onDhtFinishResponse(backupId);
              }
            }
          }
        }

        onDone(tx);

        return true;
      }

      return false;
    }
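import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the idea behind CheckRemoteTxMiniFuture as used above (the real
// class is internal to Ignite; this is an assumption-laden stand-in): track
// the set of backup nodes still expected to confirm the transaction and
// complete once every backup has responded or left the grid.
class BackupAckFutureSketch extends CompletableFuture<Void> {
  private final Set<UUID> pending = ConcurrentHashMap.newKeySet();

  BackupAckFutureSketch(Set<UUID> backups) {
    pending.addAll(backups);

    if (pending.isEmpty())
      complete(null); // Nothing to wait for.
  }

  /** Called both for a DHT finish response and for a backup that left the grid. */
  void onResponse(UUID backupId) {
    if (pending.remove(backupId) && pending.isEmpty())
      complete(null);
  }
}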
  /**
   * @param queueLimit Maximum size of unacknowledged messages queue.
   * @param node Node.
   * @param log Logger.
   */
  public GridNioRecoveryDescriptor(int queueLimit, ClusterNode node, IgniteLogger log) {
    assert !node.isLocal() : node;
    assert queueLimit > 0;

    msgFuts = new ArrayDeque<>(queueLimit);

    this.queueLimit = queueLimit;
    this.node = node;
    this.log = log;
  }
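import java.util.ArrayDeque;

// Illustrative sketch of the recovery descriptor's core idea: keep at most
// queueLimit unacknowledged messages so they can be resent after a reconnect;
// acks from the peer release the oldest entries. The class and method names
// here are assumptions, not the GridNioRecoveryDescriptor API.
class AckWindowSketch<M> {
  private final ArrayDeque<M> msgFuts;
  private final int queueLimit;

  AckWindowSketch(int queueLimit) {
    assert queueLimit > 0;

    this.queueLimit = queueLimit;
    this.msgFuts = new ArrayDeque<>(queueLimit);
  }

  /** @return {@code false} if the window is full and the sender must back off. */
  synchronized boolean onSent(M msg) {
    if (msgFuts.size() >= queueLimit)
      return false;

    msgFuts.addLast(msg);

    return true;
  }

  /** Releases the {@code n} oldest messages confirmed by the peer. */
  synchronized void ack(int n) {
    for (int i = 0; i < n && !msgFuts.isEmpty(); i++)
      msgFuts.pollFirst();
  }
}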
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions full messages.
   */
  private void updatePartitionFullMap(GridDhtPartitionsFullMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();

      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      if (cacheCtx != null) cacheCtx.topology().update(exchId, entry.getValue());
      else {
        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, AffinityTopologyVersion.NONE);

        if (oldest != null && oldest.isLocal())
          cctx.exchange().clientTopology(cacheId, this).update(exchId, entry.getValue());
      }
    }
  }
    private void init() {
      ClusterNode node = nodes.poll();

      GridCacheQueryFutureAdapter<?, ?, R> fut0 =
          (GridCacheQueryFutureAdapter<?, ?, R>)
              (node.isLocal()
                  ? qryMgr.queryLocal(bean)
                  : qryMgr.queryDistributed(bean, Collections.singleton(node)));

      fut0.listen(
          new IgniteInClosure<IgniteInternalFuture<Collection<R>>>() {
            @Override
            public void apply(IgniteInternalFuture<Collection<R>> fut) {
              try {
                onDone(fut.get());
              } catch (IgniteCheckedException e) {
                if (F.isEmpty(nodes)) onDone(e);
                else init();
              }
            }
          });

      fut = fut0;
    }
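import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

// Sketch of the failover loop in init() above (illustrative names): poll a
// node, run the query, and on failure recurse onto the next node, surfacing
// the error only when the node queue is exhausted.
class NodeFailoverSketch {
  static <N, R> CompletableFuture<R> tryNodes(
      Queue<N> nodes, Function<N, CompletableFuture<R>> qry) {
    N node = nodes.poll();

    if (node == null)
      return CompletableFuture.failedFuture(new IllegalStateException("No nodes left to try."));

    CompletableFuture<R> res = new CompletableFuture<>();

    qry.apply(node).whenComplete((r, err) -> {
      if (err == null)
        res.complete(r);
      else if (nodes.isEmpty())
        res.completeExceptionally(err); // Last node failed: report the error.
      else
        tryNodes(nodes, qry).whenComplete((r2, err2) -> { // Fail over to the next node.
          if (err2 == null)
            res.complete(r2);
          else
            res.completeExceptionally(err2);
        });
    });

    return res;
  }
}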
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client nodes need to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order equal to or less than the last joined node's order.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to a field so it will be included in the toString() output.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to the hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only by instances of this class inside the single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not the oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
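import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Sketch of the bounded-wait idiom used twice in init() above (for the
// partition release future and the locks future): block in slices of twice
// the network timeout and dump diagnostics on every timeout, so a hang shows
// up in the logs instead of blocking silently. The dump callback is a
// stand-in for dumpPendingObjects() / the lock dump in the original.
class AwaitWithDiagnosticsSketch {
  static <T> T await(CompletableFuture<T> fut, long netTimeoutMs, Runnable dumpDiagnostics)
      throws Exception {
    while (true) {
      try {
        return fut.get(2 * netTimeoutMs, TimeUnit.MILLISECONDS);
      } catch (TimeoutException ignored) {
        // Print pending transactions and locks that might have led to the hang.
        dumpDiagnostics.run();
      }
    }
  }
}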
  private void checkBackup() {
    GridDistributedTxMapping mapping = mappings.singleMapping();

    if (mapping != null) {
      UUID nodeId = mapping.node().id();

      Collection<UUID> backups = tx.transactionNodes().get(nodeId);

      if (!F.isEmpty(backups)) {
        assert backups.size() == 1;

        UUID backupId = F.first(backups);

        ClusterNode backup = cctx.discovery().node(backupId);

        // Nothing to do if backup has left the grid.
        if (backup == null) {
          readyNearMappingFromBackup(mapping);

          ClusterTopologyCheckedException cause =
              new ClusterTopologyCheckedException("Backup node left grid: " + backupId);

          cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

          onDone(
              new IgniteTxRollbackCheckedException(
                  "Failed to commit transaction " + "(backup has left grid): " + tx.xidVersion(),
                  cause));
        } else {
          final CheckBackupMiniFuture mini = new CheckBackupMiniFuture(backup, mapping);

          add(mini);

          if (backup.isLocal()) {
            boolean committed = !cctx.tm().addRolledbackTx(tx);

            readyNearMappingFromBackup(mapping);

            if (committed) {
              if (tx.syncCommit()) {
                GridCacheVersion nearXidVer = tx.nearXidVersion();

                assert nearXidVer != null : tx;

                IgniteInternalFuture<?> fut = cctx.tm().remoteTxFinishFuture(nearXidVer);

                fut.listen(
                    new CI1<IgniteInternalFuture<?>>() {
                      @Override
                      public void apply(IgniteInternalFuture<?> fut) {
                        mini.onDone(tx);
                      }
                    });

                return;
              }

              mini.onDone(tx);
            } else {
              ClusterTopologyCheckedException cause =
                  new ClusterTopologyCheckedException("Primary node left grid: " + nodeId);

              cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

              mini.onDone(
                  new IgniteTxRollbackCheckedException(
                      "Failed to commit transaction "
                          + "(transaction has been rolled back on backup node): "
                          + tx.xidVersion(),
                      cause));
            }
          } else {
            GridDhtTxFinishRequest finishReq = checkCommittedRequest(mini.futureId());

            // Preserve old behavior, otherwise response is not sent.
            if (WAIT_REMOTE_TXS_SINCE.compareTo(backup.version()) > 0) finishReq.syncCommit(true);

            try {
              if (FINISH_NEAR_ONE_PHASE_SINCE.compareTo(backup.version()) <= 0)
                cctx.io().send(backup, finishReq, tx.ioPolicy());
              else {
                mini.onDone(
                    new IgniteTxHeuristicCheckedException(
                        "Failed to check for tx commit on "
                            + "the backup node (node has an old Ignite version) [rmtNodeId="
                            + backup.id()
                            + ", ver="
                            + backup.version()
                            + ']'));
              }
            } catch (ClusterTopologyCheckedException e) {
              mini.onNodeLeft(backupId);
            } catch (IgniteCheckedException e) {
              mini.onDone(e);
            }
          }
        }
      } else readyNearMappingFromBackup(mapping);
    }
  }
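// Sketch of the version gate applied by checkBackup() (and by onNodeLeft()
// above): a protocol feature is used only when the remote node's version is
// at least the version that introduced it, as in
// WAIT_REMOTE_TXS_SINCE.compareTo(backup.version()) <= 0. Generic stand-in:
class VersionGateSketch {
  /** @return {@code true} if {@code rmtVer} supports a feature introduced in {@code since}. */
  static <V extends Comparable<V>> boolean supports(V rmtVer, V since) {
    return since.compareTo(rmtVer) <= 0;
  }
}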
  /**
   * @param key Key to map.
   * @param mappings Mappings.
   * @param locVals Local values.
   * @param topVer Topology version.
   * @param mapped Previously mapped.
   * @return {@code True} if the key was mapped to a remote node.
   */
  @SuppressWarnings("ConstantConditions")
  private boolean map(
      KeyCacheObject key,
      Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings,
      Map<K, V> locVals,
      AffinityTopologyVersion topVer,
      Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped) {
    int part = cctx.affinity().partition(key);

    List<ClusterNode> affNodes = cctx.affinity().nodes(part, topVer);

    if (affNodes.isEmpty()) {
      onDone(serverNotFoundError(topVer));

      return false;
    }

    boolean fastLocGet =
        (!forcePrimary || affNodes.get(0).isLocal())
            && cctx.allowFastLocalRead(part, affNodes, topVer);

    if (fastLocGet && localGet(key, part, locVals)) return false;

    ClusterNode node = affinityNode(affNodes);

    if (node == null) {
      onDone(serverNotFoundError(topVer));

      return false;
    }

    boolean remote = !node.isLocal();

    LinkedHashMap<KeyCacheObject, Boolean> keys = mapped.get(node);

    if (keys != null && keys.containsKey(key)) {
      if (REMAP_CNT_UPD.incrementAndGet(this) > MAX_REMAP_CNT) {
        onDone(
            new ClusterTopologyCheckedException(
                "Failed to remap key to a new node after "
                    + MAX_REMAP_CNT
                    + " attempts (key got remapped to the same node) [key="
                    + key
                    + ", node="
                    + U.toShortString(node)
                    + ", mappings="
                    + mapped
                    + ']'));

        return false;
      }
    }

    LinkedHashMap<KeyCacheObject, Boolean> old = mappings.get(node);

    if (old == null) mappings.put(node, old = new LinkedHashMap<>(3, 1f));

    old.put(key, false);

    return remote;
  }
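import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

// Sketch of the remap guard used by map(...) above: a lock-free per-future
// counter caps how many times a key may be remapped to the same node before
// the operation is failed. MAX_REMAP_CNT mirrors the constant referenced in
// the original; the class itself is illustrative.
class RemapGuardSketch {
  private static final int MAX_REMAP_CNT = 3;

  private static final AtomicIntegerFieldUpdater<RemapGuardSketch> REMAP_CNT_UPD =
      AtomicIntegerFieldUpdater.newUpdater(RemapGuardSketch.class, "remapCnt");

  private volatile int remapCnt;

  /** @return {@code true} if another remap attempt is still allowed. */
  boolean tryRemap() {
    return REMAP_CNT_UPD.incrementAndGet(this) <= MAX_REMAP_CNT;
  }
}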
  /**
   * @param keys Keys.
   * @param mapped Mappings to check for duplicates.
   * @param topVer Topology version on which keys should be mapped.
   */
  private void map(
      Collection<KeyCacheObject> keys,
      Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped,
      AffinityTopologyVersion topVer) {
    Collection<ClusterNode> cacheNodes = CU.affinityNodes(cctx, topVer);

    if (cacheNodes.isEmpty()) {
      onDone(
          new ClusterTopologyServerNotFoundException(
              "Failed to map keys for cache "
                  + "(all partition nodes left the grid) [topVer="
                  + topVer
                  + ", cache="
                  + cctx.name()
                  + ']'));

      return;
    }

    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings =
        U.newHashMap(cacheNodes.size());

    final int keysSize = keys.size();

    Map<K, V> locVals = U.newHashMap(keysSize);

    boolean hasRmtNodes = false;

    // Assign keys to primary nodes.
    for (KeyCacheObject key : keys) hasRmtNodes |= map(key, mappings, locVals, topVer, mapped);

    if (isDone()) return;

    if (!locVals.isEmpty()) add(new GridFinishedFuture<>(locVals));

    if (hasRmtNodes) {
      if (!trackable) {
        trackable = true;

        cctx.mvcc().addFuture(this, futId);
      }
    }

    // Create mini futures.
    for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry :
        mappings.entrySet()) {
      final ClusterNode n = entry.getKey();

      final LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();

      assert !mappedKeys.isEmpty();

      // If this is the primary or backup node for the keys.
      if (n.isLocal()) {
        final GridDhtFuture<Collection<GridCacheEntryInfo>> fut =
            cache()
                .getDhtAsync(
                    n.id(),
                    -1,
                    mappedKeys,
                    readThrough,
                    topVer,
                    subjId,
                    taskName == null ? 0 : taskName.hashCode(),
                    expiryPlc,
                    skipVals);

        final Collection<Integer> invalidParts = fut.invalidPartitions();

        if (!F.isEmpty(invalidParts)) {
          Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);

          for (KeyCacheObject key : keys) {
            if (key != null && invalidParts.contains(cctx.affinity().partition(key)))
              remapKeys.add(key);
          }

          AffinityTopologyVersion updTopVer = cctx.discovery().topologyVersionEx();

          assert updTopVer.compareTo(topVer) > 0
              : "Got invalid partitions for local node but topology version did "
                  + "not change [topVer="
                  + topVer
                  + ", updTopVer="
                  + updTopVer
                  + ", invalidParts="
                  + invalidParts
                  + ']';

          // Remap recursively.
          map(remapKeys, mappings, updTopVer);
        }

        // Add new future.
        add(
            fut.chain(
                new C1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>, Map<K, V>>() {
                  @Override
                  public Map<K, V> apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
                    try {
                      return createResultMap(fut.get());
                    } catch (Exception e) {
                      U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);

                      onDone(e);

                      return Collections.emptyMap();
                    }
                  }
                }));
      } else {
        MiniFuture fut = new MiniFuture(n, mappedKeys, topVer);

        GridCacheMessage req =
            new GridNearGetRequest(
                cctx.cacheId(),
                futId,
                fut.futureId(),
                n.version().compareTo(SINGLE_GET_MSG_SINCE) >= 0 ? null : DUMMY_VER,
                mappedKeys,
                readThrough,
                topVer,
                subjId,
                taskName == null ? 0 : taskName.hashCode(),
                expiryPlc != null ? expiryPlc.forAccess() : -1L,
                skipVals,
                cctx.deploymentEnabled());

        add(fut); // Append new future.

        try {
          cctx.io().send(n, req, cctx.ioPolicy());
        } catch (IgniteCheckedException e) {
          // Fail the whole thing.
          if (e instanceof ClusterTopologyCheckedException)
            fut.onNodeLeft((ClusterTopologyCheckedException) e);
          else fut.onResult(e);
        }
      }
    }
  }
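import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;

// Sketch of the per-node batching that map(...) above performs: group keys by
// their primary node into insertion-ordered maps, one request per node. The
// primaryNode function is a stand-in for cctx.affinity(); the Boolean value
// mirrors the "value fetched" flag in the original mappings.
class KeyBatcherSketch {
  static <N, K> Map<N, LinkedHashMap<K, Boolean>> groupByNode(
      Collection<K> keys, Function<K, N> primaryNode) {
    Map<N, LinkedHashMap<K, Boolean>> mappings = new LinkedHashMap<>();

    for (K key : keys) {
      N node = primaryNode.apply(key);

      mappings.computeIfAbsent(node, n -> new LinkedHashMap<>(3, 1f)).put(key, false);
    }

    return mappings;
  }
}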
  /** @return {@code true} if the given node is the local node. */
  @Override
  public boolean apply(ClusterNode n) {
    return n.isLocal();
  }
  /**
   * @param dhtMap DHT map.
   * @param nearMap Near map.
   * @return {@code True} in case there is at least one synchronous {@code MiniFuture} to wait for.
   */
  private boolean finish(
      Map<UUID, GridDistributedTxMapping> dhtMap, Map<UUID, GridDistributedTxMapping> nearMap) {
    if (tx.onePhaseCommit()) return false;

    boolean sync = tx.syncMode() == FULL_SYNC;

    if (tx.explicitLock()) sync = true;

    boolean res = false;

    // Create mini futures.
    for (GridDistributedTxMapping dhtMapping : dhtMap.values()) {
      ClusterNode n = dhtMapping.node();

      assert !n.isLocal();

      GridDistributedTxMapping nearMapping = nearMap.get(n.id());

      if (dhtMapping.empty() && nearMapping != null && nearMapping.empty())
        // Nothing to send.
        continue;

      MiniFuture fut = new MiniFuture(dhtMapping, nearMapping);

      add(fut); // Append new future.

      Collection<Long> updCntrs = new ArrayList<>(dhtMapping.entries().size());

      for (IgniteTxEntry e : dhtMapping.entries()) updCntrs.add(e.updateCounter());

      GridDhtTxFinishRequest req =
          new GridDhtTxFinishRequest(
              tx.nearNodeId(),
              futId,
              fut.futureId(),
              tx.topologyVersion(),
              tx.xidVersion(),
              tx.commitVersion(),
              tx.threadId(),
              tx.isolation(),
              commit,
              tx.isInvalidate(),
              tx.system(),
              tx.ioPolicy(),
              tx.isSystemInvalidate(),
              sync,
              sync,
              tx.completedBase(),
              tx.committedVersions(),
              tx.rolledbackVersions(),
              tx.pendingVersions(),
              tx.size(),
              tx.subjectId(),
              tx.taskNameHash(),
              tx.activeCachesDeploymentEnabled(),
              updCntrs,
              false,
              false);

      req.writeVersion(tx.writeVersion() != null ? tx.writeVersion() : tx.xidVersion());

      try {
        cctx.io().send(n, req, tx.ioPolicy());

        if (msgLog.isDebugEnabled()) {
          msgLog.debug(
              "DHT finish fut, sent request dht [txId="
                  + tx.nearXidVersion()
                  + ", dhtTxId="
                  + tx.xidVersion()
                  + ", node="
                  + n.id()
                  + ']');
        }

        if (sync) res = true;
        else fut.onDone();
      } catch (IgniteCheckedException e) {
        // Fail the whole thing.
        if (e instanceof ClusterTopologyCheckedException)
          fut.onNodeLeft((ClusterTopologyCheckedException) e);
        else {
          if (msgLog.isDebugEnabled()) {
            msgLog.debug(
                "DHT finish fut, failed to send request dht [txId="
                    + tx.nearXidVersion()
                    + ", dhtTxId="
                    + tx.xidVersion()
                    + ", node="
                    + n.id()
                    + ", err="
                    + e
                    + ']');
          }

          fut.onResult(e);
        }
      }
    }

    for (GridDistributedTxMapping nearMapping : nearMap.values()) {
      if (!dhtMap.containsKey(nearMapping.node().id())) {
        if (nearMapping.empty())
          // Nothing to send.
          continue;

        MiniFuture fut = new MiniFuture(null, nearMapping);

        add(fut); // Append new future.

        GridDhtTxFinishRequest req =
            new GridDhtTxFinishRequest(
                tx.nearNodeId(),
                futId,
                fut.futureId(),
                tx.topologyVersion(),
                tx.xidVersion(),
                tx.commitVersion(),
                tx.threadId(),
                tx.isolation(),
                commit,
                tx.isInvalidate(),
                tx.system(),
                tx.ioPolicy(),
                tx.isSystemInvalidate(),
                sync,
                sync,
                tx.completedBase(),
                tx.committedVersions(),
                tx.rolledbackVersions(),
                tx.pendingVersions(),
                tx.size(),
                tx.subjectId(),
                tx.taskNameHash(),
                tx.activeCachesDeploymentEnabled(),
                false,
                false);

        req.writeVersion(tx.writeVersion());

        try {
          cctx.io().send(nearMapping.node(), req, tx.ioPolicy());

          if (msgLog.isDebugEnabled()) {
            msgLog.debug(
                "DHT finish fut, sent request near [txId="
                    + tx.nearXidVersion()
                    + ", dhtTxId="
                    + tx.xidVersion()
                    + ", node="
                    + nearMapping.node().id()
                    + ']');
          }

          if (sync) res = true;
          else fut.onDone();
        } catch (IgniteCheckedException e) {
          // Fail the whole thing.
          if (e instanceof ClusterTopologyCheckedException)
            fut.onNodeLeft((ClusterTopologyCheckedException) e);
          else {
            if (msgLog.isDebugEnabled()) {
              msgLog.debug(
                  "DHT finish fut, failed to send request near [txId="
                      + tx.nearXidVersion()
                      + ", dhtTxId="
                      + tx.xidVersion()
                      + ", node="
                      + nearMapping.node().id()
                      + ", err="
                      + e
                      + ']');
            }

            fut.onResult(e);
          }
        }
      }
    }

    return res;
  }
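// Sketch of the completion policy shared by finish(...) above and
// rollbackLockTransactions(...) below: a mini future is awaited only when the
// transaction is FULL_SYNC or holds an explicit lock; otherwise it is
// completed immediately after a successful send. The enum and names are
// illustrative stand-ins, not the Ignite CacheWriteSynchronizationMode API.
class FinishSyncPolicySketch {
  enum SyncMode { FULL_SYNC, PRIMARY_SYNC, FULL_ASYNC }

  static boolean waitForReply(SyncMode syncMode, boolean explicitLock) {
    return syncMode == SyncMode.FULL_SYNC || explicitLock;
  }
}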
  /**
   * @param nodes Nodes.
   * @return {@code True} in case there is at least one synchronous {@code MiniFuture} to wait for.
   */
  private boolean rollbackLockTransactions(Collection<ClusterNode> nodes) {
    assert !commit;
    assert !F.isEmpty(nodes);

    if (tx.onePhaseCommit()) return false;

    boolean sync = tx.syncMode() == FULL_SYNC;

    if (tx.explicitLock()) sync = true;

    boolean res = false;

    for (ClusterNode n : nodes) {
      assert !n.isLocal();

      MiniFuture fut = new MiniFuture(n);

      add(fut); // Append new future.

      GridDhtTxFinishRequest req =
          new GridDhtTxFinishRequest(
              tx.nearNodeId(),
              futId,
              fut.futureId(),
              tx.topologyVersion(),
              tx.xidVersion(),
              tx.commitVersion(),
              tx.threadId(),
              tx.isolation(),
              commit,
              tx.isInvalidate(),
              tx.system(),
              tx.ioPolicy(),
              tx.isSystemInvalidate(),
              sync,
              sync,
              tx.completedBase(),
              tx.committedVersions(),
              tx.rolledbackVersions(),
              tx.pendingVersions(),
              tx.size(),
              tx.subjectId(),
              tx.taskNameHash(),
              tx.activeCachesDeploymentEnabled(),
              false,
              false);

      try {
        cctx.io().send(n, req, tx.ioPolicy());

        if (msgLog.isDebugEnabled()) {
          msgLog.debug(
              "DHT finish fut, sent request lock tx [txId="
                  + tx.nearXidVersion()
                  + ", dhtTxId="
                  + tx.xidVersion()
                  + ", node="
                  + n.id()
                  + ']');
        }

        if (sync) res = true;
        else fut.onDone();
      } catch (IgniteCheckedException e) {
        // Fail the whole thing.
        if (e instanceof ClusterTopologyCheckedException)
          fut.onNodeLeft((ClusterTopologyCheckedException) e);
        else {
          if (msgLog.isDebugEnabled()) {
            msgLog.debug(
                "DHT finish fut, failed to send request lock tx [txId="
                    + tx.nearXidVersion()
                    + ", dhtTxId="
                    + tx.xidVersion()
                    + ", node="
                    + n.id()
                    + ", err="
                    + e
                    + ']');
          }

          fut.onResult(e);
        }
      }
    }

    return res;
  }