/** @param m Mapping. */
  @SuppressWarnings({"unchecked"})
  private void finish(GridDistributedTxMapping<K, V> m) {
    GridRichNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest<K, V> req =
        new GridNearTxFinishRequest<>(
            futId,
            tx.xidVersion(),
            tx.commitVersion(),
            tx.threadId(),
            commit,
            tx.isInvalidate(),
            m.explicitLock(),
            tx.topologyVersion(),
            null,
            null,
            null,
            commit && tx.pessimistic() ? m.writes() : null,
            tx.syncCommit() && commit || tx.syncRollback() && !commit);

    // If this is the primary node for the keys.
    if (n.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (CU.DHT_ENABLED) {
        GridFuture<GridCacheTx> fut =
            commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req);

        // Add new future.
        add(fut);
      } else
        // Add done future for testing.
        add(new GridFinishedFuture<GridCacheTx>(ctx));
    } else {
      MiniFuture fut = new MiniFuture(m);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      try {
        cctx.io().send(n, req);

        // If we don't wait for result, then mark future as done.
        if (!isSync() && !m.explicitLock()) fut.onDone();
      } catch (GridTopologyException e) {
        // Remove previous mapping.
        mappings.remove(m.node().id());

        fut.onResult(e);
      } catch (GridException e) {
        // Fail the whole thing.
        fut.onResult(e);
      }
    }
  }
  /**
   * @param cctx Context.
   * @param tx Transaction.
   * @param commit Commit flag.
   */
  public GridNearTxFinishFuture(
      GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    assert cctx != null;

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
  }
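
// Illustrative sketch (not GridGain API): the constructor above wires up a compound finish
// future that tracks one sub-future per mapped node and completes only once every per-node
// finish has completed. The stand-in below uses plain java.util.concurrent instead of grid
// futures; all names are hypothetical.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

class TxFinishCompoundSketch {
  /** One pending finish acknowledgement per mapped node. */
  private final List<CompletableFuture<Void>> miniFuts = new ArrayList<>();

  /** Registers a per-node sub-future (stand-in for add(fut) in the snippets above). */
  CompletableFuture<Void> addMini() {
    CompletableFuture<Void> mini = new CompletableFuture<>();

    miniFuts.add(mini);

    return mini;
  }

  /** Compound result: completes only when every registered per-node finish has completed. */
  CompletableFuture<Void> compound() {
    return CompletableFuture.allOf(miniFuts.toArray(new CompletableFuture[0]));
  }
}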
  /**
   * @param cctx Cache context.
   * @param keys Keys to lock.
   * @param tx Transaction.
   * @param read Read flag.
   * @param retval Flag indicating whether a value should be returned.
   * @param timeout Lock acquisition timeout.
   * @param filter Filter.
   */
  public GridNearLockFuture(
      GridCacheContext<K, V> cctx,
      Collection<? extends K> keys,
      @Nullable GridNearTxLocal<K, V> tx,
      boolean read,
      boolean retval,
      long timeout,
      GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());
    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
      timeoutObj = new LockTimeoutObject();

      cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
  }
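
// Illustrative sketch (not GridGain API): the constructor above registers a LockTimeoutObject
// only when a positive timeout is given. The stand-in below shows the same idea with a
// ScheduledExecutorService that completes the lock future with "false" once the timeout
// elapses; a normal completion beforehand makes the timeout a no-op.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class LockTimeoutSketch {
  private static final ScheduledExecutorService TIMER =
      Executors.newSingleThreadScheduledExecutor();

  /** Registers a timeout for the given lock future if, and only if, timeoutMs > 0. */
  static void addTimeoutObject(CompletableFuture<Boolean> lockFut, long timeoutMs) {
    if (timeoutMs > 0)
      TIMER.schedule(() -> { lockFut.complete(false); }, timeoutMs, TimeUnit.MILLISECONDS);
  }
}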
  /**
   * @param ctx Grid kernal context.
   * @param cacheName Cache name.
   * @param flushQ Flush queue.
   */
  public GridDataLoaderImpl(
      final GridKernalContext ctx,
      @Nullable final String cacheName,
      DelayQueue<GridDataLoaderImpl<K, V>> flushQ) {
    assert ctx != null;

    this.ctx = ctx;
    this.cacheName = cacheName;
    this.flushQ = flushQ;

    log = U.logger(ctx, logRef, GridDataLoaderImpl.class);

    discoLsnr =
        new GridLocalEventListener() {
          @Override
          public void onEvent(GridEvent evt) {
            assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT;

            GridDiscoveryEvent discoEvt = (GridDiscoveryEvent) evt;

            UUID id = discoEvt.eventNodeId();

            // Remap regular mappings.
            final Buffer buf = bufMappings.remove(id);

            if (buf != null) {
              // Only async notification is possible since
              // discovery thread may be trapped otherwise.
              ctx.closure()
                  .callLocalSafe(
                      new Callable<Object>() {
                        @Override
                        public Object call() throws Exception {
                          buf.onNodeLeft();

                          return null;
                        }
                      },
                      true /* system pool */);
            }
          }
        };

    ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT);

    // Generate unique topic for this loader.
    topic = TOPIC_DATALOAD.topic(GridUuid.fromUuid(ctx.localNodeId()));

    ctx.io()
        .addMessageListener(
            topic,
            new GridMessageListener() {
              @Override
              public void onMessage(UUID nodeId, Object msg) {
                assert msg instanceof GridDataLoadResponse;

                GridDataLoadResponse res = (GridDataLoadResponse) msg;

                if (log.isDebugEnabled()) log.debug("Received data load response: " + res);

                Buffer buf = bufMappings.get(nodeId);

                if (buf != null) buf.onResponse(res);
                else if (log.isDebugEnabled())
                  log.debug("Ignoring response since node has left [nodeId=" + nodeId + ", ");
              }
            });

    if (log.isDebugEnabled()) log.debug("Added response listener within topic: " + topic);

    fut = new GridDataLoaderFuture(ctx, this);
  }
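
// Illustrative sketch (not GridGain API): the listener above must not block the discovery
// thread, so buffer cleanup is submitted to a pool. The stand-in below keeps that shape with a
// ConcurrentMap of per-node buffers and a plain ExecutorService; the names are hypothetical.
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class NodeLeftCleanupSketch {
  private final ConcurrentMap<UUID, Runnable> bufMappings = new ConcurrentHashMap<>();
  private final ExecutorService sysPool = Executors.newCachedThreadPool();

  /** Invoked from the discovery thread; the actual buffer notification runs asynchronously. */
  void onNodeLeft(UUID nodeId) {
    Runnable buf = bufMappings.remove(nodeId);

    if (buf != null)
      sysPool.execute(buf); // Async notification only, so the discovery thread is never trapped.
  }
}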
  /**
   * Mini-future for a single per-node finish request. Mini-futures only wait on a single node, as
   * opposed to the compound future, which waits on multiple nodes.
   */
  private class MiniFuture extends GridFutureAdapter<GridCacheTx> {
    /** */
    private final GridUuid futId = GridUuid.randomUuid();

    /** Mapping. */
    @GridToStringInclude private GridDistributedTxMapping<K, V> m;

    /** Empty constructor required for {@link Externalizable}. */
    public MiniFuture() {
      // No-op.
    }

    /** @param m Mapping. */
    MiniFuture(GridDistributedTxMapping<K, V> m) {
      super(cctx.kernalContext());

      this.m = m;
    }

    /** @return Future ID. */
    GridUuid futureId() {
      return futId;
    }

    /** @return Node. */
    public GridRichNode node() {
      return m.node();
    }

    /** @return Mapping. */
    public GridDistributedTxMapping<K, V> mapping() {
      return m;
    }

    /** @param e Error. */
    void onResult(Throwable e) {
      if (log.isDebugEnabled())
        log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

      // Fail.
      onDone(e);
    }

    /** @param e Node failure. */
    void onResult(GridTopologyException e) {
      if (log.isDebugEnabled())
        log.debug(
            "Remote node left grid while sending or waiting for reply (will ignore): " + this);

      onDone(tx);
    }

    /** @param res Response. */
    void onResult(GridNearTxFinishResponse<K, V> res) {
      onDone(tx);
    }

    /** {@inheritDoc} */
    @Override
    public String toString() {
      return S.toString(
          MiniFuture.class, this, "done", isDone(), "cancelled", isCancelled(), "err", error());
    }
  }
  /**
   * Gets next near lock mapping and either acquires dht locks locally or sends near lock request to
   * remote primary node.
   *
   * @param mappings Queue of mappings.
   * @throws GridException If mapping cannot be completed.
   */
  private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings)
      throws GridException {
    GridNearLockMapping<K, V> map = mappings.poll();

    // If there are no more mappings to process, complete the future.
    if (map == null) return;

    final GridNearLockRequest<K, V> req = map.request();
    final Collection<K> mappedKeys = map.distributedKeys();
    final GridNode node = map.node();

    if (filter != null && filter.length != 0) req.filter(filter, cctx);

    if (node.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (log.isDebugEnabled()) log.debug("Before locally locking near request: " + req);

      GridFuture<GridNearLockResponse<K, V>> fut;

      if (CU.DHT_ENABLED) fut = dht().lockAllAsync(cctx.localNode(), req, filter);
      else {
        // Create dummy values for testing.
        GridNearLockResponse<K, V> res =
            new GridNearLockResponse<>(lockVer, futId, null, false, 1, null);

        res.addValueBytes(null, null, true, lockVer, lockVer, cctx);

        fut = new GridFinishedFuture<>(ctx, res);
      }

      // Add new future.
      add(
          new GridEmbeddedFuture<>(
              cctx.kernalContext(),
              fut,
              new C2<GridNearLockResponse<K, V>, Exception, Boolean>() {
                @Override
                public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
                  if (CU.isLockTimeoutOrCancelled(e)
                      || (res != null && CU.isLockTimeoutOrCancelled(res.error()))) return false;

                  if (e != null) {
                    onError(e);

                    return false;
                  }

                  if (res == null) {
                    onError(new GridException("Lock response is null for future: " + this));

                    return false;
                  }

                  if (res.error() != null) {
                    onError(res.error());

                    return false;
                  }

                  if (log.isDebugEnabled())
                    log.debug(
                        "Acquired lock for local DHT mapping [locId="
                            + cctx.nodeId()
                            + ", mappedKeys="
                            + mappedKeys
                            + ", fut="
                            + GridNearLockFuture.this
                            + ']');

                  try {
                    int i = 0;

                    for (K k : mappedKeys) {
                      while (true) {
                        GridNearCacheEntry<K, V> entry =
                            cctx.near().entryExx(k, req.topologyVersion());

                        try {
                          GridTuple3<GridCacheVersion, V, byte[]> oldValTup =
                              valMap.get(entry.key());

                          boolean hasBytes = entry.hasValue();
                          V oldVal = entry.rawGet();
                          V newVal = res.value(i);
                          byte[] newBytes = res.valueBytes(i);

                          GridCacheVersion dhtVer = res.dhtVersion(i);
                          GridCacheVersion mappedVer = res.mappedVersion(i);

                          // On local node don't record twice if DHT cache already recorded.
                          boolean record =
                              retval && oldValTup != null && oldValTup.get1().equals(dhtVer);

                          if (newVal == null) {
                            if (oldValTup != null) {
                              if (oldValTup.get1().equals(dhtVer)) {
                                newVal = oldValTup.get2();

                                newBytes = oldValTup.get3();
                              }

                              oldVal = oldValTup.get2();
                            }
                          }

                          // Lock is held at this point, so we can set the
                          // returned value if any.
                          entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

                          entry.readyNearLock(
                              lockVer,
                              mappedVer,
                              res.committedVersions(),
                              res.rolledbackVersions(),
                              res.pending());

                          if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                            boolean pass = res.filterResult(i);

                            tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
                          }

                          if (record) {
                            if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                              cctx.events()
                                  .addEvent(
                                      entry.partition(),
                                      entry.key(),
                                      tx,
                                      null,
                                      EVT_CACHE_OBJECT_READ,
                                      newVal,
                                      newVal != null,
                                      oldVal,
                                      hasBytes,
                                      CU.subjectId(tx, cctx));

                            cctx.cache().metrics0().onRead(oldVal != null);
                          }

                          if (log.isDebugEnabled())
                            log.debug(
                                "Processed response for entry [res="
                                    + res
                                    + ", entry="
                                    + entry
                                    + ']');

                          break; // Inner while loop.
                        } catch (GridCacheEntryRemovedException ignored) {
                          if (log.isDebugEnabled())
                            log.debug(
                                "Failed to add candidates because entry was "
                                    + "removed (will renew).");

                          // Replace old entry with new one.
                          entries.set(
                              i,
                              (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
                        }
                      }

                      i++; // Increment outside of while loop.
                    }

                    // Proceed and add new future (if any) before completing embedded future.
                    proceedMapping(mappings);
                  } catch (GridException ex) {
                    onError(ex);

                    return false;
                  }

                  return true;
                }
              }));
    } else {
      final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      GridFuture<?> txSync = null;

      if (inTx()) txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

      if (txSync == null || txSync.isDone()) {
        try {
          if (log.isDebugEnabled())
            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

          cctx.io().send(node, req);
        } catch (GridTopologyException ex) {
          assert fut != null;

          fut.onResult(ex);
        }
      } else {
        txSync.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> t) {
                try {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                  cctx.io().send(node, req);
                } catch (GridTopologyException ex) {
                  assert fut != null;

                  fut.onResult(ex);
                } catch (GridException e) {
                  onError(e);
                }
              }
            });
      }
    }
  }
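
// Illustrative sketch (not GridGain API): proceedMapping() above drains the mapping queue one
// element at a time, and each completed per-node step triggers the next poll. The stand-in below
// expresses that chaining with CompletableFuture; sendOne is a hypothetical callback that locks
// one mapping either locally or on a remote node.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.function.Function;

class MappingChainSketch<M> {
  /** Processes mappings sequentially; the returned future completes when the queue is drained. */
  CompletableFuture<Void> proceed(
      ConcurrentLinkedDeque<M> mappings, Function<M, CompletableFuture<Void>> sendOne) {
    M map = mappings.poll();

    // If there are no more mappings to process, the chain is complete.
    if (map == null)
      return CompletableFuture.completedFuture(null);

    // Lock the current mapping, then proceed with the next one.
    return sendOne.apply(map).thenCompose(ignored -> proceed(mappings, sendOne));
  }
}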
  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    return futId.hashCode();
  }
  /**
   * Mini-future for a single per-node lock request. Mini-futures only wait on a single node, as
   * opposed to the compound future, which waits on multiple nodes.
   */
  private class MiniFuture extends GridFutureAdapter<Boolean> {
    /** */
    private static final long serialVersionUID = 0L;

    /** */
    private final GridUuid futId = GridUuid.randomUuid();

    /** Node. */
    @GridToStringExclude private GridNode node;

    /** Keys. */
    @GridToStringInclude private Collection<K> keys;

    /** Mappings to proceed. */
    @GridToStringExclude private ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings;

    /** */
    private AtomicBoolean rcvRes = new AtomicBoolean(false);

    /** Empty constructor required for {@link Externalizable}. */
    public MiniFuture() {
      // No-op.
    }

    /**
     * @param node Node.
     * @param keys Keys.
     * @param mappings Mappings to proceed.
     */
    MiniFuture(
        GridNode node,
        Collection<K> keys,
        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings) {
      super(cctx.kernalContext());

      this.node = node;
      this.keys = keys;
      this.mappings = mappings;
    }

    /** @return Future ID. */
    GridUuid futureId() {
      return futId;
    }

    /** @return Node. */
    public GridNode node() {
      return node;
    }

    /** @return Keys. */
    public Collection<K> keys() {
      return keys;
    }

    /** @param e Error. */
    void onResult(Throwable e) {
      if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
          log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

        // Fail.
        onDone(e);
      } else
        U.warn(
            log,
            "Received error after another result has been processed [fut="
                + GridNearLockFuture.this
                + ", mini="
                + this
                + ']',
            e);
    }

    /** @param e Node left exception. */
    void onResult(GridTopologyException e) {
      if (isDone()) return;

      if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
          log.debug(
              "Remote node left grid while sending or waiting for reply (will fail): " + this);

        if (tx != null) tx.removeMapping(node.id());

        // Primary node left the grid, so fail the future.
        GridNearLockFuture.this.onDone(newTopologyException(e, node.id()));

        onDone(true);
      }
    }

    /** @param res Response. */
    void onResult(GridNearLockResponse<K, V> res) {
      if (rcvRes.compareAndSet(false, true)) {
        if (res.error() != null) {
          if (log.isDebugEnabled())
            log.debug(
                "Finishing mini future with an error due to error in response [miniFut="
                    + this
                    + ", res="
                    + res
                    + ']');

          // Fail.
          if (res.error() instanceof GridCacheLockTimeoutException) onDone(false);
          else onDone(res.error());

          return;
        }

        int i = 0;

        long topVer = topSnapshot.get().topologyVersion();

        for (K k : keys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, topVer);

            try {
              if (res.dhtVersion(i) == null) {
                onDone(
                    new GridException(
                        "Failed to receive DHT version from remote node "
                            + "(will fail the lock): "
                            + res));

                return;
              }

              GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key());

              V oldVal = entry.rawGet();
              boolean hasOldVal = false;
              V newVal = res.value(i);
              byte[] newBytes = res.valueBytes(i);

              boolean readRecordable = false;

              if (retval) {
                readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ);

                if (readRecordable) hasOldVal = entry.hasValue();
              }

              GridCacheVersion dhtVer = res.dhtVersion(i);
              GridCacheVersion mappedVer = res.mappedVersion(i);

              if (newVal == null) {
                if (oldValTup != null) {
                  if (oldValTup.get1().equals(dhtVer)) {
                    newVal = oldValTup.get2();

                    newBytes = oldValTup.get3();
                  }

                  oldVal = oldValTup.get2();
                }
              }

              // Lock is held at this point, so we can set the
              // returned value if any.
              entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

              if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                boolean pass = res.filterResult(i);

                tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
              }

              entry.readyNearLock(
                  lockVer,
                  mappedVer,
                  res.committedVersions(),
                  res.rolledbackVersions(),
                  res.pending());

              if (retval) {
                if (readRecordable)
                  cctx.events()
                      .addEvent(
                          entry.partition(),
                          entry.key(),
                          tx,
                          null,
                          EVT_CACHE_OBJECT_READ,
                          newVal,
                          newVal != null || newBytes != null,
                          oldVal,
                          hasOldVal,
                          CU.subjectId(tx, cctx));

                cctx.cache().metrics0().onRead(false);
              }

              if (log.isDebugEnabled())
                log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');

              break; // Inner while loop.
            } catch (GridCacheEntryRemovedException ignored) {
              if (log.isDebugEnabled())
                log.debug("Failed to add candidates because entry was removed (will renew).");

              // Replace old entry with new one.
              entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
            } catch (GridException e) {
              onDone(e);

              return;
            }
          }

          i++;
        }

        try {
          proceedMapping(mappings);
        } catch (GridException e) {
          onDone(e);
        }

        onDone(true);
      }
    }

    /** {@inheritDoc} */
    @Override
    public String toString() {
      return S.toString(MiniFuture.class, this, "node", node.id(), "super", super.toString());
    }
  }
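
// Illustrative sketch (not GridGain API): the rcvRes flag above guarantees that exactly one
// outcome (response, error, or node-left) completes a mini-future; later outcomes are only
// logged. The stand-in below isolates that compare-and-set guard.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

class SingleOutcomeGuard<T> {
  private final AtomicBoolean rcvRes = new AtomicBoolean(false);
  private final CompletableFuture<T> fut = new CompletableFuture<>();

  /** Applies the successful result only if no other outcome was recorded first. */
  boolean onResult(T res) {
    return rcvRes.compareAndSet(false, true) && fut.complete(res);
  }

  /** Applies the error only if no other outcome was recorded first. */
  boolean onError(Throwable e) {
    return rcvRes.compareAndSet(false, true) && fut.completeExceptionally(e);
  }

  /** Future settled by exactly one of the callbacks above. */
  CompletableFuture<T> future() {
    return fut;
  }
}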
/**
 * Future verifying whether a pessimistic transaction originated by a failed node was committed on
 * the remaining nodes.
 *
 * @author @java.author
 * @version @java.version
 */
public class GridCachePessimisticCheckCommittedTxFuture<K, V>
    extends GridCompoundIdentityFuture<GridCacheCommittedTxInfo<K, V>>
    implements GridCacheFuture<GridCacheCommittedTxInfo<K, V>> {
  /** Logger reference. */
  private static final AtomicReference<GridLogger> logRef = new AtomicReference<>();

  /** Trackable flag. */
  private boolean trackable = true;

  /** Context. */
  private final GridCacheContext<K, V> cctx;

  /** Future ID. */
  private final GridUuid futId = GridUuid.randomUuid();

  /** Transaction. */
  private final GridCacheTxEx<K, V> tx;

  /** All involved nodes. */
  private final Map<UUID, GridNode> nodes;

  /** ID of the failed node that started the transaction. */
  private final UUID failedNodeId;

  /**
   * Flag indicating that the future checks only the near node, instead of the whole topology, in
   * case of a primary node crash.
   */
  private boolean nearCheck;

  /** Logger. */
  private final GridLogger log;

  /**
   * @param cctx Context.
   * @param tx Transaction.
   * @param failedNodeId ID of the failed node that started the transaction.
   */
  @SuppressWarnings("ConstantConditions")
  public GridCachePessimisticCheckCommittedTxFuture(
      GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx, UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion())) nodes.put(node.id(), node);
  }

  /** Initializes future. */
  public void prepare() {
    if (log.isDebugEnabled())
      log.debug("Checking if transaction was committed on remote nodes: " + tx);

    // Check local node first (local node can be a backup node for some part of this transaction).
    long originatingThreadId = tx.threadId();

    if (tx instanceof GridCacheTxRemoteEx)
      originatingThreadId = ((GridCacheTxRemoteEx) tx).remoteThreadId();

    GridCacheCommittedTxInfo<K, V> txInfo =
        cctx.tm().txCommitted(tx.nearXidVersion(), tx.eventNodeId(), originatingThreadId);

    if (txInfo != null) {
      onDone(txInfo);

      markInitialized();

      return;
    }

    Collection<GridNode> checkNodes = CU.remoteNodes(cctx, tx.topologyVersion());

    if (tx instanceof GridDhtTxRemote) {
      // If we got primary node failure and near node has not failed.
      if (tx.nodeId().equals(failedNodeId) && !tx.eventNodeId().equals(failedNodeId)) {
        nearCheck = true;

        GridNode nearNode = cctx.discovery().node(tx.eventNodeId());

        if (nearNode == null) {
          // Near node failed, separate check prepared future will take care of it.
          onDone(
              new GridTopologyException(
                  "Failed to check near transaction state (near node left grid): "
                      + tx.eventNodeId()));

          return;
        }

        checkNodes = Collections.singletonList(nearNode);
      }
    }

    for (GridNode rmtNode : checkNodes) {
      // Skip left nodes and local node.
      if (rmtNode.id().equals(failedNodeId)) continue;

      /*
       * Send message to all cache nodes in the topology.
       */

      MiniFuture fut = new MiniFuture(rmtNode.id());

      GridCachePessimisticCheckCommittedTxRequest<K, V> req =
          new GridCachePessimisticCheckCommittedTxRequest<>(
              tx, originatingThreadId, futureId(), fut.futureId());

      add(fut);

      try {
        cctx.io().send(rmtNode.id(), req);
      } catch (GridTopologyException ignored) {
        fut.onNodeLeft();
      } catch (GridException e) {
        fut.onError(e);

        break;
      }
    }

    markInitialized();
  }

  /**
   * @param nodeId Node ID.
   * @param res Response.
   */
  public void onResult(UUID nodeId, GridCachePessimisticCheckCommittedTxResponse<K, V> res) {
    if (!isDone()) {
      for (GridFuture<GridCacheCommittedTxInfo<K, V>> fut : pending()) {
        if (isMini(fut)) {
          MiniFuture f = (MiniFuture) fut;

          if (f.futureId().equals(res.miniId())) {
            assert f.nodeId().equals(nodeId);

            f.onResult(res);

            break;
          }
        }
      }
    }
  }

  /** {@inheritDoc} */
  @Override
  public GridUuid futureId() {
    return futId;
  }

  /** {@inheritDoc} */
  @Override
  public GridCacheVersion version() {
    return tx.xidVersion();
  }

  /** {@inheritDoc} */
  @Override
  public Collection<? extends GridNode> nodes() {
    return nodes.values();
  }

  /** {@inheritDoc} */
  @Override
  public boolean onNodeLeft(UUID nodeId) {
    for (GridFuture<?> fut : futures())
      if (isMini(fut)) {
        MiniFuture f = (MiniFuture) fut;

        if (f.nodeId().equals(nodeId)) {
          f.onNodeLeft();

          return true;
        }
      }

    return false;
  }

  /** {@inheritDoc} */
  @Override
  public boolean trackable() {
    return trackable;
  }

  /** {@inheritDoc} */
  @Override
  public void markNotTrackable() {
    trackable = false;
  }

  /** {@inheritDoc} */
  @Override
  public boolean onDone(@Nullable GridCacheCommittedTxInfo<K, V> res, @Nullable Throwable err) {
    if (super.onDone(res, err)) {
      cctx.mvcc().removeFuture(this);

      if (log.isDebugEnabled())
        log.debug(
            "Completing check committed tx future for transaction [tx="
                + tx
                + ", res="
                + res
                + ", err="
                + err
                + ']');

      if (err == null) cctx.tm().finishPessimisticTxOnRecovery(tx, res);
      else {
        if (log.isDebugEnabled())
          log.debug(
              "Failed to check prepared transactions, "
                  + "invalidating transaction [err="
                  + err
                  + ", tx="
                  + tx
                  + ']');

        if (nearCheck) return true;

        cctx.tm().salvageTx(tx);
      }

      return true;
    }

    return false;
  }

  /**
   * @param f Future.
   * @return {@code True} if mini-future.
   */
  private boolean isMini(GridFuture<?> f) {
    return f.getClass().equals(MiniFuture.class);
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return S.toString(
        GridCachePessimisticCheckCommittedTxFuture.class, this, "super", super.toString());
  }

  /** */
  private class MiniFuture extends GridFutureAdapter<GridCacheCommittedTxInfo<K, V>> {
    /** Mini future ID. */
    private final GridUuid futId = GridUuid.randomUuid();

    /** Node ID. */
    private UUID nodeId;

    /** Empty constructor required by {@link Externalizable} */
    public MiniFuture() {
      // No-op.
    }

    /** @param nodeId Node ID. */
    private MiniFuture(UUID nodeId) {
      super(cctx.kernalContext());

      this.nodeId = nodeId;
    }

    /** @return Node ID. */
    private UUID nodeId() {
      return nodeId;
    }

    /** @return Future ID. */
    private GridUuid futureId() {
      return futId;
    }

    /** @param e Error. */
    private void onError(Throwable e) {
      if (log.isDebugEnabled())
        log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

      onDone(e);
    }

    /** */
    private void onNodeLeft() {
      if (log.isDebugEnabled())
        log.debug("Transaction node left grid (will ignore) [fut=" + this + ']');

      if (nearCheck) {
        onDone(
            new GridTopologyException(
                "Failed to check near transaction state (near node left grid): " + nodeId));

        return;
      }

      onDone((GridCacheCommittedTxInfo<K, V>) null);
    }

    /** @param res Response. */
    private void onResult(GridCachePessimisticCheckCommittedTxResponse<K, V> res) {
      onDone(res.committedTxInfo());
    }

    /** {@inheritDoc} */
    @Override
    public String toString() {
      return S.toString(MiniFuture.class, this, "done", isDone(), "err", error());
    }
  }

  /** Single value reducer. */
  private static class SingleReducer<K, V>
      extends GridReducer<GridCacheCommittedTxInfo<K, V>, GridCacheCommittedTxInfo<K, V>> {
    /** */
    private AtomicReference<GridCacheCommittedTxInfo<K, V>> collected = new AtomicReference<>();

    /** {@inheritDoc} */
    @Override
    public boolean collect(@Nullable GridCacheCommittedTxInfo<K, V> info) {
      if (info != null) {
        collected.compareAndSet(null, info);

        // Stop collecting on first collected info.
        return false;
      }

      return true;
    }

    /** {@inheritDoc} */
    @Override
    public GridCacheCommittedTxInfo<K, V> reduce() {
      return collected.get();
    }
  }
}
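
// Illustrative sketch (not GridGain API): prepare() above first checks the local transaction
// manager and, if nothing is found, asks every remaining node and settles on the first non-null
// answer; null from all nodes means the transaction was never committed. The names below
// (localInfo, ask) are hypothetical stand-ins using plain java.util.concurrent.
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.stream.Collectors;

class CommitCheckSketch<N, I> {
  CompletableFuture<I> check(
      Optional<I> localInfo, List<N> remoteNodes, Function<N, CompletableFuture<I>> ask) {
    // The local node may be a backup for part of the transaction; prefer its answer.
    if (localInfo.isPresent())
      return CompletableFuture.completedFuture(localInfo.get());

    CompletableFuture<I> result = new CompletableFuture<>();

    List<CompletableFuture<I>> futs =
        remoteNodes.stream().map(ask).collect(Collectors.toList());

    // First non-null response wins (completing an already completed future is a no-op).
    futs.forEach(f -> f.whenComplete((info, err) -> {
      if (err == null && info != null)
        result.complete(info);
    }));

    // Once every node has answered (or failed), fall back to "not committed".
    CompletableFuture.allOf(futs.toArray(new CompletableFuture[0]))
        .whenComplete((ignored, err) -> result.complete(null));

    return result;
  }
}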
  /**
   * Creates and caches new deployment.
   *
   * @param meta Deployment metadata.
   * @param isCache Whether or not to cache.
   * @return New deployment.
   */
  private SharedDeployment createNewDeployment(GridDeploymentMetadata meta, boolean isCache) {
    assert Thread.holdsLock(mux);

    assert meta.parentLoader() == null;

    GridUuid ldrId = GridUuid.randomUuid();

    GridDeploymentClassLoader clsLdr;

    if (meta.deploymentMode() == CONTINUOUS || meta.participants() == null) {
      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.classLoaderId(),
              meta.senderNodeId(),
              meta.sequenceNumber(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              meta.deploymentMode() == CONTINUOUS /* enable class byte cache in CONTINUOUS mode */);

      if (meta.participants() != null)
        for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet())
          clsLdr.register(e.getKey(), e.getValue().get1(), e.getValue().get2());

      if (log.isDebugEnabled())
        log.debug(
            "Created class loader in CONTINUOUS mode or without participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    } else {
      assert meta.deploymentMode() == SHARED;

      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.participants(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              false);

      if (log.isDebugEnabled())
        log.debug(
            "Created classloader in SHARED mode with participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    }

    // Give this deployment its own class loader ID to emphasize that the
    // ID is unique to this shared deployment and is not the ID of the loader
    // on the sender node.
    SharedDeployment dep =
        new SharedDeployment(
            meta.deploymentMode(), clsLdr, ldrId, -1, meta.userVersion(), meta.alias());

    if (log.isDebugEnabled()) log.debug("Created new deployment: " + dep);

    if (isCache) {
      List<SharedDeployment> deps =
          F.addIfAbsent(cache, meta.userVersion(), new LinkedList<SharedDeployment>());

      assert deps != null;

      deps.add(dep);

      if (log.isDebugEnabled()) log.debug("Added deployment to cache: " + cache);
    }

    return dep;
  }
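
// Illustrative sketch (not GridGain API): createNewDeployment() above appends the new deployment
// to a per-user-version list in the cache, creating the list on first use (F.addIfAbsent). The
// stand-in below shows the same append-or-create step with ConcurrentMap.computeIfAbsent; the
// Deployment type parameter is hypothetical.
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;

class DeploymentCacheSketch<Deployment> {
  private final ConcurrentMap<String, List<Deployment>> cache = new ConcurrentHashMap<>();

  /** Adds the deployment under its user version, creating the list if it does not exist yet. */
  void addToCache(String userVer, Deployment dep) {
    cache.computeIfAbsent(userVer, v -> new CopyOnWriteArrayList<>()).add(dep);
  }
}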
  /** {@inheritDoc} */
  @Override
  public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    assert meta != null;

    assert ctx.config().isPeerClassLoadingEnabled();

    // Validate metadata.
    assert meta.classLoaderId() != null;
    assert meta.senderNodeId() != null;
    assert meta.sequenceNumber() >= -1;
    assert meta.parentLoader() == null;

    if (log.isDebugEnabled())
      log.debug("Starting to peer-load class based on deployment metadata: " + meta);

    while (true) {
      List<SharedDeployment> depsToCheck = null;

      SharedDeployment dep = null;

      synchronized (mux) {
        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          for (SharedDeployment d : deps) {
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())
                || meta.senderNodeId().equals(ctx.localNodeId())) {
              // Done.
              dep = d;

              break;
            }
          }

          if (dep == null) {
            GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

            if (!redeployCheck.get1()) {
              // Checking for redeployment encountered invalid state.
              if (log.isDebugEnabled())
                log.debug("Checking for redeployment encountered invalid state: " + meta);

              return null;
            }

            dep = redeployCheck.get2();

            if (dep == null) {
              // Find existing deployments that need to be checked
              // whether they should be reused for this request.
              for (SharedDeployment d : deps) {
                if (!d.isPendingUndeploy() && !d.isUndeployed()) {
                  if (depsToCheck == null) depsToCheck = new LinkedList<SharedDeployment>();

                  if (log.isDebugEnabled()) log.debug("Adding deployment to check: " + d);

                  depsToCheck.add(d);
                }
              }

              // If no deployment can be reused, create a new one.
              if (depsToCheck == null) {
                dep = createNewDeployment(meta, false);

                deps.add(dep);
              }
            }
          }
        } else {
          GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

          if (!redeployCheck.get1()) {
            // Checking for redeployment encountered invalid state.
            if (log.isDebugEnabled())
              log.debug("Checking for redeployment encountered invalid state: " + meta);

            return null;
          }

          dep = redeployCheck.get2();

          if (dep == null)
            // Create peer class loader.
            dep = createNewDeployment(meta, true);
        }
      }

      if (dep != null) {
        if (log.isDebugEnabled())
          log.debug("Found SHARED or CONTINUOUS deployment after first check: " + dep);

        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }

        return dep;
      }

      assert meta.parentLoader() == null;
      assert depsToCheck != null;
      assert !depsToCheck.isEmpty();

      /*
       * Logic below must be performed outside of synchronization
       * because it involves network calls.
       */

      // Check if class can be loaded from existing nodes.
      // In most cases this loop will find something.
      for (SharedDeployment d : depsToCheck) {
        // Load class. Note, that remote node will not load this class.
        // The class will only be loaded on this node.
        Class<?> cls = d.deployedClass(meta.className(), meta.alias());

        if (cls != null) {
          synchronized (mux) {
            if (!d.isUndeployed() && !d.isPendingUndeploy()) {
              if (!addParticipant(d, meta)) return null;

              if (log.isDebugEnabled())
                log.debug(
                    "Acquired deployment after verifying it's availability on "
                        + "existing nodes [depCls="
                        + cls
                        + ", dep="
                        + d
                        + ", meta="
                        + meta
                        + ']');

              return d;
            }
          }
        } else if (log.isDebugEnabled()) {
          log.debug(
              "Deployment cannot be reused (class does not exist on participating nodes) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
        }
      }

      // We are here either because all participant nodes failed
      // or the class indeed should have a separate deployment.
      for (SharedDeployment d : depsToCheck) {
        // Temporary class loader.
        ClassLoader temp =
            new GridDeploymentClassLoader(
                GridUuid.randomUuid(),
                meta.userVersion(),
                meta.deploymentMode(),
                true,
                ctx,
                ctxLdr,
                meta.classLoaderId(),
                meta.senderNodeId(),
                meta.sequenceNumber(),
                comm,
                ctx.config().getNetworkTimeout(),
                log,
                ctx.config().getPeerClassLoadingClassPathExclude(),
                0,
                false);

        String path = U.classNameToResourceName(d.sampleClassName());

        // We check if any random class from existing deployment can be
        // loaded from sender node. If it can, then we reuse existing
        // deployment.
        InputStream rsrcIn = temp.getResourceAsStream(path);

        if (rsrcIn != null) {
          // We don't need the actual stream.
          U.closeQuiet(rsrcIn);

          synchronized (mux) {
            if (d.isUndeployed() || d.isPendingUndeploy()) continue;

            // Add new node prior to loading the class, so we attempt
            // to load the class from the latest node.
            if (!addParticipant(d, meta)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Failed to add participant to deployment "
                        + "[meta="
                        + meta
                        + ", dep="
                        + d
                        + ']');

              return null;
            }
          }

          Class<?> depCls = d.deployedClass(meta.className(), meta.alias());

          if (depCls == null) {
            U.error(
                log,
                "Failed to peer load class after loading it as a resource [alias="
                    + meta.alias()
                    + ", dep="
                    + d
                    + ']');

            return null;
          }

          if (log.isDebugEnabled())
            log.debug(
                "Acquired deployment class after verifying other class "
                    + "availability on sender node [depCls="
                    + depCls
                    + ", rndCls="
                    + d.sampleClass()
                    + ", sampleClsName="
                    + d.sampleClassName()
                    + ", meta="
                    + meta
                    + ']');

          return d;
        } else if (log.isDebugEnabled())
          log.debug(
              "Deployment cannot be reused (random class could not be loaded from sender node) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
      }

      synchronized (mux) {
        if (log.isDebugEnabled())
          log.debug(
              "None of the existing class-loaders fit (will try to create a new one): " + meta);

        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        // Check that deployment picture has not changed.
        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          boolean retry = false;

          for (SharedDeployment d : deps) {
            // Double check if sender was already added.
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) {
              dep = d;

              retry = false;

              break;
            }

            // New deployment was added while outside of synchronization.
            // Need to recheck it again.
            if (!d.isPendingUndeploy() && !d.isUndeployed() && !depsToCheck.contains(d))
              retry = true;
          }

          if (retry) {
            if (log.isDebugEnabled()) log.debug("Retrying due to concurrency issues: " + meta);

            // Outer while loop.
            continue;
          }

          if (dep == null) {
            // No new deployments were added, so we can safely add ours.
            dep = createNewDeployment(meta, false);

            deps.add(dep);

            if (log.isDebugEnabled())
              log.debug(
                  "Adding new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
          }
        } else {
          dep = createNewDeployment(meta, true);

          if (log.isDebugEnabled())
            log.debug(
                "Created new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
        }
      }

      if (dep != null) {
        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }
      }

      return dep;
    }
  }
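
// Illustrative sketch (not GridGain API): getDeployment() above follows a check / work-outside-
// the-lock / re-check pattern, retrying the whole loop when the cached picture changed while the
// lock was released. The stand-in below reduces that control flow to its skeleton; lookup,
// changedConcurrently and create are hypothetical callbacks.
import java.util.Optional;
import java.util.function.Supplier;

class DoubleCheckRetrySketch<D> {
  private final Object mux = new Object();

  /** Returns a cached value, or creates one, retrying when a concurrent change is detected. */
  D acquire(
      Supplier<Optional<D>> lookup, Supplier<Boolean> changedConcurrently, Supplier<D> create) {
    while (true) {
      // Expensive, lock-free work (network calls and class loading in the real code) goes here.

      synchronized (mux) {
        Optional<D> cached = lookup.get();

        if (cached.isPresent())
          return cached.get(); // Another thread already produced a usable value.

        if (changedConcurrently.get())
          continue; // Picture changed while the lock was released: retry the whole loop.

        return create.get(); // Safe to add a new value under the lock.
      }
    }
  }
}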