/**
   * Processes prepare request.
   *
   * @param nodeId Sender node ID.
   * @param msg Prepare request.
   */
  @SuppressWarnings({"InstanceofCatchParameter"})
  private void processPrepareRequest(UUID nodeId, GridDistributedTxPrepareRequest<K, V> msg) {
    assert nodeId != null;
    assert msg != null;

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedTxPrepareResponse<K, V> res;

    try {
      tx =
          new GridReplicatedTxRemote<K, V>(
              ctx.deploy().globalLoader(),
              nodeId,
              msg.threadId(),
              msg.version(),
              msg.commitVersion(),
              msg.concurrency(),
              msg.isolation(),
              msg.isInvalidate(),
              msg.timeout(),
              msg.reads(),
              msg.writes(),
              ctx);

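      // Register with the transaction manager; a null result or failed
      // start means a tx with this version has already been completed.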
      tx = ctx.tm().onCreated(tx);

      if (tx == null || !ctx.tm().onStarted(tx))
        throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + tx);

      // Prepare prior to reordering, so the pending locks added
      // in prepare phase will get properly ordered as well.
      tx.prepare();

      // Add remote candidates and reorder completed and uncompleted versions.
      tx.addRemoteCandidates(
          msg.candidatesByKey(), msg.committedVersions(), msg.rolledbackVersions());

      if (msg.concurrency() == EVENTUALLY_CONSISTENT) {
        if (log.isDebugEnabled()) log.debug("Committing transaction during remote prepare: " + tx);

        tx.commit();

        if (log.isDebugEnabled()) log.debug("Committed transaction during remote prepare: " + tx);

        // Don't send response.
        return;
      }

      res = new GridDistributedTxPrepareResponse<K, V>(msg.version());

      Map<K, Collection<GridCacheMvccCandidate<K>>> cands = tx.localCandidates();

      // Add local candidates (completed version must be set below).
      res.candidates(cands);
    } catch (GridException e) {
      if (e instanceof GridCacheTxRollbackException) {
        if (log.isDebugEnabled())
          log.debug("Transaction was rolled back before prepare completed: " + tx);
      } else if (e instanceof GridCacheTxOptimisticException) {
        if (log.isDebugEnabled())
          log.debug("Optimistic failure for remote transaction (will rollback): " + tx);
      } else {
        U.error(log, "Failed to process prepare request: " + msg, e);
      }

      if (tx != null)
        // Automatically rollback remote transactions.
        tx.rollback();

      // Don't send response.
      if (msg.concurrency() == EVENTUALLY_CONSISTENT) return;

      res = new GridDistributedTxPrepareResponse<K, V>(msg.version());

      res.error(e);
    }

    // Add completed versions.
    res.completedVersions(
        ctx.tm().committedVersions(msg.version()), ctx.tm().rolledbackVersions(msg.version()));

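    // EC transactions never reach this point: they either committed above or
    // returned from the catch block without sending a response.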
    assert msg.concurrency() != EVENTUALLY_CONSISTENT;

    GridNode node = ctx.discovery().node(nodeId);

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send tx response to node (did the node leave grid?) [node="
                + node.id()
                + ", msg="
                + res
                + ']',
            e);

        if (tx != null) tx.rollback();
      }
    }
  }

  /**
   * Processes lock request.
   *
   * @param nodeId Sender node ID.
   * @param msg Lock request.
   */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

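    // Marshalled key bytes are paired index-by-index with the unmarshalled keys in msg.keys().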
    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse<K, V> res;

    ClassLoader ldr = null;

    try {
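      // Global class loader is needed to unmarshal keys; it may be null if
      // the deployment class loader could not be acquired.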
      ldr = ctx.deploy().globalLoader();

      if (ldr != null) {
        res = new GridDistributedLockResponse<K, V>(msg.version(), msg.futureId(), cnt);

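        // Process each requested key at its index in the message.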
        for (int i = 0; i < cnt; i++) {
          byte[] bytes = keys.get(i);
          K key = msg.keys().get(i);

          Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

          if (bytes == null) continue;

          if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

          GridDistributedCacheEntry<K, V> entry = null;

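          // Retry loop: a concurrently removed entry is cleared from the tx
          // and the operation is retried on a renewed entry.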
          while (true) {
            try {
              entry = entryexx(key);

              // Handle implicit locks for pessimistic transactions.
              if (msg.inTx()) {
                tx = ctx.tm().tx(msg.version());

                if (tx != null) {
                  if (msg.txRead()) tx.addRead(key, bytes);
                  else tx.addWrite(key, bytes);
                } else {
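                  // No transaction with this version yet - create the remote
                  // transaction, initially containing only this key.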
                  tx =
                      new GridReplicatedTxRemote<K, V>(
                          nodeId,
                          msg.threadId(),
                          msg.version(),
                          null,
                          PESSIMISTIC,
                          msg.isolation(),
                          msg.isInvalidate(),
                          msg.timeout(),
                          key,
                          bytes,
                          msg.txRead(),
                          ctx);

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + msg.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  msg.nodeId(),
                  null,
                  msg.threadId(),
                  msg.version(),
                  msg.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions());

              // Double-check in case the sender node left the grid.
              if (ctx.discovery().node(msg.nodeId()) == null) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Node requesting lock left grid (lock request will be ignored): " + msg);

                if (tx != null) tx.rollback();

                return;
              }

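              // Record this entry's local candidates along with committed and
              // rolled back versions for proper ordering on the sender.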
              res.setCandidates(
                  i,
                  entry.localCandidates(),
                  ctx.tm().committedVersions(msg.version()),
                  ctx.tm().rolledbackVersions(msg.version()));

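              // Attach the entry value; serialized value bytes are included
              // only if the sender requested the return value for this key.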
              res.addValueBytes(
                  entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

              // Entry is legit.
              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsoleteVersion() != null
                  : "Obsolete flag not set on removed entry: " + entry;

              if (log.isDebugEnabled())
                log.debug(
                    "Received entry removed exception (will retry on renewed entry): " + entry);

              if (tx != null) {
                tx.clearEntry(entry.key());

                if (log.isDebugEnabled())
                  log.debug(
                      "Cleared removed entry from remote transaction (will retry) [entry="
                          + entry
                          + ", tx="
                          + tx
                          + ']');
              }
            }
          }
        }
      } else {
        String err = "Failed to acquire deployment class for message: " + msg;

        U.warn(log, err);

        res =
            new GridDistributedLockResponse<K, V>(
                msg.version(), msg.futureId(), new GridException(err));
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Received lock request for completed transaction (will ignore): " + e);

      res = new GridDistributedLockResponse<K, V>(msg.version(), msg.futureId(), e);
    } catch (GridException e) {
      String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

      U.error(log, err, e);

      res =
          new GridDistributedLockResponse<K, V>(
              msg.version(), msg.futureId(), new GridException(err, e));

      if (tx != null) tx.rollback();
    } catch (GridDistributedLockCancelledException ignored) {
      // Received lock request for cancelled lock.
      if (log.isDebugEnabled())
        log.debug("Received lock request for canceled lock (will ignore): " + msg);

      if (tx != null) tx.rollback();

      // Don't send response back.
      return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

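    // If the sender cannot be reached, all locks added on its behalf must be released.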
    boolean releaseAll = false;

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

        releaseAll = ldr != null;
      }
    }
    // If sender left grid, release all locks acquired so far.
    else releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
      for (K key : msg.keys()) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId());

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock on removed entity during failure "
                      + "of replicated lock request handling (will retry): "
                      + entry);
          }
        }
      }

      U.warn(
          log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
  }