/** @param e Error. */
  void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
      boolean marked = tx.setRollbackOnly();

      if (e instanceof GridCacheTxRollbackException) {
        if (marked) {
          try {
            tx.rollback();
          } catch (GridException ex) {
            U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
          }
        }
      } else if (tx.implicit()
          && tx.isSystemInvalidate()) { // Finish implicit transaction on heuristic error.
        try {
          tx.close();
        } catch (GridException ex) {
          U.error(log, "Failed to invalidate transaction: " + tx, ex);
        }
      }

      onComplete();
    }
  }
  /** Initializes future. */
  @SuppressWarnings({"unchecked"})
  void finish() {
    if (mappings != null) {
      finish(mappings.values());

      markInitialized();

      if (!isSync()) {
        boolean complete = true;

        for (GridFuture<?> f : pending())
          // Mini-future in non-sync mode gets done when message gets sent.
          if (isMini(f) && !f.isDone()) complete = false;

        if (complete) onComplete();
      }
    } else {
      assert !commit;

      try {
        tx.rollback();
      } catch (GridException e) {
        U.error(log, "Failed to rollback empty transaction: " + tx, e);
      }

      markInitialized();
    }
  }
 /**
  * @param out Output stream.
  * @param err Error cause.
  */
 private void sendErrorResponse(ObjectOutput out, Exception err) {
   try {
     out.writeObject(new IpcSharedMemoryInitResponse(err));
   } catch (IOException e) {
     U.error(log, "Failed to send error response to client.", e);
   }
 }
  /**
   * @param nodeId Sender node ID.
   * @param msg Response to prepare request.
   */
  private void processPrepareResponse(UUID nodeId, GridDistributedTxPrepareResponse<K, V> msg) {
    assert nodeId != null;
    assert msg != null;

    GridReplicatedTxLocal<K, V> tx = ctx.tm().tx(msg.version());

    if (tx == null) {
      if (log.isDebugEnabled())
        log.debug(
            "Received prepare response for non-existing transaction [senderNodeId="
                + nodeId
                + ", res="
                + msg
                + ']');

      return;
    }

    GridReplicatedTxPrepareFuture<K, V> future = (GridReplicatedTxPrepareFuture<K, V>) tx.future();

    if (future != null) future.onResult(nodeId, msg);
    else
      U.error(
          log,
          "Received prepare response for transaction with no future [res="
              + msg
              + ", tx="
              + tx
              + ']');
  }
  /** {@inheritDoc} */
  @Override
  public boolean onDone(GridCacheTx tx, Throwable err) {
    if ((initialized() || err != null) && super.onDone(tx, err)) {
      if (error() instanceof GridCacheTxHeuristicException) {
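        // On heuristic failure, invalidate local copies of written keys that are not affinity-mapped to this node.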
        long topVer = this.tx.topologyVersion();

        for (GridCacheTxEntry<K, V> e : this.tx.writeMap().values()) {
          try {
            if (e.op() != NOOP && !cctx.affinity().localNode(e.key(), topVer)) {
              GridCacheEntryEx<K, V> cacheEntry = cctx.cache().peekEx(e.key());

              if (cacheEntry != null) cacheEntry.invalidate(null, this.tx.xidVersion());
            }
          } catch (Throwable t) {
            U.error(log, "Failed to invalidate entry.", t);

            if (t instanceof Error) throw (Error) t;
          }
        }
      }

      // Don't forget to clean up.
      cctx.mvcc().removeFuture(this);

      return true;
    }

    return false;
  }
    /** @param workTokDir Token directory (common for multiple nodes). */
    private void cleanupResources(File workTokDir) {
      RandomAccessFile lockFile = null;

      FileLock lock = null;

      try {
        lockFile = new RandomAccessFile(new File(workTokDir, LOCK_FILE_NAME), "rw");

        lock = lockFile.getChannel().lock();

        if (lock != null) processTokenDirectory(workTokDir);
        else if (log.isDebugEnabled())
          log.debug(
              "Token directory is being processed concurrently: " + workTokDir.getAbsolutePath());
      } catch (OverlappingFileLockException ignored) {
        if (log.isDebugEnabled())
          log.debug(
              "Token directory is being processed concurrently: " + workTokDir.getAbsolutePath());
      } catch (FileLockInterruptionException ignored) {
        Thread.currentThread().interrupt();
      } catch (IOException e) {
        U.error(log, "Failed to process directory: " + workTokDir.getAbsolutePath(), e);
      } finally {
        U.releaseQuiet(lock);
        U.closeQuiet(lockFile);
      }
    }
  /** Stops Jetty. */
  private void stopJetty() {
    // Jetty does not really stop the server if port is busy.
    try {
      if (httpSrv != null) {
        // If server was successfully started, deregister ports.
        if (httpSrv.isStarted()) ctx.ports().deregisterPorts(getClass());

        // Record current interrupted status of calling thread.
        boolean interrupted = Thread.interrupted();

        try {
          httpSrv.stop();
        } finally {
          // Reset interrupted flag on calling thread.
          if (interrupted) Thread.currentThread().interrupt();
        }
      }
    } catch (InterruptedException ignored) {
      if (log.isDebugEnabled()) log.debug("Thread has been interrupted.");

      Thread.currentThread().interrupt();
    } catch (Exception e) {
      U.error(log, "Failed to stop Jetty HTTP server.", e);
    }
  }
  /**
   * Removes locks for the given version and keys, regardless of whether they are currently owned.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    try {
      // Send request to remove from remote nodes.
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      req.version(ver);

      for (K key : keys) {
        while (true) {
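          // Retry if the entry is concurrently removed.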
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  // If there is only local node in this lock's topology,
                  // then there is no reason to distribute the request.
                  if (nodes.isEmpty()) continue;

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (nodes.isEmpty()) return;

      req.completedVersions(ctx.tm().committedVersions(ver), ctx.tm().rolledbackVersions(ver));

      if (!req.keyBytes().isEmpty())
        // We don't wait for reply to this message.
        ctx.io().safeSend(nodes, req, null);
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /**
   * Processes unlock request.
   *
   * @param nodeId Sender node ID.
   * @param req Unlock request.
   */
  @SuppressWarnings({"unchecked"})
  private void processUnlockRequest(UUID nodeId, GridDistributedUnlockRequest req) {
    assert nodeId != null;

    try {
      ClassLoader ldr = ctx.deploy().globalLoader();
      List<byte[]> keys = req.keyBytes();

      for (byte[] keyBytes : keys) {
        K key = (K) U.unmarshal(ctx.marshaller(), new ByteArrayInputStream(keyBytes), ldr);

        while (true) {
          boolean created = false;

          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          if (entry == null) {
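            // Create the entry temporarily to record the unlock; it is marked obsolete and removed below.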
            entry = entryexx(key);

            created = true;
          }

          try {
            entry.doneRemote(
                req.version(), req.version(), req.committedVersions(), req.rolledbackVersions());

            // Note that we don't reorder completed versions here,
            // as there is no point to reorder relative to the version
            // we are about to remove.
            if (entry.removeLock(req.version())) {
              if (log.isDebugEnabled())
                log.debug("Removed lock [lockId=" + req.version() + ", key=" + key + ']');

              if (created && entry.markObsolete(req.version())) removeIfObsolete(entry.key());
            } else if (log.isDebugEnabled())
              log.debug(
                  "Received unlock request for unknown candidate "
                      + "(added to cancelled locks set): "
                      + req);

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Received remove lock request for removed entry (will retry) [entry="
                      + entry
                      + ", req="
                      + req
                      + ']');
          }
        }
      }
    } catch (GridException e) {
      U.error(log, "Failed to unmarshal unlock key (unlock will not be performed): " + req, e);
    }
  }
  /**
   * Notifies single listener.
   *
   * @param lsnr Listener.
   */
  private void notifyListener(GridInClosure<? super GridFuture<R>> lsnr) {
    assert lsnr != null;

    try {
      lsnr.apply(this);
    } catch (IllegalStateException ignore) {
      U.warn(
          null,
          "Failed to notify listener (grid is stopped) [grid="
              + ctx.gridName()
              + ", lsnr="
              + lsnr
              + ']');
    } catch (RuntimeException e) {
      U.error(log, "Failed to notify listener: " + lsnr, e);

      throw e;
    } catch (Error e) {
      U.error(log, "Failed to notify listener: " + lsnr, e);

      throw e;
    }
  }
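    /** Cancels all pending local futures and fails outstanding request futures with a cancellation error. */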
    void cancelAll() {
      GridException err =
          new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this);

      for (GridFuture<?> f : locFuts) {
        try {
          f.cancel();
        } catch (GridException e) {
          U.error(log, "Failed to cancel mini-future.", e);
        }
      }

      for (GridFutureAdapter<?> f : reqs.values()) f.onDone(err);
    }
  /** @param e Error. */
  void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
      boolean marked = tx.setRollbackOnly();

      if (e instanceof GridCacheTxRollbackException)
        if (marked) {
          try {
            tx.rollback();
          } catch (GridException ex) {
            U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
          }
        }

      onComplete();
    }
  }
  /**
   * Processes lock request.
   *
   * @param nodeId Sender node ID.
   * @param msg Lock request.
   */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
      ldr = ctx.deploy().globalLoader();

      if (ldr != null) {
        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

        for (int i = 0; i < keys.size(); i++) {
          byte[] bytes = keys.get(i);
          K key = msg.keys().get(i);

          Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

          if (bytes == null) continue;

          if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

          GridDistributedCacheEntry<K, V> entry = null;

          while (true) {
            try {
              entry = entryexx(key);

              // Handle implicit locks for pessimistic transactions.
              if (msg.inTx()) {
                tx = ctx.tm().tx(msg.version());

                if (tx != null) {
                  if (msg.txRead()) tx.addRead(key, bytes);
                  else tx.addWrite(key, bytes);
                } else {
                  tx =
                      new GridReplicatedTxRemote<K, V>(
                          nodeId,
                          msg.threadId(),
                          msg.version(),
                          null,
                          PESSIMISTIC,
                          msg.isolation(),
                          msg.isInvalidate(),
                          msg.timeout(),
                          key,
                          bytes,
                          msg.txRead(),
                          ctx);

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + msg.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  msg.nodeId(),
                  null,
                  msg.threadId(),
                  msg.version(),
                  msg.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions());

              // Double-check in case if sender node left the grid.
              if (ctx.discovery().node(msg.nodeId()) == null) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Node requesting lock left grid (lock request will be ignored): " + msg);

                if (tx != null) tx.rollback();

                return;
              }

              res.setCandidates(
                  i,
                  entry.localCandidates(),
                  ctx.tm().committedVersions(msg.version()),
                  ctx.tm().rolledbackVersions(msg.version()));

              res.addValueBytes(
                  entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

              // Entry is legit.
              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsoleteVersion() != null
                  : "Obsolete flag not set on removed entry: " + entry;

              if (log.isDebugEnabled())
                log.debug(
                    "Received entry removed exception (will retry on renewed entry): " + entry);

              if (tx != null) {
                tx.clearEntry(entry.key());

                if (log.isDebugEnabled())
                  log.debug(
                      "Cleared removed entry from remote transaction (will retry) [entry="
                          + entry
                          + ", tx="
                          + tx
                          + ']');
              }
            }
          }
        }
      } else {
        String err = "Failed to acquire deployment class for message: " + msg;

        U.warn(log, err);

        res =
            new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Received lock request for completed transaction (will ignore): " + e);

      res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    } catch (GridException e) {
      String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

      log.error(err, e);

      res =
          new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

      if (tx != null) tx.rollback();
    } catch (GridDistributedLockCancelledException ignored) {
      // Received lock request for cancelled lock.
      if (log.isDebugEnabled())
        log.debug("Received lock request for canceled lock (will ignore): " + msg);

      if (tx != null) tx.rollback();

      // Don't send response back.
      return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

        releaseAll = ldr != null;
      }
    }
    // If sender left grid, release all locks acquired so far.
    else releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
      for (K key : msg.keys()) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId());

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock on removed entity during failure "
                      + "of replicated lock request handling (will retry): "
                      + entry);
          }
        }
      }

      U.warn(
          log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
  }
  /**
   * Removes locks for the given version and keys, regardless of whether they are currently owned.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    try {
      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

      for (K key : keys) {
        // Send request to remove from remote nodes.
        GridNearUnlockRequest<K, V> req = null;

        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                if (affNodes == null) {
                  affNodes = CU.allNodes(ctx, cand.topologyVersion());

                  keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                  map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                }

                GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                if (!primary.isLocal()) {
                  req = map.get(primary);

                  if (req == null) {
                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                    req.version(ver);
                  }
                }

                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  if (primary.isLocal()) {
                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                    assert req == null;

                    continue;
                  }

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (map == null || map.isEmpty()) return;

      Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
      Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

      for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (!req.keyBytes().isEmpty()) {
          req.completedVersions(committed, rolledback);

          // We don't wait for reply to this message.
          ctx.io().send(n, req);
        }
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** {@inheritDoc} */
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return;

    try {
      GridCacheVersion ver = null;

      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

      Collection<K> locKeys = new LinkedList<K>();

      GridCacheVersion obsoleteVer = ctx.versions().next();

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          if (entry == null || !ctx.isAll(entry.wrap(false), filter)) break; // While.

          try {
            GridCacheMvccCandidate<K> cand =
                entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

            if (cand != null) {
              ver = cand.version();

              if (affNodes == null) {
                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
              }

              // Send request to remove from remote nodes.
              GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

              GridNearUnlockRequest<K, V> req = map.get(primary);

              if (req == null) {
                map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                req.version(ver);
              }

              // Remove candidate from local node first.
              GridCacheMvccCandidate<K> rmv = entry.removeLock();

              if (rmv != null) {
                if (!rmv.reentry()) {
                  if (ver != null && !ver.equals(rmv.version()))
                    throw new GridException(
                        "Failed to unlock (if keys were locked separately, "
                            + "then they need to be unlocked separately): "
                            + keys);

                  if (!primary.isLocal()) {
                    assert req != null;

                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                  } else locKeys.add(key);

                  if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
                } else if (log.isDebugEnabled())
                  log.debug(
                      "Current thread still owns lock (or there are no other nodes)"
                          + " [lock="
                          + rmv
                          + ", curThreadId="
                          + Thread.currentThread().getId()
                          + ']');
              }

              // Try to evict near entry if it's dht-mapped locally.
              evictNearEntry(entry, obsoleteVer);
            }

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Attempted to unlock removed entry (will retry): " + entry);
          }
        }
      }

      if (ver == null) return;

      for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridRichNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (n.isLocal()) dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
        else if (!req.keyBytes().isEmpty())
          // We don't wait for reply to this message.
          ctx.io().send(n, req);
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
    /**
     * @param entries Entries to submit.
     * @param curFut Current future.
     * @throws GridInterruptedException If interrupted.
     */
    private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut)
        throws GridInterruptedException {
      assert entries != null;
      assert !entries.isEmpty();
      assert curFut != null;

      incrementActiveTasks();

      GridFuture<Object> fut;
      if (isLocNode) {
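        // Local node: run the update job in-process instead of sending a request over the wire.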
        fut =
            ctx.closure()
                .callLocalSafe(
                    new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater),
                    false);

        locFuts.add(fut);

        fut.listenAsync(
            new GridInClosure<GridFuture<Object>>() {
              @Override
              public void apply(GridFuture<Object> t) {
                try {
                  boolean rmv = locFuts.remove(t);

                  assert rmv;

                  curFut.onDone(t.get());
                } catch (GridException e) {
                  curFut.onDone(e);
                }
              }
            });
      } else {
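        // Remote node: marshal entries (and updater/topic on first use) and send a data load request.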
        byte[] entriesBytes;

        try {
          entriesBytes = ctx.config().getMarshaller().marshal(entries);

          if (updaterBytes == null) {
            assert updater != null;

            updaterBytes = ctx.config().getMarshaller().marshal(updater);
          }

          if (topicBytes == null) topicBytes = ctx.config().getMarshaller().marshal(topic);
        } catch (GridException e) {
          U.error(log, "Failed to marshal (request will not be sent).", e);

          return;
        }

        GridDeployment dep = null;
        GridPeerDeployAware jobPda0 = null;

        if (ctx.deploy().enabled()) {
          try {
            jobPda0 = jobPda;

            assert jobPda0 != null;

            dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader());
          } catch (GridException e) {
            U.error(
                log,
                "Failed to deploy class (request will not be sent): " + jobPda0.deployClass(),
                e);

            return;
          }

          if (dep == null)
            U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass());
        }

        long reqId = idGen.incrementAndGet();

        fut = curFut;

        // Register the future by request ID; cancelAll() fails any futures still pending here.
        reqs.put(reqId, (GridFutureAdapter<Object>) fut);

        GridDataLoadRequest<Object, Object> req =
            new GridDataLoadRequest<>(
                reqId,
                topicBytes,
                cacheName,
                updaterBytes,
                entriesBytes,
                true,
                dep != null ? dep.deployMode() : null,
                dep != null ? jobPda0.deployClass().getName() : null,
                dep != null ? dep.userVersion() : null,
                dep != null ? dep.participants() : null,
                dep != null ? dep.classLoaderId() : null,
                dep == null);

        try {
          ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL);

          if (log.isDebugEnabled())
            log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']');
        } catch (GridException e) {
          if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id()))
            ((GridFutureAdapter<Object>) fut).onDone(e);
          else
            ((GridFutureAdapter<Object>) fut)
                .onDone(
                    new GridTopologyException(
                        "Failed to send " + "request (node has left): " + node.id()));
        }
      }
    }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return;

    Collection<? extends GridNode> nodes = ctx.remoteNodes(keys);

    try {
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      for (K key : keys) {
        GridDistributedCacheEntry<K, V> entry = entryexx(key);

        if (!ctx.isAll(entry.wrap(false), filter)) continue;

        // Unlock local lock first.
        GridCacheMvccCandidate<K> rmv = entry.removeLock();

        if (rmv != null && !nodes.isEmpty()) {
          if (!rmv.reentry()) {
            req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);

            // We are assuming that lock ID is the same for all keys.
            req.version(rmv.version());

            if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
          } else {
            if (log.isDebugEnabled())
              log.debug(
                  "Locally unlocked lock reentry without distributing to other nodes [removed="
                      + rmv
                      + ", entry="
                      + entry
                      + ']');
          }
        } else {
          if (log.isDebugEnabled())
            log.debug(
                "Current thread still owns lock (or there are no other nodes) [lock="
                    + rmv
                    + ", curThreadId="
                    + Thread.currentThread().getId()
                    + ']');
        }
      }

      // Don't proceed if there are no keys to unlock.
      if (req.keyBytes().isEmpty()) {
        if (log.isDebugEnabled())
          log.debug("No keys to unlock locally (was it reentry unlock?): " + keys);

        return;
      }

      // We don't wait for reply to this message. Receiving side will have
      // to make sure that unlock requests don't come before lock requests.
      ctx.io().safeSend(nodes, req, null);
    } catch (GridException e) {
      U.error(log, "Failed to unlock keys: " + keys, e);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  @Override
  protected GridFuture<Boolean> lockAllAsync(
      Collection<? extends K> keys,
      long timeout,
      GridCacheTxLocalEx<K, V> tx,
      boolean isInvalidate,
      boolean isRead,
      boolean retval,
      GridCacheTxIsolation isolation,
      GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true);

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    final GridReplicatedLockFuture<K, V> fut =
        new GridReplicatedLockFuture<K, V>(ctx, keys, tx, this, nodes, timeout, filter);

    GridDistributedLockRequest<K, V> req =
        new GridDistributedLockRequest<K, V>(
            locNodeId,
            Thread.currentThread().getId(),
            fut.futureId(),
            fut.version(),
            tx != null,
            isRead,
            isolation,
            isInvalidate,
            timeout,
            keys.size());

    try {
      // Must add future before readying locks.
      if (!ctx.mvcc().addFuture(fut))
        throw new IllegalStateException("Duplicate future ID: " + fut);

      boolean distribute = false;

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = null;

          try {
            entry = entryexx(key);

            if (!ctx.isAll(entry.wrap(false), filter)) {
              if (log.isDebugEnabled())
                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

              fut.onDone(false);

              return fut;
            }

            // Removed exception may be thrown here.
            GridCacheMvccCandidate<K> cand = fut.addEntry(entry);

            if (cand != null) {
              req.addKeyBytes(
                  key,
                  cand.reentry() ? null : entry.getOrMarshalKeyBytes(),
                  retval,
                  entry.localCandidates(fut.version()),
                  ctx);

              req.completedVersions(
                  ctx.tm().committedVersions(fut.version()),
                  ctx.tm().rolledbackVersions(fut.version()));

              distribute = !cand.reentry();
            } else if (fut.isDone()) return fut;

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
          }
        }
      }

      // If nothing to distribute at this point,
      // then all locks are reentries.
      if (!distribute) fut.complete(true);

      if (nodes.isEmpty()) fut.readyLocks();

      // No reason to send request if all locks are locally re-entered,
      // or if timeout is negative and local locks could not be acquired.
      if (fut.isDone()) return fut;

      try {
        ctx.io()
            .safeSend(
                fut.nodes(),
                req,
                new P1<GridNode>() {
                  @Override
                  public boolean apply(GridNode node) {
                    fut.onNodeLeft(node.id());

                    return !fut.isDone();
                  }
                });
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send lock request to node [nodes="
                + U.toShortString(nodes)
                + ", req="
                + req
                + ']',
            e);

        fut.onError(e);
      }

      return fut;
    } catch (GridException e) {
      Throwable err = new GridException("Failed to acquire asynchronous lock for keys: " + keys, e);

      // Clean-up.
      fut.onError(err);

      ctx.mvcc().removeFuture(fut);

      return fut;
    }
  }
  /**
   * @param nodeId Sender node ID.
   * @param req Finish transaction message.
   */
  @SuppressWarnings({"CatchGenericClass"})
  private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
      ClassLoader ldr = ctx.deploy().globalLoader();

      if (req.commit()) {
        // If the lock was acquired explicitly (outside of a transaction), there is no remote transaction yet.
        if (tx == null) {
          // Create transaction and add entries.
          tx =
              ctx.tm()
                  .onCreated(
                      new GridReplicatedTxRemote<K, V>(
                          ldr,
                          nodeId,
                          req.threadId(),
                          req.version(),
                          req.commitVersion(),
                          PESSIMISTIC,
                          READ_COMMITTED,
                          req.isInvalidate(),
                          /*timeout */ 0,
                          /*read entries*/ null,
                          req.writes(),
                          ctx));

          if (tx == null || !ctx.tm().onStarted(tx))
            throw new GridCacheTxRollbackException(
                "Attempt to start a completed " + "transaction: " + req);
        } else {
          boolean set = tx.commitVersion(req.commitVersion());

          assert set;
        }

        Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

        if (!F.isEmpty(writeEntries)) {
          // In OPTIMISTIC mode, we get the values at PREPARE stage.
          assert tx.concurrency() == PESSIMISTIC;

          for (GridCacheTxEntry<K, V> entry : writeEntries) {
            // Unmarshal write entries.
            entry.unmarshal(ctx, ldr);

            if (log.isDebugEnabled())
              log.debug(
                  "Unmarshalled transaction entry from pessimistic transaction [key="
                      + entry.key()
                      + ", value="
                      + entry.value()
                      + ", tx="
                      + tx
                      + ']');

            if (!tx.setWriteValue(entry))
              U.warn(
                  log,
                  "Received entry to commit that was not present in transaction [entry="
                      + entry
                      + ", tx="
                      + tx
                      + ']');
          }
        }

        // Add completed versions.
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        // Pessimistic transactions have no separate prepare round-trip, so prepare locally before committing.
        if (tx.pessimistic()) tx.prepare();

        tx.commit();
      } else if (tx != null) {
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        tx.rollback();
      }

      if (req.replyRequired()) {
        GridCacheMessage<K, V> res =
            new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

        try {
          ctx.io().send(nodeId, res);
        } catch (Throwable e) {
          // Double-check.
          if (ctx.discovery().node(nodeId) == null) {
            if (log.isDebugEnabled())
              log.debug(
                  "Node left while sending finish response [nodeId="
                      + nodeId
                      + ", res="
                      + res
                      + ']');
          } else
            U.error(
                log,
                "Failed to send finish response to node [nodeId=" + nodeId + ", res=" + res + ']',
                e);
        }
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Attempted to start a completed transaction (will ignore): " + e);
    } catch (Throwable e) {
      U.error(
          log,
          "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']',
          e);

      if (tx != null) tx.rollback();
    }
  }
  /** {@inheritDoc} */
  @Override
  public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    assert meta != null;

    assert ctx.config().isPeerClassLoadingEnabled();

    // Validate metadata.
    assert meta.classLoaderId() != null;
    assert meta.senderNodeId() != null;
    assert meta.sequenceNumber() >= -1;
    assert meta.parentLoader() == null;

    if (log.isDebugEnabled())
      log.debug("Starting to peer-load class based on deployment metadata: " + meta);

    while (true) {
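      // Retry loop: the deployment picture may change while network calls are made outside of synchronization (see second check below).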
      List<SharedDeployment> depsToCheck = null;

      SharedDeployment dep = null;

      synchronized (mux) {
        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          for (SharedDeployment d : deps) {
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())
                || meta.senderNodeId().equals(ctx.localNodeId())) {
              // Done.
              dep = d;

              break;
            }
          }

          if (dep == null) {
            GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

            if (!redeployCheck.get1()) {
              // Checking for redeployment encountered invalid state.
              if (log.isDebugEnabled())
                log.debug("Checking for redeployment encountered invalid state: " + meta);

              return null;
            }

            dep = redeployCheck.get2();

            if (dep == null) {
              // Find existing deployments that need to be checked
              // whether they should be reused for this request.
              for (SharedDeployment d : deps) {
                if (!d.isPendingUndeploy() && !d.isUndeployed()) {
                  if (depsToCheck == null) depsToCheck = new LinkedList<SharedDeployment>();

                  if (log.isDebugEnabled()) log.debug("Adding deployment to check: " + d);

                  depsToCheck.add(d);
                }
              }

              // If no deployment can be reused, create a new one.
              if (depsToCheck == null) {
                dep = createNewDeployment(meta, false);

                deps.add(dep);
              }
            }
          }
        } else {
          GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

          if (!redeployCheck.get1()) {
            // Checking for redeployment encountered invalid state.
            if (log.isDebugEnabled())
              log.debug("Checking for redeployment encountered invalid state: " + meta);

            return null;
          }

          dep = redeployCheck.get2();

          if (dep == null)
            // Create peer class loader.
            dep = createNewDeployment(meta, true);
        }
      }

      if (dep != null) {
        if (log.isDebugEnabled())
          log.debug("Found SHARED or CONTINUOUS deployment after first check: " + dep);

        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }

        return dep;
      }

      assert meta.parentLoader() == null;
      assert depsToCheck != null;
      assert !depsToCheck.isEmpty();

      /*
       * Logic below must be performed outside of synchronization
       * because it involves network calls.
       */

      // Check if class can be loaded from existing nodes.
      // In most cases this loop will find something.
      for (SharedDeployment d : depsToCheck) {
        // Load class. Note, that remote node will not load this class.
        // The class will only be loaded on this node.
        Class<?> cls = d.deployedClass(meta.className(), meta.alias());

        if (cls != null) {
          synchronized (mux) {
            if (!d.isUndeployed() && !d.isPendingUndeploy()) {
              if (!addParticipant(d, meta)) return null;

              if (log.isDebugEnabled())
                log.debug(
                    "Acquired deployment after verifying it's availability on "
                        + "existing nodes [depCls="
                        + cls
                        + ", dep="
                        + d
                        + ", meta="
                        + meta
                        + ']');

              return d;
            }
          }
        } else if (log.isDebugEnabled()) {
          log.debug(
              "Deployment cannot be reused (class does not exist on participating nodes) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
        }
      }

      // We are here either because all participant nodes failed
      // or the class indeed should have a separate deployment.
      for (SharedDeployment d : depsToCheck) {
        // Temporary class loader.
        ClassLoader temp =
            new GridDeploymentClassLoader(
                GridUuid.randomUuid(),
                meta.userVersion(),
                meta.deploymentMode(),
                true,
                ctx,
                ctxLdr,
                meta.classLoaderId(),
                meta.senderNodeId(),
                meta.sequenceNumber(),
                comm,
                ctx.config().getNetworkTimeout(),
                log,
                ctx.config().getPeerClassLoadingClassPathExclude(),
                0,
                false);

        String path = U.classNameToResourceName(d.sampleClassName());

        // We check if any random class from existing deployment can be
        // loaded from sender node. If it can, then we reuse existing
        // deployment.
        InputStream rsrcIn = temp.getResourceAsStream(path);

        if (rsrcIn != null) {
          // We don't need the actual stream.
          U.closeQuiet(rsrcIn);

          synchronized (mux) {
            if (d.isUndeployed() || d.isPendingUndeploy()) continue;

            // Add new node prior to loading the class, so we attempt
            // to load the class from the latest node.
            if (!addParticipant(d, meta)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Failed to add participant to deployment "
                        + "[meta="
                        + meta
                        + ", dep="
                        + dep
                        + ']');

              return null;
            }
          }

          Class<?> depCls = d.deployedClass(meta.className(), meta.alias());

          if (depCls == null) {
            U.error(
                log,
                "Failed to peer load class after loading it as a resource [alias="
                    + meta.alias()
                    + ", dep="
                    + dep
                    + ']');

            return null;
          }

          if (log.isDebugEnabled())
            log.debug(
                "Acquired deployment class after verifying other class "
                    + "availability on sender node [depCls="
                    + depCls
                    + ", rndCls="
                    + d.sampleClass()
                    + ", sampleClsName="
                    + d.sampleClassName()
                    + ", meta="
                    + meta
                    + ']');

          return d;
        } else if (log.isDebugEnabled())
          log.debug(
              "Deployment cannot be reused (random class could not be loaded from sender node) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
      }

      synchronized (mux) {
        if (log.isDebugEnabled())
          log.debug(
              "None of the existing class-loaders fit (will try to create a new one): " + meta);

        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        // Check that deployment picture has not changed.
        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          boolean retry = false;

          for (SharedDeployment d : deps) {
            // Double check if sender was already added.
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) {
              dep = d;

              retry = false;

              break;
            }

            // New deployment was added while outside of synchronization.
            // Need to recheck it again.
            if (!d.isPendingUndeploy() && !d.isUndeployed() && !depsToCheck.contains(d))
              retry = true;
          }

          if (retry) {
            if (log.isDebugEnabled()) log.debug("Retrying due to concurrency issues: " + meta);

            // Outer while loop.
            continue;
          }

          if (dep == null) {
            // No new deployments were added, so we can safely add ours.
            dep = createNewDeployment(meta, false);

            deps.add(dep);

            if (log.isDebugEnabled())
              log.debug(
                  "Adding new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
          }
        } else {
          dep = createNewDeployment(meta, true);

          if (log.isDebugEnabled())
            log.debug(
                "Created new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
        }
      }

      if (dep != null) {
        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }
      }

      return dep;
    }
  }
  /**
   * @param nodeId Sender node ID.
   * @param msg Prepare request.
   */
  @SuppressWarnings({"InstanceofCatchParameter"})
  private void processPrepareRequest(UUID nodeId, GridDistributedTxPrepareRequest<K, V> msg) {
    assert nodeId != null;
    assert msg != null;

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedTxPrepareResponse<K, V> res;

    try {
      tx =
          new GridReplicatedTxRemote<K, V>(
              ctx.deploy().globalLoader(),
              nodeId,
              msg.threadId(),
              msg.version(),
              msg.commitVersion(),
              msg.concurrency(),
              msg.isolation(),
              msg.isInvalidate(),
              msg.timeout(),
              msg.reads(),
              msg.writes(),
              ctx);

      tx = ctx.tm().onCreated(tx);

      if (tx == null || !ctx.tm().onStarted(tx))
        throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + tx);

      // Prepare prior to reordering, so the pending locks added
      // in prepare phase will get properly ordered as well.
      tx.prepare();

      // Add remote candidates and reorder completed and uncompleted versions.
      tx.addRemoteCandidates(
          msg.candidatesByKey(), msg.committedVersions(), msg.rolledbackVersions());

      if (msg.concurrency() == EVENTUALLY_CONSISTENT) {
        if (log.isDebugEnabled()) log.debug("Committing transaction during remote prepare: " + tx);

        tx.commit();

        if (log.isDebugEnabled()) log.debug("Committed transaction during remote prepare: " + tx);

        // Don't send response.
        return;
      }

      res = new GridDistributedTxPrepareResponse<K, V>(msg.version());

      Map<K, Collection<GridCacheMvccCandidate<K>>> cands = tx.localCandidates();

      // Add local candidates (completed version must be set below).
      res.candidates(cands);
    } catch (GridException e) {
      if (e instanceof GridCacheTxRollbackException) {
        if (log.isDebugEnabled())
          log.debug("Transaction was rolled back before prepare completed: " + tx);
      } else if (e instanceof GridCacheTxOptimisticException) {
        if (log.isDebugEnabled())
          log.debug("Optimistic failure for remote transaction (will rollback): " + tx);
      } else {
        U.error(log, "Failed to process prepare request: " + msg, e);
      }

      if (tx != null)
        // Automatically rollback remote transactions.
        tx.rollback();

      // Don't send response.
      if (msg.concurrency() == EVENTUALLY_CONSISTENT) return;

      res = new GridDistributedTxPrepareResponse<K, V>(msg.version());

      res.error(e);
    }

    // Add completed versions.
    res.completedVersions(
        ctx.tm().committedVersions(msg.version()), ctx.tm().rolledbackVersions(msg.version()));

    assert msg.concurrency() != EVENTUALLY_CONSISTENT;

    GridNode node = ctx.discovery().node(nodeId);

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send tx response to node (did the node leave grid?) [node="
                + node.id()
                + ", msg="
                + res
                + ']',
            e);

        if (tx != null) tx.rollback();
      }
    }
  }
  /**
   * Removes obsolete deployments in case of redeploy.
   *
   * @param meta Request metadata.
   * @return Tuple of check status and newly created shared deployment, if any.
   */
  private GridTuple2<Boolean, SharedDeployment> checkRedeploy(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    SharedDeployment newDep = null;

    for (List<SharedDeployment> deps : cache.values()) {
      for (SharedDeployment dep : deps) {
        if (!dep.isUndeployed() && !dep.isPendingUndeploy()) {
          long undeployTimeout = ctx.config().getNetworkTimeout();

          SharedDeployment doomed = null;

          // Only check deployments with no participants.
          if (!dep.hasParticipants()) {
            // In case of SHARED deployment it is possible to get here if
            // unmarshalling happens during undeploy. In this case, we
            // simply don't do anything.
            if (dep.deployMode() == CONTINUOUS) {
              if (dep.existingDeployedClass(meta.className()) != null) {
                // Change from shared deploy to shared undeploy or user version change.
                // Simply remove all deployments with no participating nodes.
                if (meta.deploymentMode() == SHARED
                    || !meta.userVersion().equals(dep.userVersion())) doomed = dep;
              }
            }
          }
          // If there are participants, we undeploy if class loader ID on some node changed.
          else if (dep.existingDeployedClass(meta.className()) != null) {
            GridTuple2<GridUuid, Long> ldr = dep.getClassLoaderId(meta.senderNodeId());

            if (ldr != null) {
              if (!ldr.get1().equals(meta.classLoaderId())) {
                // If deployed sequence number is less, then schedule for undeployment.
                if (ldr.get2() < meta.sequenceNumber()) {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Received request for a class with newer sequence number "
                            + "(will schedule current class for undeployment) [newSeq="
                            + meta.sequenceNumber()
                            + ", oldSeq="
                            + ldr.get2()
                            + ", senderNodeId="
                            + meta.senderNodeId()
                            + ", newClsLdrId="
                            + meta.classLoaderId()
                            + ", oldClsLdrId="
                            + ldr.get1()
                            + ']');

                  doomed = dep;
                } else if (ldr.get2() > meta.sequenceNumber()) {
                  long time = System.currentTimeMillis() - dep.timestamp();

                  if (newDep == null && time < ctx.config().getNetworkTimeout()) {
                    // Set undeployTimeout, so the class will be scheduled
                    // for undeployment.
                    undeployTimeout = ctx.config().getNetworkTimeout() - time;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a stale class (will deploy and "
                              + "schedule undeployment in "
                              + undeployTimeout
                              + "ms) "
                              + "[curSeq="
                              + ldr.get2()
                              + ", staleSeq="
                              + meta.sequenceNumber()
                              + ", cls="
                              + meta.className()
                              + ", senderNodeId="
                              + meta.senderNodeId()
                              + ", curLdrId="
                              + ldr.get1()
                              + ", staleLdrId="
                              + meta.classLoaderId()
                              + ']');

                    // We got the redeployed class before the old one.
                    // Simply create a temporary deployment for the sender node,
                    // and schedule undeploy for it.
                    newDep = createNewDeployment(meta, false);

                    doomed = newDep;
                  } else {
                    U.warn(
                        log,
                        "Received execution request for a class that has been redeployed "
                            + "(will ignore): "
                            + meta.alias());

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a class that has been redeployed "
                              + "(will ignore) [alias="
                              + meta.alias()
                              + ", dep="
                              + dep
                              + ']');

                    return F.t(false, null);
                  }
                } else {
                  U.error(
                      log,
                      "Sequence number does not correspond to class loader ID [seqNum="
                          + meta.sequenceNumber()
                          + ", dep="
                          + dep
                          + ']');

                  return F.t(false, null);
                }
              }
            }
          }

          if (doomed != null) {
            doomed.onUndeployScheduled();

            if (log.isDebugEnabled()) log.debug("Deployment was scheduled for undeploy: " + doomed);

            // Lifespan time.
            final long endTime = System.currentTimeMillis() + undeployTimeout;

            // Deployment to undeploy.
            final SharedDeployment undep = doomed;

            ctx.timeout()
                .addTimeoutObject(
                    new GridTimeoutObject() {
                      @Override
                      public GridUuid timeoutId() {
                        return undep.classLoaderId();
                      }

                      @Override
                      public long endTime() {
                        return endTime < 0 ? Long.MAX_VALUE : endTime;
                      }

                      @Override
                      public void onTimeout() {
                        boolean removed = false;

                        // Hot redeployment.
                        synchronized (mux) {
                          assert undep.isPendingUndeploy();

                          if (!undep.isUndeployed()) {
                            undep.undeploy();

                            undep.onRemoved();

                            removed = true;

                            Collection<SharedDeployment> deps = cache.get(undep.userVersion());

                            if (deps != null) {
                              for (Iterator<SharedDeployment> i = deps.iterator(); i.hasNext(); )
                                if (i.next() == undep) i.remove();

                              if (deps.isEmpty()) cache.remove(undep.userVersion());
                            }

                            if (log.isInfoEnabled())
                              log.info(
                                  "Undeployed class loader due to deployment mode change, "
                                      + "user version change, or hot redeployment: "
                                      + undep);
                          }
                        }

                        // Outside synchronization.
                        if (removed) undep.recordUndeployed(null);
                      }
                    });
          }
        }
      }
    }

    if (newDep != null) {
      List<SharedDeployment> list =
          F.addIfAbsent(cache, meta.userVersion(), F.<SharedDeployment>newList());

      assert list != null;

      list.add(newDep);
    }

    return F.t(true, newDep);
  }
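  /*
   * Hedged sketch, not part of the example above: it restates the "check-then-undeploy after a
   * delay" idea that the GridTimeoutObject registration implements, using a plain
   * java.util.concurrent.ScheduledExecutorService. The Undeployable interface and method names
   * are hypothetical stand-ins for SharedDeployment, assumed only for this illustration.
   */
  interface Undeployable {
    boolean isUndeployed();

    void undeploy();
  }

  static ScheduledFuture<?> scheduleUndeploy(
      ScheduledExecutorService timer, final Undeployable dep, final Object mux, long delayMs) {
    return timer.schedule(
        new Runnable() {
          @Override
          public void run() {
            // Mirror the mux-guarded check-then-undeploy performed in onTimeout() above.
            synchronized (mux) {
              if (!dep.isUndeployed()) dep.undeploy();
            }
          }
        },
        delayMs,
        TimeUnit.MILLISECONDS);
  }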
  /** {@inheritDoc} */
  @Override
  public GridFuture<GridCacheTxEx<K, V>> prepareAsync() {
    GridNearTxPrepareFuture<K, V> fut = prepFut.get();

    if (fut == null) {
      // Future must be created before any exception can be thrown.
      if (!prepFut.compareAndSet(null, fut = new GridNearTxPrepareFuture<K, V>(cctx, this)))
        return prepFut.get();
    } else
      // Prepare was called explicitly.
      return fut;

    if (!state(PREPARING)) {
      if (setRollbackOnly()) {
        if (timedOut())
          fut.onError(
              new GridCacheTxTimeoutException(
                  "Transaction timed out and was rolled back: " + this));
        else
          fut.onError(
              new GridException(
                  "Invalid transaction state for prepare [state="
                      + state()
                      + ", tx="
                      + this
                      + ']'));
      } else
        fut.onError(
            new GridCacheTxRollbackException(
                "Invalid transaction state for prepare [state=" + state() + ", tx=" + this + ']'));

      return fut;
    }

    // For pessimistic mode we don't distribute prepare request.
    if (pessimistic()) {
      try {
        userPrepare();

        if (!state(PREPARED)) {
          setRollbackOnly();

          fut.onError(
              new GridException(
                  "Invalid transaction state for commit [state=" + state() + ", tx=" + this + ']'));

          return fut;
        }

        fut.complete();

        return fut;
      } catch (GridException e) {
        fut.onError(e);

        return fut;
      }
    }

    try {
      cctx.topology().readLock();

      try {
        topologyVersion(cctx.topology().topologyVersion());

        userPrepare();
      } finally {
        cctx.topology().readUnlock();
      }

      // This will attempt to locally commit
      // EVENTUALLY CONSISTENT transactions.
      fut.onPreparedEC();

      // Make sure to add future before calling prepare.
      cctx.mvcc().addFuture(fut);

      fut.prepare();
    } catch (GridCacheTxTimeoutException e) {
      fut.onError(e);
    } catch (GridCacheTxOptimisticException e) {
      fut.onError(e);
    } catch (GridException e) {
      setRollbackOnly();

      String msg = "Failed to prepare transaction (will attempt rollback): " + this;

      log.error(msg, e);

      try {
        rollback();
      } catch (GridException e1) {
        U.error(log, "Failed to rollback transaction: " + this, e1);
      }

      fut.onError(new GridCacheTxRollbackException(msg, e));
    }

    return fut;
  }
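  /*
   * Hedged sketch, not part of the example above: prepareAsync() creates its future with
   * compareAndSet so that concurrent callers share a single future and every failure is
   * reported through it. A minimal version of the same idiom using only standard JDK classes
   * (java.util.concurrent.atomic.AtomicReference, java.util.concurrent.CompletableFuture);
   * the field and method names here are hypothetical.
   */
  static final AtomicReference<CompletableFuture<String>> prepFutRef =
      new AtomicReference<CompletableFuture<String>>();

  static CompletableFuture<String> prepareOnce() {
    CompletableFuture<String> fut = prepFutRef.get();

    // Prepare was already started by another caller.
    if (fut != null) return fut;

    fut = new CompletableFuture<String>();

    // Publish the future before doing any work that can fail, so errors have somewhere to go.
    if (!prepFutRef.compareAndSet(null, fut)) return prepFutRef.get();

    try {
      fut.complete("PREPARED"); // Stand-in for the actual prepare work.
    } catch (RuntimeException e) {
      fut.completeExceptionally(e); // Analogous to fut.onError(e) above.
    }

    return fut;
  }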
  /**
   * @param gridName Grid instance name. Can be {@code null}.
   * @param exec Executor service.
   * @param parentLog Parent logger.
   */
  static void runBackgroundCheck(String gridName, Executor exec, GridLogger parentLog) {
    assert exec != null;
    assert parentLog != null;

    final GridLogger log = parentLog.getLogger(GridDiagnostic.class);

    try {
      exec.execute(
          new GridWorker(gridName, "grid-diagnostic-1", log) {
            @Override
            public void body() {
              try {
                InetAddress localHost = U.getLocalHost();

                if (!localHost.isReachable(REACH_TIMEOUT)) {
                  U.warn(
                      log,
                      "Default local host is unreachable. This may lead to delays on "
                          + "grid network operations. Check your OS network setting to correct it.",
                      "Default local host is unreachable.");
                }
              } catch (IOException ignore) {
                U.warn(
                    log,
                    "Failed to perform network diagnostics. It is usually caused by serious "
                        + "network configuration problem. Check your OS network setting to correct it.",
                    "Failed to perform network diagnostics.");
              }
            }
          });

      exec.execute(
          new GridWorker(gridName, "grid-diagnostic-2", log) {
            @Override
            public void body() {
              try {
                InetAddress localHost = U.getLocalHost();

                if (localHost.isLoopbackAddress()) {
                  U.warn(
                      log,
                      "Default local host is a loopback address. This can be a sign of "
                          + "potential network configuration problem.",
                      "Default local host is a loopback address.");
                }
              } catch (IOException ignore) {
                U.warn(
                    log,
                    "Failed to perform network diagnostics. It is usually caused by serious "
                        + "network configuration problem. Check your OS network setting to correct it.",
                    "Failed to perform network diagnostics.");
              }
            }
          });

      exec.execute(
          new GridWorker(gridName, "grid-diagnostic-3", log) {
            @Override
            public void body() {
              String jdkStrLow = U.jdkString().toLowerCase();

              if (jdkStrLow.contains("jrockit") && jdkStrLow.contains("1.5.")) {
                U.warn(
                    log,
                    "BEA JRockit VM ver. 1.5.x has shown problems with NIO functionality in our "
                        + "tests that were not reproducible in other VMs. We recommend using Sun VM. Should you "
                        + "have further questions please contact us at [email protected]",
                    "BEA JRockit VM ver. 1.5.x is not supported.");
              }
            }
          });

      exec.execute(
          new GridWorker(gridName, "grid-diagnostic-4", log) {
            @Override
            public void body() {
              // Sufficiently tested OS.
              if (!U.isSufficientlyTestedOs()) {
                U.warn(
                    log,
                    "This operating system has been tested less rigorously: "
                        + U.osString()
                        + ". Our team will appreciate the feedback if you experience any problems running "
                        + "gridgain in this environment. You can always send your feedback to [email protected]",
                    "This OS is tested less rigorously: " + U.osString());
              }
            }
          });

      exec.execute(
          new GridWorker(gridName, "grid-diagnostic-5", log) {
            @Override
            public void body() {
              // Fix for GG-1075.
              if (U.allLocalMACs() == null)
                U.warn(
                    log,
                    "No live network interfaces detected. If IP-multicast discovery is used - "
                        + "make sure to add 127.0.0.1 as a local address.",
                    "No live network interfaces. Add 127.0.0.1 as a local address.");
            }
          });

      exec.execute(
          new GridWorker(gridName, "grid-diagnostic-6", log) {
            @Override
            public void body() {
              if (System.getProperty("com.sun.management.jmxremote") != null) {
                String portStr = System.getProperty("com.sun.management.jmxremote.port");

                if (portStr != null)
                  try {
                    Integer.parseInt(portStr);

                    return;
                  } catch (NumberFormatException ignore) {
                  }

                U.warn(
                    log,
                    "JMX remote management is enabled but JMX port is either not set or invalid. "
                        + "Check system property 'com.sun.management.jmxremote.port' to make sure it specifies "
                        + "valid TCP/IP port.",
                    "JMX remote port is invalid - JMX management is off.");
              }
            }
          });
    } catch (RejectedExecutionException e) {
      U.error(
          log,
          "Failed to start background network diagnostics check due to thread pool execution "
              + "rejection. In most cases it indicates a severe configuration problem with GridGain.",
          "Failed to start background network diagnostics.",
          e);
    }
  }
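  /*
   * Hedged sketch, not part of the example above: runBackgroundCheck() submits each diagnostic
   * as an independent task so a slow probe never blocks startup. A minimal standalone check
   * using only standard JDK classes (java.net.InetAddress, java.util.concurrent.Executor);
   * the 2000 ms reachability timeout is an arbitrary value chosen for the sketch.
   */
  static void runLocalHostCheck(Executor exec) {
    exec.execute(
        new Runnable() {
          @Override
          public void run() {
            try {
              InetAddress localHost = InetAddress.getLocalHost();

              // An unreachable or loopback-only local host usually points to OS network misconfiguration.
              if (!localHost.isReachable(2000))
                System.err.println("Default local host is unreachable: " + localHost);

              if (localHost.isLoopbackAddress())
                System.err.println("Default local host is a loopback address: " + localHost);
            } catch (IOException e) {
              System.err.println("Failed to perform network diagnostics: " + e.getMessage());
            }
          }
        });
  }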
  /**
   * Process HTTP request.
   *
   * @param act Action.
   * @param req Http request.
   * @param res Http response.
   */
  private void processRequest(String act, HttpServletRequest req, HttpServletResponse res) {
    res.setContentType("application/json");
    res.setCharacterEncoding("UTF-8");

    GridRestCommand cmd = command(req);

    if (cmd == null) {
      res.setStatus(HttpServletResponse.SC_BAD_REQUEST);

      return;
    }

    if (!authChecker.apply(req.getHeader("X-Signature"))) {
      res.setStatus(HttpServletResponse.SC_UNAUTHORIZED);

      return;
    }

    GridRestResponse cmdRes;

    Map<String, Object> params = parameters(req);

    try {
      GridRestRequest cmdReq = createRequest(cmd, params, req);

      if (log.isDebugEnabled()) log.debug("Initialized command request: " + cmdReq);

      cmdRes = hnd.handle(cmdReq);

      if (cmdRes == null)
        throw new IllegalStateException("Received null result from handler: " + hnd);

      byte[] sesTok = cmdRes.sessionTokenBytes();

      if (sesTok != null) cmdRes.setSessionToken(U.byteArray2HexString(sesTok));

      res.setStatus(HttpServletResponse.SC_OK);
    } catch (Exception e) {
      res.setStatus(HttpServletResponse.SC_OK);

      U.error(log, "Failed to process HTTP request [action=" + act + ", req=" + req + ']', e);

      cmdRes = new GridRestResponse(STATUS_FAILED, e.getMessage());
    } catch (Throwable e) {
      U.error(log, "Failed to process HTTP request [action=" + act + ", req=" + req + ']', e);

      throw e;
    }

    JsonConfig cfg = new GridJettyJsonConfig();

    // Workaround to avoid an unneeded transformation of a string into a JSON object.
    if (cmdRes.getResponse() instanceof String)
      cfg.registerJsonValueProcessor(cmdRes.getClass(), "response", SKIP_STR_VAL_PROC);

    if (cmdRes.getResponse() instanceof GridClientTaskResultBean
        && ((GridClientTaskResultBean) cmdRes.getResponse()).getResult() instanceof String)
      cfg.registerJsonValueProcessor(cmdRes.getResponse().getClass(), "result", SKIP_STR_VAL_PROC);

    JSON json;

    try {
      json = JSONSerializer.toJSON(cmdRes, cfg);
    } catch (JSONException e) {
      U.error(log, "Failed to convert response to JSON: " + cmdRes, e);

      json = JSONSerializer.toJSON(new GridRestResponse(STATUS_FAILED, e.getMessage()), cfg);
    }

    try {
      if (log.isDebugEnabled())
        log.debug("Parsed command response into JSON object: " + json.toString(2));

      res.getWriter().write(json.toString());

      if (log.isDebugEnabled())
        log.debug(
            "Processed HTTP request [action=" + act + ", jsonRes=" + cmdRes + ", req=" + req + ']');
    } catch (IOException e) {
      U.error(log, "Failed to send HTTP response: " + json.toString(2), e);
    }
  }
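  /*
   * Hedged sketch, not part of the example above: processRequest() always answers with a JSON
   * body and reports handler failures as a failed payload rather than an HTTP error status.
   * A minimal servlet-style variant of that pattern; the Callable handler and the
   * successStatus/error field names are hypothetical stand-ins, not the GridGain REST format.
   */
  static void writeJsonResponse(HttpServletResponse res, Callable<String> handler)
      throws IOException {
    res.setContentType("application/json");
    res.setCharacterEncoding("UTF-8");

    String json;

    try {
      // Successful commands are wrapped into a success payload.
      json = "{\"successStatus\":0,\"response\":\"" + handler.call() + "\"}";
    } catch (Exception e) {
      // Failures are still answered with HTTP 200 and an error payload in the body.
      json = "{\"successStatus\":1,\"error\":\"" + e.getMessage() + "\"}";
    }

    res.setStatus(HttpServletResponse.SC_OK);
    res.getWriter().write(json);
  }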
  /** {@inheritDoc} */
  @SuppressWarnings("ErrorNotRethrown")
  @Override
  public IpcEndpoint accept() throws IgniteCheckedException {
    while (!Thread.currentThread().isInterrupted()) {
      Socket sock = null;

      boolean accepted = false;

      try {
        sock = srvSock.accept();

        accepted = true;

        InputStream inputStream = sock.getInputStream();
        ObjectInputStream in = new ObjectInputStream(inputStream);

        ObjectOutputStream out = new ObjectOutputStream(sock.getOutputStream());

        IpcSharedMemorySpace inSpace = null;

        IpcSharedMemorySpace outSpace = null;

        boolean err = true;

        try {
          IpcSharedMemoryInitRequest req = (IpcSharedMemoryInitRequest) in.readObject();

          if (log.isDebugEnabled()) log.debug("Processing request: " + req);

          IgnitePair<String> p = inOutToken(req.pid(), size);

          String file1 = p.get1();
          String file2 = p.get2();

          assert file1 != null;
          assert file2 != null;

          // Create tokens.
          new File(file1).createNewFile();
          new File(file2).createNewFile();

          if (log.isDebugEnabled()) log.debug("Created token files: " + p);

          inSpace = new IpcSharedMemorySpace(file1, req.pid(), pid, size, true, log);

          outSpace = new IpcSharedMemorySpace(file2, pid, req.pid(), size, false, log);

          IpcSharedMemoryClientEndpoint ret =
              new IpcSharedMemoryClientEndpoint(inSpace, outSpace, log);

          out.writeObject(
              new IpcSharedMemoryInitResponse(
                  file2, outSpace.sharedMemoryId(), file1, inSpace.sharedMemoryId(), pid, size));

          err = !in.readBoolean();

          endpoints.add(ret);

          return ret;
        } catch (UnsatisfiedLinkError e) {
          throw IpcSharedMemoryUtils.linkError(e);
        } catch (IOException e) {
          if (log.isDebugEnabled())
            log.debug(
                "Failed to process incoming connection "
                    + "(was connection closed by another party):"
                    + e.getMessage());
        } catch (ClassNotFoundException e) {
          U.error(log, "Failed to process incoming connection.", e);
        } catch (ClassCastException e) {
          String msg =
              "Failed to process incoming connection (most probably, shared memory "
                  + "rest endpoint has been configured by mistake).";

          LT.warn(log, null, msg);

          sendErrorResponse(out, e);
        } catch (IpcOutOfSystemResourcesException e) {
          if (!omitOutOfResourcesWarn) LT.warn(log, null, OUT_OF_RESOURCES_MSG);

          sendErrorResponse(out, e);
        } catch (IgniteCheckedException e) {
          LT.error(log, e, "Failed to process incoming shared memory connection.");

          sendErrorResponse(out, e);
        } finally {
          // Exception has been thrown, need to free system resources.
          if (err) {
            if (inSpace != null) inSpace.forceClose();

            // Safety.
            if (outSpace != null) outSpace.forceClose();
          }
        }
      } catch (IOException e) {
        if (!Thread.currentThread().isInterrupted() && !accepted)
          throw new IgniteCheckedException("Failed to accept incoming connection.", e);

        if (!closed)
          LT.error(
              log, null, "Failed to process incoming shared memory connection: " + e.getMessage());
      } finally {
        U.closeQuiet(sock);
      }
    } // while

    throw new IgniteInterruptedCheckedException("Socket accept was interrupted.");
  }
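  /*
   * Hedged sketch, not part of the example above: accept() loops on ServerSocket.accept(),
   * performs an object-stream handshake, and always releases the socket. A minimal version of
   * the same accept-and-handshake loop using java.net.ServerSocket and java.io object streams;
   * the request object and the "INIT-RESPONSE" reply are hypothetical stand-ins for the shared
   * memory init messages.
   */
  static Object acceptHandshake(ServerSocket srvSock) throws IOException, ClassNotFoundException {
    while (!Thread.currentThread().isInterrupted()) {
      Socket sock = srvSock.accept();

      try {
        // The client writes its init request first, then waits for the response.
        ObjectInputStream in = new ObjectInputStream(sock.getInputStream());
        ObjectOutputStream out = new ObjectOutputStream(sock.getOutputStream());

        Object req = in.readObject();

        out.writeObject("INIT-RESPONSE");
        out.flush();

        return req;
      } finally {
        sock.close(); // Always release the socket, mirroring U.closeQuiet(sock) above.
      }
    }

    throw new InterruptedIOException("Socket accept was interrupted.");
  }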