/**
   * Creates a new HTTP request handler.
   *
   * @param hnd Handler.
   * @param authChecker Authentication checking closure.
   * @param log Logger.
   */
  GridJettyRestHandler(
      GridRestProtocolHandler hnd, GridClosure<String, Boolean> authChecker, GridLogger log) {
    assert hnd != null;
    assert log != null;

    this.hnd = hnd;
    this.log = log;
    this.authChecker = authChecker;

    // Init default page and favicon.
    try {
      initDefaultPage();

      if (log.isDebugEnabled()) log.debug("Initialized default page.");
    } catch (IOException e) {
      U.warn(log, "Failed to initialize default page: " + e.getMessage());
    }

    try {
      initFavicon();

      if (log.isDebugEnabled())
        log.debug(
            favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null.");
    } catch (IOException e) {
      U.warn(log, "Failed to initialize favicon: " + e.getMessage());
    }
  }
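  /*
   * A minimal wiring sketch for the constructor above. The token check is
   * hypothetical, and it assumes GridClosure<E, R> exposes a single
   * R apply(E e) method; hnd and log are taken from the surrounding context.
   */
  GridClosure<String, Boolean> authChecker = new GridClosure<String, Boolean>() {
    @Override public Boolean apply(String sesTok) {
      // Accept only non-empty session tokens (illustrative policy).
      return sesTok != null && !sesTok.isEmpty();
    }
  };

  GridJettyRestHandler jettyHnd = new GridJettyRestHandler(hnd, authChecker, log);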
  /** {@inheritDoc} */
  @Override
  public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec =
        new ThreadPoolExecutor(
            threadsCnt,
            threadsCnt,
            0L,
            MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(batchQueueSize),
            new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
      while (iter.hasNext()) {
        if (Thread.currentThread().isInterrupted()) {
          U.warn(log, "Working thread was interrupted while loading data.");

          break;
        }

        buf.add(iter.next());

        if (buf.size() == batchSize) {
          exec.submit(new Worker(c, buf, args));

          buf = new ArrayList<>(batchSize);
        }
      }

      if (!buf.isEmpty()) exec.submit(new Worker(c, buf, args));
    } catch (RejectedExecutionException ignored) {
      // Can't happen: the custom rejection handler blocks instead of rejecting.
      assert false : "RejectedExecutionException should never be thrown here.";
    } finally {
      exec.shutdown();

      try {
        exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

        Thread.currentThread().interrupt();
      }
    }
  }
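/*
 * A self-contained sketch of the batching pattern above, using only
 * java.util.concurrent: a fixed pool over a bounded queue, plus a rejection
 * handler that blocks the producer instead of throwing, so a full queue
 * turns into backpressure. All names here are illustrative.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

public class BatchLoader {
  public static void main(String[] args) throws InterruptedException {
    final int threads = 4, queueSize = 8, batchSize = 100;

    ExecutorService exec = new ThreadPoolExecutor(
        threads, threads, 0L, TimeUnit.MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(queueSize),
        new RejectedExecutionHandler() {
          @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor pool) {
            try {
              if (pool.isShutdown())
                throw new RejectedExecutionException("Pool is shut down.");

              pool.getQueue().put(r); // Block until a queue slot frees up.
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            }
          }
        });

    List<Integer> buf = new ArrayList<>(batchSize);

    try {
      for (int i = 0; i < 10_000; i++) {
        buf.add(i);

        if (buf.size() == batchSize) {
          final List<Integer> batch = buf;

          exec.submit(new Runnable() {
            @Override public void run() {
              System.out.println("Processed batch of " + batch.size());
            }
          });

          buf = new ArrayList<>(batchSize);
        }
      }

      // A real loader would also flush the final partial batch here.
    } finally {
      exec.shutdown();
      exec.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    }
  }
}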
  /** @return Nodes to execute on. */
  private Collection<ClusterNode> nodes() {
    CacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
      case LOCAL:
        if (prj != null)
          U.warn(
              log,
              "Ignoring query projection because it's executed over LOCAL cache "
                  + "(only local node will be queried): "
                  + this);

        return Collections.singletonList(cctx.localNode());

      case REPLICATED:
        if (prj != null || partition() != null) return nodes(cctx, prj, partition());

        return cctx.affinityNode()
            ? Collections.singletonList(cctx.localNode())
            : Collections.singletonList(F.rand(nodes(cctx, null, partition())));

      case PARTITIONED:
        return nodes(cctx, prj, partition());

      default:
        throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
  }
  /** @return Nodes to execute on. */
  private Collection<GridNode> nodes() {
    GridCacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
      case LOCAL:
        if (prj != null)
          U.warn(
              log,
              "Ignoring query projection because it's executed over LOCAL cache "
                  + "(only local node will be queried): "
                  + this);

        return Collections.singletonList(cctx.localNode());

      case REPLICATED:
        if (prj != null) return nodes(cctx, prj);

        GridCacheDistributionMode mode = cctx.config().getDistributionMode();

        return mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED
            ? Collections.singletonList(cctx.localNode())
            : Collections.singletonList(F.rand(nodes(cctx, null)));

      case PARTITIONED:
        return nodes(cctx, prj);

      default:
        throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
  }
  /** Warns if the listener return status is being reversed. */
  private void checkReversing() {
    if (!keepGoing)
      U.warn(
          log,
          "Suspect logic - reversing listener return status (was 'true', then 'false', "
              + "now 'true' again).");
  }
  /**
   * Processes cache query response.
   *
   * @param sndId Sender node ID.
   * @param res Query response.
   */
  @SuppressWarnings("unchecked")
  private void processQueryResponse(UUID sndId, GridCacheQueryResponse res) {
    if (log.isDebugEnabled()) log.debug("Received query response: " + res);

    GridCacheQueryFutureAdapter fut = getQueryFuture(res.requestId());

    if (fut != null)
      if (res.fields())
        ((GridCacheDistributedFieldsQueryFuture) fut)
            .onPage(
                sndId,
                res.metadata(),
                (Collection<Map<String, Object>>) ((Collection) res.data()),
                res.error(),
                res.isFinished());
      else fut.onPage(sndId, res.data(), res.error(), res.isFinished());
    else if (!cancelled.contains(res.requestId()))
      U.warn(
          log,
          "Received response for finished or unknown query [rmtNodeId="
              + sndId
              + ", res="
              + res
              + ']');
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void listenAsync(@Nullable final GridInClosure<? super GridFuture<R>> lsnr) {
    if (lsnr != null) {
      checkValid();

      boolean done;

      synchronized (mux) {
        done = this.done;

        if (!done) lsnrs.add(lsnr);
      }

      if (done) {
        try {
          if (syncNotify) notifyListener(lsnr);
          else
            ctx.closure()
                .runLocalSafe(
                    new GPR() {
                      @Override
                      public void run() {
                        notifyListener(lsnr);
                      }
                    },
                    true);
        } catch (IllegalStateException ignore) {
          U.warn(
              null,
              "Future notification will not proceed because grid is stopped: " + ctx.gridName());
        }
      }
    }
  }
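/*
 * The listen-or-notify idiom above, in miniature: under the mutex, either
 * record the listener (future not yet done) or flag it for immediate
 * notification (future already done), so no completion is ever missed, and
 * notification happens outside the lock. Plain-Java sketch with
 * illustrative names; Consumer stands in for GridInClosure.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class TinyFuture<R> {
  private final Object mux = new Object();
  private final List<Consumer<R>> lsnrs = new ArrayList<>();
  private boolean done;
  private R res;

  public void listen(Consumer<R> lsnr) {
    boolean notifyNow;

    synchronized (mux) {
      notifyNow = done;

      if (!done) lsnrs.add(lsnr);
    }

    // Notify outside the lock so user code can't block other listeners.
    if (notifyNow) lsnr.accept(res);
  }

  public void complete(R r) {
    List<Consumer<R>> toNotify;

    synchronized (mux) {
      if (done) return;

      done = true;
      res = r;

      toNotify = new ArrayList<>(lsnrs);

      lsnrs.clear();
    }

    for (Consumer<R> l : toNotify) l.accept(r);
  }
}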
  /**
   * @param nodeId Sender node ID.
   * @param msg Finish transaction response.
   */
  private void processFinishResponse(UUID nodeId, GridDistributedTxFinishResponse<K, V> msg) {
    GridReplicatedTxCommitFuture<K, V> fut =
        (GridReplicatedTxCommitFuture<K, V>)
            ctx.mvcc().<GridCacheTx>future(msg.xid().id(), msg.futureId());

    if (fut != null) fut.onResult(nodeId);
    else U.warn(log, "Received finish response for unknown transaction: " + msg);
  }
  /**
   * @param nodeId Sender node ID.
   * @param res Lock response.
   */
  void onResult(UUID nodeId, GridNearLockResponse<K, V> res) {
    if (!isDone()) {
      if (log.isDebugEnabled())
        log.debug(
            "Received lock response from node [nodeId="
                + nodeId
                + ", res="
                + res
                + ", fut="
                + this
                + ']');

      for (GridFuture<Boolean> fut : pending()) {
        if (isMini(fut)) {
          MiniFuture mini = (MiniFuture) fut;

          if (mini.futureId().equals(res.miniId())) {
            assert mini.node().id().equals(nodeId);

            if (log.isDebugEnabled())
              log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');

            mini.onResult(res);

            if (log.isDebugEnabled())
              log.debug(
                  "Future after processed lock response [fut="
                      + this
                      + ", mini="
                      + mini
                      + ", res="
                      + res
                      + ']');

            return;
          }
        }
      }

      U.warn(
          log,
          "Failed to find mini future for response (perhaps due to stale message) [res="
              + res
              + ", fut="
              + this
              + ']');
    } else if (log.isDebugEnabled())
      log.debug(
          "Ignoring lock response from node (future is done) [nodeId="
              + nodeId
              + ", res="
              + res
              + ", fut="
              + this
              + ']');
  }
    /** {@inheritDoc} */
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
      try {
        if (executor.isShutdown()) throw new RejectedExecutionException();
        else executor.getQueue().put(r);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while loading data.");

        Thread.currentThread().interrupt();
      }
    }
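    /*
     * Design note: blocking inside rejectedExecution() turns pool saturation
     * into producer backpressure, which is what lets the loadCache snippet
     * earlier assert that RejectedExecutionException never escapes submit().
     * The isShutdown() check matters: put() on a shut-down pool's queue
     * could park the producer forever, so rejection is rethrown in that case.
     */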
  /**
   * Adds a new participant to the deployment.
   *
   * @param dep Shared deployment.
   * @param meta Request metadata.
   * @return {@code True} if the participant was added.
   */
  private boolean addParticipant(SharedDeployment dep, GridDeploymentMetadata meta) {
    assert dep != null;
    assert meta != null;

    assert Thread.holdsLock(mux);

    if (!checkModeMatch(dep, meta)) return false;

    if (meta.participants() != null) {
      for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet()) {
        dep.addParticipant(e.getKey(), e.getValue().get1(), e.getValue().get2());

        if (log.isDebugEnabled())
          log.debug(
              "Added new participant [nodeId="
                  + e.getKey()
                  + ", clsLdrId="
                  + e.getValue().get1()
                  + ", seqNum="
                  + e.getValue().get2()
                  + ']');
      }
    }

    if (dep.deployMode() == CONTINUOUS || meta.participants() == null) {
      if (!dep.addParticipant(meta.senderNodeId(), meta.classLoaderId(), meta.sequenceNumber())) {
        U.warn(
            log,
            "Failed to create shared mode deployment "
                + "(requested class loader was already undeployed, did sender node leave grid?) "
                + "[clsLdrId="
                + meta.classLoaderId()
                + ", senderNodeId="
                + meta.senderNodeId()
                + ']');

        return false;
      }

      if (log.isDebugEnabled())
        log.debug(
            "Added new participant [nodeId="
                + meta.senderNodeId()
                + ", clsLdrId="
                + meta.classLoaderId()
                + ", seqNum="
                + meta.sequenceNumber()
                + ']');
    }

    return true;
  }
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Override
  public void onDiscoveryDataReceived(UUID nodeId, UUID rmtNodeId, Serializable data) {
    Map<String, Serializable> discData = (Map<String, Serializable>) data;

    if (discData != null) {
      for (Map.Entry<String, Serializable> e : discData.entrySet()) {
        PluginProvider provider = plugins.get(e.getKey());

        if (provider != null) provider.receiveDiscoveryData(nodeId, e.getValue());
        else U.warn(log, "Received discovery data for unknown plugin: " + e.getKey());
      }
    }
  }
  /**
   * Checks if deployment modes match.
   *
   * @param dep Shared deployment.
   * @param meta Request metadata.
   * @return {@code True} if shared deployment modes match.
   */
  private boolean checkModeMatch(GridDeploymentInfo dep, GridDeploymentMetadata meta) {
    if (dep.deployMode() != meta.deploymentMode()) {
      U.warn(
          log,
          "Received invalid deployment mode (will not deploy, make sure that all nodes "
              + "executing the same classes in shared mode have identical GridDeploymentMode parameter) [mode="
              + meta.deploymentMode()
              + ", expected="
              + dep.deployMode()
              + ']');

      return false;
    }

    return true;
  }
  /**
   * Processes lock response.
   *
   * @param nodeId Sender node ID.
   * @param res Lock response.
   */
  private void processLockResponse(UUID nodeId, GridDistributedLockResponse<K, V> res) {
    GridReplicatedLockFuture<K, V> fut = futurex(res.lockId(), res.futureId());

    if (fut == null) {
      U.warn(log, "Received lock response for non-existing future (will ignore): " + res);
    } else {
      fut.onResult(nodeId, res);

      if (fut.isDone()) {
        ctx.mvcc().removeFuture(fut);

        if (log.isDebugEnabled())
          log.debug("Received all replies for future (future was removed): " + fut);
      }
    }
  }
    /** @param e Error. */
    void onResult(Throwable e) {
      if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
          log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

        // Fail.
        onDone(e);
      } else
        U.warn(
            log,
            "Received error after another result has been processed [fut="
                + GridNearLockFuture.this
                + ", mini="
                + this
                + ']',
            e);
    }
  private void dumpPendingObjects() {
    U.warn(
        log,
        "Failed to wait for partition release future. Dumping pending objects that might be the cause: "
            + cctx.localNodeId());

    U.warn(log, "Pending transactions:");

    for (IgniteInternalTx tx : cctx.tm().activeTransactions()) U.warn(log, ">>> " + tx);

    U.warn(log, "Pending explicit locks:");

    for (GridCacheExplicitLockSpan lockSpan : cctx.mvcc().activeExplicitLocks())
      U.warn(log, ">>> " + lockSpan);

    U.warn(log, "Pending cache futures:");

    for (GridCacheFuture<?> fut : cctx.mvcc().activeFutures()) U.warn(log, ">>> " + fut);

    U.warn(log, "Pending atomic cache futures:");

    for (GridCacheFuture<?> fut : cctx.mvcc().atomicFutures()) U.warn(log, ">>> " + fut);
  }
  /**
   * Sends a delete message to all meta cache nodes in the grid.
   *
   * @param msg Message to send.
   */
  private void sendDeleteMessage(IgfsDeleteMessage msg) {
    assert msg != null;

    Collection<ClusterNode> nodes = meta.metaCacheNodes();

    for (ClusterNode node : nodes) {
      try {
        igfsCtx.send(node, topic, msg, GridIoPolicy.SYSTEM_POOL);
      } catch (IgniteCheckedException e) {
        U.warn(
            log,
            "Failed to send IGFS delete message to node [nodeId="
                + node.id()
                + ", msg="
                + msg
                + ", err="
                + e.getMessage()
                + ']');
      }
    }
  }
  /** {@inheritDoc} */
  @Override
  public void close() {
    closed = true;

    U.closeQuiet(srvSock);

    if (gcWorker != null) {
      U.cancel(gcWorker);

      // This method may be called from already interrupted thread.
      // Need to ensure cleaning on close.
      boolean interrupted = Thread.interrupted();

      try {
        U.join(gcWorker);
      } catch (IgniteInterruptedCheckedException e) {
        U.warn(log, "Interrupted when stopping GC worker.", e);
      } finally {
        if (interrupted) Thread.currentThread().interrupt();
      }
    }
  }
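/*
 * The interrupt-status handling above, isolated: clear the flag so a
 * blocking join can proceed, then restore it so callers still observe the
 * interruption. Illustrative sketch; worker is any Thread.
 */
static void joinPreservingInterrupt(Thread worker) {
  boolean interrupted = Thread.interrupted(); // Clears the current flag.

  try {
    worker.join();
  } catch (InterruptedException ignored) {
    interrupted = true; // Interrupted again while joining.
  } finally {
    if (interrupted) Thread.currentThread().interrupt(); // Restore the flag.
  }
}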
  /** {@inheritDoc} */
  @Override
  public void start() throws GridException {
    if (ctx.config().isDaemon()) return;

    startSpi();

    if (enabled()) {
      getSpi()
          .setExternalCollisionListener(
              new GridCollisionExternalListener() {
                @Override
                public void onExternalCollision() {
                  GridCollisionExternalListener lsnr = extLsnr.get();

                  if (lsnr != null) lsnr.onExternalCollision();
                }
              });
    } else
      U.warn(log, "Collision resolution is disabled (all jobs will be activated upon arrival).");

    if (log.isDebugEnabled()) log.debug(startInfo());
  }
  /**
   * Notifies a single listener.
   *
   * @param lsnr Listener.
   */
  private void notifyListener(GridInClosure<? super GridFuture<R>> lsnr) {
    assert lsnr != null;

    try {
      lsnr.apply(this);
    } catch (IllegalStateException ignore) {
      U.warn(
          null,
          "Failed to notify listener (grid is stopped) [grid="
              + ctx.gridName()
              + ", lsnr="
              + lsnr
              + ']');
    } catch (RuntimeException | Error e) {
      U.error(log, "Failed to notify listener: " + lsnr, e);

      throw e;
    }
  }
  /** {@inheritDoc} */
  @Override
  public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
      throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null) prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
      throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
      Collection<GridNode> nodes = prj.nodes();

      GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

      assert node != null;

      if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
        if (node.id().equals(ctx.localNodeId()))
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on local node. "
                  + "Will execute query locally: "
                  + this);
        else
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on single node. "
                  + "Will execute query on remote node [qry="
                  + this
                  + ", node="
                  + node
                  + ']');
      }

      prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
      if (routineId != null)
        throw new IllegalStateException("Continuous query can't be executed twice.");

      guard.block();

      GridContinuousHandler hnd =
          new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb, filter, prjPred);

      routineId =
          ctx.kernalContext()
              .continuous()
              .startRoutine(hnd, bufSize, timeInterval, autoUnsubscribe, prj.predicate())
              .get();
    } finally {
      closeLock.unlock();
    }
  }
  /**
   * Starts exchange activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client needs to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order of equal or less than last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // Process any single messages received so far.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // Process any full messages received so far.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to class variable so it will be included into toString() method.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
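/*
 * The bounded-wait loop used above, in isolation: block on a future with a
 * finite timeout and dump diagnostics on every expiry instead of hanging
 * silently. Plain-Java sketch with illustrative names.
 */
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

static <T> T awaitWithDiagnostics(Future<T> fut, long timeoutMs, Runnable dumpDiag)
    throws ExecutionException, InterruptedException {
  while (true) {
    try {
      return fut.get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (TimeoutException ignored) {
      dumpDiag.run(); // E.g. dump pending transactions, locks and futures.
    }
  }
}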
  /** {@inheritDoc} */
  @SuppressWarnings({"CatchGenericClass", "ThrowableInstanceNeverThrown"})
  @Override
  public void finish(boolean commit) throws GridException {
    if (log.isDebugEnabled())
      log.debug("Finishing near local tx [tx=" + this + ", commit=" + commit + "]");

    if (commit) {
      if (!state(COMMITTING)) {
        GridCacheTxState state = state();

        if (state != COMMITTING && state != COMMITTED)
          throw new GridException(
              "Invalid transaction state for commit [state=" + state() + ", tx=" + this + ']');
        else {
          if (log.isDebugEnabled())
            log.debug(
                "Invalid transaction state for commit (another thread is committing): " + this);

          return;
        }
      }
    } else {
      if (!state(ROLLING_BACK)) {
        if (log.isDebugEnabled())
          log.debug(
              "Invalid transaction state for rollback [state=" + state() + ", tx=" + this + ']');

        return;
      }
    }

    GridException err = null;

    // Commit to DB first. This way if there is a failure, transaction
    // won't be committed.
    try {
      if (commit && !isRollbackOnly()) userCommit();
      else userRollback();
    } catch (GridException e) {
      err = e;

      commit = false;

      // If heuristic error.
      if (!isRollbackOnly()) {
        invalidate = true;

        U.warn(
            log,
            "Set transaction invalidation flag to true due to error [tx="
                + this
                + ", err="
                + err
                + ']');
      }
    }

    if (err != null) {
      state(UNKNOWN);

      throw err;
    } else {
      if (!state(commit ? COMMITTED : ROLLED_BACK)) {
        state(UNKNOWN);

        throw new GridException("Invalid transaction state for commit or rollback: " + this);
      }
    }
  }
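/*
 * A minimal sketch of the state(...) transition guard used above: a
 * compare-and-set on an enum ensures exactly one thread wins a transition
 * (e.g. to COMMITTING) while losers inspect the current state instead.
 * Illustrative names; not the actual transaction implementation.
 */
import java.util.concurrent.atomic.AtomicReference;

enum TxState { ACTIVE, COMMITTING, COMMITTED, ROLLING_BACK, ROLLED_BACK, UNKNOWN }

class TxStateMachine {
  private final AtomicReference<TxState> state = new AtomicReference<>(TxState.ACTIVE);

  /** @return {@code True} if this thread performed the transition. */
  boolean transition(TxState from, TxState to) {
    return state.compareAndSet(from, to);
  }

  TxState state() {
    return state.get();
  }
}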
  /**
   * Initializes store.
   *
   * @throws GridException If failed to initialize.
   */
  private void init() throws GridException {
    if (initGuard.compareAndSet(false, true)) {
      if (log.isDebugEnabled()) log.debug("Initializing cache store.");

      try {
        if (sesFactory != null)
          // Session factory has been provided - nothing to do.
          return;

        if (!F.isEmpty(hibernateCfgPath)) {
          try {
            URL url = new URL(hibernateCfgPath);

            sesFactory = new Configuration().configure(url).buildSessionFactory();

            if (log.isDebugEnabled()) log.debug("Configured session factory using URL: " + url);

            // Session factory has been successfully initialized.
            return;
          } catch (MalformedURLException e) {
            if (log.isDebugEnabled())
              log.debug("Caught malformed URL exception: " + e.getMessage());
          }

          // Provided path is not a valid URL. File?
          File cfgFile = new File(hibernateCfgPath);

          if (cfgFile.exists()) {
            sesFactory = new Configuration().configure(cfgFile).buildSessionFactory();

            if (log.isDebugEnabled())
              log.debug("Configured session factory using file: " + hibernateCfgPath);

            // Session factory has been successfully initialized.
            return;
          }

          // Provided path is not a file. Classpath resource?
          sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory();

          if (log.isDebugEnabled())
            log.debug("Configured session factory using classpath resource: " + hibernateCfgPath);
        } else {
          if (hibernateProps == null) {
            U.warn(
                log, "No Hibernate configuration has been provided for store (will use default).");

            hibernateProps = new Properties();

            hibernateProps.setProperty("hibernate.connection.url", DFLT_CONN_URL);
            hibernateProps.setProperty("hibernate.show_sql", DFLT_SHOW_SQL);
            hibernateProps.setProperty("hibernate.hbm2ddl.auto", DFLT_HBM2DDL_AUTO);
          }

          Configuration cfg = new Configuration();

          cfg.setProperties(hibernateProps);

          assert resourceAvailable(MAPPING_RESOURCE);

          cfg.addResource(MAPPING_RESOURCE);

          sesFactory = cfg.buildSessionFactory();

          if (log.isDebugEnabled())
            log.debug("Configured session factory using properties: " + hibernateProps);
        }
      } catch (HibernateException e) {
        throw new GridException("Failed to initialize store.", e);
      } finally {
        initLatch.countDown();
      }
    } else if (initLatch.getCount() > 0) U.await(initLatch);

    if (sesFactory == null) throw new GridException("Cache store was not properly initialized.");
  }
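/*
 * The initialize-once idiom above, reduced to its moving parts: the first
 * caller wins initGuard and performs initialization; every other caller
 * parks on the latch until initialization completes (successfully or not)
 * and then validates the result. Illustrative sketch with assumed names.
 */
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

class LazyInit<T> {
  private final AtomicBoolean initGuard = new AtomicBoolean();
  private final CountDownLatch initLatch = new CountDownLatch(1);

  private volatile T resource;

  T get(Callable<T> factory) throws Exception {
    if (initGuard.compareAndSet(false, true)) {
      try {
        resource = factory.call();
      } finally {
        initLatch.countDown(); // Release waiters even if initialization failed.
      }
    } else initLatch.await();

    T res = resource;

    if (res == null) throw new IllegalStateException("Resource was not properly initialized.");

    return res;
  }
}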
  /** {@inheritDoc} */
  @Nullable
  @SuppressWarnings({"UnusedCatchParameter"})
  @Override
  public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    GridDeployment dep;

    Class<?> cls = null;

    String alias = meta.alias();

    synchronized (mux) {
      // Validate metadata.
      assert meta.alias() != null;

      dep = getDeployment(meta.alias());

      if (dep != null) {
        if (log.isDebugEnabled()) {
          log.debug("Acquired deployment class from local cache: " + dep);
        }

        return dep;
      }

      GridDeploymentResource rsrc = spi.findResource(meta.alias());

      if (rsrc != null) {
        dep =
            deploy(
                ctx.config().getDeploymentMode(),
                rsrc.getClassLoader(),
                rsrc.getResourceClass(),
                alias);

        if (dep == null) {
          return null;
        }

        if (log.isDebugEnabled()) {
          log.debug("Acquired deployment class from SPI: " + dep);
        }
      }
      // Auto-deploy.
      else {
        ClassLoader ldr = meta.classLoader();

        if (ldr == null) {
          ldr = Thread.currentThread().getContextClassLoader();

          // Safety.
          if (ldr == null) {
            ldr = ctxLdr;
          }
        }

        // Don't auto-deploy locally in case of nested execution.
        if (ldr instanceof GridDeploymentClassLoader) {
          return null;
        }

        try {
          // Check that class can be loaded.
          cls = ldr.loadClass(meta.alias());

          spi.register(ldr, cls);

          rsrc = spi.findResource(alias);

          if (rsrc != null && rsrc.getResourceClass().equals(cls)) {
            if (log.isDebugEnabled()) {
              log.debug("Retrieved auto-loaded resource from spi: " + rsrc);
            }

            dep = deploy(ctx.config().getDeploymentMode(), ldr, cls, alias);

            if (dep == null) {
              return null;
            }
          } else {
            U.warn(
                log,
                "Failed to find resource from deployment SPI even after registering it: "
                    + meta.alias());

            return null;
          }
        } catch (ClassNotFoundException e) {
          if (log.isDebugEnabled()) {
            log.debug(
                "Failed to load class for local auto-deployment [ldr="
                    + ldr
                    + ", meta="
                    + meta
                    + ']');
          }

          return null;
        } catch (GridSpiException e) {
          U.error(log, "Failed to deploy local class: " + meta.alias(), e);

          return null;
        }
      }
    }

    if (cls != null) {
      recordDeploy(cls, alias, meta.isRecord());

      dep.addDeployedClass(cls, meta.className(), meta.alias());
    }

    if (log.isDebugEnabled()) {
      log.debug("Acquired deployment class: " + dep);
    }

    return dep;
  }
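/*
 * The class-loader fallback used above, in isolation: prefer the loader
 * supplied with the metadata, then the calling thread's context loader,
 * then a configured default. Illustrative sketch; dfltLdr stands in for
 * the ctxLdr field.
 */
static ClassLoader resolveLoader(ClassLoader metaLdr, ClassLoader dfltLdr) {
  ClassLoader ldr = metaLdr;

  if (ldr == null) {
    ldr = Thread.currentThread().getContextClassLoader();

    // Safety: the context class loader may be unset.
    if (ldr == null) ldr = dfltLdr;
  }

  return ldr;
}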
  /**
   * @param nodeId Primary node ID.
   * @param req Request.
   * @return Remote transaction.
   * @throws GridException If failed.
   * @throws GridDistributedLockCancelledException If lock has been cancelled.
   */
  @SuppressWarnings({"RedundantTypeArguments"})
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTxForFinish(
      UUID nodeId, GridDhtTxFinishRequest<K, V> req)
      throws GridException, GridDistributedLockCancelledException {
    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
      for (GridCacheTxEntry<K, V> txEntry : req.nearWrites()) {
        GridDistributedCacheEntry<K, V> entry = null;

        while (true) {
          try {
            entry = peekExx(txEntry.key());

            if (entry != null) {
              entry.keyBytes(txEntry.keyBytes());

              // Handle implicit locks for pessimistic transactions.
              tx = ctx.tm().tx(req.version());

              if (tx != null) {
                if (tx.local()) return null;

                if (tx.markFinalizing()) tx.addWrite(txEntry.key(), txEntry.keyBytes());
                else return null;
              } else {
                tx =
                    new GridNearTxRemote<K, V>(
                        nodeId,
                        req.nearNodeId(),
                        req.threadId(),
                        req.version(),
                        null,
                        PESSIMISTIC,
                        req.isolation(),
                        req.isInvalidate(),
                        0,
                        txEntry.key(),
                        txEntry.keyBytes(),
                        txEntry.value(),
                        txEntry.valueBytes(),
                        ctx);

                if (tx.empty()) return tx;

                tx = ctx.tm().onCreated(tx);

                if (tx == null || !ctx.tm().onStarted(tx))
                  throw new GridCacheTxRollbackException(
                      "Failed to acquire lock "
                          + "(transaction has been completed): "
                          + req.version());

                if (!tx.markFinalizing()) return null;
              }

              // Add remote candidate before reordering.
              if (txEntry.explicitVersion() == null)
                entry.addRemote(
                    req.nearNodeId(),
                    nodeId,
                    req.threadId(),
                    req.version(),
                    0,
                    tx.ec(),
                    /*tx*/ true,
                    tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  Collections.<GridCacheMvccCandidate<K>>emptyList(),
                  req.version(),
                  req.committedVersions(),
                  req.rolledbackVersions());
            }

            // Double-check in case the sender node left the grid.
            if (ctx.discovery().node(req.nearNodeId()) == null) {
              if (log.isDebugEnabled())
                log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

              if (tx != null) tx.rollback();

              return null;
            }

            // Entry is legit.
            break;
          } catch (GridCacheEntryRemovedException ignored) {
            assert entry.obsoleteVersion() != null
                : "Obsolete flag not set on removed entry: " + entry;

            if (log.isDebugEnabled())
              log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

            if (tx != null) {
              tx.clearEntry(entry.key());

              if (log.isDebugEnabled())
                log.debug(
                    "Cleared removed entry from remote transaction (will retry) [entry="
                        + entry
                        + ", tx="
                        + tx
                        + ']');
            }
          }
        }
      }
    } else {
      String err = "Failed to acquire deployment class loader for message: " + req;

      U.warn(log, err);

      throw new GridException(err);
    }

    return tx;
  }
  /**
   * @param nodeId Primary node ID.
   * @param req Request.
   * @return Remote transaction.
   * @throws GridException If failed.
   * @throws GridDistributedLockCancelledException If lock has been cancelled.
   */
  @SuppressWarnings({"RedundantTypeArguments"})
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
      throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
      for (int i = 0; i < nearKeyBytes.size(); i++) {
        byte[] bytes = nearKeyBytes.get(i);

        if (bytes == null) continue;

        K key = req.nearKeys().get(i);

        Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

        if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

        GridNearCacheEntry<K, V> entry = null;

        while (true) {
          try {
            entry = peekExx(key);

            if (entry != null) {
              entry.keyBytes(bytes);

              // Handle implicit locks for pessimistic transactions.
              if (req.inTx()) {
                tx = ctx.tm().tx(req.version());

                if (tx != null) tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                else {
                  tx =
                      new GridNearTxRemote<K, V>(
                          nodeId,
                          req.nearNodeId(),
                          req.threadId(),
                          req.version(),
                          null,
                          PESSIMISTIC,
                          req.isolation(),
                          req.isInvalidate(),
                          req.timeout(),
                          key,
                          bytes,
                          null, // Value.
                          null, // Value bytes.
                          ctx);

                  if (tx.empty()) return tx;

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + req.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  req.nodeId(),
                  nodeId,
                  req.threadId(),
                  req.version(),
                  req.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, req.version(), req.committedVersions(), req.rolledbackVersions());

              entry.orderOwned(req.version(), req.owned(entry.key()));
            }

            // Double-check in case the sender node left the grid.
            if (ctx.discovery().node(req.nodeId()) == null) {
              if (log.isDebugEnabled())
                log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

              if (tx != null) tx.rollback();

              return null;
            }

            // Entry is legit.
            break;
          } catch (GridCacheEntryRemovedException ignored) {
            assert entry.obsoleteVersion() != null
                : "Obsolete flag not set on removed entry: " + entry;

            if (log.isDebugEnabled())
              log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

            if (tx != null) {
              tx.clearEntry(entry.key());

              if (log.isDebugEnabled())
                log.debug(
                    "Cleared removed entry from remote transaction (will retry) [entry="
                        + entry
                        + ", tx="
                        + tx
                        + ']');
            }
          }
        }
      }
    } else {
      String err = "Failed to acquire deployment class loader for message: " + req;

      U.warn(log, err);

      throw new GridException(err);
    }

    return tx;
  }
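/*
 * The retry loop shared by both startRemoteTx variants, abstracted: apply
 * an operation to a cache entry and, if the entry was concurrently removed,
 * fetch a fresh one and retry until the operation lands on a live entry.
 * Illustrative sketch; EntryRemovedException stands in for
 * GridCacheEntryRemovedException.
 */
class EntryRemovedException extends Exception {}

interface Entry {
  void lock() throws EntryRemovedException;
}

interface EntryLookup {
  Entry entry(Object key);
}

static void lockWithRetry(EntryLookup cache, Object key) {
  while (true) {
    Entry entry = cache.entry(key); // Fresh entry on each attempt.

    try {
      entry.lock();

      break; // Entry is legit.
    } catch (EntryRemovedException ignored) {
      // Entry was removed concurrently; retry on a renewed entry.
    }
  }
}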
    /**
     * @param entries Entries to submit.
     * @param curFut Current future.
     * @throws GridInterruptedException If interrupted.
     */
    private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut)
        throws GridInterruptedException {
      assert entries != null;
      assert !entries.isEmpty();
      assert curFut != null;

      incrementActiveTasks();

      GridFuture<Object> fut;
      if (isLocNode) {
        fut =
            ctx.closure()
                .callLocalSafe(
                    new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater),
                    false);

        locFuts.add(fut);

        fut.listenAsync(
            new GridInClosure<GridFuture<Object>>() {
              @Override
              public void apply(GridFuture<Object> t) {
                try {
                  boolean rmv = locFuts.remove(t);

                  assert rmv;

                  curFut.onDone(t.get());
                } catch (GridException e) {
                  curFut.onDone(e);
                }
              }
            });
      } else {
        byte[] entriesBytes;

        try {
          entriesBytes = ctx.config().getMarshaller().marshal(entries);

          if (updaterBytes == null) {
            assert updater != null;

            updaterBytes = ctx.config().getMarshaller().marshal(updater);
          }

          if (topicBytes == null) topicBytes = ctx.config().getMarshaller().marshal(topic);
        } catch (GridException e) {
          U.error(log, "Failed to marshal (request will not be sent).", e);

          return;
        }

        GridDeployment dep = null;
        GridPeerDeployAware jobPda0 = null;

        if (ctx.deploy().enabled()) {
          try {
            jobPda0 = jobPda;

            assert jobPda0 != null;

            dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader());
          } catch (GridException e) {
            U.error(
                log,
                "Failed to deploy class (request will not be sent): " + jobPda0.deployClass(),
                e);

            return;
          }

          if (dep == null)
            U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass());
        }

        long reqId = idGen.incrementAndGet();

        fut = curFut;

        reqs.put(reqId, (GridFutureAdapter<Object>) fut);

        GridDataLoadRequest<Object, Object> req =
            new GridDataLoadRequest<>(
                reqId,
                topicBytes,
                cacheName,
                updaterBytes,
                entriesBytes,
                true,
                dep != null ? dep.deployMode() : null,
                dep != null ? jobPda0.deployClass().getName() : null,
                dep != null ? dep.userVersion() : null,
                dep != null ? dep.participants() : null,
                dep != null ? dep.classLoaderId() : null,
                dep == null);

        try {
          ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL);

          if (log.isDebugEnabled())
            log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']');
        } catch (GridException e) {
          if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id()))
            ((GridFutureAdapter<Object>) fut).onDone(e);
          else
            ((GridFutureAdapter<Object>) fut)
                .onDone(
                    new GridTopologyException(
                        "Failed to send " + "request (node has left): " + node.id()));
        }
      }
    }
  /**
   * @param nodeId Sender node ID.
   * @param req Finish transaction message.
   */
  @SuppressWarnings({"CatchGenericClass"})
  private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
      ClassLoader ldr = ctx.deploy().globalLoader();

      if (req.commit()) {
        // If lock was acquired explicitly.
        if (tx == null) {
          // Create transaction and add entries.
          tx =
              ctx.tm()
                  .onCreated(
                      new GridReplicatedTxRemote<K, V>(
                          ldr,
                          nodeId,
                          req.threadId(),
                          req.version(),
                          req.commitVersion(),
                          PESSIMISTIC,
                          READ_COMMITTED,
                          req.isInvalidate(),
                          /*timeout */ 0,
                          /*read entries*/ null,
                          req.writes(),
                          ctx));

          if (tx == null || !ctx.tm().onStarted(tx))
            throw new GridCacheTxRollbackException(
                "Attempt to start a completed " + "transaction: " + req);
        } else {
          boolean set = tx.commitVersion(req.commitVersion());

          assert set;
        }

        Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

        if (!F.isEmpty(writeEntries)) {
          // In OPTIMISTIC mode, we get the values at PREPARE stage.
          assert tx.concurrency() == PESSIMISTIC;

          for (GridCacheTxEntry<K, V> entry : writeEntries) {
            // Unmarshal write entries.
            entry.unmarshal(ctx, ldr);

            if (log.isDebugEnabled())
              log.debug(
                  "Unmarshalled transaction entry from pessimistic transaction [key="
                      + entry.key()
                      + ", value="
                      + entry.value()
                      + ", tx="
                      + tx
                      + ']');

            if (!tx.setWriteValue(entry))
              U.warn(
                  log,
                  "Received entry to commit that was not present in transaction [entry="
                      + entry
                      + ", tx="
                      + tx
                      + ']');
          }
        }

        // Add completed versions.
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        if (tx.pessimistic()) tx.prepare();

        tx.commit();
      } else if (tx != null) {
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        tx.rollback();
      }

      if (req.replyRequired()) {
        GridCacheMessage<K, V> res =
            new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

        try {
          ctx.io().send(nodeId, res);
        } catch (Throwable e) {
          // Double-check.
          if (ctx.discovery().node(nodeId) == null) {
            if (log.isDebugEnabled())
              log.debug(
                  "Node left while sending finish response [nodeId="
                      + nodeId
                      + ", res="
                      + res
                      + ']');
          } else
            U.error(
                log,
                "Failed to send finish response to node [nodeId=" + nodeId + ", res=" + res + ']',
                e);
        }
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Attempted to start a completed transaction (will ignore): " + e);
    } catch (Throwable e) {
      U.error(
          log,
          "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']',
          e);

      if (tx != null) tx.rollback();
    }
  }
  /**
   * Processes lock request.
   *
   * @param nodeId Sender node ID.
   * @param msg Lock request.
   */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
      ldr = ctx.deploy().globalLoader();

      if (ldr != null) {
        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

        for (int i = 0; i < keys.size(); i++) {
          byte[] bytes = keys.get(i);
          K key = msg.keys().get(i);

          Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

          if (bytes == null) continue;

          if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

          GridDistributedCacheEntry<K, V> entry = null;

          while (true) {
            try {
              entry = entryexx(key);

              // Handle implicit locks for pessimistic transactions.
              if (msg.inTx()) {
                tx = ctx.tm().tx(msg.version());

                if (tx != null) {
                  if (msg.txRead()) tx.addRead(key, bytes);
                  else tx.addWrite(key, bytes);
                } else {
                  tx =
                      new GridReplicatedTxRemote<K, V>(
                          nodeId,
                          msg.threadId(),
                          msg.version(),
                          null,
                          PESSIMISTIC,
                          msg.isolation(),
                          msg.isInvalidate(),
                          msg.timeout(),
                          key,
                          bytes,
                          msg.txRead(),
                          ctx);

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + msg.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  msg.nodeId(),
                  null,
                  msg.threadId(),
                  msg.version(),
                  msg.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions());

              // Double-check in case the sender node left the grid.
              if (ctx.discovery().node(msg.nodeId()) == null) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Node requesting lock left grid (lock request will be ignored): " + msg);

                if (tx != null) tx.rollback();

                return;
              }

              res.setCandidates(
                  i,
                  entry.localCandidates(),
                  ctx.tm().committedVersions(msg.version()),
                  ctx.tm().rolledbackVersions(msg.version()));

              res.addValueBytes(
                  entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

              // Entry is legit.
              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsoleteVersion() != null
                  : "Obsolete flag not set on removed entry: " + entry;

              if (log.isDebugEnabled())
                log.debug(
                    "Received entry removed exception (will retry on renewed entry): " + entry);

              if (tx != null) {
                tx.clearEntry(entry.key());

                if (log.isDebugEnabled())
                  log.debug(
                      "Cleared removed entry from remote transaction (will retry) [entry="
                          + entry
                          + ", tx="
                          + tx
                          + ']');
              }
            }
          }
        }
      } else {
        String err = "Failed to acquire deployment class for message: " + msg;

        U.warn(log, err);

        res =
            new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Received lock request for completed transaction (will ignore): " + e);

      res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    } catch (GridException e) {
      String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

      log.error(err, e);

      res =
          new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

      if (tx != null) tx.rollback();
    } catch (GridDistributedLockCancelledException ignored) {
      // Received lock request for cancelled lock.
      if (log.isDebugEnabled())
        log.debug("Received lock request for canceled lock (will ignore): " + msg);

      if (tx != null) tx.rollback();

      // Don't send response back.
      return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

        releaseAll = ldr != null;
      }
    }
    // If sender left grid, release all locks acquired so far.
    else releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
      for (K key : msg.keys()) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId());

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock on removed entity during failure "
                      + "of replicated lock request handling (will retry): "
                      + entry);
          }
        }
      }

      U.warn(
          log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
  }