  /**
   * @param nodeId Sender node ID.
   * @param msg Finish transaction response.
   */
  private void processFinishResponse(UUID nodeId, GridDistributedTxFinishResponse<K, V> msg) {
    GridReplicatedTxCommitFuture<K, V> fut =
        (GridReplicatedTxCommitFuture<K, V>)
            ctx.mvcc().<GridCacheTx>future(msg.xid().id(), msg.futureId());

    if (fut != null) fut.onResult(nodeId);
    else U.warn(log, "Received finish response for unknown transaction: " + msg);
  }
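The handler above is an instance of a pending-future registry: the commit future is registered under its transaction and future IDs when the request goes out, and the finish response simply looks it up and signals it with the sender node. Below is a minimal plain-Java sketch of that pattern; the class and method names are hypothetical and not part of the GridGain API.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class CommitFutureRegistry {
  // Pending commit futures keyed by "txId:futureId", mirroring xid().id() + futureId() above.
  private final Map<String, CompletableFuture<UUID>> pending = new ConcurrentHashMap<>();

  void register(String txId, String futId, CompletableFuture<UUID> fut) {
    pending.put(txId + ':' + futId, fut);
  }

  /** Completes the matching future with the sender node ID, or warns if none is found. */
  void onFinishResponse(UUID senderNodeId, String txId, String futId) {
    CompletableFuture<UUID> fut = pending.remove(txId + ':' + futId);

    if (fut != null) fut.complete(senderNodeId); // Analogue of fut.onResult(nodeId).
    else System.err.println("Received finish response for unknown transaction: " + txId);
  }
}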
Example #2
  /**
   * @param nodeId Sender.
   * @param res Result.
   */
  void onResult(UUID nodeId, GridNearLockResponse<K, V> res) {
    if (!isDone()) {
      if (log.isDebugEnabled())
        log.debug(
            "Received lock response from node [nodeId="
                + nodeId
                + ", res="
                + res
                + ", fut="
                + this
                + ']');

      for (GridFuture<Boolean> fut : pending()) {
        if (isMini(fut)) {
          MiniFuture mini = (MiniFuture) fut;

          if (mini.futureId().equals(res.miniId())) {
            assert mini.node().id().equals(nodeId);

            if (log.isDebugEnabled())
              log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');

            mini.onResult(res);

            if (log.isDebugEnabled())
              log.debug(
                  "Future after processed lock response [fut="
                      + this
                      + ", mini="
                      + mini
                      + ", res="
                      + res
                      + ']');

            return;
          }
        }
      }

      U.warn(
          log,
          "Failed to find mini future for response (perhaps due to stale message) [res="
              + res
              + ", fut="
              + this
              + ']');
    } else if (log.isDebugEnabled())
      log.debug(
          "Ignoring lock response from node (future is done) [nodeId="
              + nodeId
              + ", res="
              + res
              + ", fut="
              + this
              + ']');
  }
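The method above routes each lock response to a per-node "mini" future tracked by a compound parent future. A simplified sketch of that fan-out/fan-in structure using plain CompletableFuture follows; all names are hypothetical.

import java.util.List;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;

class ParentLockFuture {
  /** One mini future per node the lock request was sent to. */
  static final class MiniFuture extends CompletableFuture<Boolean> {
    final UUID futureId = UUID.randomUUID();
    final UUID nodeId;

    MiniFuture(UUID nodeId) {
      this.nodeId = nodeId;
    }
  }

  private final List<MiniFuture> pending = new CopyOnWriteArrayList<>();

  MiniFuture addMini(UUID nodeId) {
    MiniFuture mini = new MiniFuture(nodeId);

    pending.add(mini);

    return mini;
  }

  /** Matches the response to its mini future by ID and applies it there. */
  void onResult(UUID nodeId, UUID miniId, boolean granted) {
    for (MiniFuture mini : pending) {
      if (mini.futureId.equals(miniId)) {
        assert mini.nodeId.equals(nodeId);

        mini.complete(granted); // Analogue of mini.onResult(res).

        return;
      }
    }

    System.err.println("Failed to find mini future for response (perhaps a stale message): " + miniId);
  }
}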
  /**
   * Processes lock response.
   *
   * @param nodeId Sender node ID.
   * @param res Lock response.
   */
  private void processLockResponse(UUID nodeId, GridDistributedLockResponse<K, V> res) {
    GridReplicatedLockFuture<K, V> fut = futurex(res.lockId(), res.futureId());

    if (fut == null) {
      U.warn(log, "Received lock response for non-existing future (will ignore): " + res);
    } else {
      fut.onResult(nodeId, res);

      if (fut.isDone()) {
        ctx.mvcc().removeFuture(fut);

        if (log.isDebugEnabled())
          log.debug("Received all replies for future (future was removed): " + fut);
      }
    }
  }
Example #4
    /** @param e Error. */
    void onResult(Throwable e) {
      if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
          log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

        // Fail.
        onDone(e);
      } else
        U.warn(
            log,
            "Received error after another result has been processed [fut="
                + GridNearLockFuture.this
                + ", mini="
                + this
                + ']',
            e);
    }
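The compareAndSet guard above makes the first result, success or error, win; any later result is only logged. A tiny sketch of the same guard, with hypothetical names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

class MiniResultGuard extends CompletableFuture<Boolean> {
  private final AtomicBoolean rcvRes = new AtomicBoolean(false);

  /** Applies a successful result only if no other result has been processed yet. */
  void onResult(boolean granted) {
    if (rcvRes.compareAndSet(false, true)) complete(granted);
  }

  /** Fails the future only if no other result has been processed yet. */
  void onError(Throwable e) {
    if (rcvRes.compareAndSet(false, true)) completeExceptionally(e);
    // Otherwise another result already won the race; a real implementation would only log the error.
  }
}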
Example #5
  /** {@inheritDoc} */
  @SuppressWarnings({"CatchGenericClass", "ThrowableInstanceNeverThrown"})
  @Override
  public void finish(boolean commit) throws GridException {
    if (log.isDebugEnabled())
      log.debug("Finishing near local tx [tx=" + this + ", commit=" + commit + "]");

    if (commit) {
      if (!state(COMMITTING)) {
        GridCacheTxState state = state();

        if (state != COMMITTING && state != COMMITTED)
          throw new GridException(
              "Invalid transaction state for commit [state=" + state() + ", tx=" + this + ']');
        else {
          if (log.isDebugEnabled())
            log.debug(
                "Invalid transaction state for commit (another thread is committing): " + this);

          return;
        }
      }
    } else {
      if (!state(ROLLING_BACK)) {
        if (log.isDebugEnabled())
          log.debug(
              "Invalid transaction state for rollback [state=" + state() + ", tx=" + this + ']');

        return;
      }
    }

    GridException err = null;

    // Commit to DB first. This way, if there is a failure, the transaction
    // won't be committed.
    try {
      if (commit && !isRollbackOnly()) userCommit();
      else userRollback();
    } catch (GridException e) {
      err = e;

      commit = false;

      // If heuristic error.
      if (!isRollbackOnly()) {
        invalidate = true;

        U.warn(
            log,
            "Set transaction invalidation flag to true due to error [tx="
                + this
                + ", err="
                + err
                + ']');
      }
    }

    if (err != null) {
      state(UNKNOWN);

      throw err;
    } else {
      if (!state(commit ? COMMITTED : ROLLED_BACK)) {
        state(UNKNOWN);

        throw new GridException("Invalid transaction state for commit or rollback: " + this);
      }
    }
  }
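The finish logic above is a small state machine: the calling thread first tries to move the transaction into COMMITTING or ROLLING_BACK, commits to the persistent store before touching the cache, and drops the state to UNKNOWN on a heuristic failure. A compact plain-Java sketch of that flow is below; it omits the "another thread is already committing" branch, and all names are hypothetical.

import java.util.concurrent.atomic.AtomicReference;

class TxStateMachine {
  enum State { ACTIVE, COMMITTING, COMMITTED, ROLLING_BACK, ROLLED_BACK, UNKNOWN }

  private final AtomicReference<State> state = new AtomicReference<>(State.ACTIVE);

  /** Attempts the ACTIVE -> target transition; false means another thread got there first. */
  boolean state(State target) {
    return state.compareAndSet(State.ACTIVE, target);
  }

  void finish(boolean commit, Runnable userCommit, Runnable userRollback) {
    if (!state(commit ? State.COMMITTING : State.ROLLING_BACK)) return; // Another thread is finishing.

    try {
      // Commit to the store first so that a store failure prevents the cache commit.
      if (commit) userCommit.run();
      else userRollback.run();

      state.set(commit ? State.COMMITTED : State.ROLLED_BACK);
    } catch (RuntimeException e) {
      state.set(State.UNKNOWN); // Heuristic outcome: the final state can no longer be trusted.

      throw e;
    }
  }
}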
  /**
   * @param nodeId Primary node ID.
   * @param req Request.
   * @return Remote transaction.
   * @throws GridException If failed.
   * @throws GridDistributedLockCancelledException If lock has been cancelled.
   */
  @SuppressWarnings({"RedundantTypeArguments"})
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTxForFinish(
      UUID nodeId, GridDhtTxFinishRequest<K, V> req)
      throws GridException, GridDistributedLockCancelledException {
    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
      for (GridCacheTxEntry<K, V> txEntry : req.nearWrites()) {
        GridDistributedCacheEntry<K, V> entry = null;

        while (true) {
          try {
            entry = peekExx(txEntry.key());

            if (entry != null) {
              entry.keyBytes(txEntry.keyBytes());

              // Handle implicit locks for pessimistic transactions.
              tx = ctx.tm().tx(req.version());

              if (tx != null) {
                if (tx.local()) return null;

                if (tx.markFinalizing()) tx.addWrite(txEntry.key(), txEntry.keyBytes());
                else return null;
              } else {
                tx =
                    new GridNearTxRemote<K, V>(
                        nodeId,
                        req.nearNodeId(),
                        req.threadId(),
                        req.version(),
                        null,
                        PESSIMISTIC,
                        req.isolation(),
                        req.isInvalidate(),
                        0,
                        txEntry.key(),
                        txEntry.keyBytes(),
                        txEntry.value(),
                        txEntry.valueBytes(),
                        ctx);

                if (tx.empty()) return tx;

                tx = ctx.tm().onCreated(tx);

                if (tx == null || !ctx.tm().onStarted(tx))
                  throw new GridCacheTxRollbackException(
                      "Failed to acquire lock "
                          + "(transaction has been completed): "
                          + req.version());

                if (!tx.markFinalizing()) return null;
              }

              // Add remote candidate before reordering.
              if (txEntry.explicitVersion() == null)
                entry.addRemote(
                    req.nearNodeId(),
                    nodeId,
                    req.threadId(),
                    req.version(),
                    0,
                    tx.ec(),
                    /*tx*/ true,
                    tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  Collections.<GridCacheMvccCandidate<K>>emptyList(),
                  req.version(),
                  req.committedVersions(),
                  req.rolledbackVersions());
            }

            // Double-check in case the sender node left the grid.
            if (ctx.discovery().node(req.nearNodeId()) == null) {
              if (log.isDebugEnabled())
                log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

              if (tx != null) tx.rollback();

              return null;
            }

            // Entry is legit.
            break;
          } catch (GridCacheEntryRemovedException ignored) {
            assert entry.obsoleteVersion() != null
                : "Obsolete flag not set on removed entry: " + entry;

            if (log.isDebugEnabled())
              log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

            if (tx != null) {
              tx.clearEntry(entry.key());

              if (log.isDebugEnabled())
                log.debug(
                    "Cleared removed entry from remote transaction (will retry) [entry="
                        + entry
                        + ", tx="
                        + tx
                        + ']');
            }
          }
        }
      }
    } else {
      String err = "Failed to acquire deployment class loader for message: " + req;

      U.warn(log, err);

      throw new GridException(err);
    }

    return tx;
  }
  /**
   * @param nodeId Primary node ID.
   * @param req Request.
   * @return Remote transaction.
   * @throws GridException If failed.
   * @throws GridDistributedLockCancelledException If lock has been cancelled.
   */
  @SuppressWarnings({"RedundantTypeArguments"})
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
      throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
      for (int i = 0; i < nearKeyBytes.size(); i++) {
        byte[] bytes = nearKeyBytes.get(i);

        if (bytes == null) continue;

        K key = req.nearKeys().get(i);

        Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

        if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

        GridNearCacheEntry<K, V> entry = null;

        while (true) {
          try {
            entry = peekExx(key);

            if (entry != null) {
              entry.keyBytes(bytes);

              // Handle implicit locks for pessimistic transactions.
              if (req.inTx()) {
                tx = ctx.tm().tx(req.version());

                if (tx != null) tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                else {
                  tx =
                      new GridNearTxRemote<K, V>(
                          nodeId,
                          req.nearNodeId(),
                          req.threadId(),
                          req.version(),
                          null,
                          PESSIMISTIC,
                          req.isolation(),
                          req.isInvalidate(),
                          req.timeout(),
                          key,
                          bytes,
                          null, // Value.
                          null, // Value bytes.
                          ctx);

                  if (tx.empty()) return tx;

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + req.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  req.nodeId(),
                  nodeId,
                  req.threadId(),
                  req.version(),
                  req.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, req.version(), req.committedVersions(), req.rolledbackVersions());

              entry.orderOwned(req.version(), req.owned(entry.key()));
            }

            // Double-check in case the sender node left the grid.
            if (ctx.discovery().node(req.nodeId()) == null) {
              if (log.isDebugEnabled())
                log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

              if (tx != null) tx.rollback();

              return null;
            }

            // Entry is legit.
            break;
          } catch (GridCacheEntryRemovedException ignored) {
            assert entry.obsoleteVersion() != null
                : "Obsolete flag not set on removed entry: " + entry;

            if (log.isDebugEnabled())
              log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

            if (tx != null) {
              tx.clearEntry(entry.key());

              if (log.isDebugEnabled())
                log.debug(
                    "Cleared removed entry from remote transaction (will retry) [entry="
                        + entry
                        + ", tx="
                        + tx
                        + ']');
            }
          }
        }
      }
    } else {
      String err = "Failed to acquire deployment class loader for message: " + req;

      U.warn(log, err);

      throw new GridException(err);
    }

    return tx;
  }
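Both methods above share the same retry loop: if the cache entry is concurrently removed while a remote lock candidate is being added, the entry is cleared from the transaction and the operation retries against the renewed entry. Stripped of the GridGain types, the loop looks roughly like this (hypothetical names):

class RemovedEntryRetry {
  static class EntryRemovedException extends Exception {}

  interface Entry {
    void addRemoteCandidate() throws EntryRemovedException;
  }

  interface EntryLookup {
    Entry peek(String key);
  }

  static void lockWithRetry(EntryLookup cache, String key) {
    while (true) {
      Entry entry = cache.peek(key);

      try {
        if (entry != null) entry.addRemoteCandidate();

        break; // Entry is legit.
      } catch (EntryRemovedException ignored) {
        // The entry was obsoleted concurrently; loop again to retry on the renewed entry.
      }
    }
  }
}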
Example #8
    /**
     * @param entries Entries to submit.
     * @param curFut Current future.
     * @throws GridInterruptedException If interrupted.
     */
    private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut)
        throws GridInterruptedException {
      assert entries != null;
      assert !entries.isEmpty();
      assert curFut != null;

      incrementActiveTasks();

      GridFuture<Object> fut;
      if (isLocNode) {
        fut =
            ctx.closure()
                .callLocalSafe(
                    new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater),
                    false);

        locFuts.add(fut);

        fut.listenAsync(
            new GridInClosure<GridFuture<Object>>() {
              @Override
              public void apply(GridFuture<Object> t) {
                try {
                  boolean rmv = locFuts.remove(t);

                  assert rmv;

                  curFut.onDone(t.get());
                } catch (GridException e) {
                  curFut.onDone(e);
                }
              }
            });
      } else {
        byte[] entriesBytes;

        try {
          entriesBytes = ctx.config().getMarshaller().marshal(entries);

          if (updaterBytes == null) {
            assert updater != null;

            updaterBytes = ctx.config().getMarshaller().marshal(updater);
          }

          if (topicBytes == null) topicBytes = ctx.config().getMarshaller().marshal(topic);
        } catch (GridException e) {
          U.error(log, "Failed to marshal (request will not be sent).", e);

          return;
        }

        GridDeployment dep = null;
        GridPeerDeployAware jobPda0 = null;

        if (ctx.deploy().enabled()) {
          try {
            jobPda0 = jobPda;

            assert jobPda0 != null;

            dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader());
          } catch (GridException e) {
            U.error(
                log,
                "Failed to deploy class (request will not be sent): " + jobPda0.deployClass(),
                e);

            return;
          }

          if (dep == null)
            U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass());
        }

        long reqId = idGen.incrementAndGet();

        fut = curFut;

        reqs.put(reqId, (GridFutureAdapter<Object>) fut);

        GridDataLoadRequest<Object, Object> req =
            new GridDataLoadRequest<>(
                reqId,
                topicBytes,
                cacheName,
                updaterBytes,
                entriesBytes,
                true,
                dep != null ? dep.deployMode() : null,
                dep != null ? jobPda0.deployClass().getName() : null,
                dep != null ? dep.userVersion() : null,
                dep != null ? dep.participants() : null,
                dep != null ? dep.classLoaderId() : null,
                dep == null);

        try {
          ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL);

          if (log.isDebugEnabled())
            log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']');
        } catch (GridException e) {
          if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id()))
            ((GridFutureAdapter<Object>) fut).onDone(e);
          else
            ((GridFutureAdapter<Object>) fut)
                .onDone(
                    new GridTopologyException(
                        "Failed to send " + "request (node has left): " + node.id()));
        }
      }
    }
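The submit() method above dispatches a batch either to a local closure or, after marshalling, to a remote node, tracking the remote request in a map keyed by request ID until its response arrives or the send fails. A stripped-down plain-Java sketch of that dispatch follows; names are hypothetical.

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongConsumer;

class BatchSubmitter {
  private final AtomicLong idGen = new AtomicLong();

  // Remote requests awaiting a response, keyed by request ID.
  private final Map<Long, CompletableFuture<Object>> reqs = new ConcurrentHashMap<>();

  void submit(boolean isLocNode, Runnable locJob, LongConsumer send, CompletableFuture<Object> curFut) {
    if (isLocNode) {
      // Execute the update job in-process and complete the batch future with its outcome.
      CompletableFuture.runAsync(locJob)
          .whenComplete((r, e) -> {
            if (e != null) curFut.completeExceptionally(e);
            else curFut.complete(r);
          });
    } else {
      long reqId = idGen.incrementAndGet();

      reqs.put(reqId, curFut); // Completed later, when the response for reqId is received.

      try {
        send.accept(reqId); // Marshal the entries and send the request to the remote node.
      } catch (RuntimeException e) {
        reqs.remove(reqId);

        curFut.completeExceptionally(e);
      }
    }
  }
}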
  /**
   * @param nodeId Sender node ID.
   * @param req Finish transaction message.
   */
  @SuppressWarnings({"CatchGenericClass"})
  private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
      ClassLoader ldr = ctx.deploy().globalLoader();

      if (req.commit()) {
        // If lock was acquired explicitly.
        if (tx == null) {
          // Create transaction and add entries.
          tx =
              ctx.tm()
                  .onCreated(
                      new GridReplicatedTxRemote<K, V>(
                          ldr,
                          nodeId,
                          req.threadId(),
                          req.version(),
                          req.commitVersion(),
                          PESSIMISTIC,
                          READ_COMMITTED,
                          req.isInvalidate(),
                          /*timeout */ 0,
                          /*read entries*/ null,
                          req.writes(),
                          ctx));

          if (tx == null || !ctx.tm().onStarted(tx))
            throw new GridCacheTxRollbackException(
                "Attempt to start a completed " + "transaction: " + req);
        } else {
          boolean set = tx.commitVersion(req.commitVersion());

          assert set;
        }

        Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

        if (!F.isEmpty(writeEntries)) {
          // In OPTIMISTIC mode, we get the values at PREPARE stage.
          assert tx.concurrency() == PESSIMISTIC;

          for (GridCacheTxEntry<K, V> entry : writeEntries) {
            // Unmarshal write entries.
            entry.unmarshal(ctx, ldr);

            if (log.isDebugEnabled())
              log.debug(
                  "Unmarshalled transaction entry from pessimistic transaction [key="
                      + entry.key()
                      + ", value="
                      + entry.value()
                      + ", tx="
                      + tx
                      + ']');

            if (!tx.setWriteValue(entry))
              U.warn(
                  log,
                  "Received entry to commit that was not present in transaction [entry="
                      + entry
                      + ", tx="
                      + tx
                      + ']');
          }
        }

        // Add completed versions.
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        if (tx.pessimistic()) tx.prepare();

        tx.commit();
      } else if (tx != null) {
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        tx.rollback();
      }

      if (req.replyRequired()) {
        GridCacheMessage<K, V> res =
            new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

        try {
          ctx.io().send(nodeId, res);
        } catch (Throwable e) {
          // Double-check.
          if (ctx.discovery().node(nodeId) == null) {
            if (log.isDebugEnabled())
              log.debug(
                  "Node left while sending finish response [nodeId="
                      + nodeId
                      + ", res="
                      + res
                      + ']');
          } else
            U.error(
                log,
                "Failed to send finish response to node [nodeId=" + nodeId + ", res=" + res + ']',
                e);
        }
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Attempted to start a completed transaction (will ignore): " + e);
    } catch (Throwable e) {
      U.error(
          log,
          "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']',
          e);

      if (tx != null) tx.rollback();
    }
  }
  /**
   * Processes lock request.
   *
   * @param nodeId Sender node ID.
   * @param msg Lock request.
   */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
      ldr = ctx.deploy().globalLoader();

      if (ldr != null) {
        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

        for (int i = 0; i < keys.size(); i++) {
          byte[] bytes = keys.get(i);
          K key = msg.keys().get(i);

          Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

          if (bytes == null) continue;

          if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

          GridDistributedCacheEntry<K, V> entry = null;

          while (true) {
            try {
              entry = entryexx(key);

              // Handle implicit locks for pessimistic transactions.
              if (msg.inTx()) {
                tx = ctx.tm().tx(msg.version());

                if (tx != null) {
                  if (msg.txRead()) tx.addRead(key, bytes);
                  else tx.addWrite(key, bytes);
                } else {
                  tx =
                      new GridReplicatedTxRemote<K, V>(
                          nodeId,
                          msg.threadId(),
                          msg.version(),
                          null,
                          PESSIMISTIC,
                          msg.isolation(),
                          msg.isInvalidate(),
                          msg.timeout(),
                          key,
                          bytes,
                          msg.txRead(),
                          ctx);

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + msg.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  msg.nodeId(),
                  null,
                  msg.threadId(),
                  msg.version(),
                  msg.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions());

              // Double-check in case the sender node left the grid.
              if (ctx.discovery().node(msg.nodeId()) == null) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Node requesting lock left grid (lock request will be ignored): " + msg);

                if (tx != null) tx.rollback();

                return;
              }

              res.setCandidates(
                  i,
                  entry.localCandidates(),
                  ctx.tm().committedVersions(msg.version()),
                  ctx.tm().rolledbackVersions(msg.version()));

              res.addValueBytes(
                  entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

              // Entry is legit.
              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsoleteVersion() != null
                  : "Obsolete flag not set on removed entry: " + entry;

              if (log.isDebugEnabled())
                log.debug(
                    "Received entry removed exception (will retry on renewed entry): " + entry);

              if (tx != null) {
                tx.clearEntry(entry.key());

                if (log.isDebugEnabled())
                  log.debug(
                      "Cleared removed entry from remote transaction (will retry) [entry="
                          + entry
                          + ", tx="
                          + tx
                          + ']');
              }
            }
          }
        }
      } else {
        String err = "Failed to acquire deployment class for message: " + msg;

        U.warn(log, err);

        res =
            new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Received lock request for completed transaction (will ignore): " + e);

      res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    } catch (GridException e) {
      String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

      log.error(err, e);

      res =
          new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

      if (tx != null) tx.rollback();
    } catch (GridDistributedLockCancelledException ignored) {
      // Received lock request for cancelled lock.
      if (log.isDebugEnabled())
        log.debug("Received lock request for canceled lock (will ignore): " + msg);

      if (tx != null) tx.rollback();

      // Don't send response back.
      return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

        releaseAll = ldr != null;
      }
    }
    // If sender left grid, release all locks acquired so far.
    else releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
      for (K key : msg.keys()) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId());

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock on removed entity during failure "
                      + "of replicated lock request handling (will retry): "
                      + entry);
          }
        }
      }

      U.warn(
          log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
  }
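Finally, when the requesting node turns out to have left the grid, every lock it acquired so far is released again. The core of that cleanup, minus the removed-entry retry already sketched above (hypothetical names):

import java.util.Collection;
import java.util.UUID;

class SenderLeftCleanup {
  interface Entry {
    void removeExplicitNodeLocks(UUID nodeId);
  }

  interface Cache {
    Entry peek(String key);
  }

  /** Releases all lock candidates owned by a sender node that has left the grid. */
  static void releaseAll(Cache cache, Collection<String> keys, UUID senderId) {
    for (String key : keys) {
      Entry entry = cache.peek(key);

      if (entry != null) entry.removeExplicitNodeLocks(senderId);
    }
  }
}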