/**
   * @param cctx Context.
   * @param tx Transaction.
   * @param failedNodeId ID of the failed node that started the transaction.
   */
  @SuppressWarnings("ConstantConditions")
  public GridCachePessimisticCheckCommittedTxFuture(
      GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx, UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion())) nodes.put(node.id(), node);
  }
Example #2
    /** @param e Node left exception. */
    void onResult(GridTopologyException e) {
      if (isDone()) return;

      if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
          log.debug(
              "Remote node left grid while sending or waiting for reply (will fail): " + this);

        if (tx != null) tx.removeMapping(node.id());

        // Primary node left the grid, so fail the future.
        GridNearLockFuture.this.onDone(newTopologyException(e, node.id()));

        onDone(true);
      }
    }
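The rcvRes.compareAndSet(false, true) guard above makes the mini future consume at most one result, even when a reply and a node-left event race with each other. Below is a minimal, self-contained sketch of the same once-only completion idiom using only JDK types; the class and method names are hypothetical.

import java.util.concurrent.atomic.AtomicBoolean;

class OnceOnlyCompletion {
  private final AtomicBoolean rcvRes = new AtomicBoolean(false);
  private volatile Object result;

  /** Accepts the first result (reply, error or topology event) and silently ignores later ones. */
  void complete(Object res) {
    if (rcvRes.compareAndSet(false, true)) {
      result = res;

      // Notify listeners exactly once here.
    }
  }
}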
Example #3
  /**
   * @param key Key to map.
   * @param mapping Near lock mapping to add the key to, or {@code null} to create a new one.
   * @param topVer Topology version.
   * @return Near lock mapping.
   * @throws GridException If mapping for key failed.
   */
  private GridNearLockMapping<K, V> map(
      K key, @Nullable GridNearLockMapping<K, V> mapping, long topVer) throws GridException {
    assert mapping == null || mapping.node() != null;

    GridNode primary = cctx.affinity().primary(key, topVer);

    if (cctx.discovery().node(primary.id()) == null)
      // If primary node left the grid before lock acquisition, fail the whole future.
      throw newTopologyException(null, primary.id());

    if (inTx() && tx.groupLock() && !primary.isLocal())
      throw new GridException(
          "Failed to start group lock transaction (local node is not primary for "
              + " key) [key="
              + key
              + ", primaryNodeId="
              + primary.id()
              + ']');

    if (mapping == null || !primary.id().equals(mapping.node().id()))
      mapping = new GridNearLockMapping<>(primary, key);
    else mapping.addKey(key);

    return mapping;
  }
Example #4
    void onNodeLeft() {
      assert !isLocNode;
      assert bufMappings.get(node.id()) != this;

      if (log.isDebugEnabled())
        log.debug("Forcibly completing futures (node has left): " + node.id());

      Exception e =
          new GridTopologyException(
              "Failed to wait for request completion " + "(node has left): " + node.id());

      for (GridFutureAdapter<Object> f : reqs.values()) f.onDone(e);

      // Make sure to complete current future.
      GridFutureAdapter<Object> curFut0;

      synchronized (this) {
        curFut0 = curFut;
      }

      curFut0.onDone(e);
    }
Example #5
    /** @param node Node. */
    Buffer(GridNode node) {
      assert node != null;

      this.node = node;

      locFuts = new GridConcurrentHashSet<>();
      reqs = new ConcurrentHashMap8<>();

      // Cache local node flag.
      isLocNode = node.equals(ctx.discovery().localNode());

      entries = newEntries();
      curFut = new GridFutureAdapter<>(ctx);
      curFut.listenAsync(signalC);

      sem = new Semaphore(parallelOps);
    }
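The Semaphore created with parallelOps caps how many batches may be in flight to this node at once. A minimal sketch of that throttling pattern with plain JDK primitives is shown below; sendBatch is a hypothetical placeholder for the asynchronous send.

import java.util.concurrent.Semaphore;

class BoundedSender {
  private final Semaphore sem;

  BoundedSender(int parallelOps) {
    sem = new Semaphore(parallelOps);
  }

  void submit(Runnable sendBatch) throws InterruptedException {
    sem.acquire(); // Blocks once 'parallelOps' batches are already in flight.

    try {
      sendBatch.run();
    } finally {
      // In the real loader the permit is released from the batch's completion listener instead.
      sem.release();
    }
  }
}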
Example #6
  /** {@inheritDoc} */
  @Override
  public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
      throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null) prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
      throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
      Collection<GridNode> nodes = prj.nodes();

      GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

      assert node != null;

      if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
        if (node.id().equals(ctx.localNodeId()))
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on local node. "
                  + "Will execute query locally: "
                  + this);
        else
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on single node. "
                  + "Will execute query on remote node [qry="
                  + this
                  + ", node="
                  + node
                  + ']');
      }

      prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
      if (routineId != null)
        throw new IllegalStateException("Continuous query can't be executed twice.");

      guard.block();

      GridContinuousHandler hnd =
          new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb, filter, prjPred);

      routineId =
          ctx.kernalContext()
              .continuous()
              .startRoutine(hnd, bufSize, timeInterval, autoUnsubscribe, prj.predicate())
              .get();
    } finally {
      closeLock.unlock();
    }
  }
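For context, execute() above is normally reached through the cache's continuous query API. The sketch below is a hedged usage example based on the GridGain-era public interface (createContinuousQuery(), callback(), execute(), close()); the exact method names may differ between releases, and the cache name is illustrative.

GridCache<Integer, String> cache = grid.cache("partitioned");

GridCacheContinuousQuery<Integer, String> qry = cache.queries().createContinuousQuery();

// Mandatory local callback; without it execute() throws IllegalStateException (see above).
qry.callback(new GridBiPredicate<UUID, Collection<Map.Entry<Integer, String>>>() {
  @Override public boolean apply(UUID nodeId, Collection<Map.Entry<Integer, String>> entries) {
    for (Map.Entry<Integer, String> e : entries)
      System.out.println("Updated: " + e.getKey() + " -> " + e.getValue());

    return true; // Keep listening.
  }
});

qry.execute(); // Or execute(prj) to restrict the query to a projection, as in the method above.

// ... later, when updates are no longer needed ...
qry.close();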
Example #7
    /**
     * @param entries Entries to submit.
     * @param curFut Current future.
     * @throws GridInterruptedException If interrupted.
     */
    private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut)
        throws GridInterruptedException {
      assert entries != null;
      assert !entries.isEmpty();
      assert curFut != null;

      incrementActiveTasks();

      GridFuture<Object> fut;
      if (isLocNode) {
        fut =
            ctx.closure()
                .callLocalSafe(
                    new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater),
                    false);

        locFuts.add(fut);

        fut.listenAsync(
            new GridInClosure<GridFuture<Object>>() {
              @Override
              public void apply(GridFuture<Object> t) {
                try {
                  boolean rmv = locFuts.remove(t);

                  assert rmv;

                  curFut.onDone(t.get());
                } catch (GridException e) {
                  curFut.onDone(e);
                }
              }
            });
      } else {
        byte[] entriesBytes;

        try {
          entriesBytes = ctx.config().getMarshaller().marshal(entries);

          if (updaterBytes == null) {
            assert updater != null;

            updaterBytes = ctx.config().getMarshaller().marshal(updater);
          }

          if (topicBytes == null) topicBytes = ctx.config().getMarshaller().marshal(topic);
        } catch (GridException e) {
          U.error(log, "Failed to marshal (request will not be sent).", e);

          return;
        }

        GridDeployment dep = null;
        GridPeerDeployAware jobPda0 = null;

        if (ctx.deploy().enabled()) {
          try {
            jobPda0 = jobPda;

            assert jobPda0 != null;

            dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader());
          } catch (GridException e) {
            U.error(
                log,
                "Failed to deploy class (request will not be sent): " + jobPda0.deployClass(),
                e);

            return;
          }

          if (dep == null)
            U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass());
        }

        long reqId = idGen.incrementAndGet();

        fut = curFut;

        reqs.put(reqId, (GridFutureAdapter<Object>) fut);

        GridDataLoadRequest<Object, Object> req =
            new GridDataLoadRequest<>(
                reqId,
                topicBytes,
                cacheName,
                updaterBytes,
                entriesBytes,
                true,
                dep != null ? dep.deployMode() : null,
                dep != null ? jobPda0.deployClass().getName() : null,
                dep != null ? dep.userVersion() : null,
                dep != null ? dep.participants() : null,
                dep != null ? dep.classLoaderId() : null,
                dep == null);

        try {
          ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL);

          if (log.isDebugEnabled())
            log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']');
        } catch (GridException e) {
          if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id()))
            ((GridFutureAdapter<Object>) fut).onDone(e);
          else
            ((GridFutureAdapter<Object>) fut)
                .onDone(
                    new GridTopologyException(
                        "Failed to send " + "request (node has left): " + node.id()));
        }
      }
    }
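submit() registers one future per outstanding request in reqs, keyed by reqId, so that onNodeLeft() (Example #4 above) can fail all of them at once if the target node disappears. Below is a self-contained analogue of that bookkeeping using CompletableFuture in place of GridFutureAdapter.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

class RequestTracker {
  private final AtomicLong idGen = new AtomicLong();
  private final ConcurrentMap<Long, CompletableFuture<Object>> reqs = new ConcurrentHashMap<>();

  /** Registers a future for a new outgoing request and returns its id. */
  long register(CompletableFuture<Object> fut) {
    long reqId = idGen.incrementAndGet();

    reqs.put(reqId, fut);

    return reqId;
  }

  /** Completes the future for a single response. */
  void onResponse(long reqId, Object res) {
    CompletableFuture<Object> fut = reqs.remove(reqId);

    if (fut != null) fut.complete(res);
  }

  /** Fails every pending request, mirroring onNodeLeft() in Example #4. */
  void onNodeLeft(Exception e) {
    for (CompletableFuture<Object> f : reqs.values()) f.completeExceptionally(e);
  }
}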
Example #8
  /**
   * @param nodeId Sender node ID.
   * @param msg Prepare request.
   */
  @SuppressWarnings({"InstanceofCatchParameter"})
  private void processPrepareRequest(UUID nodeId, GridDistributedTxPrepareRequest<K, V> msg) {
    assert nodeId != null;
    assert msg != null;

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedTxPrepareResponse<K, V> res;

    try {
      tx =
          new GridReplicatedTxRemote<K, V>(
              ctx.deploy().globalLoader(),
              nodeId,
              msg.threadId(),
              msg.version(),
              msg.commitVersion(),
              msg.concurrency(),
              msg.isolation(),
              msg.isInvalidate(),
              msg.timeout(),
              msg.reads(),
              msg.writes(),
              ctx);

      tx = ctx.tm().onCreated(tx);

      if (tx == null || !ctx.tm().onStarted(tx))
        throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + tx);

      // Prepare prior to reordering, so the pending locks added
      // in prepare phase will get properly ordered as well.
      tx.prepare();

      // Add remote candidates and reorder completed and uncompleted versions.
      tx.addRemoteCandidates(
          msg.candidatesByKey(), msg.committedVersions(), msg.rolledbackVersions());

      if (msg.concurrency() == EVENTUALLY_CONSISTENT) {
        if (log.isDebugEnabled()) log.debug("Committing transaction during remote prepare: " + tx);

        tx.commit();

        if (log.isDebugEnabled()) log.debug("Committed transaction during remote prepare: " + tx);

        // Don't send response.
        return;
      }

      res = new GridDistributedTxPrepareResponse<K, V>(msg.version());

      Map<K, Collection<GridCacheMvccCandidate<K>>> cands = tx.localCandidates();

      // Add local candidates (completed version must be set below).
      res.candidates(cands);
    } catch (GridException e) {
      if (e instanceof GridCacheTxRollbackException) {
        if (log.isDebugEnabled())
          log.debug("Transaction was rolled back before prepare completed: " + tx);
      } else if (e instanceof GridCacheTxOptimisticException) {
        if (log.isDebugEnabled())
          log.debug("Optimistic failure for remote transaction (will rollback): " + tx);
      } else {
        U.error(log, "Failed to process prepare request: " + msg, e);
      }

      if (tx != null)
        // Automatically rollback remote transactions.
        tx.rollback();

      // Don't send response.
      if (msg.concurrency() == EVENTUALLY_CONSISTENT) return;

      res = new GridDistributedTxPrepareResponse<K, V>(msg.version());

      res.error(e);
    }

    // Add completed versions.
    res.completedVersions(
        ctx.tm().committedVersions(msg.version()), ctx.tm().rolledbackVersions(msg.version()));

    assert msg.concurrency() != EVENTUALLY_CONSISTENT;

    GridNode node = ctx.discovery().node(nodeId);

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send tx response to node (did the node leave grid?) [node="
                + node.id()
                + ", msg="
                + res
                + ']',
            e);

        if (tx != null) tx.rollback();
      }
    }
  }
Example #9
  /**
   * Processes lock request.
   *
   * @param nodeId Sender node ID.
   * @param msg Lock request.
   */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
      ldr = ctx.deploy().globalLoader();

      if (ldr != null) {
        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

        for (int i = 0; i < keys.size(); i++) {
          byte[] bytes = keys.get(i);
          K key = msg.keys().get(i);

          Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

          if (bytes == null) continue;

          if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

          GridDistributedCacheEntry<K, V> entry = null;

          while (true) {
            try {
              entry = entryexx(key);

              // Handle implicit locks for pessimistic transactions.
              if (msg.inTx()) {
                tx = ctx.tm().tx(msg.version());

                if (tx != null) {
                  if (msg.txRead()) tx.addRead(key, bytes);
                  else tx.addWrite(key, bytes);
                } else {
                  tx =
                      new GridReplicatedTxRemote<K, V>(
                          nodeId,
                          msg.threadId(),
                          msg.version(),
                          null,
                          PESSIMISTIC,
                          msg.isolation(),
                          msg.isInvalidate(),
                          msg.timeout(),
                          key,
                          bytes,
                          msg.txRead(),
                          ctx);

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + msg.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  msg.nodeId(),
                  null,
                  msg.threadId(),
                  msg.version(),
                  msg.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions());

              // Double-check in case the sender node left the grid.
              if (ctx.discovery().node(msg.nodeId()) == null) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Node requesting lock left grid (lock request will be ignored): " + msg);

                if (tx != null) tx.rollback();

                return;
              }

              res.setCandidates(
                  i,
                  entry.localCandidates(),
                  ctx.tm().committedVersions(msg.version()),
                  ctx.tm().rolledbackVersions(msg.version()));

              res.addValueBytes(
                  entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

              // Entry is legit.
              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsoleteVersion() != null
                  : "Obsolete flag not set on removed entry: " + entry;

              if (log.isDebugEnabled())
                log.debug(
                    "Received entry removed exception (will retry on renewed entry): " + entry);

              if (tx != null) {
                tx.clearEntry(entry.key());

                if (log.isDebugEnabled())
                  log.debug(
                      "Cleared removed entry from remote transaction (will retry) [entry="
                          + entry
                          + ", tx="
                          + tx
                          + ']');
              }
            }
          }
        }
      } else {
        String err = "Failed to acquire deployment class for message: " + msg;

        U.warn(log, err);

        res =
            new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Received lock request for completed transaction (will ignore): " + e);

      res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    } catch (GridException e) {
      String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

      log.error(err, e);

      res =
          new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

      if (tx != null) tx.rollback();
    } catch (GridDistributedLockCancelledException ignored) {
      // Received lock request for cancelled lock.
      if (log.isDebugEnabled())
        log.debug("Received lock request for canceled lock (will ignore): " + msg);

      if (tx != null) tx.rollback();

      // Don't send response back.
      return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

        releaseAll = ldr != null;
      }
    }
    // If sender left grid, release all locks acquired so far.
    else releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
      for (K key : msg.keys()) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId());

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock on removed entity during failure "
                      + "of replicated lock request handling (will retry): "
                      + entry);
          }
        }
      }

      U.warn(
          log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
  }
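Both the lock and prepare paths above use the same retry idiom: work on a cache entry inside while (true), and if a concurrent removal surfaces as GridCacheEntryRemovedException, resolve a fresh entry and try again. Below is a simplified, self-contained sketch of that idiom; Entry, Cache and RemovedException are hypothetical stand-ins for the real cache types.

class RetryOnRemovedEntry {
  static class RemovedException extends Exception {}

  interface Entry { void lock() throws RemovedException; }

  interface Cache { Entry lookup(String key); }

  /** Re-resolves the entry until the operation lands on a live (non-removed) one. */
  static void lockWithRetry(Cache cache, String key) {
    while (true) {
      Entry entry = cache.lookup(key);

      try {
        entry.lock();

        break; // Entry is legit.
      } catch (RemovedException ignored) {
        // Entry was removed concurrently; loop and retry on a renewed entry.
      }
    }
  }
}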
Example #10
  /**
   * Gets the next near lock mapping and either acquires DHT locks locally or sends a near lock
   * request to the remote primary node.
   *
   * @param mappings Queue of mappings.
   * @throws GridException If mapping cannot be completed.
   */
  private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings)
      throws GridException {
    GridNearLockMapping<K, V> map = mappings.poll();

    // If there are no more mappings to process, complete the future.
    if (map == null) return;

    final GridNearLockRequest<K, V> req = map.request();
    final Collection<K> mappedKeys = map.distributedKeys();
    final GridNode node = map.node();

    if (filter != null && filter.length != 0) req.filter(filter, cctx);

    if (node.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (log.isDebugEnabled()) log.debug("Before locally locking near request: " + req);

      GridFuture<GridNearLockResponse<K, V>> fut;

      if (CU.DHT_ENABLED) fut = dht().lockAllAsync(cctx.localNode(), req, filter);
      else {
        // Create dummy values for testing.
        GridNearLockResponse<K, V> res =
            new GridNearLockResponse<>(lockVer, futId, null, false, 1, null);

        res.addValueBytes(null, null, true, lockVer, lockVer, cctx);

        fut = new GridFinishedFuture<>(ctx, res);
      }

      // Add new future.
      add(
          new GridEmbeddedFuture<>(
              cctx.kernalContext(),
              fut,
              new C2<GridNearLockResponse<K, V>, Exception, Boolean>() {
                @Override
                public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
                  if (CU.isLockTimeoutOrCancelled(e)
                      || (res != null && CU.isLockTimeoutOrCancelled(res.error()))) return false;

                  if (e != null) {
                    onError(e);

                    return false;
                  }

                  if (res == null) {
                    onError(new GridException("Lock response is null for future: " + this));

                    return false;
                  }

                  if (res.error() != null) {
                    onError(res.error());

                    return false;
                  }

                  if (log.isDebugEnabled())
                    log.debug(
                        "Acquired lock for local DHT mapping [locId="
                            + cctx.nodeId()
                            + ", mappedKeys="
                            + mappedKeys
                            + ", fut="
                            + GridNearLockFuture.this
                            + ']');

                  try {
                    int i = 0;

                    for (K k : mappedKeys) {
                      while (true) {
                        GridNearCacheEntry<K, V> entry =
                            cctx.near().entryExx(k, req.topologyVersion());

                        try {
                          GridTuple3<GridCacheVersion, V, byte[]> oldValTup =
                              valMap.get(entry.key());

                          boolean hasBytes = entry.hasValue();
                          V oldVal = entry.rawGet();
                          V newVal = res.value(i);
                          byte[] newBytes = res.valueBytes(i);

                          GridCacheVersion dhtVer = res.dhtVersion(i);
                          GridCacheVersion mappedVer = res.mappedVersion(i);

                          // On local node don't record twice if DHT cache already recorded.
                          boolean record =
                              retval && oldValTup != null && oldValTup.get1().equals(dhtVer);

                          if (newVal == null) {
                            if (oldValTup != null) {
                              if (oldValTup.get1().equals(dhtVer)) {
                                newVal = oldValTup.get2();

                                newBytes = oldValTup.get3();
                              }

                              oldVal = oldValTup.get2();
                            }
                          }

                          // Lock is held at this point, so we can set the
                          // returned value if any.
                          entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

                          entry.readyNearLock(
                              lockVer,
                              mappedVer,
                              res.committedVersions(),
                              res.rolledbackVersions(),
                              res.pending());

                          if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                            boolean pass = res.filterResult(i);

                            tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
                          }

                          if (record) {
                            if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                              cctx.events()
                                  .addEvent(
                                      entry.partition(),
                                      entry.key(),
                                      tx,
                                      null,
                                      EVT_CACHE_OBJECT_READ,
                                      newVal,
                                      newVal != null,
                                      oldVal,
                                      hasBytes,
                                      CU.subjectId(tx, cctx));

                            cctx.cache().metrics0().onRead(oldVal != null);
                          }

                          if (log.isDebugEnabled())
                            log.debug(
                                "Processed response for entry [res="
                                    + res
                                    + ", entry="
                                    + entry
                                    + ']');

                          break; // Inner while loop.
                        } catch (GridCacheEntryRemovedException ignored) {
                          if (log.isDebugEnabled())
                            log.debug(
                                "Failed to add candidates because entry was "
                                    + "removed (will renew).");

                          // Replace old entry with new one.
                          entries.set(
                              i,
                              (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
                        }
                      }

                      i++; // Increment outside of while loop.
                    }

                    // Proceed and add new future (if any) before completing embedded future.
                    proceedMapping(mappings);
                  } catch (GridException ex) {
                    onError(ex);

                    return false;
                  }

                  return true;
                }
              }));
    } else {
      final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      GridFuture<?> txSync = null;

      if (inTx()) txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

      if (txSync == null || txSync.isDone()) {
        try {
          if (log.isDebugEnabled())
            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

          cctx.io().send(node, req);
        } catch (GridTopologyException ex) {
          assert fut != null;

          fut.onResult(ex);
        }
      } else {
        txSync.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> t) {
                try {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                  cctx.io().send(node, req);
                } catch (GridTopologyException ex) {
                  assert fut != null;

                  fut.onResult(ex);
                } catch (GridException e) {
                  onError(e);
                }
              }
            });
      }
    }
  }
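Note that proceedMapping() handles a single mapping per call: the next mapping is only polled from the completion path of the previous one, which is what keeps lock acquisition ordered across primary nodes. Below is a tiny illustration of that chaining shape using JDK futures; sendLockRequest is a hypothetical placeholder for the real asynchronous send.

import java.util.Queue;
import java.util.concurrent.CompletableFuture;

class SequentialMappingChain {
  /** Processes the queue one mapping at a time; recursion happens only after the previous step completes. */
  static CompletableFuture<Void> proceed(Queue<String> mappings) {
    String next = mappings.poll();

    if (next == null) return CompletableFuture.completedFuture(null); // No more mappings: done.

    return sendLockRequest(next).thenCompose(ignored -> proceed(mappings));
  }

  static CompletableFuture<String> sendLockRequest(String mapping) {
    // Placeholder: completes when the primary node acknowledges the lock for this mapping.
    return CompletableFuture.completedFuture("ok:" + mapping);
  }
}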
Example #11
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by node and send a single lock
   * request per node, as such an approach does not preserve the order of lock acquisition.
   * Instead, keys are split into contiguous groups belonging to one primary node, and locks for
   * these groups are acquired sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : " Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
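As the Javadoc notes, grouping all keys per node would reorder acquisition, so only consecutive keys that share a primary are folded into one mapping. Below is a simplified sketch of that splitting step; Mapping, Affinity and primaryFor are hypothetical stand-ins for the affinity types used above.

import java.util.ArrayList;
import java.util.List;

class ContiguousGrouping {
  static class Mapping {
    final String node;
    final List<String> keys = new ArrayList<>();

    Mapping(String node) {
      this.node = node;
    }
  }

  interface Affinity {
    String primaryFor(String key);
  }

  /** Splits keys into contiguous runs sharing a primary node, preserving the original key order. */
  static List<Mapping> split(Iterable<String> keys, Affinity aff) {
    List<Mapping> mappings = new ArrayList<>();

    Mapping cur = null;

    for (String key : keys) {
      String primary = aff.primaryFor(key);

      // Start a new mapping only when the primary changes; neighbours with the same primary fold together.
      if (cur == null || !cur.node.equals(primary)) {
        cur = new Mapping(primary);

        mappings.add(cur);
      }

      cur.keys.add(key);
    }

    return mappings;
  }
}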
Example #12
 /** {@inheritDoc} */
 @Override
 public String toString() {
   return S.toString(MiniFuture.class, this, "node", node.id(), "super", super.toString());
 }
Example #13
    /** @param res Result callback. */
    void onResult(GridNearLockResponse<K, V> res) {
      if (rcvRes.compareAndSet(false, true)) {
        if (res.error() != null) {
          if (log.isDebugEnabled())
            log.debug(
                "Finishing mini future with an error due to error in response [miniFut="
                    + this
                    + ", res="
                    + res
                    + ']');

          // Fail.
          if (res.error() instanceof GridCacheLockTimeoutException) onDone(false);
          else onDone(res.error());

          return;
        }

        int i = 0;

        long topVer = topSnapshot.get().topologyVersion();

        for (K k : keys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, topVer);

            try {
              if (res.dhtVersion(i) == null) {
                onDone(
                    new GridException(
                        "Failed to receive DHT version from remote node "
                            + "(will fail the lock): "
                            + res));

                return;
              }

              GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key());

              V oldVal = entry.rawGet();
              boolean hasOldVal = false;
              V newVal = res.value(i);
              byte[] newBytes = res.valueBytes(i);

              boolean readRecordable = false;

              if (retval) {
                readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ);

                if (readRecordable) hasOldVal = entry.hasValue();
              }

              GridCacheVersion dhtVer = res.dhtVersion(i);
              GridCacheVersion mappedVer = res.mappedVersion(i);

              if (newVal == null) {
                if (oldValTup != null) {
                  if (oldValTup.get1().equals(dhtVer)) {
                    newVal = oldValTup.get2();

                    newBytes = oldValTup.get3();
                  }

                  oldVal = oldValTup.get2();
                }
              }

              // Lock is held at this point, so we can set the
              // returned value if any.
              entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

              if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                boolean pass = res.filterResult(i);

                tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
              }

              entry.readyNearLock(
                  lockVer,
                  mappedVer,
                  res.committedVersions(),
                  res.rolledbackVersions(),
                  res.pending());

              if (retval) {
                if (readRecordable)
                  cctx.events()
                      .addEvent(
                          entry.partition(),
                          entry.key(),
                          tx,
                          null,
                          EVT_CACHE_OBJECT_READ,
                          newVal,
                          newVal != null || newBytes != null,
                          oldVal,
                          hasOldVal,
                          CU.subjectId(tx, cctx));

                cctx.cache().metrics0().onRead(false);
              }

              if (log.isDebugEnabled())
                log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');

              break; // Inner while loop.
            } catch (GridCacheEntryRemovedException ignored) {
              if (log.isDebugEnabled())
                log.debug("Failed to add candidates because entry was removed (will renew).");

              // Replace old entry with new one.
              entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
            } catch (GridException e) {
              onDone(e);

              return;
            }
          }

          i++;
        }

        try {
          proceedMapping(mappings);
        } catch (GridException e) {
          onDone(e);
        }

        onDone(true);
      }
    }
Example #14
  /** Initializes future. */
  public void prepare() {
    if (log.isDebugEnabled())
      log.debug("Checking if transaction was committed on remote nodes: " + tx);

    // Check local node first (local node can be a backup node for some part of this transaction).
    long originatingThreadId = tx.threadId();

    if (tx instanceof GridCacheTxRemoteEx)
      originatingThreadId = ((GridCacheTxRemoteEx) tx).remoteThreadId();

    GridCacheCommittedTxInfo<K, V> txInfo =
        cctx.tm().txCommitted(tx.nearXidVersion(), tx.eventNodeId(), originatingThreadId);

    if (txInfo != null) {
      onDone(txInfo);

      markInitialized();

      return;
    }

    Collection<GridNode> checkNodes = CU.remoteNodes(cctx, tx.topologyVersion());

    if (tx instanceof GridDhtTxRemote) {
      // If the primary node failed but the near node has not.
      if (tx.nodeId().equals(failedNodeId) && !tx.eventNodeId().equals(failedNodeId)) {
        nearCheck = true;

        GridNode nearNode = cctx.discovery().node(tx.eventNodeId());

        if (nearNode == null) {
          // Near node failed; a separate check-prepared future will take care of it.
          onDone(
              new GridTopologyException(
                  "Failed to check near transaction state (near node left grid): "
                      + tx.eventNodeId()));

          return;
        }

        checkNodes = Collections.singletonList(nearNode);
      }
    }

    for (GridNode rmtNode : checkNodes) {
      // Skip left nodes and local node.
      if (rmtNode.id().equals(failedNodeId)) continue;

      /*
       * Send message to all cache nodes in the topology.
       */

      MiniFuture fut = new MiniFuture(rmtNode.id());

      GridCachePessimisticCheckCommittedTxRequest<K, V> req =
          new GridCachePessimisticCheckCommittedTxRequest<>(
              tx, originatingThreadId, futureId(), fut.futureId());

      add(fut);

      try {
        cctx.io().send(rmtNode.id(), req);
      } catch (GridTopologyException ignored) {
        fut.onNodeLeft();
      } catch (GridException e) {
        fut.onError(e);

        break;
      }
    }

    markInitialized();
  }
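prepare() answers from the local transaction manager when it can and otherwise adds one MiniFuture per remaining node before calling markInitialized(), so the compound future cannot complete while requests are still being sent. Below is a loose, simplified analogue of that shape with JDK futures; ask() and the single-answer reduction are hypothetical simplifications of the real request/response and reducer logic.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CompletableFuture;

class CheckCommittedSketch {
  /** Uses the local answer when available; otherwise asks each remaining node and takes the first reply. */
  static CompletableFuture<Boolean> check(Boolean localAnswer, Collection<String> remoteNodes) {
    if (localAnswer != null) return CompletableFuture.completedFuture(localAnswer); // Local fast path.

    List<CompletableFuture<Boolean>> minis = new ArrayList<>();

    for (String node : remoteNodes)
      minis.add(ask(node)); // One mini future per node, all registered before "initialization" ends.

    return CompletableFuture.anyOf(minis.toArray(new CompletableFuture[0]))
        .thenApply(first -> (Boolean) first);
  }

  static CompletableFuture<Boolean> ask(String node) {
    // Placeholder for the real check-committed request/response round trip.
    return CompletableFuture.completedFuture(Boolean.TRUE);
  }
}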
Example #15
  /** @param m Mapping. */
  private void finish(GridDistributedTxMapping<K, V> m) {
    GridNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest<K, V> req =
        new GridNearTxFinishRequest<>(
            futId,
            tx.xidVersion(),
            tx.threadId(),
            commit,
            tx.isInvalidate(),
            m.explicitLock(),
            tx.topologyVersion(),
            null,
            null,
            null,
            tx.size(),
            commit && tx.pessimistic() ? m.writes() : null,
            commit && tx.pessimistic() ? tx.writeEntries() : null,
            commit ? tx.syncCommit() : tx.syncRollback(),
            tx.subjectId(),
            tx.taskNameHash());

    // If this is the primary node for the keys.
    if (n.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (CU.DHT_ENABLED) {
        GridFuture<GridCacheTx> fut =
            commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req);

        // Add new future.
        add(fut);
      } else
        // Add done future for testing.
        add(new GridFinishedFuture<GridCacheTx>(ctx));
    } else {
      MiniFuture fut = new MiniFuture(m);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      if (tx.pessimistic()) cctx.tm().beforeFinishRemote(n.id(), tx.threadId());

      try {
        cctx.io().send(n, req);

        // If we don't wait for result, then mark future as done.
        if (!isSync() && !m.explicitLock()) fut.onDone();
      } catch (GridTopologyException e) {
        // Remove previous mapping.
        mappings.remove(m.node().id());

        fut.onResult(e);
      } catch (GridException e) {
        // Fail the whole thing.
        fut.onResult(e);
      }
    }
  }