/** @return Reentry candidate. */
  public GridCacheMvccCandidate<K> reenter() {
    GridCacheMvccCandidate<K> old = reentry;

    GridCacheMvccCandidate<K> reentry =
        new GridCacheMvccCandidate<>(
            parent,
            nodeId,
            otherNodeId,
            otherVer,
            threadId,
            ver,
            timeout,
            local(),
            /*reentry*/ true,
            tx(),
            singleImplicit(),
            nearLocal(),
            dhtLocal());

    reentry.topVer = topVer;

    if (old != null) reentry.reentry = old;

    this.reentry = reentry;

    return reentry;
  }
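  /*
   * Illustration only (not part of the class): reentries are chained through the
   * 'reentry' field assigned above, so the reentry depth of a candidate can be
   * recovered by walking that chain. A minimal sketch, assuming access to that field.
   */
  private static <K> int reentryDepth(GridCacheMvccCandidate<K> cand) {
    int depth = 0;

    // Each reenter() call prepends a new candidate pointing at the previous one.
    for (GridCacheMvccCandidate<K> c = cand.reentry; c != null; c = c.reentry)
      depth++;

    return depth;
  }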
  /**
   * Removes locks for the given version and keys, regardless of whether they are currently owned.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    try {
      // Send request to remove from remote nodes.
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      req.version(ver);

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  // If there is only local node in this lock's topology,
                  // then there is no reason to distribute the request.
                  if (nodes.isEmpty()) continue;

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (nodes.isEmpty()) return;

      req.completedVersions(ctx.tm().committedVersions(ver), ctx.tm().rolledbackVersions(ver));

      if (!req.keyBytes().isEmpty())
        // We don't wait for reply to this message.
        ctx.io().safeSend(nodes, req, null);
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
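  /*
   * Hypothetical caller-side sketch: force off all locks held under a failed
   * transaction's version in one call. GridCacheTxEx#xidVersion() appears
   * elsewhere in this codebase; writeSet() is assumed here for illustration.
   */
  private void forceUnlock(GridCacheTxEx<K, V> tx) {
    // Release every lock the transaction may still hold, owned or not.
    removeLocks(tx.xidVersion(), tx.writeSet());
  }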
  /**
   * @param ver Version.
   * @param mappings Mappings to set.
   * @return Candidate, if one existed for the version, or {@code null} if candidate was not found.
   * @throws GridCacheEntryRemovedException If removed.
   */
  public GridCacheMvccCandidate<K> mappings(GridCacheVersion ver, Collection<UUID> mappings)
      throws GridCacheEntryRemovedException {
    synchronized (mux) {
      checkObsolete();

      GridCacheMvccCandidate<K> cand = mvcc.candidate(ver);

      if (cand != null) cand.mappedNodeIds(mappings);

      return cand;
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public boolean equals(Object o) {
    if (o == null) return false;

    if (o == this) return true;

    GridCacheMvccCandidate<K> other = (GridCacheMvccCandidate<K>) o;

    assert key() != null && other.key() != null
        : "Key is null [this=" + this + ", other=" + o + ']';

    return ver.equals(other.ver) && key().equals(other.key());
  }
  /**
   * Add local candidate.
   *
   * @param nearNodeId Near node ID.
   * @param nearVer Near version.
   * @param threadId Owning thread ID.
   * @param ver Lock version.
   * @param timeout Timeout to acquire lock.
   * @param reenter Reentry flag.
   * @param ec Eventually consistent flag.
   * @param tx Tx flag.
   * @return New candidate.
   * @throws GridCacheEntryRemovedException If entry has been removed.
   * @throws GridDistributedLockCancelledException If lock was cancelled.
   */
  @Nullable
  public GridCacheMvccCandidate<K> addDhtLocal(
      UUID nearNodeId,
      GridCacheVersion nearVer,
      long threadId,
      GridCacheVersion ver,
      long timeout,
      boolean reenter,
      boolean ec,
      boolean tx)
      throws GridCacheEntryRemovedException, GridDistributedLockCancelledException {
    GridCacheMvccCandidate<K> cand;
    GridCacheMvccCandidate<K> prev;
    GridCacheMvccCandidate<K> owner;

    V val;

    synchronized (mux) {
      // Check removed locks prior to obsolete flag.
      checkRemoved(ver);

      checkObsolete();

      prev = mvcc.anyOwner();

      boolean emptyBefore = mvcc.isEmpty();

      cand =
          mvcc.addLocal(this, nearNodeId, nearVer, threadId, ver, timeout, reenter, ec, tx, true);

      owner = mvcc.anyOwner();

      boolean emptyAfter = mvcc.isEmpty();

      if (prev != owner) mux.notifyAll();

      checkCallbacks(emptyBefore, emptyAfter);

      val = rawGet();
    }

    // Don't link reentries.
    if (cand != null && !cand.reentry())
      // Link with other candidates in the same thread.
      cctx.mvcc().addNext(cand);

    checkOwnerChanged(prev, owner, val);

    return cand;
  }
  /** {@inheritDoc} */
  @Override
  public String toString() {
    GridCacheMvccCandidate<?> prev = previous();
    GridCacheMvccCandidate<?> next = next();

    return S.toString(
        GridCacheMvccCandidate.class,
        this,
        "key",
        parent == null ? null : parent.key(),
        "masks",
        Mask.toString(flags()),
        "prevVer",
        (prev == null ? null : prev.version()),
        "nextVer",
        (next == null ? null : next.version()));
  }
  /**
   * Callback for whenever entry lock ownership changes.
   *
   * @param entry Entry whose lock ownership changed.
   * @param owner Current lock owner, or {@code null} if the lock is not owned.
   * @return {@code true} if this future was completed by the ownership change.
   */
  @Override
  public boolean onOwnerChanged(GridCacheEntryEx<K, V> entry, GridCacheMvccCandidate<K> owner) {
    if (owner != null && owner.version().equals(lockVer)) {
      onDone(true);

      return true;
    }

    return false;
  }
  /** {@inheritDoc} */
  @Override
  public int compareTo(GridCacheMvccCandidate<K> o) {
    if (o == this) return 0;

    int c = ver.compareTo(o.ver);

    // Tie-break on key and id so that compareTo() stays consistent with equals().
    if (c == 0) return key().equals(o.key()) ? 0 : id < o.id ? -1 : 1;

    return c;
  }
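  /*
   * Sketch of why the id tie-breaker above matters: two candidates sharing a
   * version but guarding different keys must stay distinct in sorted
   * collections, matching equals(). candA and candB are hypothetical instances
   * with equal versions and different keys.
   */
  private static <K> void tieBreakExample(
      GridCacheMvccCandidate<K> candA, GridCacheMvccCandidate<K> candB) {
    SortedSet<GridCacheMvccCandidate<K>> set = new TreeSet<GridCacheMvccCandidate<K>>();

    set.add(candA);
    set.add(candB); // Kept: compareTo() falls back to id, so no false dedup.

    assert set.size() == 2;
  }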
  /**
   * Adds entry to future.
   *
   * @param topVer Topology version.
   * @param entry Entry to add.
   * @param dhtNodeId DHT node ID.
   * @return Lock candidate.
   * @throws GridCacheEntryRemovedException If entry was removed.
   */
  @Nullable
  private GridCacheMvccCandidate<K> addEntry(
      long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId)
      throws GridCacheEntryRemovedException {
    // Check if lock acquisition is timed out.
    if (timedOut) return null;

    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate<K> c =
        entry.addNearLocal(
            dhtNodeId, threadId, lockVer, timeout, !inTx(), inTx(), implicitSingleTx());

    if (inTx()) {
      GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key());

      txEntry.cached(entry, txEntry.keyBytes());
    }

    if (c != null) c.topologyVersion(topVer);

    synchronized (mux) {
      entries.add(entry);
    }

    if (c == null && timeout < 0) {
      if (log.isDebugEnabled()) log.debug("Failed to acquire lock with negative timeout: " + entry);

      onFailed(false);

      return null;
    }

    // Double check if lock acquisition has already timed out.
    if (timedOut) {
      entry.removeLock(lockVer);

      return null;
    }

    return c;
  }
  /**
   * Removes locks for the given version and keys, regardless of whether they are currently owned.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    try {
      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

      for (K key : keys) {
        // Send request to remove from remote nodes.
        GridNearUnlockRequest<K, V> req = null;

        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                if (affNodes == null) {
                  affNodes = CU.allNodes(ctx, cand.topologyVersion());

                  keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                  map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                }

                GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                if (!primary.isLocal()) {
                  req = map.get(primary);

                  if (req == null) {
                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                    req.version(ver);
                  }
                }

                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  if (primary.isLocal()) {
                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                    assert req == null;

                    continue;
                  }

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (map == null || map.isEmpty()) return;

      Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
      Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

      for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (!req.keyBytes().isEmpty()) {
          req.completedVersions(committed, rolledback);

          // We don't wait for reply to this message.
          ctx.io().send(n, req);
        }
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** {@inheritDoc} */
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return;

    try {
      GridCacheVersion ver = null;

      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

      Collection<K> locKeys = new LinkedList<K>();

      GridCacheVersion obsoleteVer = ctx.versions().next();

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          if (entry == null || !ctx.isAll(entry.wrap(false), filter)) break; // While.

          try {
            GridCacheMvccCandidate<K> cand =
                entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

            if (cand != null) {
              ver = cand.version();

              if (affNodes == null) {
                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
              }

              // Send request to remove from remote nodes.
              GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

              GridNearUnlockRequest<K, V> req = map.get(primary);

              if (req == null) {
                map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                req.version(ver);
              }

              // Remove candidate from local node first.
              GridCacheMvccCandidate<K> rmv = entry.removeLock();

              if (rmv != null) {
                if (!rmv.reentry()) {
                  if (ver != null && !ver.equals(rmv.version()))
                    throw new GridException(
                        "Failed to unlock (if keys were locked separately, "
                            + "then they need to be unlocked separately): "
                            + keys);

                  if (!primary.isLocal()) {
                    assert req != null;

                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                  } else locKeys.add(key);

                  if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
                } else if (log.isDebugEnabled())
                  log.debug(
                      "Current thread still owns lock (or there are no other nodes)"
                          + " [lock="
                          + rmv
                          + ", curThreadId="
                          + Thread.currentThread().getId()
                          + ']');
              }

              // Try to evict near entry if it's dht-mapped locally.
              evictNearEntry(entry, obsoleteVer);
            }

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Attempted to unlock removed entry (will retry): " + entry);
          }
        }
      }

      if (ver == null) return;

      for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridRichNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (n.isLocal()) dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
        else if (!req.keyBytes().isEmpty())
          // We don't wait for reply to this message.
          ctx.io().send(n, req);
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return;

    Collection<? extends GridNode> nodes = ctx.remoteNodes(keys);

    try {
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      for (K key : keys) {
        GridDistributedCacheEntry<K, V> entry = entryexx(key);

        if (!ctx.isAll(entry.wrap(false), filter)) continue;

        // Unlock local lock first.
        GridCacheMvccCandidate<K> rmv = entry.removeLock();

        if (rmv != null && !nodes.isEmpty()) {
          if (!rmv.reentry()) {
            req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);

            // We are assuming that lock ID is the same for all keys.
            req.version(rmv.version());

            if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
          } else {
            if (log.isDebugEnabled())
              log.debug(
                  "Locally unlocked lock reentry without distributing to other nodes [removed="
                      + rmv
                      + ", entry="
                      + entry
                      + ']');
          }
        } else {
          if (log.isDebugEnabled())
            log.debug(
                "Current thread still owns lock (or there are no other nodes) [lock="
                    + rmv
                    + ", curThreadId="
                    + Thread.currentThread().getId()
                    + ']');
        }
      }

      // Don't proceed if there are no keys to unlock.
      if (req.keyBytes().isEmpty()) {
        if (log.isDebugEnabled())
          log.debug("No keys to unlock locally (was it reentry unlock?): " + keys);

        return;
      }

      // We don't wait for reply to this message. Receiving side will have
      // to make sure that unlock requests don't come before lock requests.
      ctx.io().safeSend(nodes, req, null);
    } catch (GridException e) {
      U.error(log, "Failed to unlock keys: " + keys, e);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  @Override
  protected GridFuture<Boolean> lockAllAsync(
      Collection<? extends K> keys,
      long timeout,
      GridCacheTxLocalEx<K, V> tx,
      boolean isInvalidate,
      boolean isRead,
      boolean retval,
      GridCacheTxIsolation isolation,
      GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true);

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    final GridReplicatedLockFuture<K, V> fut =
        new GridReplicatedLockFuture<K, V>(ctx, keys, tx, this, nodes, timeout, filter);

    GridDistributedLockRequest<K, V> req =
        new GridDistributedLockRequest<K, V>(
            locNodeId,
            Thread.currentThread().getId(),
            fut.futureId(),
            fut.version(),
            tx != null,
            isRead,
            isolation,
            isInvalidate,
            timeout,
            keys.size());

    try {
      // Must add future before readying locks.
      if (!ctx.mvcc().addFuture(fut))
        throw new IllegalStateException("Duplicate future ID: " + fut);

      boolean distribute = false;

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = null;

          try {
            entry = entryexx(key);

            if (!ctx.isAll(entry.wrap(false), filter)) {
              if (log.isDebugEnabled())
                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

              fut.onDone(false);

              return fut;
            }

            // Removed exception may be thrown here.
            GridCacheMvccCandidate<K> cand = fut.addEntry(entry);

            if (cand != null) {
              req.addKeyBytes(
                  key,
                  cand.reentry() ? null : entry.getOrMarshalKeyBytes(),
                  retval,
                  entry.localCandidates(fut.version()),
                  ctx);

              req.completedVersions(
                  ctx.tm().committedVersions(fut.version()),
                  ctx.tm().rolledbackVersions(fut.version()));

              distribute = !cand.reentry();
            } else if (fut.isDone()) return fut;

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
          }
        }
      }

      // If nothing to distribute at this point,
      // then all locks are reentries.
      if (!distribute) fut.complete(true);

      if (nodes.isEmpty()) fut.readyLocks();

      // No reason to send request if all locks are locally re-entered,
      // or if timeout is negative and local locks could not be acquired.
      if (fut.isDone()) return fut;

      try {
        ctx.io()
            .safeSend(
                fut.nodes(),
                req,
                new P1<GridNode>() {
                  @Override
                  public boolean apply(GridNode node) {
                    fut.onNodeLeft(node.id());

                    return !fut.isDone();
                  }
                });
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send lock request to node [nodes="
                + U.toShortString(nodes)
                + ", req="
                + req
                + ']',
            e);

        fut.onError(e);
      }

      return fut;
    } catch (GridException e) {
      Throwable err = new GridException("Failed to acquire asynchronous lock for keys: " + keys, e);

      // Clean-up.
      fut.onError(err);

      ctx.mvcc().removeFuture(fut);

      return fut;
    }
  }
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request
   * per node, as such an approach does not preserve the order of lock acquisition. Instead, keys
   * are split into contiguous groups belonging to one primary node, and locks for these groups
   * are acquired sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : "Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
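  /*
   * Standalone sketch of the grouping rule from the javadoc above: keys are
   * split into contiguous runs mapped to the same primary node, so a node that
   * owns several non-adjacent runs yields several groups and lock-acquisition
   * order is preserved. primaryOf() is an assumed affinity lookup, not part of
   * this class.
   */
  private List<List<K>> contiguousGroups(Iterable<? extends K> keys) {
    List<List<K>> groups = new ArrayList<List<K>>();

    GridNode prev = null;
    List<K> run = null;

    for (K key : keys) {
      GridNode primary = primaryOf(key); // Assumed affinity lookup.

      // Start a new group whenever the primary node changes.
      if (run == null || !primary.equals(prev)) {
        groups.add(run = new ArrayList<K>());

        prev = primary;
      }

      run.add(key);
    }

    return groups;
  }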
  /**
   * @param nodeId Reader to add.
   * @param msgId Message ID.
   * @return Future that completes when all relevant transactions that were active at the time the
   *     reader was added have finished, or {@code null} if there is nothing to wait for.
   * @throws GridCacheEntryRemovedException If entry was removed.
   */
  @Nullable
  public GridFuture<Boolean> addReader(UUID nodeId, long msgId)
      throws GridCacheEntryRemovedException {
    // Don't add local node as reader.
    if (cctx.nodeId().equals(nodeId)) return null;

    GridNode node = cctx.discovery().node(nodeId);

    // If remote node has no near cache, don't add it.
    if (node == null || !U.hasNearCache(node, cctx.dht().near().name())) return null;

    // If remote node is primary or backup, don't add it as a reader.
    if (U.nodeIds(cctx.affinity(partition(), CU.allNodes(cctx))).contains(nodeId)) return null;

    boolean ret = false;

    GridCacheMultiTxFuture<K, V> txFut;

    Collection<GridCacheMvccCandidate<K>> cands = null;

    synchronized (mux) {
      checkObsolete();

      txFut = this.txFut;

      ReaderId reader = readerId(nodeId);

      if (reader == null) {
        reader = new ReaderId(nodeId, msgId);

        readers = new LinkedList<ReaderId>(readers);

        readers.add(reader);

        // Seal.
        readers = Collections.unmodifiableList(readers);

        txFut = this.txFut = new GridCacheMultiTxFuture<K, V>(cctx);

        cands = localCandidates();

        ret = true;
      } else {
        long id = reader.messageId();

        if (id < msgId) reader.messageId(msgId);
      }
    }

    if (ret) {
      assert txFut != null;

      if (!F.isEmpty(cands)) {
        for (GridCacheMvccCandidate<K> c : cands) {
          GridCacheTxEx<K, V> tx = cctx.tm().<GridCacheTxEx<K, V>>tx(c.version());

          if (tx != null) {
            assert tx.local();

            txFut.addTx(tx);
          }
        }
      }

      txFut.init();

      if (!txFut.isDone()) {
        txFut.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> f) {
                synchronized (mux) {
                  // Release memory.
                  GridDhtCacheEntry.this.txFut = null;
                }
              }
            });
      } else
        // Release memory.
        txFut = this.txFut = null;
    }

    return txFut;
  }
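  /*
   * Hypothetical caller-side sketch: gate the reply to the near reader on the
   * returned future, so the reader never observes state older than the
   * transactions that were active when it was registered. sendResponse() is an
   * assumed reply hook, not part of this class.
   */
  private void replyToReader(GridDhtCacheEntry<K, V> entry, final UUID nodeId, long msgId)
      throws GridCacheEntryRemovedException {
    GridFuture<Boolean> fut = entry.addReader(nodeId, msgId);

    if (fut == null || fut.isDone()) sendResponse(nodeId); // Nothing to wait for.
    else
      fut.listenAsync(
          new CI1<GridFuture<?>>() {
            @Override
            public void apply(GridFuture<?> f) {
              sendResponse(nodeId); // Reply only after active transactions finish.
            }
          });
  }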