/** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    if (map.size() + skipped.size() != keys.size()) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              filter));
    }

    return map;
  }
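  /*
   * A minimal, self-contained sketch (hypothetical names, not GridGain API) of the
   * near-then-DHT fallback used by peekAll above: consult the near tier first, then
   * query the second tier only for keys that were neither found nor skipped. The
   * size check mirrors the one above: a DHT lookup happens only while some key is
   * still unaccounted for. Assumes: import java.util.*;
   */
  static <K, V> Map<K, V> peekTwoTierSketch(
      Collection<K> keys, Map<K, V> nearTier, Map<K, V> dhtTier, Set<K> skipped) {
    Map<K, V> res = new HashMap<>();

    for (K k : keys)
      if (!skipped.contains(k) && nearTier.containsKey(k)) res.put(k, nearTier.get(k));

    // If every key is either found or skipped, the DHT tier is never consulted.
    if (res.size() + skipped.size() != keys.size())
      for (K k : keys)
        if (!res.containsKey(k) && !skipped.contains(k) && dhtTier.containsKey(k))
          res.put(k, dhtTier.get(k));

    return res;
  }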
  /** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys, @Nullable Collection<GridCachePeekMode> modes)
      throws GridException {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map =
        modes == null || !modes.contains(PARTITIONED_ONLY)
            ? peekAll0(keys, modes, ctx.tm().localTxx(), skipped)
            : new GridLeanMap<K, V>(0);

    if (map.size() + skipped.size() != keys.size()
        && (modes == null || !modes.contains(NEAR_ONLY))) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              modes));
    }

    return map;
  }
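  /*
   * Sketch of the mode gating in the overload above (hypothetical names, not
   * GridGain API): PARTITIONED_ONLY suppresses the near-tier peek entirely, and
   * NEAR_ONLY suppresses the DHT fallback, so each tier is consulted at most once.
   * Assumes: import java.util.*;
   */
  static <K, V> Map<K, V> peekModedSketch(
      Collection<K> keys,
      Map<K, V> nearTier,
      Map<K, V> dhtTier,
      boolean partitionedOnly,
      boolean nearOnly) {
    Map<K, V> res = new HashMap<>();

    if (!partitionedOnly) // PARTITIONED_ONLY: skip the near tier.
      for (K k : keys) if (nearTier.containsKey(k)) res.put(k, nearTier.get(k));

    if (res.size() != keys.size() && !nearOnly) // NEAR_ONLY: skip the DHT tier.
      for (K k : keys)
        if (!res.containsKey(k) && dhtTier.containsKey(k)) res.put(k, dhtTier.get(k));

    return res;
  }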
  /**
   * @param cctx Cache context.
   * @param keys Keys to lock.
   * @param tx Transaction.
   * @param read Read flag.
   * @param retval Flag to return value or not.
   * @param timeout Lock acquisition timeout.
   * @param filter Filter.
   */
  public GridNearLockFuture(
      GridCacheContext<K, V> cctx,
      Collection<? extends K> keys,
      @Nullable GridNearTxLocal<K, V> tx,
      boolean read,
      boolean retval,
      long timeout,
      GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());
    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
      timeoutObj = new LockTimeoutObject();

      cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
  }
  /**
   * Removes locks regardless of whether they are owned or not, for the given version and keys.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    try {
      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

      for (K key : keys) {
        // Send request to remove from remote nodes.
        GridNearUnlockRequest<K, V> req = null;

        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                if (affNodes == null) {
                  affNodes = CU.allNodes(ctx, cand.topologyVersion());

                  keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                  map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                }

                GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                if (!primary.isLocal()) {
                  req = map.get(primary);

                  if (req == null) {
                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                    req.version(ver);
                  }
                }

                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  if (primary.isLocal()) {
                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                    assert req == null;

                    continue;
                  }

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (map == null || map.isEmpty()) return;

      Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
      Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

      for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (!req.keyBytes().isEmpty()) {
          req.completedVersions(committed, rolledback);

          // We don't wait for reply to this message.
          ctx.io().send(n, req);
        }
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
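  /*
   * Sketch of the per-node batching used by removeLocks and unlockAll above
   * (hypothetical names, not GridGain API): requests are created lazily per primary
   * node, each pre-sized to ceil(totalKeys / nodeCount) so the backing collections
   * rarely grow. E.g. 10 keys across 3 affinity nodes gives ceil(10/3) = 4 slots
   * per batch. Assumes: import java.util.*; import java.util.function.Function;
   */
  static <K, N> Map<N, List<K>> batchByPrimarySketch(
      Collection<K> keys, Function<K, N> primaryOf, int nodeCnt) {
    // Capacity hint only; correctness does not depend on it.
    int keyCnt = (int) Math.ceil((double) keys.size() / nodeCnt);

    Map<N, List<K>> map = new HashMap<>(nodeCnt);

    for (K key : keys)
      map.computeIfAbsent(primaryOf.apply(key), n -> new ArrayList<>(keyCnt)).add(key);

    return map;
  }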
  /** {@inheritDoc} */
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return;

    try {
      GridCacheVersion ver = null;

      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

      Collection<K> locKeys = new LinkedList<K>();

      GridCacheVersion obsoleteVer = ctx.versions().next();

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          if (entry == null || !ctx.isAll(entry.wrap(false), filter)) break; // While.

          try {
            GridCacheMvccCandidate<K> cand =
                entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

            if (cand != null) {
              ver = cand.version();

              if (affNodes == null) {
                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
              }

              // Send request to remove from remote nodes.
              GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

              GridNearUnlockRequest<K, V> req = map.get(primary);

              if (req == null) {
                map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                req.version(ver);
              }

              // Remove candidate from local node first.
              GridCacheMvccCandidate<K> rmv = entry.removeLock();

              if (rmv != null) {
                if (!rmv.reentry()) {
                  if (ver != null && !ver.equals(rmv.version()))
                    throw new GridException(
                        "Failed to unlock (if keys were locked separately, "
                            + "then they need to be unlocked separately): "
                            + keys);

                  if (!primary.isLocal()) {
                    assert req != null;

                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                  } else locKeys.add(key);

                  if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
                } else if (log.isDebugEnabled())
                  log.debug(
                      "Current thread still owns lock (or there are no other nodes)"
                          + " [lock="
                          + rmv
                          + ", curThreadId="
                          + Thread.currentThread().getId()
                          + ']');
              }

              // Try to evict near entry if it's dht-mapped locally.
              evictNearEntry(entry, obsoleteVer);
            }

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Attempted to unlock removed entry (will retry): " + entry);
          }
        }
      }

      if (ver == null) return;

      for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridRichNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (n.isLocal()) dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
        else if (!req.keyBytes().isEmpty())
          // We don't wait for reply to this message.
          ctx.io().send(n, req);
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return;

    Collection<? extends GridNode> nodes = ctx.remoteNodes(keys);

    try {
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      for (K key : keys) {
        GridDistributedCacheEntry<K, V> entry = entryexx(key);

        if (!ctx.isAll(entry.wrap(false), filter)) continue;

        // Unlock local lock first.
        GridCacheMvccCandidate<K> rmv = entry.removeLock();

        if (rmv != null && !nodes.isEmpty()) {
          if (!rmv.reentry()) {
            req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);

            // We are assuming that lock ID is the same for all keys.
            req.version(rmv.version());

            if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
          } else {
            if (log.isDebugEnabled())
              log.debug(
                  "Locally unlocked lock reentry without distributing to other nodes [removed="
                      + rmv
                      + ", entry="
                      + entry
                      + ']');
          }
        } else {
          if (log.isDebugEnabled())
            log.debug(
                "Current thread still owns lock (or there are no other nodes) [lock="
                    + rmv
                    + ", curThreadId="
                    + Thread.currentThread().getId()
                    + ']');
        }
      }

      // Don't proceed if there are no keys to unlock.
      if (req.keyBytes().isEmpty()) {
        if (log.isDebugEnabled())
          log.debug("No keys to unlock locally (was it reentry unlock?): " + keys);

        return;
      }

      // We don't wait for reply to this message. Receiving side will have
      // to make sure that unlock requests don't come before lock requests.
      ctx.io().safeSend(nodes, req, null);
    } catch (GridException e) {
      U.error(log, "Failed to unlock keys: " + keys, e);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  @Override
  protected GridFuture<Boolean> lockAllAsync(
      Collection<? extends K> keys,
      long timeout,
      GridCacheTxLocalEx<K, V> tx,
      boolean isInvalidate,
      boolean isRead,
      boolean retval,
      GridCacheTxIsolation isolation,
      GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true);

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    final GridReplicatedLockFuture<K, V> fut =
        new GridReplicatedLockFuture<K, V>(ctx, keys, tx, this, nodes, timeout, filter);

    GridDistributedLockRequest<K, V> req =
        new GridDistributedLockRequest<K, V>(
            locNodeId,
            Thread.currentThread().getId(),
            fut.futureId(),
            fut.version(),
            tx != null,
            isRead,
            isolation,
            isInvalidate,
            timeout,
            keys.size());

    try {
      // Must add future before readying locks.
      if (!ctx.mvcc().addFuture(fut))
        throw new IllegalStateException("Duplicate future ID: " + fut);

      boolean distribute = false;

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = null;

          try {
            entry = entryexx(key);

            if (!ctx.isAll(entry.wrap(false), filter)) {
              if (log.isDebugEnabled())
                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

              fut.onDone(false);

              return fut;
            }

            // Removed exception may be thrown here.
            GridCacheMvccCandidate<K> cand = fut.addEntry(entry);

            if (cand != null) {
              req.addKeyBytes(
                  key,
                  cand.reentry() ? null : entry.getOrMarshalKeyBytes(),
                  retval,
                  entry.localCandidates(fut.version()),
                  ctx);

              req.completedVersions(
                  ctx.tm().committedVersions(fut.version()),
                  ctx.tm().rolledbackVersions(fut.version()));

              distribute = !cand.reentry();
            } else if (fut.isDone()) return fut;

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
          }
        }
      }

      // If nothing to distribute at this point,
      // then all locks are reentries.
      if (!distribute) fut.complete(true);

      if (nodes.isEmpty()) fut.readyLocks();

      // No reason to send request if all locks are locally re-entered,
      // or if timeout is negative and local locks could not be acquired.
      if (fut.isDone()) return fut;

      try {
        ctx.io()
            .safeSend(
                fut.nodes(),
                req,
                new P1<GridNode>() {
                  @Override
                  public boolean apply(GridNode node) {
                    fut.onNodeLeft(node.id());

                    return !fut.isDone();
                  }
                });
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send lock request to node [nodes="
                + U.toShortString(nodes)
                + ", req="
                + req
                + ']',
            e);

        fut.onError(e);
      }

      return fut;
    } catch (GridException e) {
      Throwable err = new GridException("Failed to acquire asynchronous lock for keys: " + keys, e);

      // Clean-up.
      fut.onError(err);

      ctx.mvcc().removeFuture(fut);

      return fut;
    }
  }
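  /*
   * Sketch of the retry idiom that recurs in the methods above (hypothetical types,
   * not GridGain API): the entry is re-read on every attempt, and a concurrent
   * removal between lookup and use simply restarts the loop. Assumes:
   * import java.util.function.Supplier;
   */
  static class EntryRemovedSketchException extends Exception {}

  interface EntryActionSketch<E> {
    void apply(E entry) throws EntryRemovedSketchException;
  }

  static <E> void retryOnRemovedSketch(Supplier<E> lookup, EntryActionSketch<E> action) {
    while (true) {
      E entry = lookup.get(); // Always re-read: a cached reference may be obsolete.

      try {
        if (entry != null) action.apply(entry);

        break; // Done: entry processed, or nothing to process.
      } catch (EntryRemovedSketchException ignored) {
        // Entry was removed between lookup and use: retry with a fresh entry.
      }
    }
  }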
  /**
   * Removes locks regardless of whether they are owned or not, for the given version and keys.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    try {
      // Send request to remove from remote nodes.
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      req.version(ver);

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  // If there is only local node in this lock's topology,
                  // then there is no reason to distribute the request.
                  if (nodes.isEmpty()) continue;

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (nodes.isEmpty()) return;

      req.completedVersions(ctx.tm().committedVersions(ver), ctx.tm().rolledbackVersions(ver));

      if (!req.keyBytes().isEmpty())
        // We don't wait for reply to this message.
        ctx.io().safeSend(nodes, req, null);
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by nodes and send lock requests,
   * as such an approach would not preserve the order of lock acquisition. Instead, keys are
   * split into contiguous groups belonging to one primary node, and locks for these groups are
   * acquired sequentially (a standalone sketch of this grouping follows the method).
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : "Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
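  /*
   * Self-contained sketch of the order-preserving split described in the Javadoc of
   * map(..) above (hypothetical names, not GridGain API): consecutive keys sharing a
   * primary node form one contiguous group, and a new group starts whenever the
   * primary changes, so lock acquisition order follows key order. E.g. keys
   * [a, b, c] with primaries [n1, n1, n2] yield [(n1, [a, b]), (n2, [c])].
   * Assumes: import java.util.*; import java.util.function.Function;
   */
  static <K, N> List<Map.Entry<N, List<K>>> splitContiguousSketch(
      Iterable<K> keys, Function<K, N> primaryOf) {
    List<Map.Entry<N, List<K>>> mappings = new ArrayList<>();

    N cur = null;
    List<K> group = null;

    for (K key : keys) {
      N node = primaryOf.apply(key);

      if (group == null || !node.equals(cur)) {
        // Primary changed: start a new contiguous group instead of merging with an
        // earlier group for the same node (merging would reorder lock acquisition).
        group = new ArrayList<>();

        mappings.add(new AbstractMap.SimpleEntry<>(node, group));

        cur = node;
      }

      group.add(key);
    }

    return mappings;
  }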