  /** {@inheritDoc} */
  @Override
  public GridCacheEntryEx<K, V> entryEx(K key) {
    GridNearCacheEntry<K, V> entry = null;

    while (true) {
      try {
        entry = (GridNearCacheEntry<K, V>) super.entryEx(key);

        entry.initializeFromDht();

        return entry;
      } catch (GridCacheEntryRemovedException ignore) {
        if (log.isDebugEnabled())
          log.debug(
              "Got removed near entry while initializing from DHT entry (will retry): " + entry);
      }
    }
  }
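
The retry loop above is the pattern used throughout these examples: an entry obtained from the cache may be removed concurrently, and GridCacheEntryRemovedException tells the caller to fetch a fresh entry and try again. A minimal, self-contained sketch of the same retry-until-valid idiom, using hypothetical SimpleCache/Entry/EntryRemovedException names that are not part of the GridGain API:

  // Sketch only: illustrative names, not the GridGain API.
  class EntryRemovedException extends Exception {
  }

  class SimpleCache<K, V> {
    /** Hypothetical raw lookup; the returned entry may be invalidated concurrently. */
    Entry<K, V> rawEntry(K key) {
      return new Entry<>(key);
    }

    /** Retries until an entry is obtained that was not removed during initialization. */
    Entry<K, V> entry(K key) {
      while (true) {
        Entry<K, V> e = rawEntry(key);

        try {
          e.initialize(); // May observe a concurrent removal.

          return e; // Entry is valid, hand it to the caller.
        } catch (EntryRemovedException ignore) {
          // Entry was removed between lookup and initialization; retry with a fresh one.
        }
      }
    }

    static class Entry<K, V> {
      final K key;
      volatile boolean removed;

      Entry(K key) {
        this.key = key;
      }

      void initialize() throws EntryRemovedException {
        if (removed)
          throw new EntryRemovedException();
      }
    }
  }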
Example #2
  /**
   * Adds entry to future.
   *
   * @param topVer Topology version.
   * @param entry Entry to add.
   * @param dhtNodeId DHT node ID.
   * @return Lock candidate.
   * @throws GridCacheEntryRemovedException If entry was removed.
   */
  @Nullable
  private GridCacheMvccCandidate<K> addEntry(
      long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId)
      throws GridCacheEntryRemovedException {
    // Check if lock acquisition has timed out.
    if (timedOut) return null;

    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate<K> c =
        entry.addNearLocal(
            dhtNodeId, threadId, lockVer, timeout, !inTx(), inTx(), implicitSingleTx());

    if (inTx()) {
      GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key());

      txEntry.cached(entry, txEntry.keyBytes());
    }

    if (c != null) c.topologyVersion(topVer);

    synchronized (mux) {
      entries.add(entry);
    }

    if (c == null && timeout < 0) {
      if (log.isDebugEnabled()) log.debug("Failed to acquire lock with negative timeout: " + entry);

      onFailed(false);

      return null;
    }

    // Double check if lock acquisition has already timed out.
    if (timedOut) {
      entry.removeLock(lockVer);

      return null;
    }

    return c;
  }
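
Note the check / register / re-check shape of the timeout handling in addEntry: the timed-out flag is tested before any work is done, the candidate is registered (which may throw GridCacheEntryRemovedException), and the flag is tested again so a timeout that fires in between does not leave a stale lock behind. A stripped-down sketch of that pattern, under the assumption that the flag is a plain AtomicBoolean and the add/remove steps are passed in as Runnables (they are not in the real future):

  import java.util.concurrent.atomic.AtomicBoolean;

  class LockRegistration {
    private final AtomicBoolean timedOut = new AtomicBoolean();

    /** Called by a timeout task elsewhere. */
    void onTimeout() {
      timedOut.set(true);
    }

    /** Returns true if the lock candidate was registered, false if the attempt timed out. */
    boolean register(Runnable addLock, Runnable removeLock) {
      // Fast path: give up before doing any work if the attempt already timed out.
      if (timedOut.get())
        return false;

      addLock.run(); // Register the candidate; it may now be visible to other threads.

      // Double check: a timeout may have fired while we were registering, in which
      // case the candidate must be removed to avoid leaving a stale lock behind.
      if (timedOut.get()) {
        removeLock.run();

        return false;
      }

      return true;
    }
  }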
  /**
   * @param nodeId Primary node ID.
   * @param req Request.
   * @return Remote transaction.
   * @throws GridException If failed.
   * @throws GridDistributedLockCancelledException If lock has been cancelled.
   */
  @SuppressWarnings({"RedundantTypeArguments"})
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
      throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
      for (int i = 0; i < nearKeyBytes.size(); i++) {
        byte[] bytes = nearKeyBytes.get(i);

        if (bytes == null) continue;

        K key = req.nearKeys().get(i);

        Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

        if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

        GridNearCacheEntry<K, V> entry = null;

        while (true) {
          try {
            entry = peekExx(key);

            if (entry != null) {
              entry.keyBytes(bytes);

              // Handle implicit locks for pessimistic transactions.
              if (req.inTx()) {
                tx = ctx.tm().tx(req.version());

                if (tx != null) tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                else {
                  tx =
                      new GridNearTxRemote<K, V>(
                          nodeId,
                          req.nearNodeId(),
                          req.threadId(),
                          req.version(),
                          null,
                          PESSIMISTIC,
                          req.isolation(),
                          req.isInvalidate(),
                          req.timeout(),
                          key,
                          bytes,
                          null, // Value.
                          null, // Value bytes.
                          ctx);

                  if (tx.empty()) return tx;

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + req.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  req.nodeId(),
                  nodeId,
                  req.threadId(),
                  req.version(),
                  req.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, req.version(), req.committedVersions(), req.rolledbackVersions());

              entry.orderOwned(req.version(), req.owned(entry.key()));
            }

            // Double-check in case the sender node left the grid.
            if (ctx.discovery().node(req.nodeId()) == null) {
              if (log.isDebugEnabled())
                log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

              if (tx != null) tx.rollback();

              return null;
            }

            // Entry is legit.
            break;
          } catch (GridCacheEntryRemovedException ignored) {
            assert entry.obsoleteVersion() != null
                : "Obsolete flag not set on removed entry: " + entry;

            if (log.isDebugEnabled())
              log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

            if (tx != null) {
              tx.clearEntry(entry.key());

              if (log.isDebugEnabled())
                log.debug(
                    "Cleared removed entry from remote transaction (will retry) [entry="
                        + entry
                        + ", tx="
                        + tx
                        + ']');
            }
          }
        }
      }
    } else {
      String err = "Failed to acquire deployment class loader for message: " + req;

      U.warn(log, err);

      throw new GridException(err);
    }

    return tx;
  }
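
Two details in startRemoteTx are easy to miss. The remote transaction is first looked up by the request version and only created when none exists, with the transaction manager deciding whether the new instance is actually registered; and after all per-key state is applied, the sender is re-checked against the topology and everything is rolled back if it has left. A compact sketch of the lookup-or-create half, using a plain ConcurrentMap keyed by UUID instead of the transaction manager (names and types are illustrative):

  import java.util.UUID;
  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  class RemoteTxRegistry {
    /** Remote transactions keyed by the version carried in the lock request. */
    private final ConcurrentMap<UUID, RemoteTx> txs = new ConcurrentHashMap<>();

    RemoteTx lookupOrCreate(UUID version, UUID nearNodeId) {
      RemoteTx tx = txs.get(version);

      if (tx != null)
        return tx; // An earlier key of the same request already created it.

      RemoteTx created = new RemoteTx(version, nearNodeId);

      // Only one thread wins the race; losers reuse the registered instance.
      RemoteTx existing = txs.putIfAbsent(version, created);

      return existing != null ? existing : created;
    }

    static class RemoteTx {
      final UUID version;
      final UUID nearNodeId;

      RemoteTx(UUID version, UUID nearNodeId) {
        this.version = version;
        this.nearNodeId = nearNodeId;
      }
    }
  }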
Example #4
  /**
   * Maps keys to nodes. Note that we can not simply group keys by nodes and send lock request as
   * such approach does not preserve order of lock acquisition. Instead, keys are split in
   * continuous groups belonging to one primary node and locks for these groups are acquired
   * sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : " Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
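
The point made in the javadoc above is that keys are not simply bucketed per node: a new mapping is started every time the primary node changes while iterating, so the lock acquisition order of the original key sequence is preserved. A minimal sketch of that contiguous-run grouping, where the primary-node affinity function is a hypothetical stand-in:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.function.Function;

  class ContiguousMapper {
    /** One run of consecutive keys that all map to the same primary node. */
    static class Mapping<K, N> {
      final N node;
      final List<K> keys = new ArrayList<>();

      Mapping(N node) {
        this.node = node;
      }
    }

    /**
     * Splits keys into contiguous runs per primary node. Unlike grouping into a
     * Map of node to keys, this keeps the original key order, so locks for run i
     * are always requested before locks for run i + 1.
     */
    static <K, N> List<Mapping<K, N>> map(Iterable<K> keys, Function<K, N> primaryNode) {
      List<Mapping<K, N>> mappings = new ArrayList<>();

      Mapping<K, N> cur = null;

      for (K key : keys) {
        N node = primaryNode.apply(key);

        // Start a new mapping whenever the primary node changes.
        if (cur == null || !cur.node.equals(node)) {
          cur = new Mapping<>(node);

          mappings.add(cur);
        }

        cur.keys.add(key);
      }

      return mappings;
    }
  }

For a key sequence whose primaries are n1, n1, n2, n1 this yields three mappings ([k1, k2] on n1, [k3] on n2, [k4] on n1) rather than two, which is exactly what keeps acquisition sequential in key order.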
Example #5
    /** @param res Lock response. */
    void onResult(GridNearLockResponse<K, V> res) {
      if (rcvRes.compareAndSet(false, true)) {
        if (res.error() != null) {
          if (log.isDebugEnabled())
            log.debug(
                "Finishing mini future with an error due to error in response [miniFut="
                    + this
                    + ", res="
                    + res
                    + ']');

          // Fail.
          if (res.error() instanceof GridCacheLockTimeoutException) onDone(false);
          else onDone(res.error());

          return;
        }

        int i = 0;

        long topVer = topSnapshot.get().topologyVersion();

        for (K k : keys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, topVer);

            try {
              if (res.dhtVersion(i) == null) {
                onDone(
                    new GridException(
                        "Failed to receive DHT version from remote node "
                            + "(will fail the lock): "
                            + res));

                return;
              }

              GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key());

              V oldVal = entry.rawGet();
              boolean hasOldVal = false;
              V newVal = res.value(i);
              byte[] newBytes = res.valueBytes(i);

              boolean readRecordable = false;

              if (retval) {
                readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ);

                if (readRecordable) hasOldVal = entry.hasValue();
              }

              GridCacheVersion dhtVer = res.dhtVersion(i);
              GridCacheVersion mappedVer = res.mappedVersion(i);

              if (newVal == null) {
                if (oldValTup != null) {
                  if (oldValTup.get1().equals(dhtVer)) {
                    newVal = oldValTup.get2();

                    newBytes = oldValTup.get3();
                  }

                  oldVal = oldValTup.get2();
                }
              }

              // Lock is held at this point, so we can set the
              // returned value if any.
              entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

              if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                boolean pass = res.filterResult(i);

                tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
              }

              entry.readyNearLock(
                  lockVer,
                  mappedVer,
                  res.committedVersions(),
                  res.rolledbackVersions(),
                  res.pending());

              if (retval) {
                if (readRecordable)
                  cctx.events()
                      .addEvent(
                          entry.partition(),
                          entry.key(),
                          tx,
                          null,
                          EVT_CACHE_OBJECT_READ,
                          newVal,
                          newVal != null || newBytes != null,
                          oldVal,
                          hasOldVal,
                          CU.subjectId(tx, cctx));

                cctx.cache().metrics0().onRead(false);
              }

              if (log.isDebugEnabled())
                log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');

              break; // Inner while loop.
            } catch (GridCacheEntryRemovedException ignored) {
              if (log.isDebugEnabled())
                log.debug("Failed to add candidates because entry was removed (will renew).");

              // Replace old entry with new one.
              entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
            } catch (GridException e) {
              onDone(e);

              return;
            }
          }

          i++;
        }

        try {
          proceedMapping(mappings);
        } catch (GridException e) {
          onDone(e);
        }

        onDone(true);
      }
    }
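
Everything in onResult runs under the rcvRes.compareAndSet(false, true) guard, so a response and, for example, a racing node-left notification can never both be processed for the same mini future. A minimal sketch of that guard, assuming the same AtomicBoolean idiom (MiniFuture and the Consumer-based handler are illustrative, not the actual future class):

  import java.util.concurrent.atomic.AtomicBoolean;
  import java.util.function.Consumer;

  class MiniFuture<R> {
    private final AtomicBoolean rcvRes = new AtomicBoolean();

    /** Processes only the first result; duplicate or racing results are ignored. */
    void onResult(R res, Consumer<R> handler) {
      // Only the caller that flips the flag from false to true runs the handler.
      if (rcvRes.compareAndSet(false, true))
        handler.accept(res);
    }
  }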