/** @return Reentry candidate. */
public GridCacheMvccCandidate<K> reenter() {
    GridCacheMvccCandidate<K> old = reentry;

    GridCacheMvccCandidate<K> reentry = new GridCacheMvccCandidate<>(
        parent,
        nodeId,
        otherNodeId,
        otherVer,
        threadId,
        ver,
        timeout,
        local(),
        /*reentry*/true,
        tx(),
        singleImplicit(),
        nearLocal(),
        dhtLocal());

    reentry.topVer = topVer;

    if (old != null)
        reentry.reentry = old;

    this.reentry = reentry;

    return reentry;
}
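// Illustration (not part of the original class): a minimal, self-contained sketch of the reentry-chain idea
// used by reenter() above, assuming reentries are released in LIFO order. The ReentryChainSketch name and
// its members are hypothetical and exist only for this example.
private static final class ReentryChainSketch {
    /** Most recent reentry; each reentry in turn points at the one created before it. */
    private ReentryChainSketch prevReentry;

    /** Creates a new reentry and links the previous one so the chain can be unwound on release. */
    ReentryChainSketch reenter() {
        ReentryChainSketch reentry = new ReentryChainSketch();

        reentry.prevReentry = prevReentry;

        prevReentry = reentry;

        return reentry;
    }

    /** Releases the most recent reentry, restoring the previous head of the chain. */
    void releaseReentry() {
        if (prevReentry != null)
            prevReentry = prevReentry.prevReentry;
    }
}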
/** {@inheritDoc} */
@SuppressWarnings({"unchecked"})
@Override public boolean equals(Object o) {
    if (o == null)
        return false;

    if (o == this)
        return true;

    GridCacheMvccCandidate<K> other = (GridCacheMvccCandidate<K>)o;

    assert key() != null && other.key() != null : "Key is null [this=" + this + ", other=" + o + ']';

    return ver.equals(other.ver) && key().equals(other.key());
}
/** {@inheritDoc} */
@Override public String toString() {
    GridCacheMvccCandidate<?> prev = previous();
    GridCacheMvccCandidate<?> next = next();

    return S.toString(GridCacheMvccCandidate.class, this,
        "key", parent == null ? null : parent.key(),
        "masks", Mask.toString(flags()),
        "prevVer", (prev == null ? null : prev.version()),
        "nextVer", (next == null ? null : next.version()));
}
/**
 * Callback for whenever entry lock ownership changes.
 *
 * @param entry Entry whose lock ownership changed.
 * @param owner Current lock owner candidate.
 * @return {@code True} if the owner's version matches this future's lock version (the future is completed in that case).
 */
@Override public boolean onOwnerChanged(GridCacheEntryEx<K, V> entry, GridCacheMvccCandidate<K> owner) {
    if (owner != null && owner.version().equals(lockVer)) {
        onDone(true);

        return true;
    }

    return false;
}
/** {@inheritDoc} */
@Override public int compareTo(GridCacheMvccCandidate<K> o) {
    if (o == this)
        return 0;

    int c = ver.compareTo(o.ver);

    // This is done so that compareTo() and equals() methods stay consistent.
    if (c == 0)
        return key().equals(o.key()) ? 0 : id < o.id ? -1 : 1;

    return c;
}
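// Illustration (not part of the original class): a standalone sketch of the consistency rule applied in
// compareTo() above. When the primary ordering field (the version) compares equal but the objects are not
// equal, a secondary id breaks the tie so that compareTo() never reports 0 for objects that equals()
// distinguishes. The OrderingSketch class and its fields are hypothetical.
private static final class OrderingSketch implements Comparable<OrderingSketch> {
    /** Analogue of the MVCC version. */
    private final long ver;

    /** Analogue of the locally assigned candidate id used only for tie-breaking. */
    private final long id;

    OrderingSketch(long ver, long id) {
        this.ver = ver;
        this.id = id;
    }

    /** {@inheritDoc} */
    @Override public int compareTo(OrderingSketch o) {
        int c = ver < o.ver ? -1 : ver > o.ver ? 1 : 0;

        // Same version but different objects: fall back to id so ordering stays consistent with equals().
        return c != 0 ? c : (id < o.id ? -1 : id > o.id ? 1 : 0);
    }

    /** {@inheritDoc} */
    @Override public boolean equals(Object o) {
        return o instanceof OrderingSketch && ((OrderingSketch)o).ver == ver && ((OrderingSketch)o).id == id;
    }

    /** {@inheritDoc} */
    @Override public int hashCode() {
        return (int)(31 * ver + id);
    }
}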
/**
 * Adds entry to future.
 *
 * @param topVer Topology version.
 * @param entry Entry to add.
 * @param dhtNodeId DHT node ID.
 * @return Lock candidate.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable private GridCacheMvccCandidate<K> addEntry(long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId)
    throws GridCacheEntryRemovedException {
    // Check if lock acquisition is timed out.
    if (timedOut)
        return null;

    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate<K> c = entry.addNearLocal(
        dhtNodeId,
        threadId,
        lockVer,
        timeout,
        !inTx(),
        inTx(),
        implicitSingleTx());

    if (inTx()) {
        GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key());

        txEntry.cached(entry, txEntry.keyBytes());
    }

    if (c != null)
        c.topologyVersion(topVer);

    synchronized (mux) {
        entries.add(entry);
    }

    if (c == null && timeout < 0) {
        if (log.isDebugEnabled())
            log.debug("Failed to acquire lock with negative timeout: " + entry);

        onFailed(false);

        return null;
    }

    // Double check if lock acquisition has already timed out.
    if (timedOut) {
        entry.removeLock(lockVer);

        return null;
    }

    return c;
}
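// Illustration (not part of the original class): a self-contained sketch of the timeout double-check used
// in addEntry() above. The flag is read before the lock is added and re-read afterwards; if the timeout
// fired in between, the freshly added lock is rolled back. All names here are hypothetical.
private static final class TimeoutDoubleCheckSketch {
    /** Set by a separate timeout task. */
    private final java.util.concurrent.atomic.AtomicBoolean timedOut = new java.util.concurrent.atomic.AtomicBoolean();

    /** Keys currently holding a (sketch) lock. */
    private final java.util.Set<Object> locked =
        java.util.Collections.newSetFromMap(new java.util.concurrent.ConcurrentHashMap<Object, Boolean>());

    /** Invoked when lock acquisition times out. */
    void onTimeout() {
        timedOut.set(true);
    }

    /**
     * @param key Key to lock.
     * @return {@code True} if the lock was added and survived the double check.
     */
    boolean addLock(Object key) {
        // First check: don't add anything if acquisition already timed out.
        if (timedOut.get())
            return false;

        locked.add(key);

        // Double check: the timeout may have fired while the lock was being added, in which case
        // the lock added above must be removed again.
        if (timedOut.get()) {
            locked.remove(key);

            return false;
        }

        return true;
    }
}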
/**
 * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request per node,
 * because such an approach does not preserve the order of lock acquisition. Instead, keys are split into
 * contiguous groups belonging to one primary node, and locks for these groups are acquired sequentially
 * (an illustrative sketch of this grouping follows this method).
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();

            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() : "Got removed exception for non-obsolete entry: " +
                                        dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                            dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
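// Illustration (not part of the original class): a standalone sketch of the mapping strategy described in
// the Javadoc of map() above. Keys are split into contiguous runs that share a primary node rather than
// being grouped by node, so the original key order (and hence lock-acquisition order) is preserved; the
// same node may therefore own several runs. The names Run, splitIntoContiguousRuns and primaryNodes are
// hypothetical and exist only for this example.
private static final class Run<K> {
    /** Primary node for every key in this run. */
    final UUID nodeId;

    /** Keys of the run, in their original order. */
    final List<K> keys = new ArrayList<>();

    Run(UUID nodeId) {
        this.nodeId = nodeId;
    }
}

/**
 * @param keys Keys in lock-acquisition order.
 * @param primaryNodes Hypothetical key-to-primary-node resolution (a snapshot of affinity).
 * @return Contiguous per-node runs, in the original key order.
 */
private static <K> List<Run<K>> splitIntoContiguousRuns(Iterable<? extends K> keys, Map<K, UUID> primaryNodes) {
    List<Run<K>> runs = new ArrayList<>();

    Run<K> cur = null;

    for (K key : keys) {
        UUID nodeId = primaryNodes.get(key);

        // Start a new run whenever the primary node changes; runs for the same node are never merged,
        // since merging would reorder lock acquisition.
        if (cur == null || !cur.nodeId.equals(nodeId)) {
            cur = new Run<>(nodeId);

            runs.add(cur);
        }

        cur.keys.add(key);
    }

    return runs;
}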