/**
 * @param reads Read entries.
 * @param writes Write entries.
 */
@SuppressWarnings({"unchecked"})
private void prepare(
    Iterable<GridCacheTxEntry<K, V>> reads,
    Iterable<GridCacheTxEntry<K, V>> writes) {
    Collection<GridRichNode> nodes = CU.allNodes(cctx);

    // Assign keys to primary nodes.
    for (GridCacheTxEntry<K, V> read : reads)
        map(read, nodes);

    for (GridCacheTxEntry<K, V> write : writes)
        map(write, nodes);

    // Create mini futures.
    for (GridDistributedTxMapping<K, V> m : mappings.values())
        finish(m);
}
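/*
 * Illustrative sketch (not part of the original source): prepare() above groups each
 * transaction entry under the node that is primary for its key, then creates one mini
 * future per node mapping. The standalone example below reproduces that two-phase shape
 * with plain JDK types; Node, Entry and pickPrimary() are hypothetical stand-ins for
 * GridRichNode, GridCacheTxEntry and the affinity lookup.
 */
import java.util.*;

class PrepareSketch {
    record Node(String id) {}
    record Entry(String key) {}

    /** Hypothetical affinity function: picks a primary node for a key by hash. */
    static Node pickPrimary(String key, List<Node> nodes) {
        return nodes.get(Math.abs(key.hashCode() % nodes.size()));
    }

    public static void main(String[] args) {
        List<Node> nodes = List.of(new Node("a"), new Node("b"));
        List<Entry> writes = List.of(new Entry("k1"), new Entry("k2"), new Entry("k3"));

        // Phase 1: assign keys to primary nodes (mirrors the map(entry, nodes) calls).
        Map<Node, List<Entry>> mappings = new HashMap<>();

        for (Entry e : writes)
            mappings.computeIfAbsent(pickPrimary(e.key(), nodes), n -> new ArrayList<>()).add(e);

        // Phase 2: one unit of work per node mapping (mirrors the finish(m) calls).
        for (Map.Entry<Node, List<Entry>> m : mappings.entrySet())
            System.out.println("Prepare request to " + m.getKey().id() + " for " + m.getValue());
    }
}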
/**
 * @param cctx Cache context.
 * @param prj Projection (optional).
 * @return Collection of data nodes in provided projection (if any).
 */
private static Collection<GridNode> nodes(final GridCacheContext<?, ?> cctx,
    @Nullable final GridProjection prj) {
    assert cctx != null;

    return F.view(CU.allNodes(cctx), new P1<GridNode>() {
        @Override public boolean apply(GridNode n) {
            GridCacheDistributionMode mode = U.distributionMode(n, cctx.name());

            return (mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED) &&
                (prj == null || prj.node(n.id()) != null);
        }
    });
}
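/*
 * Illustrative sketch (not part of the original source): nodes() above returns a lazy,
 * predicate-filtered view (F.view + P1) rather than a copied collection, combining a
 * required distribution-mode check with an optional projection check. The stream-based
 * equivalent below shows the same two-part filter with plain JDK types; DistributionMode
 * and the Node/projection shapes are hypothetical stand-ins.
 */
import java.util.*;
import java.util.function.Predicate;
import java.util.stream.Collectors;

class NodeFilterSketch {
    enum DistributionMode { PARTITIONED_ONLY, NEAR_PARTITIONED, CLIENT_ONLY }

    record Node(UUID id, DistributionMode mode) {}

    static List<Node> dataNodes(Collection<Node> all, Set<UUID> projection) {
        Predicate<Node> isDataNode = n ->
            n.mode() == DistributionMode.PARTITIONED_ONLY || n.mode() == DistributionMode.NEAR_PARTITIONED;

        // A null projection means "no projection restriction", as in the original.
        Predicate<Node> inProjection = n -> projection == null || projection.contains(n.id());

        return all.stream().filter(isDataNode.and(inProjection)).collect(Collectors.toList());
    }
}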
/**
 * Removes locks regardless of whether they are owned or not for given version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" +
                            ver + ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
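/*
 * Illustrative sketch (not part of the original source): removeLocks() above batches keys
 * into one unlock request per remote primary node, pre-sizing each request with
 * keyCnt = ceil(totalKeys / nodeCount) as an even-distribution capacity estimate. The
 * standalone example below isolates that batching arithmetic; Node, Request and the
 * hash-based affinity are hypothetical stand-ins.
 */
import java.util.*;

class UnlockBatchSketch {
    record Node(String id, boolean local) {}

    static class Request {
        final List<String> keys;

        Request(int expectedKeys) { keys = new ArrayList<>(expectedKeys); }
    }

    static Map<Node, Request> batchByPrimary(List<String> keys, List<Node> nodes) {
        // Capacity hint: assume keys spread evenly across nodes, rounding up.
        int keyCnt = (int)Math.ceil((double)keys.size() / nodes.size());

        Map<Node, Request> map = new HashMap<>(nodes.size());

        for (String key : keys) {
            Node primary = nodes.get(Math.abs(key.hashCode() % nodes.size())); // Hypothetical affinity.

            if (!primary.local()) // Locally-owned keys are unlocked directly, without a request.
                map.computeIfAbsent(primary, n -> new Request(keyCnt)).keys.add(key);
        }

        return map;
    }
}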
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(),
                        Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes)" +
                                    " [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
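/*
 * Illustrative sketch (not part of the original source): both unlockAll() and removeLocks()
 * rely on the same retry idiom, loop per key, break once the entry is processed, and retry
 * from a fresh lookup if the entry was concurrently removed. The minimal version below
 * captures that shape; EntryRemovedException, Entry and lookup()/process() are hypothetical
 * stand-ins for GridCacheEntryRemovedException and peekExx().
 */
class RetrySketch {
    static class EntryRemovedException extends Exception {}

    interface Entry { void process() throws EntryRemovedException; }

    static void processWithRetry(java.util.function.Supplier<Entry> lookup) {
        while (true) {
            Entry entry = lookup.get(); // Always re-read: a stale reference may be removed.

            try {
                if (entry != null)
                    entry.process();

                break; // Done (or nothing to do) - leave the retry loop.
            }
            catch (EntryRemovedException ignored) {
                // Entry was removed between lookup and processing; retry with a fresh copy.
            }
        }
    }
}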
/**
 * @param e Transaction entry.
 * @return {@code True} if entry is locally mapped as a primary or backup node.
 */
protected boolean isNearLocallyMapped(GridCacheEntryEx<K, V> e) {
    return F.contains(ctx.affinity(e.key(), CU.allNodes(ctx)), ctx.localNode());
}
/**
 * @param nodeId Reader to add.
 * @param msgId Message ID.
 * @return Future for all relevant transactions that were active at the time of adding reader,
 *      or {@code null} if reader was added.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable public GridFuture<Boolean> addReader(UUID nodeId, long msgId)
    throws GridCacheEntryRemovedException {
    // Don't add local node as reader.
    if (cctx.nodeId().equals(nodeId))
        return null;

    GridNode node = cctx.discovery().node(nodeId);

    // If remote node has no near cache, don't add it.
    if (node == null || !U.hasNearCache(node, cctx.dht().near().name()))
        return null;

    // If remote node is primary or backup, don't add it as a reader.
    if (U.nodeIds(cctx.affinity(partition(), CU.allNodes(cctx))).contains(nodeId))
        return null;

    boolean ret = false;

    GridCacheMultiTxFuture<K, V> txFut;

    Collection<GridCacheMvccCandidate<K>> cands = null;

    synchronized (mux) {
        checkObsolete();

        txFut = this.txFut;

        ReaderId reader = readerId(nodeId);

        if (reader == null) {
            reader = new ReaderId(nodeId, msgId);

            readers = new LinkedList<ReaderId>(readers);

            readers.add(reader);

            // Seal.
            readers = Collections.unmodifiableList(readers);

            txFut = this.txFut = new GridCacheMultiTxFuture<K, V>(cctx);

            cands = localCandidates();

            ret = true;
        }
        else {
            long id = reader.messageId();

            if (id < msgId)
                reader.messageId(msgId);
        }
    }

    if (ret) {
        assert txFut != null;

        if (!F.isEmpty(cands)) {
            for (GridCacheMvccCandidate<K> c : cands) {
                GridCacheTxEx<K, V> tx = cctx.tm().<GridCacheTxEx<K, V>>tx(c.version());

                if (tx != null) {
                    assert tx.local();

                    txFut.addTx(tx);
                }
            }
        }

        txFut.init();

        if (!txFut.isDone()) {
            txFut.listenAsync(new CI1<GridFuture<?>>() {
                @Override public void apply(GridFuture<?> f) {
                    synchronized (mux) {
                        // Release memory.
                        GridDhtCacheEntry.this.txFut = null;
                    }
                }
            });
        }
        else
            // Release memory.
            txFut = this.txFut = null;
    }

    return txFut;
}
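/*
 * Illustrative sketch (not part of the original source): addReader() above never mutates
 * the published reader list in place. Under the lock it copies, appends, then "seals" the
 * copy with Collections.unmodifiableList() before republishing, so concurrent readers
 * always see an immutable snapshot. A minimal standalone version of that copy-on-write
 * idiom, with hypothetical names:
 */
import java.util.*;

class CopyOnWriteSketch {
    private final Object mux = new Object();

    private List<UUID> readers = Collections.emptyList();

    /** @return {@code true} if the reader was added, {@code false} if already present. */
    boolean addReader(UUID nodeId) {
        synchronized (mux) {
            if (readers.contains(nodeId))
                return false;

            List<UUID> copy = new LinkedList<>(readers);

            copy.add(nodeId);

            readers = Collections.unmodifiableList(copy); // Seal before publishing.

            return true;
        }
    }

    /** Snapshots are immutable, so callers may iterate without holding {@code mux}. */
    List<UUID> readers() {
        synchronized (mux) {
            return readers;
        }
    }
}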