/**
 * @param keyMap Key map to register.
 */
void addKeyMapping(Map<GridRichNode, Collection<K>> keyMap) {
    for (Map.Entry<GridRichNode, Collection<K>> mapping : keyMap.entrySet()) {
        GridRichNode n = mapping.getKey();

        for (K key : mapping.getValue()) {
            GridCacheTxEntry<K, V> txEntry = txMap.get(key);

            assert txEntry != null;

            GridDistributedTxMapping<K, V> m = mappings.get(n.id());

            if (m == null)
                mappings.put(n.id(), m = new GridDistributedTxMapping<K, V>(n));

            txEntry.nodeId(n.id());

            m.add(txEntry);
        }
    }

    if (log.isDebugEnabled())
        log.debug("Added mappings to transaction [locId=" + cctx.nodeId() + ", mappings=" + keyMap +
            ", tx=" + this + ']');
}
/** {@inheritDoc} */
@Override public boolean onDone(GridCacheTx tx, Throwable err) {
    if ((initialized() || err != null) && super.onDone(tx, err)) {
        if (error() instanceof GridCacheTxHeuristicException) {
            long topVer = this.tx.topologyVersion();

            for (GridCacheTxEntry<K, V> e : this.tx.writeMap().values()) {
                try {
                    if (e.op() != NOOP && !cctx.affinity().localNode(e.key(), topVer)) {
                        GridCacheEntryEx<K, V> cacheEntry = cctx.cache().peekEx(e.key());

                        if (cacheEntry != null)
                            cacheEntry.invalidate(null, this.tx.xidVersion());
                    }
                }
                catch (Throwable t) {
                    U.error(log, "Failed to invalidate entry.", t);

                    if (t instanceof Error)
                        throw (Error)t;
                }
            }
        }

        // Don't forget to clean up.
        cctx.mvcc().removeFuture(this);

        return true;
    }

    return false;
}
/**
 * @param entry Transaction entry.
 * @param nodes Nodes.
 */
private void map(GridCacheTxEntry<K, V> entry, Collection<GridRichNode> nodes) {
    GridRichNode primary = CU.primary0(cctx.affinity(entry.key(), nodes));

    GridDistributedTxMapping<K, V> t = mappings.get(primary.id());

    if (t == null)
        mappings.put(primary.id(), t = new GridDistributedTxMapping<K, V>(primary));

    t.add(entry);
}
/**
 * Adds entry to future.
 *
 * @param topVer Topology version.
 * @param entry Entry to add.
 * @param dhtNodeId DHT node ID.
 * @return Lock candidate.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable private GridCacheMvccCandidate<K> addEntry(long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId)
    throws GridCacheEntryRemovedException {
    // Check if lock acquisition is timed out.
    if (timedOut)
        return null;

    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate<K> c = entry.addNearLocal(dhtNodeId, threadId, lockVer, timeout, !inTx(), inTx(),
        implicitSingleTx());

    if (inTx()) {
        GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key());

        txEntry.cached(entry, txEntry.keyBytes());
    }

    if (c != null)
        c.topologyVersion(topVer);

    synchronized (mux) {
        entries.add(entry);
    }

    if (c == null && timeout < 0) {
        if (log.isDebugEnabled())
            log.debug("Failed to acquire lock with negative timeout: " + entry);

        onFailed(false);

        return null;
    }

    // Double check if lock acquisition has already timed out.
    if (timedOut) {
        entry.removeLock(lockVer);

        return null;
    }

    return c;
}
/**
 * @param mapping Mapping to order.
 * @param committedVers Committed versions.
 * @param rolledbackVers Rolled back versions.
 */
void orderCompleted(GridDistributedTxMapping<K, V> mapping, Collection<GridCacheVersion> committedVers,
    Collection<GridCacheVersion> rolledbackVers) {
    for (GridCacheTxEntry<K, V> txEntry : F.concat(false, mapping.reads(), mapping.writes())) {
        while (true) {
            GridDistributedCacheEntry<K, V> entry = (GridDistributedCacheEntry<K, V>)txEntry.cached();

            try {
                // Handle explicit locks.
                GridCacheVersion base = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;

                entry.doneRemote(xidVer, base, committedVers, rolledbackVers);

                if (ec())
                    entry.recheck();

                break;
            }
            catch (GridCacheEntryRemovedException ignored) {
                assert entry.obsoleteVersion() != null;

                if (log.isDebugEnabled())
                    log.debug("Replacing obsolete entry in remote transaction [entry=" + entry +
                        ", tx=" + this + ']');

                // Replace the entry.
                txEntry.cached(cctx.cache().entryEx(txEntry.key()), entry.keyBytes());
            }
        }
    }
}
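// -------------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the cache code above). orderCompleted(...) and the other methods
// in this section all use the same shape of retry loop: attempt an operation on a cached entry, and
// if the entry was concurrently removed (GridCacheEntryRemovedException), replace the stale
// reference with a renewed entry and retry. The standalone class below shows that pattern in
// isolation; all names here (RemovedEntryRetrySketch, EntryRemovedException, entryEx, updateWithRetry)
// are hypothetical stand-ins, not the GridGain API.
// -------------------------------------------------------------------------------------------------

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RemovedEntryRetrySketch {
    /** Hypothetical exception standing in for GridCacheEntryRemovedException. */
    static final class EntryRemovedException extends Exception {
    }

    /** Hypothetical cache entry that becomes unusable once marked obsolete. */
    static final class Entry {
        private volatile boolean obsolete;

        private volatile String val;

        Entry(String val) {
            this.val = val;
        }

        void markObsolete() {
            obsolete = true;
        }

        /** Fails if the entry was concurrently removed, forcing the caller to re-read it. */
        void update(String newVal) throws EntryRemovedException {
            if (obsolete)
                throw new EntryRemovedException();

            val = newVal;
        }
    }

    private final Map<String, Entry> entries = new ConcurrentHashMap<>();

    /** Always returns a live entry, creating a fresh one if the previous copy was removed. */
    Entry entryEx(String key) {
        return entries.compute(key, (k, e) -> e == null || e.obsolete ? new Entry("initial") : e);
    }

    /**
     * Same shape as the while (true) loop in orderCompleted(...): try the operation on the current
     * entry, and on a removed exception fetch a renewed entry and retry.
     */
    void updateWithRetry(String key, String newVal) {
        while (true) {
            Entry entry = entryEx(key);

            try {
                entry.update(newVal);

                break; // Success: leave the retry loop.
            }
            catch (EntryRemovedException ignored) {
                // Entry was removed concurrently; loop around and fetch a renewed entry.
            }
        }
    }

    public static void main(String[] args) {
        RemovedEntryRetrySketch sketch = new RemovedEntryRetrySketch();

        Entry stale = sketch.entryEx("k");

        stale.markObsolete(); // Simulate concurrent removal.

        sketch.updateWithRetry("k", "v1"); // Retries transparently on the renewed entry.

        System.out.println(sketch.entryEx("k").val); // Prints: v1
    }
}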
/**
 * @param key Key.
 * @return Mapping for the key.
 */
@Nullable UUID mapping(K key) {
    GridCacheTxEntry<K, V> txEntry = txMap.get(key);

    return txEntry == null ? null : txEntry.nodeId();
}
/**
 * @param nodeId Primary node ID.
 * @param req Request.
 * @return Remote transaction.
 * @throws GridException If failed.
 * @throws GridDistributedLockCancelledException If lock has been cancelled.
 */
@SuppressWarnings({"RedundantTypeArguments"})
@Nullable public GridNearTxRemote<K, V> startRemoteTxForFinish(UUID nodeId, GridDhtTxFinishRequest<K, V> req)
    throws GridException, GridDistributedLockCancelledException {
    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
        for (GridCacheTxEntry<K, V> txEntry : req.nearWrites()) {
            GridDistributedCacheEntry<K, V> entry = null;

            while (true) {
                try {
                    entry = peekExx(txEntry.key());

                    if (entry != null) {
                        entry.keyBytes(txEntry.keyBytes());

                        // Handle implicit locks for pessimistic transactions.
                        tx = ctx.tm().tx(req.version());

                        if (tx != null) {
                            if (tx.local())
                                return null;

                            if (tx.markFinalizing())
                                tx.addWrite(txEntry.key(), txEntry.keyBytes());
                            else
                                return null;
                        }
                        else {
                            tx = new GridNearTxRemote<K, V>(
                                nodeId,
                                req.nearNodeId(),
                                req.threadId(),
                                req.version(),
                                null,
                                PESSIMISTIC,
                                req.isolation(),
                                req.isInvalidate(),
                                0,
                                txEntry.key(),
                                txEntry.keyBytes(),
                                txEntry.value(),
                                txEntry.valueBytes(),
                                ctx);

                            if (tx.empty())
                                return tx;

                            tx = ctx.tm().onCreated(tx);

                            if (tx == null || !ctx.tm().onStarted(tx))
                                throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                    "(transaction has been completed): " + req.version());

                            if (!tx.markFinalizing())
                                return null;
                        }

                        // Add remote candidate before reordering.
                        if (txEntry.explicitVersion() == null)
                            entry.addRemote(req.nearNodeId(), nodeId, req.threadId(), req.version(), 0, tx.ec(),
                                /*tx*/true, tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            Collections.<GridCacheMvccCandidate<K>>emptyList(),
                            req.version(),
                            req.committedVersions(),
                            req.rolledbackVersions());
                    }

                    // Double-check in case sender node left the grid.
                    if (ctx.discovery().node(req.nearNodeId()) == null) {
                        if (log.isDebugEnabled())
                            log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

                        if (tx != null)
                            tx.rollback();

                        return null;
                    }

                    // Entry is legit.
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                    if (log.isDebugEnabled())
                        log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                    if (tx != null) {
                        tx.clearEntry(entry.key());

                        if (log.isDebugEnabled())
                            log.debug("Cleared removed entry from remote transaction (will retry) [entry=" + entry +
                                ", tx=" + tx + ']');
                    }
                }
            }
        }
    }
    else {
        String err = "Failed to acquire deployment class loader for message: " + req;

        U.warn(log, err);

        throw new GridException(err);
    }

    return tx;
}
/**
 * @param nodeId Sender node ID.
 * @param req Finish transaction message.
 */
@SuppressWarnings({"CatchGenericClass"})
private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
        ClassLoader ldr = ctx.deploy().globalLoader();

        if (req.commit()) {
            // If lock was acquired explicitly.
            if (tx == null) {
                // Create transaction and add entries.
                tx = ctx.tm().onCreated(
                    new GridReplicatedTxRemote<K, V>(
                        ldr,
                        nodeId,
                        req.threadId(),
                        req.version(),
                        req.commitVersion(),
                        PESSIMISTIC,
                        READ_COMMITTED,
                        req.isInvalidate(),
                        /*timeout*/0,
                        /*read entries*/null,
                        req.writes(),
                        ctx));

                if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + req);
            }
            else {
                boolean set = tx.commitVersion(req.commitVersion());

                assert set;
            }

            Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

            if (!F.isEmpty(writeEntries)) {
                // In OPTIMISTIC mode, we get the values at PREPARE stage.
                assert tx.concurrency() == PESSIMISTIC;

                for (GridCacheTxEntry<K, V> entry : writeEntries) {
                    // Unmarshal write entries.
                    entry.unmarshal(ctx, ldr);

                    if (log.isDebugEnabled())
                        log.debug("Unmarshalled transaction entry from pessimistic transaction [key=" + entry.key() +
                            ", value=" + entry.value() + ", tx=" + tx + ']');

                    if (!tx.setWriteValue(entry))
                        U.warn(log, "Received entry to commit that was not present in transaction [entry=" + entry +
                            ", tx=" + tx + ']');
                }
            }

            // Add completed versions.
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            if (tx.pessimistic())
                tx.prepare();

            tx.commit();
        }
        else if (tx != null) {
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            tx.rollback();
        }

        if (req.replyRequired()) {
            GridCacheMessage<K, V> res = new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

            try {
                ctx.io().send(nodeId, res);
            }
            catch (Throwable e) {
                // Double-check.
                if (ctx.discovery().node(nodeId) == null) {
                    if (log.isDebugEnabled())
                        log.debug("Node left while sending finish response [nodeId=" + nodeId + ", res=" + res + ']');
                }
                else
                    U.error(log, "Failed to send finish response to node [nodeId=" + nodeId + ", res=" + res + ']', e);
            }
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Attempted to start a completed transaction (will ignore): " + e);
    }
    catch (Throwable e) {
        U.error(log, "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']', e);

        if (tx != null)
            tx.rollback();
    }
}
/**
 * Maps keys to nodes. Note that we can not simply group keys by nodes and send lock request as
 * such approach does not preserve order of lock acquisition. Instead, keys are split in
 * continuous groups belonging to one primary node and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();

            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() : "Got removed exception for non-obsolete entry: " +
                                        dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                            dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
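// -------------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the cache code above). The Javadoc of map(Iterable) notes that
// keys are split into contiguous groups per primary node, rather than simply grouped by node, so
// that lock acquisition order is preserved. The standalone class below shows that grouping idea in
// isolation, with strings standing in for keys and node IDs; the names (ContiguousGroupingSketch,
// KeyGroup, primaryNodeFor) are hypothetical and not part of the GridGain API.
// -------------------------------------------------------------------------------------------------

import java.util.ArrayList;
import java.util.List;

public class ContiguousGroupingSketch {
    /** Hypothetical group: one primary node and the consecutive keys mapped to it. */
    static final class KeyGroup {
        final String nodeId;
        final List<String> keys = new ArrayList<>();

        KeyGroup(String nodeId) {
            this.nodeId = nodeId;
        }

        @Override public String toString() {
            return nodeId + keys;
        }
    }

    /** Stand-in for an affinity lookup: here the "primary node" is just the key's first character. */
    static String primaryNodeFor(String key) {
        return "node-" + key.charAt(0);
    }

    /**
     * Walks keys in iteration order and starts a new group only when the primary node changes,
     * mirroring how map(...) above starts a new GridNearLockMapping whenever the mapping returned
     * for a key differs from the previous one.
     */
    static List<KeyGroup> splitContiguously(Iterable<String> keys) {
        List<KeyGroup> groups = new ArrayList<>();

        KeyGroup cur = null;

        for (String key : keys) {
            String node = primaryNodeFor(key);

            if (cur == null || !cur.nodeId.equals(node)) {
                cur = new KeyGroup(node);

                groups.add(cur);
            }

            cur.keys.add(key);
        }

        return groups;
    }

    public static void main(String[] args) {
        // Keys a1, a2 map to node-a, b1 to node-b, then a3 back to node-a. Note that node-a gets
        // two separate groups, so the original key (and therefore lock acquisition) order is kept.
        System.out.println(splitContiguously(List.of("a1", "a2", "b1", "a3")));
        // Prints: [node-a[a1, a2], node-b[b1], node-a[a3]]
    }
}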