/**
 * Completeness callback.
 *
 * @param success {@code True} if lock was acquired.
 * @param distribute {@code True} if lock removal should be distributed in case of failure.
 * @return {@code True} if the future was completed by this operation.
 */
private boolean onComplete(boolean success, boolean distribute) {
    if (log.isDebugEnabled())
        log.debug("Received onComplete(..) callback [success=" + success + ", distribute=" + distribute +
            ", fut=" + this + ']');

    if (!success)
        undoLocks(distribute);

    if (tx != null)
        cctx.tm().txContext(tx);

    if (super.onDone(success, err.get())) {
        if (log.isDebugEnabled())
            log.debug("Completing future: " + this);

        // Clean up.
        cctx.mvcc().removeFuture(this);

        if (timeoutObj != null)
            cctx.time().removeTimeoutObject(timeoutObj);

        return true;
    }

    return false;
}
/** {@inheritDoc} */
@Override public boolean onDone(GridCacheTx tx, Throwable err) {
    if ((initialized() || err != null) && super.onDone(tx, err)) {
        if (error() instanceof GridCacheTxHeuristicException) {
            long topVer = this.tx.topologyVersion();

            for (GridCacheTxEntry<K, V> e : this.tx.writeMap().values()) {
                try {
                    if (e.op() != NOOP && !cctx.affinity().localNode(e.key(), topVer)) {
                        GridCacheEntryEx<K, V> cacheEntry = cctx.cache().peekEx(e.key());

                        if (cacheEntry != null)
                            cacheEntry.invalidate(null, this.tx.xidVersion());
                    }
                }
                catch (Throwable t) {
                    U.error(log, "Failed to invalidate entry.", t);

                    if (t instanceof Error)
                        throw (Error) t;
                }
            }
        }

        // Don't forget to clean up.
        cctx.mvcc().removeFuture(this);

        return true;
    }

    return false;
}
/** {@inheritDoc} */
@Override public boolean onDone(GridCacheTx tx, Throwable err) {
    if (initialized() && super.onDone(tx, err)) {
        // Don't forget to clean up.
        cctx.mvcc().removeFuture(this);

        return true;
    }

    return false;
}
/**
 * Undoes all locks.
 *
 * @param dist If {@code true}, then remove locks from remote nodes as well.
 */
private void undoLocks(boolean dist) {
    // Transactions will undo during rollback.
    if (dist && tx == null)
        cctx.nearTx().removeLocks(lockVer, keys);
    else {
        if (tx != null) {
            if (tx.setRollbackOnly()) {
                if (log.isDebugEnabled())
                    log.debug("Marked transaction as rollback only because locks could not be acquired: " + tx);
            }
            else if (log.isDebugEnabled())
                log.debug("Transaction was not marked rollback-only while locks were not acquired: " + tx);
        }

        for (GridCacheEntryEx<K, V> e : entriesCopy()) {
            try {
                e.removeLock(lockVer);
            }
            catch (GridCacheEntryRemovedException ignored) {
                while (true) {
                    try {
                        e = cctx.cache().peekEx(e.key());

                        if (e != null)
                            e.removeLock(lockVer);

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Attempted to remove lock on removed entry (will retry) [ver=" + lockVer +
                                ", entry=" + e + ']');
                    }
                }
            }
        }
    }

    cctx.mvcc().recheckPendingLocks();
}
/**
 * Future mapping basically consists of two parts. First, we must determine the topology version
 * this future will map on. If locking is performed within a user transaction, we must keep
 * mapping keys on the same topology version the transaction started on. If the topology version
 * is undefined, we get the current topology future and wait until it completes, so the topology
 * is ready to use.
 *
 * <p>During the second part we map keys to primary nodes using the topology snapshot obtained
 * during the first part. Note that if a primary node leaves the grid, the future will fail and
 * the transaction will be rolled back.
 */
void map() {
    // Obtain the topology version to use.
    GridDiscoveryTopologySnapshot snapshot = tx != null ?
        tx.topologySnapshot() :
        cctx.mvcc().lastExplicitLockTopologySnapshot(Thread.currentThread().getId());

    if (snapshot != null) {
        // Continue mapping on the same topology version as it was before.
        topSnapshot.compareAndSet(null, snapshot);

        map(keys);

        markInitialized();

        return;
    }

    // Must get topology snapshot and map on that version.
    mapOnTopology();
}
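/*
 * Illustrative sketch only -- not part of this class. It restates the decision described in the
 * Javadoc of map() above: reuse an already captured topology snapshot (from the transaction or
 * the thread's explicit-lock chain) when one exists, otherwise wait for the current topology
 * before mapping. All names below (TopologySnapshot, TopologyService, resolveTopology) are
 * hypothetical placeholders, not GridGain API; only java.util.concurrent types are used.
 */
final class TopologyBindingSketch {
    /** Minimal stand-in for a discovery topology snapshot. */
    interface TopologySnapshot {
        long topologyVersion();
    }

    /** Minimal stand-in for the component that knows about captured snapshots. */
    interface TopologyService {
        /** Snapshot captured when the transaction started, or {@code null}. */
        TopologySnapshot transactionSnapshot();

        /** Snapshot of the current explicit-lock chain for this thread, or {@code null}. */
        TopologySnapshot explicitLockSnapshot();

        /** Completes when the current topology is ready to use. */
        java.util.concurrent.CompletableFuture<TopologySnapshot> currentTopology();
    }

    /** Resolves the topology snapshot that mapping should run on. */
    static java.util.concurrent.CompletableFuture<TopologySnapshot> resolveTopology(TopologyService svc) {
        TopologySnapshot snap = svc.transactionSnapshot() != null ?
            svc.transactionSnapshot() : svc.explicitLockSnapshot();

        // Keep mapping on the version the operation started with, if one exists.
        if (snap != null)
            return java.util.concurrent.CompletableFuture.completedFuture(snap);

        // Otherwise wait until the current topology future completes.
        return svc.currentTopology();
    }
}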
/**
 * Maps keys to nodes. Note that we cannot simply group keys by nodes and send lock requests, as
 * such an approach does not preserve the order of lock acquisition. Instead, keys are split into
 * contiguous groups belonging to one primary node, and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();

            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() : "Got removed exception for non-obsolete entry: " + dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
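/*
 * Illustrative sketch only -- not part of the lock future. It demonstrates the grouping rule
 * from the Javadoc of map(Iterable): keys are walked in their original order and a new group is
 * started whenever the primary node changes, so per-node lock requests preserve the order of
 * lock acquisition (unlike a plain "group by node" map). The names below (Group,
 * groupContiguously, primaryOf) are hypothetical and use only java.util / java.util.function types.
 */
final class ContiguousGroupingSketch {
    /** One contiguous run of keys mapped to a single primary node. */
    static final class Group<K> {
        final String nodeId;
        final java.util.List<K> keys = new java.util.ArrayList<>();

        Group(String nodeId) {
            this.nodeId = nodeId;
        }
    }

    /** Splits keys into contiguous per-node groups, preserving iteration order. */
    static <K> java.util.List<Group<K>> groupContiguously(Iterable<K> keys,
        java.util.function.Function<K, String> primaryOf) {
        java.util.List<Group<K>> groups = new java.util.ArrayList<>();

        Group<K> cur = null;

        for (K key : keys) {
            String nodeId = primaryOf.apply(key);

            // Primary node changed: start a new group instead of merging with an earlier one.
            if (cur == null || !cur.nodeId.equals(nodeId)) {
                cur = new Group<>(nodeId);

                groups.add(cur);
            }

            cur.keys.add(key);
        }

        return groups;
    }
}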