/** Initializes future. */
@SuppressWarnings({"unchecked"})
void finish() {
    if (tx.onePhaseCommit()) {
        // No need to send messages as transaction was already committed on remote node.
        markInitialized();

        return;
    }

    if (mappings != null) {
        finish(mappings.values());

        markInitialized();

        if (!isSync()) {
            boolean complete = true;

            for (GridFuture<?> f : pending())
                // Mini-future in non-sync mode gets done when message gets sent.
                if (isMini(f) && !f.isDone())
                    complete = false;

            if (complete)
                onComplete();
        }
    }
    else {
        assert !commit;

        try {
            tx.rollback();
        }
        catch (GridException e) {
            U.error(log, "Failed to rollback empty transaction: " + tx, e);
        }

        markInitialized();
    }
}
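/*
 * Illustration only, not part of the class above: in non-sync mode finish() treats the compound
 * future as complete once every mini-future is already done. A minimal sketch of that
 * "complete the parent once all children are done" check using plain java.util.concurrent types;
 * CompoundCompletion and its method names are hypothetical, not GridGain API.
 */
import java.util.Collection;
import java.util.concurrent.CompletableFuture;

final class CompoundCompletion {
    /** Completes {@code parent} if every pending mini-future has already finished. */
    static void completeIfAllDone(Collection<CompletableFuture<?>> pending, CompletableFuture<Void> parent) {
        boolean complete = true;

        for (CompletableFuture<?> f : pending)
            if (!f.isDone())
                complete = false; // At least one mini-future is still in flight.

        if (complete)
            parent.complete(null);
    }
}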
/** @param m Mapping. */
@SuppressWarnings({"unchecked"})
private void finish(GridDistributedTxMapping<K, V> m) {
    GridRichNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest req = new GridNearTxFinishRequest<K, V>(
        futId,
        tx.xidVersion(),
        tx.commitVersion(),
        tx.threadId(),
        commit,
        tx.isInvalidate(),
        m.explicitLock(),
        tx.topologyVersion(),
        null,
        null,
        null,
        commit && tx.pessimistic() ? m.writes() : null,
        tx.syncCommit() && commit || tx.syncRollback() && !commit);

    // If this is the primary node for the keys.
    if (n.isLocal()) {
        req.miniId(GridUuid.randomUuid());

        if (CU.DHT_ENABLED) {
            GridFuture<GridCacheTx> fut = commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req);

            // Add new future.
            add(fut);
        }
        else
            // Add done future for testing.
            add(new GridFinishedFuture<GridCacheTx>(ctx));
    }
    else {
        MiniFuture fut = new MiniFuture(m);

        req.miniId(fut.futureId());

        add(fut); // Append new future.

        try {
            cctx.io().send(n, req);

            // If we don't wait for result, then mark future as done.
            if (!isSync() && !m.explicitLock())
                fut.onDone();
        }
        catch (GridTopologyException e) {
            // Remove previous mapping.
            mappings.remove(m.node().id());

            fut.onResult(e);
        }
        catch (GridException e) {
            // Fail the whole thing.
            fut.onResult(e);
        }
    }
}
/**
 * @param key Key to map.
 * @param mapping Current mapping to add the key to, or {@code null} if a new one should be created.
 * @param topVer Topology version.
 * @return Near lock mapping.
 * @throws GridException If mapping for key failed.
 */
private GridNearLockMapping<K, V> map(K key, @Nullable GridNearLockMapping<K, V> mapping,
    long topVer) throws GridException {
    assert mapping == null || mapping.node() != null;

    GridNode primary = cctx.affinity().primary(key, topVer);

    if (cctx.discovery().node(primary.id()) == null)
        // If primary node left the grid before lock acquisition, fail the whole future.
        throw newTopologyException(null, primary.id());

    if (inTx() && tx.groupLock() && !primary.isLocal())
        throw new GridException("Failed to start group lock transaction (local node is not primary for key) " +
            "[key=" + key + ", primaryNodeId=" + primary.id() + ']');

    if (mapping == null || !primary.id().equals(mapping.node().id()))
        mapping = new GridNearLockMapping<>(primary, key);
    else
        mapping.addKey(key);

    return mapping;
}
/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                }
                catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        }

        onComplete();
    }
}
/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                }
                catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        }
        else if (tx.implicit() && tx.isSystemInvalidate()) { // Finish implicit transaction on heuristic error.
            try {
                tx.close();
            }
            catch (GridException ex) {
                U.error(log, "Failed to invalidate transaction: " + tx, ex);
            }
        }

        onComplete();
    }
}
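/*
 * Illustration only: both onError(Throwable) variants above rely on an atomic "first error wins"
 * latch, so rollback and completion run exactly once no matter how many callbacks report a
 * failure. A minimal sketch of that latch with java.util.concurrent.atomic.AtomicReference;
 * FirstErrorLatch and its members are hypothetical names, not GridGain API.
 */
import java.util.concurrent.atomic.AtomicReference;

final class FirstErrorLatch {
    private final AtomicReference<Throwable> err = new AtomicReference<>();

    /** Records the error and runs the completion action only for the first failure reported. */
    void onError(Throwable e, Runnable onComplete) {
        if (err.compareAndSet(null, e)) // Only the first caller wins the race.
            onComplete.run();
    }

    /** @return First recorded error, or {@code null} if none. */
    Throwable error() {
        return err.get();
    }
}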
/**
 * @param cctx Registry.
 * @param keys Keys to lock.
 * @param tx Transaction.
 * @param read Read flag.
 * @param retval Flag to return value or not.
 * @param timeout Lock acquisition timeout.
 * @param filter Filter.
 */
public GridNearLockFuture(
    GridCacheContext<K, V> cctx,
    Collection<? extends K> keys,
    @Nullable GridNearTxLocal<K, V> tx,
    boolean read,
    boolean retval,
    long timeout,
    GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
        timeoutObj = new LockTimeoutObject();

        cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
}
/**
 * Acquires topology future and checks its completeness under the read lock. If it is not
 * complete, asynchronously waits for its completion and then tries again.
 */
void mapOnTopology() {
    // We must acquire topology snapshot from the topology version future.
    try {
        cctx.topology().readLock();

        try {
            GridDhtTopologyFuture fut = cctx.topologyVersionFuture();

            if (fut.isDone()) {
                GridDiscoveryTopologySnapshot snapshot = fut.topologySnapshot();

                if (tx != null) {
                    tx.topologyVersion(snapshot.topologyVersion());
                    tx.topologySnapshot(snapshot);
                }

                topSnapshot.compareAndSet(null, snapshot);

                map(keys);

                markInitialized();
            }
            else {
                fut.listenAsync(new CI1<GridFuture<Long>>() {
                    @Override public void apply(GridFuture<Long> t) {
                        mapOnTopology();
                    }
                });
            }
        }
        finally {
            cctx.topology().readUnlock();
        }
    }
    catch (GridException e) {
        onDone(e);
    }
}
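/*
 * Illustration only: mapOnTopology() follows a "check under the read lock, otherwise re-run when
 * the topology future completes" pattern. A minimal sketch of that shape with plain
 * java.util.concurrent types; TopologyRetry, topologyLock and topologyReady are hypothetical
 * names, not GridGain API.
 */
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class TopologyRetry {
    private final ReentrantReadWriteLock topologyLock = new ReentrantReadWriteLock();
    private final CompletableFuture<Long> topologyReady = new CompletableFuture<>();

    /** Runs the mapping action once the topology future is done; otherwise retries on completion. */
    void mapOnTopology(Runnable mapAction) {
        topologyLock.readLock().lock();

        try {
            if (topologyReady.isDone())
                mapAction.run(); // Topology is ready: map right away while holding the read lock.
            else
                topologyReady.whenComplete((ver, err) -> mapOnTopology(mapAction)); // Retry later.
        }
        finally {
            topologyLock.readLock().unlock();
        }
    }

    /** Called when the topology version becomes available. */
    void onTopologyReady(long topVer) {
        topologyReady.complete(topVer);
    }
}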
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param commit Commit flag.
 */
public GridNearTxFinishFuture(GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    assert cctx != null;

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
}
/**
 * Adds entry to future.
 *
 * @param topVer Topology version.
 * @param entry Entry to add.
 * @param dhtNodeId DHT node ID.
 * @return Lock candidate.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable private GridCacheMvccCandidate<K> addEntry(long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId)
    throws GridCacheEntryRemovedException {
    // Check if lock acquisition is timed out.
    if (timedOut)
        return null;

    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate<K> c = entry.addNearLocal(
        dhtNodeId,
        threadId,
        lockVer,
        timeout,
        !inTx(),
        inTx(),
        implicitSingleTx());

    if (inTx()) {
        GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key());

        txEntry.cached(entry, txEntry.keyBytes());
    }

    if (c != null)
        c.topologyVersion(topVer);

    synchronized (mux) {
        entries.add(entry);
    }

    if (c == null && timeout < 0) {
        if (log.isDebugEnabled())
            log.debug("Failed to acquire lock with negative timeout: " + entry);

        onFailed(false);

        return null;
    }

    // Double check if lock acquisition has already timed out.
    if (timedOut) {
        entry.removeLock(lockVer);

        return null;
    }

    return c;
}
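/*
 * Illustration only: addEntry(...) checks the timeout flag before adding a local lock candidate
 * and re-checks it afterwards, undoing the lock if the timeout fired in between. A minimal sketch
 * of that check / act / double-check shape with standard JDK types; TimeoutGuard and its members
 * are hypothetical stand-ins, not GridGain API.
 */
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;

final class TimeoutGuard {
    private final AtomicBoolean timedOut = new AtomicBoolean();
    private final ReentrantLock lock = new ReentrantLock();

    /** @return {@code true} if the lock was acquired and the timeout did not fire concurrently. */
    boolean tryLock() {
        if (timedOut.get())
            return false; // Fast path: already timed out, do not even try.

        lock.lock();

        // Double check: the timeout may have fired while we were acquiring.
        if (timedOut.get()) {
            lock.unlock(); // Undo the acquisition so the lock is not leaked.

            return false;
        }

        return true;
    }

    /** Called by the timeout object. */
    void onTimeout() {
        timedOut.set(true);
    }
}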
/**
 * Undoes all locks.
 *
 * @param dist If {@code true}, then remove locks from remote nodes as well.
 */
private void undoLocks(boolean dist) {
    // Transactions will be undone during rollback.
    if (dist && tx == null)
        cctx.nearTx().removeLocks(lockVer, keys);
    else {
        if (tx != null) {
            if (tx.setRollbackOnly()) {
                if (log.isDebugEnabled())
                    log.debug("Marked transaction as rollback only because locks could not be acquired: " + tx);
            }
            else if (log.isDebugEnabled())
                log.debug("Transaction was not marked rollback-only while locks were not acquired: " + tx);
        }

        for (GridCacheEntryEx<K, V> e : entriesCopy()) {
            try {
                e.removeLock(lockVer);
            }
            catch (GridCacheEntryRemovedException ignored) {
                while (true) {
                    try {
                        e = cctx.cache().peekEx(e.key());

                        if (e != null)
                            e.removeLock(lockVer);

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Attempted to remove lock on removed entry (will retry) [ver=" + lockVer +
                                ", entry=" + e + ']');
                    }
                }
            }
        }
    }

    cctx.mvcc().recheckPendingLocks();
}
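/*
 * Illustration only: the unlock loop in undoLocks(...) retries whenever the entry it holds turns
 * out to be removed, re-reading the current entry from the cache until removeLock succeeds or the
 * entry is gone. A minimal sketch of that retry loop; EntryRemovedException, CacheLike, Entry and
 * removeLockWithRetry are hypothetical stand-ins for the GridGain types used above.
 */
final class RemoveLockRetry {
    /** Thrown when an operation hits an entry that was concurrently removed. */
    static final class EntryRemovedException extends Exception {}

    /** Minimal cache abstraction used by the sketch. */
    interface CacheLike<K> {
        /** @return Current entry for the key, or {@code null} if none. */
        Entry<K> peek(K key);
    }

    /** Minimal entry abstraction used by the sketch. */
    interface Entry<K> {
        K key();

        void removeLock(long lockVer) throws EntryRemovedException;
    }

    /** Removes the lock, retrying with a freshly read entry if the current one was removed. */
    static <K> void removeLockWithRetry(CacheLike<K> cache, Entry<K> e, long lockVer) {
        while (true) {
            try {
                if (e != null)
                    e.removeLock(lockVer);

                return; // Either unlocked or entry no longer exists.
            }
            catch (EntryRemovedException ignored) {
                e = cache.peek(e.key()); // Entry was replaced: re-read and retry.
            }
        }
    }
}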
/**
 * Basically, future mapping consists of two parts. First, we must determine the topology version
 * on which this future will map. If locking is performed within a user transaction, we must
 * continue to map keys on the same topology version on which the transaction started. If the
 * topology version is undefined, we get the current topology future and wait until it completes
 * so that the topology is ready to use.
 *
 * <p>During the second part we map keys to primary nodes using the topology snapshot obtained
 * during the first part. Note that if a primary node leaves the grid, the future will fail and
 * the transaction will be rolled back.
 */
void map() {
    // Obtain the topology version to use.
    GridDiscoveryTopologySnapshot snapshot = tx != null ? tx.topologySnapshot() :
        cctx.mvcc().lastExplicitLockTopologySnapshot(Thread.currentThread().getId());

    if (snapshot != null) {
        // Continue mapping on the same topology version as it was before.
        topSnapshot.compareAndSet(null, snapshot);

        map(keys);

        markInitialized();

        return;
    }

    // Must get topology snapshot and map on that version.
    mapOnTopology();
}
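/*
 * Illustration only: map() pins the topology snapshot with compareAndSet(null, snapshot), so the
 * first snapshot chosen (from the transaction, from a previous explicit lock, or from the topology
 * future) is the one every subsequent remap reuses. A minimal sketch of that pinning pattern with
 * java.util.concurrent.atomic.AtomicReference; SnapshotPin and its members are hypothetical.
 */
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

final class SnapshotPin<S> {
    private final AtomicReference<S> topSnapshot = new AtomicReference<>();

    /**
     * Pins the preferred snapshot if available, otherwise falls back to the supplier
     * (e.g. waiting on the topology future). Later calls keep the originally pinned value.
     *
     * @return Snapshot that all mapping passes should use.
     */
    S pin(S preferred, Supplier<S> fallback) {
        S snap = preferred != null ? preferred : fallback.get();

        topSnapshot.compareAndSet(null, snap); // First writer wins; remaps see the same snapshot.

        return topSnapshot.get();
    }
}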
/** {@inheritDoc} */
@Override public GridCacheVersion version() {
    return tx.xidVersion();
}

/** @return {@code True} if implicit-single-tx flag is set. */
private boolean implicitSingleTx() {
    return tx != null && tx.implicitSingle();
}

/** @return {@code True} if transaction is not {@code null} and has invalidate flag set. */
private boolean isInvalidate() {
    return tx != null && tx.isInvalidate();
}

/** @return Synchronous flag. */
private boolean isSync() {
    return tx.syncCommit() && commit || tx.syncRollback() && !commit;
}

/** @return {@code True} if commit is synchronous. */
private boolean syncCommit() {
    return tx != null && tx.syncCommit();
}
/**
 * Gets next near lock mapping and either acquires DHT locks locally or sends a near lock request
 * to the remote primary node.
 *
 * @param mappings Queue of mappings.
 * @throws GridException If mapping cannot be completed.
 */
private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings)
    throws GridException {
    GridNearLockMapping<K, V> map = mappings.poll();

    // If there are no more mappings to process, complete the future.
    if (map == null)
        return;

    final GridNearLockRequest<K, V> req = map.request();
    final Collection<K> mappedKeys = map.distributedKeys();
    final GridNode node = map.node();

    if (filter != null && filter.length != 0)
        req.filter(filter, cctx);

    if (node.isLocal()) {
        req.miniId(GridUuid.randomUuid());

        if (log.isDebugEnabled())
            log.debug("Before locally locking near request: " + req);

        GridFuture<GridNearLockResponse<K, V>> fut;

        if (CU.DHT_ENABLED)
            fut = dht().lockAllAsync(cctx.localNode(), req, filter);
        else {
            // Create dummy values for testing.
            GridNearLockResponse<K, V> res = new GridNearLockResponse<>(lockVer, futId, null, false, 1, null);

            res.addValueBytes(null, null, true, lockVer, lockVer, cctx);

            fut = new GridFinishedFuture<>(ctx, res);
        }

        // Add new future.
        add(new GridEmbeddedFuture<>(
            cctx.kernalContext(),
            fut,
            new C2<GridNearLockResponse<K, V>, Exception, Boolean>() {
                @Override public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
                    if (CU.isLockTimeoutOrCancelled(e) ||
                        (res != null && CU.isLockTimeoutOrCancelled(res.error())))
                        return false;

                    if (e != null) {
                        onError(e);

                        return false;
                    }

                    if (res == null) {
                        onError(new GridException("Lock response is null for future: " + this));

                        return false;
                    }

                    if (res.error() != null) {
                        onError(res.error());

                        return false;
                    }

                    if (log.isDebugEnabled())
                        log.debug("Acquired lock for local DHT mapping [locId=" + cctx.nodeId() +
                            ", mappedKeys=" + mappedKeys + ", fut=" + GridNearLockFuture.this + ']');

                    try {
                        int i = 0;

                        for (K k : mappedKeys) {
                            while (true) {
                                GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, req.topologyVersion());

                                try {
                                    GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key());

                                    boolean hasBytes = entry.hasValue();

                                    V oldVal = entry.rawGet();
                                    V newVal = res.value(i);

                                    byte[] newBytes = res.valueBytes(i);

                                    GridCacheVersion dhtVer = res.dhtVersion(i);
                                    GridCacheVersion mappedVer = res.mappedVersion(i);

                                    // On local node don't record twice if DHT cache already recorded.
                                    boolean record = retval && oldValTup != null && oldValTup.get1().equals(dhtVer);

                                    if (newVal == null) {
                                        if (oldValTup != null) {
                                            if (oldValTup.get1().equals(dhtVer)) {
                                                newVal = oldValTup.get2();

                                                newBytes = oldValTup.get3();
                                            }

                                            oldVal = oldValTup.get2();
                                        }
                                    }

                                    // Lock is held at this point, so we can set the
                                    // returned value if any.
                                    entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

                                    entry.readyNearLock(lockVer, mappedVer, res.committedVersions(),
                                        res.rolledbackVersions(), res.pending());

                                    if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                                        boolean pass = res.filterResult(i);

                                        tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
                                    }

                                    if (record) {
                                        if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                                            cctx.events().addEvent(
                                                entry.partition(),
                                                entry.key(),
                                                tx,
                                                null,
                                                EVT_CACHE_OBJECT_READ,
                                                newVal,
                                                newVal != null,
                                                oldVal,
                                                hasBytes,
                                                CU.subjectId(tx, cctx));

                                        cctx.cache().metrics0().onRead(oldVal != null);
                                    }

                                    if (log.isDebugEnabled())
                                        log.debug("Processed response for entry [res=" + res +
                                            ", entry=" + entry + ']');

                                    break; // Inner while loop.
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    if (log.isDebugEnabled())
                                        log.debug("Failed to add candidates because entry was removed (will renew).");

                                    // Replace old entry with new one.
                                    entries.set(i,
                                        (GridDistributedCacheEntry<K, V>)cctx.cache().entryEx(entry.key()));
                                }
                            }

                            i++; // Increment outside of while loop.
                        }

                        // Proceed and add new future (if any) before completing embedded future.
                        proceedMapping(mappings);
                    }
                    catch (GridException ex) {
                        onError(ex);

                        return false;
                    }

                    return true;
                }
            }));
    }
    else {
        final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings);

        req.miniId(fut.futureId());

        add(fut); // Append new future.

        GridFuture<?> txSync = null;

        if (inTx())
            txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

        if (txSync == null || txSync.isDone()) {
            try {
                if (log.isDebugEnabled())
                    log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                cctx.io().send(node, req);
            }
            catch (GridTopologyException ex) {
                assert fut != null;

                fut.onResult(ex);
            }
        }
        else {
            txSync.listenAsync(new CI1<GridFuture<?>>() {
                @Override public void apply(GridFuture<?> t) {
                    try {
                        if (log.isDebugEnabled())
                            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                        cctx.io().send(node, req);
                    }
                    catch (GridTopologyException ex) {
                        assert fut != null;

                        fut.onResult(ex);
                    }
                    catch (GridException e) {
                        onError(e);
                    }
                }
            });
        }
    }
}
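/*
 * Illustration only: proceedMapping(...) polls one mapping, and the continuation of its lock
 * future calls proceedMapping(...) again, so mappings are processed strictly one after another
 * without blocking a thread. A minimal sketch of that chained-async pattern with
 * java.util.concurrent.CompletableFuture; MappingChain and processOne are hypothetical names,
 * not GridGain API.
 */
import java.util.Deque;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

final class MappingChain {
    /**
     * Processes mappings from the queue sequentially: the next mapping is only started once the
     * future returned for the previous one has completed.
     *
     * @return Future that completes when the whole queue has been drained.
     */
    static <M> CompletableFuture<Void> proceed(Deque<M> mappings, Function<M, CompletableFuture<?>> processOne) {
        M map = mappings.poll();

        // If there are no more mappings to process, the chain is complete.
        if (map == null)
            return CompletableFuture.completedFuture(null);

        // Chain the next step onto the completion of the current one.
        return processOne.apply(map).thenCompose(ignored -> proceed(mappings, processOne));
    }
}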
/**
 * Maps keys to nodes. Note that we cannot simply group keys by nodes and send lock requests,
 * as such an approach does not preserve the order of lock acquisition. Instead, keys are split
 * into contiguous groups belonging to one primary node, and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException(
                "Failed to map keys for near-only cache (all partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();

            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() :
                                        "Got removed exception for non-obsolete entry: " + dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                            dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
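/*
 * Illustration only: map(Iterable) above builds a new mapping only when the primary node changes
 * from one key to the next, so the caller's key order is preserved and locks are acquired group
 * by group. A minimal sketch comparing that contiguous grouping with a naive group-by-node;
 * ContiguousGrouping, Group and primaryOf are hypothetical names, not GridGain API.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

final class ContiguousGrouping {
    /** One contiguous run of keys mapped to the same primary node. */
    static final class Group<K, N> {
        final N node;
        final List<K> keys = new ArrayList<>();

        Group(N node) {
            this.node = node;
        }
    }

    /**
     * Splits keys into contiguous groups per primary node. Unlike a plain group-by-node
     * (e.g. a HashMap from node to keys), this keeps the original key order, which is what
     * preserves the order of lock acquisition.
     */
    static <K, N> List<Group<K, N>> split(Iterable<K> keys, Function<K, N> primaryOf) {
        List<Group<K, N>> groups = new ArrayList<>();

        Group<K, N> cur = null;

        for (K key : keys) {
            N primary = primaryOf.apply(key);

            // Start a new group only when the primary node changes.
            if (cur == null || !cur.node.equals(primary)) {
                cur = new Group<>(primary);

                groups.add(cur);
            }

            cur.keys.add(key);
        }

        return groups;
    }
}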
/** @return {@code True} if rollback is synchronous. */
private boolean syncRollback() {
    return tx != null && tx.syncRollback();
}

/** @return Transaction isolation or {@code null} if no transaction. */
@Nullable private GridCacheTxIsolation isolation() {
    return tx == null ? null : tx.isolation();
}

/** @return {@code True} if related transaction is implicit. */
private boolean implicitTx() {
    return tx != null && tx.implicit();
}