/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                }
                catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        }
        else if (tx.implicit() && tx.isSystemInvalidate()) {
            // Finish implicit transaction on heuristic error.
            try {
                tx.close();
            }
            catch (GridException ex) {
                U.error(log, "Failed to invalidate transaction: " + tx, ex);
            }
        }

        onComplete();
    }
}
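/*
 * A minimal, JDK-only sketch of the "first error wins" idiom used in onError()
 * above: 'err.compareAndSet(null, e)' lets exactly one of any number of
 * concurrent failures mark the transaction and complete the future. The names
 * below are illustrative only, not part of the GridGain API.
 */
import java.util.concurrent.atomic.AtomicReference;

class FirstErrorLatch {
    private final AtomicReference<Throwable> err = new AtomicReference<>();

    /** @return {@code true} only for the single caller that latched the error. */
    boolean onError(Throwable e) {
        if (err.compareAndSet(null, e)) {
            complete(); // Runs exactly once, no matter how many callers fail concurrently.

            return true;
        }

        return false; // Another error already won the race; ignore this one.
    }

    private void complete() {
        // Signal waiters, release resources, etc.
    }
}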
/** Initializes future. */
@SuppressWarnings({"unchecked"})
void finish() {
    if (mappings != null) {
        finish(mappings.values());

        markInitialized();

        if (!isSync()) {
            boolean complete = true;

            for (GridFuture<?> f : pending())
                // Mini-future in non-sync mode gets done when message gets sent.
                if (isMini(f) && !f.isDone())
                    complete = false;

            if (complete)
                onComplete();
        }
    }
    else {
        assert !commit;

        try {
            tx.rollback();
        }
        catch (GridException e) {
            U.error(log, "Failed to rollback empty transaction: " + tx, e);
        }

        markInitialized();
    }
}
/** {@inheritDoc} */
@Override public boolean onDone(GridCacheTx tx, Throwable err) {
    if ((initialized() || err != null) && super.onDone(tx, err)) {
        if (error() instanceof GridCacheTxHeuristicException) {
            long topVer = this.tx.topologyVersion();

            for (GridCacheTxEntry<K, V> e : this.tx.writeMap().values()) {
                try {
                    if (e.op() != NOOP && !cctx.affinity().localNode(e.key(), topVer)) {
                        GridCacheEntryEx<K, V> cacheEntry = cctx.cache().peekEx(e.key());

                        if (cacheEntry != null)
                            cacheEntry.invalidate(null, this.tx.xidVersion());
                    }
                }
                catch (Throwable t) {
                    U.error(log, "Failed to invalidate entry.", t);

                    if (t instanceof Error)
                        throw (Error)t;
                }
            }
        }

        // Don't forget to clean up.
        cctx.mvcc().removeFuture(this);

        return true;
    }

    return false;
}
/**
 * @param nodeId Sender.
 * @param res Result.
 */
void onResult(UUID nodeId, GridNearLockResponse<K, V> res) {
    if (!isDone()) {
        if (log.isDebugEnabled())
            log.debug("Received lock response from node [nodeId=" + nodeId + ", res=" + res +
                ", fut=" + this + ']');

        for (GridFuture<Boolean> fut : pending()) {
            if (isMini(fut)) {
                MiniFuture mini = (MiniFuture)fut;

                if (mini.futureId().equals(res.miniId())) {
                    assert mini.node().id().equals(nodeId);

                    if (log.isDebugEnabled())
                        log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');

                    mini.onResult(res);

                    if (log.isDebugEnabled())
                        log.debug("Future after processed lock response [fut=" + this + ", mini=" + mini +
                            ", res=" + res + ']');

                    return;
                }
            }
        }

        U.warn(log, "Failed to find mini future for response (perhaps due to stale message) [res=" + res +
            ", fut=" + this + ']');
    }
    else if (log.isDebugEnabled())
        log.debug("Ignoring lock response from node (future is done) [nodeId=" + nodeId +
            ", res=" + res + ", fut=" + this + ']');
}
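/*
 * A minimal, JDK-only sketch of the response correlation done in onResult()
 * above: each outgoing sub-request carries a generated mini-future ID, and an
 * incoming response is routed to the matching pending mini-future; a stale
 * response finds no match and is merely logged. All names are illustrative,
 * not the GridGain API.
 */
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class MiniFutureRouter<R> {
    private final Map<UUID, CompletableFuture<R>> pending = new ConcurrentHashMap<>();

    /** Registers a mini-future and returns the ID to send along with the request. */
    UUID register(CompletableFuture<R> miniFut) {
        UUID miniId = UUID.randomUUID();

        pending.put(miniId, miniFut);

        return miniId;
    }

    /** Routes a response to its mini-future, or drops it if none matches. */
    void onResponse(UUID miniId, R res) {
        CompletableFuture<R> miniFut = pending.remove(miniId);

        if (miniFut != null)
            miniFut.complete(res); // Found the matching mini-future.
        // else: stale or duplicate message; log and ignore, as above.
    }
}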
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param failedNodeId ID of the failed node that started the transaction.
 */
@SuppressWarnings("ConstantConditions")
public GridCachePessimisticCheckCommittedTxFuture(GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx,
    UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    // Use this future's own class as the logger category (the original
    // referenced GridCacheOptimisticCheckPreparedTxFuture, an apparent copy-paste slip).
    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion()))
        nodes.put(node.id(), node);
}
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param commit Commit flag.
 */
public GridNearTxFinishFuture(GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    assert cctx != null;

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
}
/** @param e Error. */
void onResult(Throwable e) {
    if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
            log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

        // Fail.
        onDone(e);
    }
    else
        U.warn(log, "Received error after another result has been processed [fut=" + GridNearLockFuture.this +
            ", mini=" + this + ']', e);
}
/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException && marked) {
            try {
                tx.rollback();
            }
            catch (GridException ex) {
                U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
            }
        }

        onComplete();
    }
}
/**
 * @param cctx Registry.
 * @param keys Keys to lock.
 * @param tx Transaction.
 * @param read Read flag.
 * @param retval Flag indicating whether a value should be returned.
 * @param timeout Lock acquisition timeout.
 * @param filter Filter.
 */
public GridNearLockFuture(GridCacheContext<K, V> cctx, Collection<? extends K> keys,
    @Nullable GridNearTxLocal<K, V> tx, boolean read, boolean retval, long timeout,
    GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
        timeoutObj = new LockTimeoutObject();

        cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
}
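/*
 * A minimal, JDK-only sketch of the timeout registration at the end of the
 * constructor above: when a positive timeout is configured, a task is
 * scheduled that fails the future unless the locks are acquired first. The
 * names below are illustrative; GridGain uses its own timeout processor
 * (cctx.time()), not a plain executor.
 */
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class LockTimeouts {
    private static final ScheduledExecutorService TIMER =
        Executors.newSingleThreadScheduledExecutor();

    /** Fails {@code fut} with a timeout error unless it completes beforehand. */
    static void addTimeout(CompletableFuture<Boolean> fut, long timeoutMs) {
        if (timeoutMs > 0)
            TIMER.schedule(
                () -> fut.completeExceptionally(new TimeoutException("Lock acquisition timed out.")),
                timeoutMs,
                TimeUnit.MILLISECONDS);
    }
}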
/**
 * Removes locks for the given version and keys, regardless of whether they are owned.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" + ver +
                            ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to remove locks for keys: " + keys, ex);
    }
}
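/*
 * A minimal sketch of the retry loop that removeLocks() above (and the other
 * entry-touching methods here) rely on: an entry may be concurrently removed
 * between the peek and the operation, in which case the operation throws and
 * the loop re-reads a renewed entry. All types below are hypothetical
 * stand-ins for the GridGain classes.
 */
class EntryRemovedException extends Exception {
}

interface CacheEntry<K> {
    /** @throws EntryRemovedException If the entry was concurrently removed. */
    void removeLock() throws EntryRemovedException;
}

interface EntryStore<K> {
    /** @return Current entry for the key, or {@code null} if none. */
    CacheEntry<K> peek(K key);
}

class RetryOnRemoved {
    static <K> void removeLock(EntryStore<K> store, K key) {
        while (true) {
            CacheEntry<K> entry = store.peek(key);

            if (entry == null)
                return; // Nothing to unlock.

            try {
                entry.removeLock();

                return; // Success: exit the retry loop.
            }
            catch (EntryRemovedException ignored) {
                // Entry was removed concurrently; retry on a renewed entry.
            }
        }
    }
}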
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(),
                        Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes) [lock=" +
                                    rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock keys: " + keys, ex);
    }
}
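/*
 * A minimal, JDK-only sketch of the per-node batching that unlockAll() and
 * removeLocks() above perform: keys are grouped by their primary node so a
 * single unlock request goes to each node instead of one message per key.
 * Names are illustrative, not the GridGain API.
 */
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

class UnlockBatcher {
    /** Groups keys by primary node; each map value becomes one request payload. */
    static <N, K> Map<N, List<K>> batchByPrimary(Collection<K> keys, Function<K, N> primary) {
        Map<N, List<K>> batches = new HashMap<>();

        for (K key : keys)
            batches.computeIfAbsent(primary.apply(key), n -> new ArrayList<>()).add(key);

        return batches;
    }
}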
/**
 * @param nodeId Primary node ID.
 * @param req Request.
 * @return Remote transaction.
 * @throws GridException If failed.
 * @throws GridDistributedLockCancelledException If lock has been cancelled.
 */
@SuppressWarnings({"RedundantTypeArguments"})
@Nullable public GridNearTxRemote<K, V> startRemoteTxForFinish(UUID nodeId, GridDhtTxFinishRequest<K, V> req)
    throws GridException, GridDistributedLockCancelledException {
    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
        for (GridCacheTxEntry<K, V> txEntry : req.nearWrites()) {
            GridDistributedCacheEntry<K, V> entry = null;

            while (true) {
                try {
                    entry = peekExx(txEntry.key());

                    if (entry != null) {
                        entry.keyBytes(txEntry.keyBytes());

                        // Handle implicit locks for pessimistic transactions.
                        tx = ctx.tm().tx(req.version());

                        if (tx != null) {
                            if (tx.local())
                                return null;

                            if (tx.markFinalizing())
                                tx.addWrite(txEntry.key(), txEntry.keyBytes());
                            else
                                return null;
                        }
                        else {
                            tx = new GridNearTxRemote<K, V>(
                                nodeId,
                                req.nearNodeId(),
                                req.threadId(),
                                req.version(),
                                null,
                                PESSIMISTIC,
                                req.isolation(),
                                req.isInvalidate(),
                                0,
                                txEntry.key(),
                                txEntry.keyBytes(),
                                txEntry.value(),
                                txEntry.valueBytes(),
                                ctx);

                            if (tx.empty())
                                return tx;

                            tx = ctx.tm().onCreated(tx);

                            if (tx == null || !ctx.tm().onStarted(tx))
                                throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                    "(transaction has been completed): " + req.version());

                            if (!tx.markFinalizing())
                                return null;
                        }

                        // Add remote candidate before reordering.
                        if (txEntry.explicitVersion() == null)
                            entry.addRemote(
                                req.nearNodeId(),
                                nodeId,
                                req.threadId(),
                                req.version(),
                                0,
                                tx.ec(),
                                /*tx*/true,
                                tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            Collections.<GridCacheMvccCandidate<K>>emptyList(),
                            req.version(),
                            req.committedVersions(),
                            req.rolledbackVersions());
                    }

                    // Double-check in case the sender node left the grid.
                    if (ctx.discovery().node(req.nearNodeId()) == null) {
                        if (log.isDebugEnabled())
                            log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

                        if (tx != null)
                            tx.rollback();

                        return null;
                    }

                    // Entry is legit.
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                    if (log.isDebugEnabled())
                        log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                    if (tx != null) {
                        tx.clearEntry(entry.key());

                        if (log.isDebugEnabled())
                            log.debug("Cleared removed entry from remote transaction (will retry) [entry=" +
                                entry + ", tx=" + tx + ']');
                    }
                }
            }
        }
    }
    else {
        String err = "Failed to acquire deployment class loader for message: " + req;

        U.warn(log, err);

        throw new GridException(err);
    }

    return tx;
}
/**
 * @param nodeId Primary node ID.
 * @param req Request.
 * @return Remote transaction.
 * @throws GridException If failed.
 * @throws GridDistributedLockCancelledException If lock has been cancelled.
 */
@SuppressWarnings({"RedundantTypeArguments"})
@Nullable public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
    throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
        for (int i = 0; i < nearKeyBytes.size(); i++) {
            byte[] bytes = nearKeyBytes.get(i);

            if (bytes == null)
                continue;

            K key = req.nearKeys().get(i);

            Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

            if (log.isDebugEnabled())
                log.debug("Unmarshalled key: " + key);

            GridNearCacheEntry<K, V> entry = null;

            while (true) {
                try {
                    entry = peekExx(key);

                    if (entry != null) {
                        entry.keyBytes(bytes);

                        // Handle implicit locks for pessimistic transactions.
                        if (req.inTx()) {
                            tx = ctx.tm().tx(req.version());

                            if (tx != null)
                                tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                            else {
                                tx = new GridNearTxRemote<K, V>(
                                    nodeId,
                                    req.nearNodeId(),
                                    req.threadId(),
                                    req.version(),
                                    null,
                                    PESSIMISTIC,
                                    req.isolation(),
                                    req.isInvalidate(),
                                    req.timeout(),
                                    key,
                                    bytes,
                                    null, // Value.
                                    null, // Value bytes.
                                    ctx);

                                if (tx.empty())
                                    return tx;

                                tx = ctx.tm().onCreated(tx);

                                if (tx == null || !ctx.tm().onStarted(tx))
                                    throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                        "(transaction has been completed): " + req.version());
                            }
                        }

                        // Add remote candidate before reordering.
                        entry.addRemote(
                            req.nodeId(),
                            nodeId,
                            req.threadId(),
                            req.version(),
                            req.timeout(),
                            tx != null && tx.ec(),
                            tx != null,
                            tx != null && tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            cands,
                            req.version(),
                            req.committedVersions(),
                            req.rolledbackVersions());

                        entry.orderOwned(req.version(), req.owned(entry.key()));
                    }

                    // Double-check in case the sender node left the grid.
                    if (ctx.discovery().node(req.nodeId()) == null) {
                        if (log.isDebugEnabled())
                            log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

                        if (tx != null)
                            tx.rollback();

                        return null;
                    }

                    // Entry is legit.
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                    if (log.isDebugEnabled())
                        log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                    if (tx != null) {
                        tx.clearEntry(entry.key());

                        if (log.isDebugEnabled())
                            log.debug("Cleared removed entry from remote transaction (will retry) [entry=" +
                                entry + ", tx=" + tx + ']');
                    }
                }
            }
        }
    }
    else {
        String err = "Failed to acquire deployment class loader for message: " + req;

        U.warn(log, err);

        throw new GridException(err);
    }

    return tx;
}
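/*
 * A minimal, JDK-only sketch of the look-up-or-create idiom that both
 * startRemoteTx() and startRemoteTxForFinish() above follow: the transaction
 * manager is consulted by version first, and a new remote transaction is
 * created and registered only if none exists; a completed transaction is
 * unregistered so late messages cannot revive it. Hypothetical names throughout.
 */
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

class TxRegistry<T> {
    private final Map<UUID, T> active = new ConcurrentHashMap<>();

    /**
     * @return Existing transaction for the version, or the freshly registered
     *      candidate if no transaction was mapped to that version yet.
     */
    T getOrRegister(UUID ver, T candidate) {
        T existing = active.putIfAbsent(ver, candidate);

        return existing != null ? existing : candidate;
    }

    /** Unregisters a completed transaction so stale requests find nothing. */
    void onCompleted(UUID ver) {
        active.remove(ver);
    }
}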