/**
 * @param tx Committed cache transaction.
 */
public GridCacheCommittedTxInfo(GridCacheTxEx<K, V> tx) {
    assert !tx.local() || !tx.replicated();

    originatingTxId = tx.nearXidVersion();
    originatingNodeId = tx.eventNodeId();

    recoveryWrites = tx.recoveryWrites();
}
/**
 * Checks that sync commit flags are in the correct state.
 *
 * @param msg Message.
 */
private void checkSyncFlags(GridIoMessage msg) {
    if (!commSpiEnabled)
        return;

    Object o = msg.message();

    if (!(o instanceof GridDistributedLockRequest))
        return;

    GridKernal g = (GridKernal)G.grid(nodeId);

    GridCacheTxManager<Object, Object> tm = g.internalCache(REPLICATED_ASYNC_CACHE_NAME).context().tm();

    GridCacheVersion v = ((GridCacheVersionable)o).version();

    GridCacheTxEx t = tm.tx(v);

    if (t.hasWriteKey("x1"))
        assertFalse(t.syncCommit());
    else if (t.hasWriteKey("x2"))
        assertTrue(t.syncCommit());
    else if (t.hasWriteKey("x3"))
        assertFalse(t.syncCommit());
    else if (t.hasWriteKey("x4"))
        assertTrue(t.syncCommit());
}
/** {@inheritDoc} */
@Override public boolean tmLock(GridCacheTxEx<K, V> tx, long timeout)
    throws GridCacheEntryRemovedException, GridDistributedLockCancelledException {
    if (tx.local()) {
        GridDhtTxLocal<K, V> dhtTx = (GridDhtTxLocal<K, V>)tx;

        // Null is returned if timeout is negative and there is another lock owner.
        return addDhtLocal(
            dhtTx.nearNodeId(),
            dhtTx.nearXidVersion(),
            tx.threadId(),
            tx.xidVersion(),
            timeout,
            false,
            tx.ec(),
            true) != null;
    }

    try {
        addRemote(
            tx.nodeId(),
            tx.otherNodeId(),
            tx.threadId(),
            tx.xidVersion(),
            tx.timeout(),
            tx.ec(),
            true);

        return true;
    }
    catch (GridDistributedLockCancelledException ignored) {
        if (log.isDebugEnabled())
            log.debug("Attempted to enter tx lock for cancelled ID (will ignore): " + tx);

        return false;
    }
}
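The null-on-contention convention above (addDhtLocal() returning {@code null} when the timeout is negative and another owner holds the lock) is essentially a fail-fast try-lock. The following standalone sketch is only an analogy built on standard JDK classes, not the GridGain lock manager API; the class and method names are illustrative assumptions.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

/** Illustrative analogy (not GridGain code): acquire-or-fail semantics similar to tmLock(). */
public class TryLockSketch {
    private final ReentrantLock lock = new ReentrantLock();

    /**
     * @param timeout Timeout in milliseconds; negative means fail immediately if another owner holds the lock.
     * @return {@code true} if the lock was acquired, {@code false} otherwise
     *      (analogous to addDhtLocal() returning {@code null}).
     */
    public boolean tryAcquire(long timeout) throws InterruptedException {
        if (timeout < 0)
            return lock.tryLock(); // Fail-fast: do not wait for the current owner.

        return lock.tryLock(timeout, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        TryLockSketch s = new TryLockSketch();

        System.out.println("First acquire: " + s.tryAcquire(-1));      // true - uncontended.
        System.out.println("Reentrant acquire: " + s.tryAcquire(100)); // true - ReentrantLock is reentrant.
    }
}
```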
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param failedNodeId ID of the failed node that started the transaction.
 */
@SuppressWarnings("ConstantConditions")
public GridCachePessimisticCheckCommittedTxFuture(GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx,
    UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion()))
        nodes.put(node.id(), node);
}
/**
 * @param nodeId Reader to add.
 * @param msgId Message ID.
 * @return Future for all relevant transactions that were active at the time the reader was added,
 *      or {@code null} if the reader was not added or there are no active transactions to wait for.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable public GridFuture<Boolean> addReader(UUID nodeId, long msgId) throws GridCacheEntryRemovedException {
    // Don't add local node as reader.
    if (cctx.nodeId().equals(nodeId))
        return null;

    GridNode node = cctx.discovery().node(nodeId);

    // If remote node has no near cache, don't add it.
    if (node == null || !U.hasNearCache(node, cctx.dht().near().name()))
        return null;

    // If remote node is a primary or backup (affinity node) for this partition, don't add it as a reader.
    if (U.nodeIds(cctx.affinity(partition(), CU.allNodes(cctx))).contains(nodeId))
        return null;

    boolean ret = false;

    GridCacheMultiTxFuture<K, V> txFut;

    Collection<GridCacheMvccCandidate<K>> cands = null;

    synchronized (mux) {
        checkObsolete();

        txFut = this.txFut;

        ReaderId reader = readerId(nodeId);

        if (reader == null) {
            reader = new ReaderId(nodeId, msgId);

            readers = new LinkedList<ReaderId>(readers);

            readers.add(reader);

            // Seal the updated readers list.
            readers = Collections.unmodifiableList(readers);

            txFut = this.txFut = new GridCacheMultiTxFuture<K, V>(cctx);

            cands = localCandidates();

            ret = true;
        }
        else {
            long id = reader.messageId();

            if (id < msgId)
                reader.messageId(msgId);
        }
    }

    if (ret) {
        assert txFut != null;

        if (!F.isEmpty(cands)) {
            for (GridCacheMvccCandidate<K> c : cands) {
                GridCacheTxEx<K, V> tx = cctx.tm().<GridCacheTxEx<K, V>>tx(c.version());

                if (tx != null) {
                    assert tx.local();

                    txFut.addTx(tx);
                }
            }
        }

        txFut.init();

        if (!txFut.isDone()) {
            txFut.listenAsync(new CI1<GridFuture<?>>() {
                @Override public void apply(GridFuture<?> f) {
                    synchronized (mux) {
                        // Release memory.
                        GridDhtCacheEntry.this.txFut = null;
                    }
                }
            });
        }
        else
            // Release memory.
            txFut = this.txFut = null;
    }

    return txFut;
}
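addReader() maintains the readers list with a copy-on-write-and-seal pattern: the writer copies the list under the entry mutex, appends the new reader, and publishes an unmodifiable copy, so other code can iterate a snapshot without holding the lock. Below is a minimal standalone sketch of that pattern using only JDK classes; the Reader stand-in type and field names are illustrative assumptions, not GridGain's ReaderId.

```java
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;

/** Illustrative sketch (not GridGain code) of the copy-on-write-and-seal readers list used in addReader(). */
public class ReadersSketch {
    /** Hypothetical stand-in for ReaderId. */
    static final class Reader {
        final UUID nodeId;
        volatile long msgId;

        Reader(UUID nodeId, long msgId) {
            this.nodeId = nodeId;
            this.msgId = msgId;
        }
    }

    private final Object mux = new Object();

    /** Always an unmodifiable snapshot; safe to iterate without holding mux. */
    private volatile List<Reader> readers = Collections.emptyList();

    /** @return {@code true} if a new reader was added, {@code false} if only the message ID was refreshed. */
    public boolean addReader(UUID nodeId, long msgId) {
        synchronized (mux) {
            for (Reader r : readers) {
                if (r.nodeId.equals(nodeId)) {
                    // Existing reader: just remember the latest message ID.
                    if (r.msgId < msgId)
                        r.msgId = msgId;

                    return false;
                }
            }

            List<Reader> copy = new LinkedList<>(readers);

            copy.add(new Reader(nodeId, msgId));

            // Seal and publish the new snapshot.
            readers = Collections.unmodifiableList(copy);

            return true;
        }
    }

    /** Lock-free read path: uses the current sealed snapshot. */
    public int readerCount() {
        return readers.size();
    }
}
```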
/** {@inheritDoc} */
@Override public GridCacheVersion version() {
    return tx.xidVersion();
}
/**
 * Initializes future.
 */
public void prepare() {
    if (log.isDebugEnabled())
        log.debug("Checking if transaction was committed on remote nodes: " + tx);

    // Check local node first (local node can be a backup node for some part of this transaction).
    long originatingThreadId = tx.threadId();

    if (tx instanceof GridCacheTxRemoteEx)
        originatingThreadId = ((GridCacheTxRemoteEx)tx).remoteThreadId();

    GridCacheCommittedTxInfo<K, V> txInfo = cctx.tm().txCommitted(tx.nearXidVersion(), tx.eventNodeId(),
        originatingThreadId);

    if (txInfo != null) {
        onDone(txInfo);

        markInitialized();

        return;
    }

    Collection<GridNode> checkNodes = CU.remoteNodes(cctx, tx.topologyVersion());

    if (tx instanceof GridDhtTxRemote) {
        // If the primary node failed but the near node has not.
        if (tx.nodeId().equals(failedNodeId) && !tx.eventNodeId().equals(failedNodeId)) {
            nearCheck = true;

            GridNode nearNode = cctx.discovery().node(tx.eventNodeId());

            if (nearNode == null) {
                // Near node failed, separate check prepared future will take care of it.
                onDone(new GridTopologyException("Failed to check near transaction state (near node left grid): " +
                    tx.eventNodeId()));

                return;
            }

            checkNodes = Collections.singletonList(nearNode);
        }
    }

    for (GridNode rmtNode : checkNodes) {
        // Skip the failed node.
        if (rmtNode.id().equals(failedNodeId))
            continue;

        // Send message to all remaining cache nodes in the topology.
        MiniFuture fut = new MiniFuture(rmtNode.id());

        GridCachePessimisticCheckCommittedTxRequest<K, V> req = new GridCachePessimisticCheckCommittedTxRequest<>(
            tx, originatingThreadId, futureId(), fut.futureId());

        add(fut);

        try {
            cctx.io().send(rmtNode.id(), req);
        }
        catch (GridTopologyException ignored) {
            fut.onNodeLeft();
        }
        catch (GridException e) {
            fut.onError(e);

            break;
        }
    }

    markInitialized();
}
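prepare() fans the check-committed request out to the remaining nodes and tracks each reply with a MiniFuture added to the compound future, so a node that has already left the grid is handled as a benign miss rather than a failure of the whole check. Below is a rough standalone analogy of that scatter-gather shape using JDK CompletableFutures; the checkNode function, the NodeLeftException type, and the String result are illustrative assumptions, not GridGain APIs.

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

/** Illustrative analogy (not GridGain code) of the per-node mini-future fan-out in prepare(). */
public class CheckCommittedSketch {
    /** Hypothetical exception signalling that the target node left the grid. */
    static class NodeLeftException extends RuntimeException {
    }

    /**
     * @param nodes Nodes to query.
     * @param checkNode Hypothetical async check: resolves to the committed-tx info or empty.
     * @return Future completing with the first non-empty answer, or empty if no node has a record.
     */
    public static CompletableFuture<Optional<String>> checkAll(
        Collection<UUID> nodes,
        Function<UUID, CompletableFuture<Optional<String>>> checkNode
    ) {
        List<CompletableFuture<Optional<String>>> minis = new ArrayList<>();

        for (UUID nodeId : nodes) {
            CompletableFuture<Optional<String>> mini = checkNode.apply(nodeId).exceptionally(e -> {
                // Node left while we were asking: treat as "no information", not as a failure.
                if (e instanceof NodeLeftException || e.getCause() instanceof NodeLeftException)
                    return Optional.empty();

                throw new RuntimeException(e);
            });

            minis.add(mini);
        }

        // Complete once every mini-future has a result, then pick the first positive answer.
        return CompletableFuture.allOf(minis.toArray(new CompletableFuture[0]))
            .thenApply(ignored -> minis.stream()
                .map(CompletableFuture::join)
                .filter(Optional::isPresent)
                .findFirst()
                .orElse(Optional.empty()));
    }
}
```

The per-node mini-future shape lets one slow or departed node be resolved independently (onNodeLeft() in the original, the exceptionally() branch in the sketch) without blocking or failing the aggregate result.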