/** @param m Mapping. */ @SuppressWarnings({"unchecked"}) private void finish(GridDistributedTxMapping<K, V> m) { GridRichNode n = m.node(); assert !m.empty(); GridNearTxFinishRequest req = new GridNearTxFinishRequest<K, V>( futId, tx.xidVersion(), tx.commitVersion(), tx.threadId(), commit, tx.isInvalidate(), m.explicitLock(), tx.topologyVersion(), null, null, null, commit && tx.pessimistic() ? m.writes() : null, tx.syncCommit() && commit || tx.syncRollback() && !commit); // If this is the primary node for the keys. if (n.isLocal()) { req.miniId(GridUuid.randomUuid()); if (CU.DHT_ENABLED) { GridFuture<GridCacheTx> fut = commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req); // Add new future. add(fut); } else // Add done future for testing. add(new GridFinishedFuture<GridCacheTx>(ctx)); } else { MiniFuture fut = new MiniFuture(m); req.miniId(fut.futureId()); add(fut); // Append new future. try { cctx.io().send(n, req); // If we don't wait for result, then mark future as done. if (!isSync() && !m.explicitLock()) fut.onDone(); } catch (GridTopologyException e) { // Remove previous mapping. mappings.remove(m.node().id()); fut.onResult(e); } catch (GridException e) { // Fail the whole thing. fut.onResult(e); } } }
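/*
 * Illustration only: finish() above takes a local fast path when the primary node is this
 * node (the DHT commit/rollback future is added directly) and otherwise registers a
 * MiniFuture keyed by a random mini ID before sending the request over the wire, so the
 * response handler can complete the right future later. A minimal JDK-only sketch of that
 * "local call vs. tracked remote request" split; every name below is hypothetical, not
 * GridGain API.
 */
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Consumer;
import java.util.function.Supplier;

class FinishDispatcher<R> {
    /** Pending remote requests keyed by mini ID. */
    private final ConcurrentMap<UUID, CompletableFuture<R>> miniFuts = new ConcurrentHashMap<>();

    /** Local path: run the operation in-process and return its future directly. */
    CompletableFuture<R> finishLocal(Supplier<R> op) {
        return CompletableFuture.supplyAsync(op);
    }

    /** Remote path: register a mini-future first, then send the request tagged with its ID. */
    CompletableFuture<R> finishRemote(Consumer<UUID> send) {
        UUID miniId = UUID.randomUUID();
        CompletableFuture<R> fut = new CompletableFuture<>();

        miniFuts.put(miniId, fut);

        send.accept(miniId); // The response handler completes the future by mini ID.

        return fut;
    }

    /** Called when a response arrives; completes the matching mini-future, if still pending. */
    void onResponse(UUID miniId, R res) {
        CompletableFuture<R> fut = miniFuts.remove(miniId);

        if (fut != null)
            fut.complete(res);
    }
}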
/** * @param cctx Context. * @param tx Transaction. * @param commit Commit flag. */ public GridNearTxFinishFuture( GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) { super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx)); assert cctx != null; this.cctx = cctx; this.tx = tx; this.commit = commit; mappings = tx.mappings(); futId = GridUuid.randomUuid(); log = U.logger(ctx, logRef, GridNearTxFinishFuture.class); }
/** * @param cctx Registry. * @param keys Keys to lock. * @param tx Transaction. * @param read Read flag. * @param retval Flag to return value or not. * @param timeout Lock acquisition timeout. * @param filter Filter. */ public GridNearLockFuture( GridCacheContext<K, V> cctx, Collection<? extends K> keys, @Nullable GridNearTxLocal<K, V> tx, boolean read, boolean retval, long timeout, GridPredicate<GridCacheEntry<K, V>>[] filter) { super(cctx.kernalContext(), CU.boolReducer()); assert cctx != null; assert keys != null; this.cctx = cctx; this.keys = keys; this.tx = tx; this.read = read; this.retval = retval; this.timeout = timeout; this.filter = filter; threadId = tx == null ? Thread.currentThread().getId() : tx.threadId(); lockVer = tx != null ? tx.xidVersion() : cctx.versions().next(); futId = GridUuid.randomUuid(); entries = new ArrayList<>(keys.size()); log = U.logger(ctx, logRef, GridNearLockFuture.class); if (timeout > 0) { timeoutObj = new LockTimeoutObject(); cctx.time().addTimeoutObject(timeoutObj); } valMap = new ConcurrentHashMap8<>(keys.size(), 1f); }
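/*
 * Illustration only: when a positive timeout is passed, the constructor above registers a
 * LockTimeoutObject with the cache time processor so the lock future can be completed if
 * the locks are not acquired in time; with timeout <= 0 it waits indefinitely. A minimal
 * JDK-only sketch of that "register a timeout watcher on construction" idea; completing
 * with {@code false} on timeout is an assumption here, not necessarily the exact GridGain
 * semantics, and all names are hypothetical.
 */
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class LockTimeouts {
    private static final ScheduledExecutorService TIMER =
        Executors.newSingleThreadScheduledExecutor();

    /** Completes the future with {@code false} if it is still pending after {@code timeoutMs}. */
    static void watch(final CompletableFuture<Boolean> lockFut, long timeoutMs) {
        if (timeoutMs <= 0)
            return; // Zero or negative timeout means "wait forever".

        TIMER.schedule(new Runnable() {
            @Override public void run() {
                lockFut.complete(false); // No-op if the future has already completed.
            }
        }, timeoutMs, TimeUnit.MILLISECONDS);
    }

    private LockTimeouts() {
        // No instances.
    }
}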
/** * @param ctx Grid kernal context. * @param cacheName Cache name. * @param flushQ Flush queue. */ public GridDataLoaderImpl( final GridKernalContext ctx, @Nullable final String cacheName, DelayQueue<GridDataLoaderImpl<K, V>> flushQ) { assert ctx != null; this.ctx = ctx; this.cacheName = cacheName; this.flushQ = flushQ; log = U.logger(ctx, logRef, GridDataLoaderImpl.class); discoLsnr = new GridLocalEventListener() { @Override public void onEvent(GridEvent evt) { assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT; GridDiscoveryEvent discoEvt = (GridDiscoveryEvent) evt; UUID id = discoEvt.eventNodeId(); // Remap regular mappings. final Buffer buf = bufMappings.remove(id); if (buf != null) { // Only async notification is possible since // discovery thread may be trapped otherwise. ctx.closure() .callLocalSafe( new Callable<Object>() { @Override public Object call() throws Exception { buf.onNodeLeft(); return null; } }, true /* system pool */); } } }; ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT); // Generate unique topic for this loader. topic = TOPIC_DATALOAD.topic(GridUuid.fromUuid(ctx.localNodeId())); ctx.io() .addMessageListener( topic, new GridMessageListener() { @Override public void onMessage(UUID nodeId, Object msg) { assert msg instanceof GridDataLoadResponse; GridDataLoadResponse res = (GridDataLoadResponse) msg; if (log.isDebugEnabled()) log.debug("Received data load response: " + res); Buffer buf = bufMappings.get(nodeId); if (buf != null) buf.onResponse(res); else if (log.isDebugEnabled()) log.debug("Ignoring response since node has left [nodeId=" + nodeId + ']'); } }); if (log.isDebugEnabled()) log.debug("Added response listener within topic: " + topic); fut = new GridDataLoaderFuture(ctx, this); }
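/*
 * Illustration only: the discovery listener above must return quickly, so buffer cleanup
 * for a departed node is handed off to the system pool rather than executed on the
 * discovery thread ("discovery thread may be trapped otherwise"). The same hand-off
 * pattern sketched with JDK types only; every name below is hypothetical.
 */
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class NodeLeftHandler {
    /** Per-node cleanup tasks; a removed entry is run asynchronously. */
    private final ConcurrentMap<UUID, Runnable> bufCleanups = new ConcurrentHashMap<>();

    private final ExecutorService pool = Executors.newCachedThreadPool();

    /** Called from the discovery/event thread; must return quickly and never block. */
    void onNodeLeft(UUID nodeId) {
        Runnable cleanup = bufCleanups.remove(nodeId);

        if (cleanup != null)
            pool.submit(cleanup); // Hand off so the event thread is never trapped.
    }
}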
/** * Mini-future for finish operations. Mini-futures are only waiting on a single node as opposed to * multiple nodes. */ private class MiniFuture extends GridFutureAdapter<GridCacheTx> { /** */ private final GridUuid futId = GridUuid.randomUuid(); /** Mapping. */ @GridToStringInclude private GridDistributedTxMapping<K, V> m; /** Empty constructor required for {@link Externalizable}. */ public MiniFuture() { // No-op. } /** @param m Mapping. */ MiniFuture(GridDistributedTxMapping<K, V> m) { super(cctx.kernalContext()); this.m = m; } /** @return Future ID. */ GridUuid futureId() { return futId; } /** @return Node. */ public GridRichNode node() { return m.node(); } /** @return Mapping. */ public GridDistributedTxMapping<K, V> mapping() { return m; } /** @param e Error. */ void onResult(Throwable e) { if (log.isDebugEnabled()) log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']'); // Fail. onDone(e); } /** @param e Node failure. */ void onResult(GridTopologyException e) { if (log.isDebugEnabled()) log.debug( "Remote node left grid while sending or waiting for reply (will ignore): " + this); onDone(tx); } /** @param res Result callback. */ void onResult(GridNearTxFinishResponse<K, V> res) { onDone(tx); } /** {@inheritDoc} */ @Override public String toString() { return S.toString( MiniFuture.class, this, "done", isDone(), "cancelled", isCancelled(), "err", error()); } }
/** * Gets next near lock mapping and either acquires dht locks locally or sends near lock request to * remote primary node. * * @param mappings Queue of mappings. * @throws GridException If mapping can not be completed. */ private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings) throws GridException { GridNearLockMapping<K, V> map = mappings.poll(); // If there are no more mappings to process, complete the future. if (map == null) return; final GridNearLockRequest<K, V> req = map.request(); final Collection<K> mappedKeys = map.distributedKeys(); final GridNode node = map.node(); if (filter != null && filter.length != 0) req.filter(filter, cctx); if (node.isLocal()) { req.miniId(GridUuid.randomUuid()); if (log.isDebugEnabled()) log.debug("Before locally locking near request: " + req); GridFuture<GridNearLockResponse<K, V>> fut; if (CU.DHT_ENABLED) fut = dht().lockAllAsync(cctx.localNode(), req, filter); else { // Create dummy values for testing. GridNearLockResponse<K, V> res = new GridNearLockResponse<>(lockVer, futId, null, false, 1, null); res.addValueBytes(null, null, true, lockVer, lockVer, cctx); fut = new GridFinishedFuture<>(ctx, res); } // Add new future. add( new GridEmbeddedFuture<>( cctx.kernalContext(), fut, new C2<GridNearLockResponse<K, V>, Exception, Boolean>() { @Override public Boolean apply(GridNearLockResponse<K, V> res, Exception e) { if (CU.isLockTimeoutOrCancelled(e) || (res != null && CU.isLockTimeoutOrCancelled(res.error()))) return false; if (e != null) { onError(e); return false; } if (res == null) { onError(new GridException("Lock response is null for future: " + this)); return false; } if (res.error() != null) { onError(res.error()); return false; } if (log.isDebugEnabled()) log.debug( "Acquired lock for local DHT mapping [locId=" + cctx.nodeId() + ", mappedKeys=" + mappedKeys + ", fut=" + GridNearLockFuture.this + ']'); try { int i = 0; for (K k : mappedKeys) { while (true) { GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, req.topologyVersion()); try { GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key()); boolean hasBytes = entry.hasValue(); V oldVal = entry.rawGet(); V newVal = res.value(i); byte[] newBytes = res.valueBytes(i); GridCacheVersion dhtVer = res.dhtVersion(i); GridCacheVersion mappedVer = res.mappedVersion(i); // On local node don't record twice if DHT cache already recorded. boolean record = retval && oldValTup != null && oldValTup.get1().equals(dhtVer); if (newVal == null) { if (oldValTup != null) { if (oldValTup.get1().equals(dhtVer)) { newVal = oldValTup.get2(); newBytes = oldValTup.get3(); } oldVal = oldValTup.get2(); } } // Lock is held at this point, so we can set the // returned value if any. entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id()); entry.readyNearLock( lockVer, mappedVer, res.committedVersions(), res.rolledbackVersions(), res.pending()); if (inTx() && implicitTx() && tx.onePhaseCommit()) { boolean pass = res.filterResult(i); tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse()); } if (record) { if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ)) cctx.events() .addEvent( entry.partition(), entry.key(), tx, null, EVT_CACHE_OBJECT_READ, newVal, newVal != null, oldVal, hasBytes, CU.subjectId(tx, cctx)); cctx.cache().metrics0().onRead(oldVal != null); } if (log.isDebugEnabled()) log.debug( "Processed response for entry [res=" + res + ", entry=" + entry + ']'); break; // Inner while loop. 
} catch (GridCacheEntryRemovedException ignored) { if (log.isDebugEnabled()) log.debug( "Failed to add candidates because entry was " + "removed (will renew)."); // Replace old entry with new one. entries.set( i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key())); } } i++; // Increment outside of while loop. } // Proceed and add new future (if any) before completing embedded future. proceedMapping(mappings); } catch (GridException ex) { onError(ex); return false; } return true; } })); } else { final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings); req.miniId(fut.futureId()); add(fut); // Append new future. GridFuture<?> txSync = null; if (inTx()) txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId()); if (txSync == null || txSync.isDone()) { try { if (log.isDebugEnabled()) log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']'); cctx.io().send(node, req); } catch (GridTopologyException ex) { assert fut != null; fut.onResult(ex); } } else { txSync.listenAsync( new CI1<GridFuture<?>>() { @Override public void apply(GridFuture<?> t) { try { if (log.isDebugEnabled()) log.debug( "Sending near lock request [node=" + node.id() + ", req=" + req + ']'); cctx.io().send(node, req); } catch (GridTopologyException ex) { assert fut != null; fut.onResult(ex); } catch (GridException e) { onError(e); } } }); } } }
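/*
 * Illustration only: proceedMapping() above drains the mapping queue one element at a time
 * and re-invokes itself from the response callback, so the next lock request is sent only
 * after the previous one has been answered. A minimal JDK-only sketch of that drain
 * pattern; MappingDrain and Sender are hypothetical names, not GridGain API.
 */
import java.util.Deque;

class MappingDrain<T> {
    /** Sends one request and invokes the callback when its response arrives. */
    interface Sender<T> {
        void send(T mapping, Runnable onResponse);
    }

    private final Sender<T> sender;

    MappingDrain(Sender<T> sender) {
        this.sender = sender;
    }

    /** Processes the queue head; the remainder is processed from the response callback. */
    void proceed(final Deque<T> mappings) {
        T next = mappings.poll();

        if (next == null)
            return; // Nothing left to map - the compound future may now complete.

        sender.send(next, new Runnable() {
            @Override public void run() {
                proceed(mappings); // Chain to the next mapping once this one is done.
            }
        });
    }
}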
/** {@inheritDoc} */ @Override public int hashCode() { return futId.hashCode(); }
/** * Mini-future for get operations. Mini-futures are only waiting on a single node as opposed to * multiple nodes. */ private class MiniFuture extends GridFutureAdapter<Boolean> { /** */ private static final long serialVersionUID = 0L; /** */ private final GridUuid futId = GridUuid.randomUuid(); /** Node ID. */ @GridToStringExclude private GridNode node; /** Keys. */ @GridToStringInclude private Collection<K> keys; /** Mappings to proceed. */ @GridToStringExclude private ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings; /** */ private AtomicBoolean rcvRes = new AtomicBoolean(false); /** Empty constructor required for {@link Externalizable}. */ public MiniFuture() { // No-op. } /** * @param node Node. * @param keys Keys. * @param mappings Mappings to proceed. */ MiniFuture( GridNode node, Collection<K> keys, ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings) { super(cctx.kernalContext()); this.node = node; this.keys = keys; this.mappings = mappings; } /** @return Future ID. */ GridUuid futureId() { return futId; } /** @return Node ID. */ public GridNode node() { return node; } /** @return Keys. */ public Collection<K> keys() { return keys; } /** @param e Error. */ void onResult(Throwable e) { if (rcvRes.compareAndSet(false, true)) { if (log.isDebugEnabled()) log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']'); // Fail. onDone(e); } else U.warn( log, "Received error after another result has been processed [fut=" + GridNearLockFuture.this + ", mini=" + this + ']', e); } /** @param e Node left exception. */ void onResult(GridTopologyException e) { if (isDone()) return; if (rcvRes.compareAndSet(false, true)) { if (log.isDebugEnabled()) log.debug( "Remote node left grid while sending or waiting for reply (will fail): " + this); if (tx != null) tx.removeMapping(node.id()); // Primary node left the grid, so fail the future. GridNearLockFuture.this.onDone(newTopologyException(e, node.id())); onDone(true); } } /** @param res Result callback. */ void onResult(GridNearLockResponse<K, V> res) { if (rcvRes.compareAndSet(false, true)) { if (res.error() != null) { if (log.isDebugEnabled()) log.debug( "Finishing mini future with an error due to error in response [miniFut=" + this + ", res=" + res + ']'); // Fail. if (res.error() instanceof GridCacheLockTimeoutException) onDone(false); else onDone(res.error()); return; } int i = 0; long topVer = topSnapshot.get().topologyVersion(); for (K k : keys) { while (true) { GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, topVer); try { if (res.dhtVersion(i) == null) { onDone( new GridException( "Failed to receive DHT version from remote node " + "(will fail the lock): " + res)); return; } GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key()); V oldVal = entry.rawGet(); boolean hasOldVal = false; V newVal = res.value(i); byte[] newBytes = res.valueBytes(i); boolean readRecordable = false; if (retval) { readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ); if (readRecordable) hasOldVal = entry.hasValue(); } GridCacheVersion dhtVer = res.dhtVersion(i); GridCacheVersion mappedVer = res.mappedVersion(i); if (newVal == null) { if (oldValTup != null) { if (oldValTup.get1().equals(dhtVer)) { newVal = oldValTup.get2(); newBytes = oldValTup.get3(); } oldVal = oldValTup.get2(); } } // Lock is held at this point, so we can set the // returned value if any. 
entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id()); if (inTx() && implicitTx() && tx.onePhaseCommit()) { boolean pass = res.filterResult(i); tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse()); } entry.readyNearLock( lockVer, mappedVer, res.committedVersions(), res.rolledbackVersions(), res.pending()); if (retval) { if (readRecordable) cctx.events() .addEvent( entry.partition(), entry.key(), tx, null, EVT_CACHE_OBJECT_READ, newVal, newVal != null || newBytes != null, oldVal, hasOldVal, CU.subjectId(tx, cctx)); cctx.cache().metrics0().onRead(false); } if (log.isDebugEnabled()) log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']'); break; // Inner while loop. } catch (GridCacheEntryRemovedException ignored) { if (log.isDebugEnabled()) log.debug("Failed to add candidates because entry was removed (will renew)."); // Replace old entry with new one. entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key())); } catch (GridException e) { onDone(e); return; } } i++; } try { proceedMapping(mappings); } catch (GridException e) { onDone(e); } onDone(true); } } /** {@inheritDoc} */ @Override public String toString() { return S.toString(MiniFuture.class, this, "node", node.id(), "super", super.toString()); } }
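/*
 * Illustration only: the rcvRes flag in the mini-future above guarantees that only the
 * first callback (response, error, or topology exception) completes the future; later
 * callbacks are logged and dropped. The same guard sketched with JDK types only; names
 * are hypothetical.
 */
import java.util.concurrent.atomic.AtomicBoolean;

class FirstResultGuard {
    private final AtomicBoolean rcvRes = new AtomicBoolean(false);

    /** @return {@code true} for the first caller only; every later call gets {@code false}. */
    boolean tryComplete() {
        return rcvRes.compareAndSet(false, true);
    }
}

// Typical use: if (guard.tryComplete()) onDone(res); else warn("duplicate result ignored");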
/** * Future verifying whether a pessimistic transaction started by a failed node was committed on * the remote nodes that participated in it. * * @author @java.author * @version @java.version */ public class GridCachePessimisticCheckCommittedTxFuture<K, V> extends GridCompoundIdentityFuture<GridCacheCommittedTxInfo<K, V>> implements GridCacheFuture<GridCacheCommittedTxInfo<K, V>> { /** Logger reference. */ private static final AtomicReference<GridLogger> logRef = new AtomicReference<>(); /** Trackable flag. */ private boolean trackable = true; /** Context. */ private final GridCacheContext<K, V> cctx; /** Future ID. */ private final GridUuid futId = GridUuid.randomUuid(); /** Transaction. */ private final GridCacheTxEx<K, V> tx; /** All involved nodes. */ private final Map<UUID, GridNode> nodes; /** ID of the failed node that started the transaction. */ private final UUID failedNodeId; /** * Flag indicating that future checks near node instead of checking all topology in case of * primary node crash. */ private boolean nearCheck; /** Logger. */ private final GridLogger log; /** * @param cctx Context. * @param tx Transaction. * @param failedNodeId ID of the failed node that started the transaction. */ @SuppressWarnings("ConstantConditions") public GridCachePessimisticCheckCommittedTxFuture( GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx, UUID failedNodeId) { super(cctx.kernalContext(), new SingleReducer<K, V>()); this.cctx = cctx; this.tx = tx; this.failedNodeId = failedNodeId; log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class); nodes = new GridLeanMap<>(); for (GridNode node : CU.allNodes(cctx, tx.topologyVersion())) nodes.put(node.id(), node); } /** Initializes future. */ public void prepare() { if (log.isDebugEnabled()) log.debug("Checking if transaction was committed on remote nodes: " + tx); // Check local node first (local node can be a backup node for some part of this transaction). long originatingThreadId = tx.threadId(); if (tx instanceof GridCacheTxRemoteEx) originatingThreadId = ((GridCacheTxRemoteEx) tx).remoteThreadId(); GridCacheCommittedTxInfo<K, V> txInfo = cctx.tm().txCommitted(tx.nearXidVersion(), tx.eventNodeId(), originatingThreadId); if (txInfo != null) { onDone(txInfo); markInitialized(); return; } Collection<GridNode> checkNodes = CU.remoteNodes(cctx, tx.topologyVersion()); if (tx instanceof GridDhtTxRemote) { // If we got primary node failure and near node has not failed. if (tx.nodeId().equals(failedNodeId) && !tx.eventNodeId().equals(failedNodeId)) { nearCheck = true; GridNode nearNode = cctx.discovery().node(tx.eventNodeId()); if (nearNode == null) { // Near node failed, separate check prepared future will take care of it. onDone( new GridTopologyException( "Failed to check near transaction state (near node left grid): " + tx.eventNodeId())); return; } checkNodes = Collections.singletonList(nearNode); } } for (GridNode rmtNode : checkNodes) { // Skip the failed node (it has already left the grid). if (rmtNode.id().equals(failedNodeId)) continue; /* * Send message to all cache nodes in the topology. */ MiniFuture fut = new MiniFuture(rmtNode.id()); GridCachePessimisticCheckCommittedTxRequest<K, V> req = new GridCachePessimisticCheckCommittedTxRequest<>( tx, originatingThreadId, futureId(), fut.futureId()); add(fut); try { cctx.io().send(rmtNode.id(), req); } catch (GridTopologyException ignored) { fut.onNodeLeft(); } catch (GridException e) { fut.onError(e); break; } } markInitialized(); } /** * @param nodeId Node ID. * @param res Response.
*/ public void onResult(UUID nodeId, GridCachePessimisticCheckCommittedTxResponse<K, V> res) { if (!isDone()) { for (GridFuture<GridCacheCommittedTxInfo<K, V>> fut : pending()) { if (isMini(fut)) { MiniFuture f = (MiniFuture) fut; if (f.futureId().equals(res.miniId())) { assert f.nodeId().equals(nodeId); f.onResult(res); break; } } } } } /** {@inheritDoc} */ @Override public GridUuid futureId() { return futId; } /** {@inheritDoc} */ @Override public GridCacheVersion version() { return tx.xidVersion(); } /** {@inheritDoc} */ @Override public Collection<? extends GridNode> nodes() { return nodes.values(); } /** {@inheritDoc} */ @Override public boolean onNodeLeft(UUID nodeId) { for (GridFuture<?> fut : futures()) if (isMini(fut)) { MiniFuture f = (MiniFuture) fut; if (f.nodeId().equals(nodeId)) { f.onNodeLeft(); return true; } } return false; } /** {@inheritDoc} */ @Override public boolean trackable() { return trackable; } /** {@inheritDoc} */ @Override public void markNotTrackable() { trackable = false; } /** {@inheritDoc} */ @Override public boolean onDone(@Nullable GridCacheCommittedTxInfo<K, V> res, @Nullable Throwable err) { if (super.onDone(res, err)) { cctx.mvcc().removeFuture(this); if (log.isDebugEnabled()) log.debug( "Completing check committed tx future for transaction [tx=" + tx + ", res=" + res + ", err=" + err + ']'); if (err == null) cctx.tm().finishPessimisticTxOnRecovery(tx, res); else { if (log.isDebugEnabled()) log.debug( "Failed to check prepared transactions, " + "invalidating transaction [err=" + err + ", tx=" + tx + ']'); if (nearCheck) return true; cctx.tm().salvageTx(tx); } return true; } return false; } /** * @param f Future. * @return {@code True} if mini-future. */ private boolean isMini(GridFuture<?> f) { return f.getClass().equals(MiniFuture.class); } /** {@inheritDoc} */ @Override public String toString() { return S.toString( GridCachePessimisticCheckCommittedTxFuture.class, this, "super", super.toString()); } /** */ private class MiniFuture extends GridFutureAdapter<GridCacheCommittedTxInfo<K, V>> { /** Mini future ID. */ private final GridUuid futId = GridUuid.randomUuid(); /** Node ID. */ private UUID nodeId; /** Empty constructor required by {@link Externalizable} */ public MiniFuture() { // No-op. } /** @param nodeId Node ID. */ private MiniFuture(UUID nodeId) { super(cctx.kernalContext()); this.nodeId = nodeId; } /** @return Node ID. */ private UUID nodeId() { return nodeId; } /** @return Future ID. */ private GridUuid futureId() { return futId; } /** @param e Error. */ private void onError(Throwable e) { if (log.isDebugEnabled()) log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']'); onDone(e); } /** */ private void onNodeLeft() { if (log.isDebugEnabled()) log.debug("Transaction node left grid (will ignore) [fut=" + this + ']'); if (nearCheck) { onDone( new GridTopologyException( "Failed to check near transaction state (near node left grid): " + nodeId)); return; } onDone((GridCacheCommittedTxInfo<K, V>) null); } /** @param res Result callback. */ private void onResult(GridCachePessimisticCheckCommittedTxResponse<K, V> res) { onDone(res.committedTxInfo()); } /** {@inheritDoc} */ @Override public String toString() { return S.toString(MiniFuture.class, this, "done", isDone(), "err", error()); } } /** Single value reducer. 
*/ private static class SingleReducer<K, V> extends GridReducer<GridCacheCommittedTxInfo<K, V>, GridCacheCommittedTxInfo<K, V>> { /** */ private AtomicReference<GridCacheCommittedTxInfo<K, V>> collected = new AtomicReference<>(); /** {@inheritDoc} */ @Override public boolean collect(@Nullable GridCacheCommittedTxInfo<K, V> info) { if (info != null) { collected.compareAndSet(null, info); // Stop collecting on first collected info. return false; } return true; } /** {@inheritDoc} */ @Override public GridCacheCommittedTxInfo<K, V> reduce() { return collected.get(); } } }
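/*
 * Illustration only: SingleReducer above keeps the first non-null committed-tx info and
 * returns {@code false} from collect() to stop further collection. The same "first
 * non-null wins" contract sketched with JDK types only; FirstNonNull is a hypothetical
 * name, not GridGain API.
 */
import java.util.concurrent.atomic.AtomicReference;

class FirstNonNull<T> {
    private final AtomicReference<T> collected = new AtomicReference<>();

    /** @return {@code false} once a value has been captured, signalling "stop collecting". */
    boolean collect(T val) {
        if (val != null) {
            collected.compareAndSet(null, val);

            return false;
        }

        return true;
    }

    /** @return First non-null value collected, or {@code null} if none was seen. */
    T reduce() {
        return collected.get();
    }
}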
/** * @param depMode Deployment mode. * @param ldr Class loader to deploy. * @param cls Class. * @param alias Class alias. * @return Deployment. */ @SuppressWarnings({"ConstantConditions"}) private GridDeployment deploy( GridDeploymentMode depMode, ClassLoader ldr, Class<?> cls, String alias) { assert Thread.holdsLock(mux); LinkedList<GridDeployment> cachedDeps = null; GridDeployment dep = null; // Find existing class loader info. for (LinkedList<GridDeployment> deps : cache.values()) { for (GridDeployment d : deps) { if (d.classLoader() == ldr) { // Cache class and alias. d.addDeployedClass(cls, alias); cachedDeps = deps; dep = d; break; } } if (cachedDeps != null) { break; } } if (cachedDeps != null) { assert dep != null; cache.put(alias, cachedDeps); if (!cls.getName().equals(alias)) { // Cache by class name as well. cache.put(cls.getName(), cachedDeps); } return dep; } GridUuid ldrId = GridUuid.randomUuid(); long seqNum = seq.incrementAndGet(); String userVer = getUserVersion(ldr); dep = new GridDeployment(depMode, ldr, ldrId, seqNum, userVer, cls.getName(), true); dep.addDeployedClass(cls, alias); LinkedList<GridDeployment> deps = F.addIfAbsent(cache, alias, F.<GridDeployment>newLinkedList()); if (!deps.isEmpty()) { for (GridDeployment d : deps) { if (!d.isUndeployed()) { U.error( log, "Found more than one active deployment for the same resource " + "[cls=" + cls + ", depMode=" + depMode + ", dep=" + d + ']'); return null; } } } // Add at the beginning of the list for future fast access. deps.addFirst(dep); if (!cls.getName().equals(alias)) { // Cache by class name as well. cache.put(cls.getName(), deps); } if (log.isDebugEnabled()) { log.debug("Created new deployment: " + dep); } return dep; }
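/*
 * Illustration only: deploy() above keeps per-alias lists of deployments and adds a new
 * deployment at the head of its list so the most recent one is found first. A minimal
 * JDK-only sketch of that cache shape; the real code guards the map with an external lock
 * (mux) rather than synchronized methods, and all names below are hypothetical.
 */
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;

class AliasCache<D> {
    private final Map<String, LinkedList<D>> cache = new HashMap<>();

    /** Adds a deployment at the head of the list for the given alias. */
    synchronized void add(String alias, D dep) {
        LinkedList<D> deps = cache.get(alias);

        if (deps == null) {
            deps = new LinkedList<>();

            cache.put(alias, deps);
        }

        deps.addFirst(dep); // Newest deployment is looked up first.
    }

    /** @return Most recent deployment for the alias or {@code null}. */
    synchronized D latest(String alias) {
        LinkedList<D> deps = cache.get(alias);

        return deps == null || deps.isEmpty() ? null : deps.getFirst();
    }
}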