/** {@inheritDoc} */
@Override public GridDiscoveryTopologySnapshot topologySnapshot() throws IgniteCheckedException {
    get();

    if (topSnapshot.get() == null)
        topSnapshot.compareAndSet(null,
            new GridDiscoveryTopologySnapshot(discoEvt.topologyVersion(), discoEvt.topologyNodes()));

    return topSnapshot.get();
}

/** Cleans up resources to avoid excessive memory usage. */
public void cleanUp() {
    topSnapshot.set(null);
    singleMsgs.clear();
    fullMsgs.clear();
    rcvdIds.clear();
    oldestNode.set(null);
    partReleaseFut = null;

    Collection<ClusterNode> rmtNodes = this.rmtNodes;

    if (rmtNodes != null)
        rmtNodes.clear();
}

/** {@inheritDoc} */
@Override protected Collection<E> dequeue0(int cnt) {
    WindowHolder tup = ref.get();

    AtomicInteger size = tup.size();
    Collection<T> evts = tup.collection();

    Collection<E> resCol = new ArrayList<>(cnt);

    while (true) {
        int curSize = size.get();

        if (curSize > 0) {
            if (size.compareAndSet(curSize, curSize - 1)) {
                E res = pollInternal(evts, tup.set());

                if (res != null) {
                    resCol.add(res);

                    if (resCol.size() >= cnt)
                        return resCol;
                }
                else {
                    size.incrementAndGet();

                    return resCol;
                }
            }
        }
        else
            return resCol;
    }
}

/**
 * Poll evicted internal implementation.
 *
 * @return Evicted element.
 */
@Nullable private E pollEvictedInternal() {
    WindowHolder tup = ref.get();

    AtomicInteger size = tup.size();

    while (true) {
        int curSize = size.get();

        if (curSize > maxSize) {
            if (size.compareAndSet(curSize, curSize - 1)) {
                E evt = pollInternal(tup.collection(), tup.set());

                if (evt != null)
                    return evt;
                else {
                    // No actual event in the queue: another thread is still in the middle of a
                    // concurrent add, so restore the size counter and return null.
                    size.incrementAndGet();

                    return null;
                }
            }
        }
        else
            return null;
    }
}

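/*
 * Illustrative sketch (not part of the original class, names are hypothetical): the two methods
 * above rely on an optimistic "reserve-then-poll" pattern -- decrement the size counter with CAS
 * first, and give the reservation back if the underlying queue turns out to be momentarily empty
 * because a concurrent producer bumped the counter before publishing its element. A minimal
 * stand-alone version of that pattern, using only java.util.concurrent types:
 *
 *     static <T> T reserveAndPoll(AtomicInteger size, Queue<T> queue) {
 *         while (true) {
 *             int cur = size.get();
 *
 *             if (cur <= 0)
 *                 return null; // Nothing to take.
 *
 *             if (size.compareAndSet(cur, cur - 1)) {
 *                 T item = queue.poll();
 *
 *                 if (item != null)
 *                     return item;
 *
 *                 // Producer incremented the counter but has not offered the item yet.
 *                 size.incrementAndGet();
 *
 *                 return null;
 *             }
 *         }
 *     }
 */
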
/**
 * @param nodes Nodes.
 * @param id ID.
 * @throws IgniteCheckedException If failed.
 */
private void sendAllPartitions(Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
    throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal()) {
            AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

            boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

            if (ready)
                m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
        log.debug("Sending full partition map [nodeIds=" + F.viewReadOnly(nodes, F.node2id()) +
            ", exchId=" + exchId + ", msg=" + m + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
}

/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                }
                catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        }
        else if (tx.implicit() && tx.isSystemInvalidate()) {
            // Finish implicit transaction on heuristic error.
            try {
                tx.close();
            }
            catch (GridException ex) {
                U.error(log, "Failed to invalidate transaction: " + tx, ex);
            }
        }

        onComplete();
    }
}

/** @param evts Events to add. */
private void addAll(Collection<E> evts) {
    WindowHolder tup = ref.get();

    int cnt = addAllInternal(evts, tup.collection(), tup.set());

    tup.size().addAndGet(cnt);
}

/**
 * Completeness callback.
 *
 * @param success {@code True} if lock was acquired.
 * @param distribute {@code True} if lock removal needs to be distributed in case of failure.
 * @return {@code True} if completed by this operation.
 */
private boolean onComplete(boolean success, boolean distribute) {
    if (log.isDebugEnabled())
        log.debug("Received onComplete(..) callback [success=" + success + ", distribute=" + distribute +
            ", fut=" + this + ']');

    if (!success)
        undoLocks(distribute);

    if (tx != null)
        cctx.tm().txContext(tx);

    if (super.onDone(success, err.get())) {
        if (log.isDebugEnabled())
            log.debug("Completing future: " + this);

        // Clean up.
        cctx.mvcc().removeFuture(this);

        if (timeoutObj != null)
            cctx.time().removeTimeoutObject(timeoutObj);

        return true;
    }

    return false;
}

private void recheck() {
    // If this is the oldest node.
    if (oldestNode.get().id().equals(cctx.localNodeId())) {
        Collection<UUID> remaining = remaining();

        if (!remaining.isEmpty()) {
            try {
                cctx.io().safeSend(cctx.discovery().nodes(remaining),
                    new GridDhtPartitionsSingleRequest(exchId), SYSTEM_POOL, null);
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to request partitions from nodes [exchangeId=" + exchId +
                    ", nodes=" + remaining + ']', e);
            }
        }
        // Resend full partition map because last attempt failed.
        else {
            if (spreadPartitions())
                onDone(exchId.topologyVersion());
        }
    }
    else
        sendPartitions();

    // Schedule another send.
    scheduleRecheck();
}

/** @param lsnr Listener to external collision events. */
public void setCollisionExternalListener(@Nullable GridCollisionExternalListener lsnr) {
    if (enabled()) {
        if (lsnr != null && !extLsnr.compareAndSet(null, lsnr))
            assert false : "Collision external listener has already been set " +
                "(perhaps need to add support for multiple listeners)";
        else if (log.isDebugEnabled())
            log.debug("Successfully set external collision listener: " + lsnr);
    }
}

/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                }
                catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        }

        onComplete();
    }
}

/**
 * Future mapping consists of two parts. First, we must determine the topology version this future
 * will map on. If locking is performed within a user transaction, we must continue to map keys on
 * the same topology version the transaction started on. If the topology version is undefined, we
 * get the current topology future and wait until it completes, so the topology is ready to use.
 *
 * <p>During the second part we map keys to primary nodes using the topology snapshot obtained
 * during the first part. Note that if a primary node leaves the grid, the future will fail and the
 * transaction will be rolled back.
 */
void map() {
    // Obtain the topology version to use.
    GridDiscoveryTopologySnapshot snapshot = tx != null ? tx.topologySnapshot() :
        cctx.mvcc().lastExplicitLockTopologySnapshot(Thread.currentThread().getId());

    if (snapshot != null) {
        // Continue mapping on the same topology version as it was before.
        topSnapshot.compareAndSet(null, snapshot);

        map(keys);

        markInitialized();

        return;
    }

    // Must get topology snapshot and map on that version.
    mapOnTopology();
}

/**
 * Acquires the topology future and checks its completeness under the read lock. If it is not
 * complete, asynchronously waits for its completion and then tries again.
 */
void mapOnTopology() {
    // We must acquire topology snapshot from the topology version future.
    try {
        cctx.topology().readLock();

        try {
            GridDhtTopologyFuture fut = cctx.topologyVersionFuture();

            if (fut.isDone()) {
                GridDiscoveryTopologySnapshot snapshot = fut.topologySnapshot();

                if (tx != null) {
                    tx.topologyVersion(snapshot.topologyVersion());
                    tx.topologySnapshot(snapshot);
                }

                topSnapshot.compareAndSet(null, snapshot);

                map(keys);

                markInitialized();
            }
            else {
                fut.listenAsync(new CI1<GridFuture<Long>>() {
                    @Override public void apply(GridFuture<Long> t) {
                        mapOnTopology();
                    }
                });
            }
        }
        finally {
            cctx.topology().readUnlock();
        }
    }
    catch (GridException e) {
        onDone(e);
    }
}

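/*
 * Illustrative sketch (not part of the original class, names are hypothetical): map() and
 * mapOnTopology() above follow a common "use-known-version-or-wait" pattern -- if a topology
 * snapshot is already pinned (by the transaction or an explicit lock), reuse it; otherwise check
 * the topology future under the read lock and, if it is not done yet, register a listener that
 * simply retries the whole step. A stripped-down version of that control flow:
 *
 *     void mapOrWait() {
 *         Snapshot snap = pinnedSnapshot();          // Already known version, if any.
 *
 *         if (snap != null) {
 *             mapKeys(snap);                         // Phase two: map keys on that version.
 *
 *             return;
 *         }
 *
 *         TopologyFuture fut = topologyFuture();
 *
 *         if (fut.isDone())
 *             mapKeys(fut.snapshot());
 *         else
 *             fut.listen(f -> mapOrWait());          // Retry once topology is ready.
 *     }
 */
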
/** @param evt Event to add. */
private void add(E evt) {
    WindowHolder tup = ref.get();

    if (addInternal(evt, tup.collection(), tup.set()))
        tup.size().incrementAndGet();
}

/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    if (init.compareAndSet(false, true)) {
        if (isDone())
            return;

        try {
            // Wait for event to occur to make sure that discovery
            // will return corresponding nodes.
            U.await(evtLatch);

            assert discoEvt != null : this;
            assert !dummy && !forcePreload : this;

            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

            oldestNode.set(oldest);

            startCaches();

            // True if client node joined or failed.
            boolean clientNodeEvt;

            if (F.isEmpty(reqs)) {
                int type = discoEvt.type();

                assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt;

                clientNodeEvt = CU.clientNode(discoEvt.eventNode());
            }
            else {
                assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

                boolean clientOnlyStart = true;

                for (DynamicCacheChangeRequest req : reqs) {
                    if (!req.clientStartOnly()) {
                        clientOnlyStart = false;

                        break;
                    }
                }

                clientNodeEvt = clientOnlyStart;
            }

            if (clientNodeEvt) {
                ClusterNode node = discoEvt.eventNode();

                // Clients need to initialize affinity for a local join event or for started client caches.
                if (!node.isLocal()) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                        if (cacheCtx.isLocal())
                            continue;

                        GridDhtPartitionTopology top = cacheCtx.topology();

                        top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

                        if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                            initTopology(cacheCtx);

                            top.beforeExchange(this);
                        }
                        else
                            cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
                    }

                    if (exchId.isLeft())
                        cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

                    onDone(exchId.topologyVersion());

                    skipPreload = cctx.kernalContext().clientNode();

                    return;
                }
            }

            if (cctx.kernalContext().clientNode()) {
                skipPreload = true;

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    GridDhtPartitionTopology top = cacheCtx.topology();

                    top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
                }

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    initTopology(cacheCtx);
                }

                if (oldestNode.get() != null) {
                    rmtNodes = new ConcurrentLinkedQueue<>(
                        CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

                    rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

                    ready.set(true);

                    initFut.onDone(true);

                    if (log.isDebugEnabled())
                        log.debug("Initialized future: " + this);

                    sendPartitions();
                }
                else
                    onDone(exchId.topologyVersion());

                return;
            }

            assert oldestNode.get() != null;

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
                    if (cacheCtx.discovery().cacheAffinityNodes(cacheCtx.name(), topologyVersion()).isEmpty())
                        U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
                }

                cacheCtx.preloader().onExchangeFutureAdded();
            }

            List<String> cachesWithoutNodes = null;

            if (exchId.isLeft()) {
                for (String name : cctx.cache().cacheNames()) {
                    if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
                        if (cachesWithoutNodes == null)
                            cachesWithoutNodes = new ArrayList<>();

                        cachesWithoutNodes.add(name);

                        // Fire event even if there is no client cache started.
                        if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                            Event evt = new CacheEvent(
                                name,
                                cctx.localNode(),
                                cctx.localNode(),
                                "All server nodes have left the cluster.",
                                EventType.EVT_CACHE_NODES_LEFT,
                                0,
                                false,
                                null,
                                null,
                                null,
                                null,
                                false,
                                null,
                                false,
                                null,
                                null,
                                null);

                            cctx.gridEvents().record(evt);
                        }
                    }
                }
            }

            if (cachesWithoutNodes != null) {
                StringBuilder sb =
                    new StringBuilder("All server nodes for the following caches have left the cluster: ");

                for (int i = 0; i < cachesWithoutNodes.size(); i++) {
                    String cache = cachesWithoutNodes.get(i);

                    sb.append('\'').append(cache).append('\'');

                    if (i != cachesWithoutNodes.size() - 1)
                        sb.append(", ");
                }

                U.quietAndWarn(log, sb.toString());

                U.quietAndWarn(log, "Must have server nodes for caches to operate.");
            }

            assert discoEvt != null;

            assert exchId.nodeId().equals(discoEvt.eventNode().id());

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId());

                long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

                // Update before waiting for locks.
                if (!cacheCtx.isLocal())
                    cacheCtx.topology().updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
            }

            // Grab all alive remote nodes with order equal to or less than the last joined node.
            rmtNodes = new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                // If received any messages, process them.
                onReceive(m.getKey(), m.getValue());

            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                // If received any messages, process them.
                onReceive(m.getKey(), m.getValue());

            AffinityTopologyVersion topVer = exchId.topologyVersion();

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Must initialize topology after we get discovery event.
                initTopology(cacheCtx);

                cacheCtx.preloader().updateLastExchangeFuture(this);
            }

            IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

            // Assign to class variable so it will be included into toString() method.
            this.partReleaseFut = partReleaseFut;

            if (log.isDebugEnabled())
                log.debug("Before waiting for partition release future: " + this);

            while (true) {
                try {
                    partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    // Print pending transactions and locks that might have led to hang.
                    dumpPendingObjects();
                }
            }

            if (log.isDebugEnabled())
                log.debug("After waiting for partition release future: " + this);

            if (!F.isEmpty(reqs))
                blockGateways();

            if (exchId.isLeft())
                cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

            while (true) {
                try {
                    locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    U.warn(log, "Failed to wait for locks release future. " +
                        "Dumping pending objects that might be the cause: " + cctx.localNodeId());

                    U.warn(log, "Locked entries:");

                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                        cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                }
            }

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Notify replication manager.
                GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

                if (drCacheCtx.isDrEnabled())
                    drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

                // Partition release future is done so we can flush the write-behind store.
                cacheCtx.store().forceFlush();

                // Process queued undeploys prior to sending/spreading map.
                cacheCtx.preloader().unwindUndeploys();

                GridDhtPartitionTopology top = cacheCtx.topology();

                assert topVer.equals(top.topologyVersion()) :
                    "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

                top.beforeExchange(this);
            }

            for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
                top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

                top.beforeExchange(this);
            }
        }
        catch (IgniteInterruptedCheckedException e) {
            onDone(e);

            throw e;
        }
        catch (Throwable e) {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);

            onDone(e);

            if (e instanceof Error)
                throw (Error)e;

            return;
        }

        if (F.isEmpty(rmtIds)) {
            onDone(exchId.topologyVersion());

            return;
        }

        ready.set(true);

        initFut.onDone(true);

        if (log.isDebugEnabled())
            log.debug("Initialized future: " + this);

        // If this node is not oldest.
        if (!oldestNode.get().id().equals(cctx.localNodeId()))
            sendPartitions();
        else {
            boolean allReceived = allReceived();

            if (allReceived && replied.compareAndSet(false, true)) {
                if (spreadPartitions())
                    onDone(exchId.topologyVersion());
            }
        }

        scheduleRecheck();
    }
    else
        assert false : "Skipped init future: " + this;
}

/** {@inheritDoc} */
@Override protected GridStreamerWindowIterator<E> iterator0() {
    WindowHolder win = ref.get();

    return iteratorInternal(win.collection(), win.set(), win.size());
}

/** Checks window consistency. Used for testing. */
void consistencyCheck() {
    WindowHolder win = ref.get();

    consistencyCheck(win.collection(), win.set(), win.size());
}

/**
 * @param nodeId Sender node id.
 * @param msg Single partition info.
 */
public void onReceive(final UUID nodeId, final GridDhtPartitionsSingleMessage msg) {
    assert msg != null;

    assert msg.exchangeId().equals(exchId);

    // Update last seen version.
    while (true) {
        GridCacheVersion old = lastVer.get();

        if (old == null || old.compareTo(msg.lastVersion()) < 0) {
            if (lastVer.compareAndSet(old, msg.lastVersion()))
                break;
        }
        else
            break;
    }

    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Received message for finished future (will reply only to sender) [msg=" + msg +
                ", fut=" + this + ']');

        sendAllPartitions(nodeId, cctx.gridConfig().getNetworkSendRetryCount());
    }
    else {
        initFut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
            @Override public void apply(IgniteInternalFuture<Boolean> t) {
                try {
                    if (!t.get()) // Just to check if there was an error.
                        return;

                    ClusterNode loc = cctx.localNode();

                    singleMsgs.put(nodeId, msg);

                    boolean match = true;

                    // Check if oldest node has changed.
                    if (!oldestNode.get().equals(loc)) {
                        match = false;

                        synchronized (mux) {
                            // Double check.
                            if (oldestNode.get().equals(loc))
                                match = true;
                        }
                    }

                    if (match) {
                        boolean allReceived;

                        synchronized (rcvdIds) {
                            if (rcvdIds.add(nodeId))
                                updatePartitionSingleMap(msg);

                            allReceived = allReceived();
                        }

                        // If got all replies, and initialization finished, and reply has not been sent yet.
                        if (allReceived && ready.get() && replied.compareAndSet(false, true)) {
                            spreadPartitions();

                            onDone(exchId.topologyVersion());
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Exchange future full map is not sent [allReceived=" + allReceived() +
                                ", ready=" + ready + ", replied=" + replied.get() + ", init=" + init.get() +
                                ", fut=" + this + ']');
                    }
                }
                catch (IgniteCheckedException e) {
                    U.error(log, "Failed to initialize exchange future: " + this, e);
                }
            }
        });
    }
}

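/*
 * Illustrative sketch (not part of the original class): the "update last seen version" loop above
 * is a standard monotonic CAS update -- replace the stored value only if the incoming one is
 * strictly newer, and retry when a concurrent writer wins the race. A generic stand-alone form:
 *
 *     static <T extends Comparable<T>> void updateIfNewer(AtomicReference<T> ref, T candidate) {
 *         while (true) {
 *             T old = ref.get();
 *
 *             if (old != null && old.compareTo(candidate) >= 0)
 *                 return; // Current value is already as new or newer.
 *
 *             if (ref.compareAndSet(old, candidate))
 *                 return; // Successfully published the newer value.
 *         }
 *     }
 */
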
/** {@inheritDoc} */
@Override protected void reset0() {
    ref.set(new WindowHolder(newCollection(),
        unique ? new GridConcurrentHashSet<E>() : null,
        new AtomicInteger()));
}

/**
 * Get underlying collection.
 *
 * @return Collection.
 */
@SuppressWarnings("ConstantConditions")
protected Collection<T> collection() {
    return ref.get().get1();
}

/** Completeness callback. */
private void onComplete() {
    onDone(tx, err.get());
}

/** @param t Error. */
private void onError(Throwable t) {
    err.compareAndSet(null, t instanceof GridCacheLockTimeoutException ? null : t);
}

/**
 * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request per
 * node, as such an approach does not preserve the order of lock acquisition. Instead, keys are
 * split into contiguous groups belonging to one primary node, and locks for these groups are
 * acquired sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();

            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() :
                                        "Got removed exception for non-obsolete entry: " + dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                            dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}

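/*
 * Illustrative sketch (not part of the original class, names are hypothetical): the Javadoc above
 * explains why keys are split into contiguous per-node runs rather than grouped by node -- grouping
 * would reorder lock acquisition and could lead to deadlocks. The splitting step alone can be
 * expressed like this:
 *
 *     static <K> List<Map.Entry<UUID, List<K>>> splitIntoRuns(Iterable<K> keys, Function<K, UUID> primary) {
 *         List<Map.Entry<UUID, List<K>>> runs = new ArrayList<>();
 *
 *         UUID prev = null;
 *
 *         for (K key : keys) {
 *             UUID node = primary.apply(key);
 *
 *             // Start a new run whenever the primary node changes; never merge non-adjacent runs.
 *             if (!node.equals(prev)) {
 *                 runs.add(new AbstractMap.SimpleEntry<>(node, new ArrayList<>()));
 *
 *                 prev = node;
 *             }
 *
 *             runs.get(runs.size() - 1).getValue().add(key);
 *         }
 *
 *         return runs;
 *     }
 */
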
/** {@inheritDoc} */
@Override public int size() {
    int size = ref.get().size().get();

    return size > 0 ? size : 0;
}