/** {@inheritDoc} */
@Override public Integer call() throws Exception {
    GridCacheTx tx = CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ);

    try {
        GridCacheCountDownLatchValue latchVal = latchView.get(key);

        if (latchVal == null) {
            if (log.isDebugEnabled())
                log.debug("Failed to find count down latch with given name: " + name);

            assert cnt == 0;

            return cnt;
        }

        int retVal;

        if (val > 0) {
            retVal = latchVal.get() - val;

            if (retVal < 0)
                retVal = 0;
        }
        else
            retVal = 0;

        latchVal.set(retVal);

        latchView.put(key, latchVal);

        tx.commit();

        return retVal;
    }
    finally {
        tx.end();
    }
}
/** {@inheritDoc} */
@Override public GridFuture<Boolean> loadMissing(boolean async, final Collection<? extends K> keys,
    final GridInClosure2<K, V> closure) {
    GridFuture<Map<K, V>> f = cctx.near().txLoadAsync(this, keys, CU.<K, V>empty());

    return new GridEmbeddedFuture<Boolean, Map<K, V>>(cctx.kernalContext(), f,
        new C2<Map<K, V>, Exception, Boolean>() {
            @Override public Boolean apply(Map<K, V> map, Exception e) {
                if (e != null) {
                    setRollbackOnly();

                    throw new GridClosureException(e);
                }

                // Must loop through keys, not map entries,
                // as map entries may not have all the keys.
                for (K key : keys)
                    closure.apply(key, map.get(key));

                return true;
            }
        });
}
/**
 * @param entry Transaction entry.
 * @param nodes Nodes.
 */
private void map(GridCacheTxEntry<K, V> entry, Collection<GridRichNode> nodes) {
    GridRichNode primary = CU.primary0(cctx.affinity(entry.key(), nodes));

    GridDistributedTxMapping<K, V> t = mappings.get(primary.id());

    if (t == null)
        mappings.put(primary.id(), t = new GridDistributedTxMapping<K, V>(primary));

    t.add(entry);
}
/**
 * Peeks only near cache without looking into DHT cache.
 *
 * @param key Key.
 * @return Peeked value.
 */
@Nullable public V peekNearOnly(K key) {
    try {
        return peek0(true, key, SMART, CU.<K, V>empty());
    }
    catch (GridCacheFilterFailedException ignored) {
        if (log.isDebugEnabled())
            log.debug("Filter validation failed for key: " + key);

        return null;
    }
}
/**
 * @param reads Read entries.
 * @param writes Write entries.
 */
@SuppressWarnings({"unchecked"})
private void prepare(Iterable<GridCacheTxEntry<K, V>> reads, Iterable<GridCacheTxEntry<K, V>> writes) {
    Collection<GridRichNode> nodes = CU.allNodes(cctx);

    // Assign keys to primary nodes.
    for (GridCacheTxEntry<K, V> read : reads)
        map(read, nodes);

    for (GridCacheTxEntry<K, V> write : writes)
        map(write, nodes);

    // Create mini futures.
    for (GridDistributedTxMapping<K, V> m : mappings.values())
        finish(m);
}
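/*
 * The map()/prepare() pair above groups transaction entries into one mapping per primary node
 * before a mini future is created for each mapping. Below is a minimal standalone sketch of
 * that grouping step using only JDK types; Node, Entry and pickPrimary() are hypothetical
 * stand-ins, not part of the GridGain API.
 */
import java.util.*;

public class GroupByPrimarySketch {
    static final class Node {
        final UUID id = UUID.randomUUID();
    }

    static final class Entry {
        final String key;

        Entry(String key) {
            this.key = key;
        }
    }

    // Hypothetical affinity function: picks a primary node for a key.
    static Node pickPrimary(String key, List<Node> nodes) {
        return nodes.get((key.hashCode() & Integer.MAX_VALUE) % nodes.size());
    }

    public static void main(String[] args) {
        List<Node> nodes = Arrays.asList(new Node(), new Node(), new Node());

        List<Entry> writes = Arrays.asList(new Entry("a"), new Entry("b"), new Entry("c"));

        // Same shape as 'mappings' in the code above: one bucket per primary node ID.
        Map<UUID, List<Entry>> mappings = new HashMap<UUID, List<Entry>>();

        for (Entry e : writes) {
            Node primary = pickPrimary(e.key, nodes);

            List<Entry> bucket = mappings.get(primary.id);

            if (bucket == null)
                mappings.put(primary.id, bucket = new ArrayList<Entry>());

            bucket.add(e);
        }

        // Each bucket would then back one mini future / one request to its primary node.
        System.out.println("Mappings: " + mappings.size());
    }
}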
/** {@inheritDoc} */
@Override public void remove() {
    if (currEntry == null)
        throw new IllegalStateException();

    assert currIter != null;

    currIter.remove();

    try {
        GridNearCache.this.remove(currEntry.getKey(), CU.<K, V>empty());
    }
    catch (GridException e) {
        throw new GridRuntimeException(e);
    }
}
/** @throws GridException If operation failed. */
private void initializeLatch() throws GridException {
    if (initGuard.compareAndSet(false, true)) {
        try {
            internalLatch = CU.outTx(
                new Callable<CountDownLatch>() {
                    @Override public CountDownLatch call() throws Exception {
                        GridCacheTx tx = CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ);

                        try {
                            GridCacheCountDownLatchValue val = latchView.get(key);

                            if (val == null) {
                                if (log.isDebugEnabled())
                                    log.debug("Failed to find count down latch with given name: " + name);

                                assert cnt == 0;

                                return new CountDownLatch(cnt);
                            }

                            tx.commit();

                            return new CountDownLatch(val.get());
                        }
                        finally {
                            tx.end();
                        }
                    }
                },
                ctx);

            if (log.isDebugEnabled())
                log.debug("Initialized internal latch: " + internalLatch);
        }
        finally {
            initLatch.countDown();
        }
    }
    else {
        try {
            initLatch.await();
        }
        catch (InterruptedException ignored) {
            throw new GridException("Thread has been interrupted.");
        }

        if (internalLatch == null)
            throw new GridException("Internal latch has not been properly initialized.");
    }
}
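/*
 * initializeLatch() above uses a common one-time initialization pattern: the first thread to
 * flip the guard performs the initialization, every other thread blocks on a latch until it is
 * done. A minimal standalone sketch of that pattern using only java.util.concurrent follows;
 * the field names mirror the method above, but this is not the GridGain implementation.
 */
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class InitGuardSketch {
    private final AtomicBoolean initGuard = new AtomicBoolean();
    private final CountDownLatch initLatch = new CountDownLatch(1);

    private volatile Object resource;

    Object get() throws InterruptedException {
        if (initGuard.compareAndSet(false, true)) {
            try {
                // Only one thread ever runs the (possibly expensive) initialization.
                resource = new Object();
            }
            finally {
                // Release waiters even if initialization failed.
                initLatch.countDown();
            }
        }
        else
            // Losers of the CAS race wait for the winner to finish.
            initLatch.await();

        if (resource == null)
            throw new IllegalStateException("Resource has not been properly initialized.");

        return resource;
    }
}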
/**
 * @param key Key to read swap entry for.
 * @return Read value.
 * @throws GridException If read failed.
 */
@Nullable GridCacheSwapEntry<V> readAndRemove(K key) throws GridException {
    if (!enabled)
        return null;

    return readAndRemove(CU.marshal(cctx, key).getEntireArray());
}
/** {@inheritDoc} */
@Override public int countDown(int val) throws GridException {
    A.ensure(val > 0, "val should be positive");

    return CU.outTx(new CountDownCallable(val), ctx);
}
/** {@inheritDoc} */
@Override public Map<GridRichNode, Collection<K>> mapKeysToNodes(Collection<? extends K> keys) {
    return CU.mapKeysToNodes(ctx, keys);
}
/** @return Near entries. */
public Set<GridCacheEntry<K, V>> nearEntries() {
    return super.entrySet(CU.<K, V>empty());
}
/**
 * @param nodeId Sender node ID.
 * @param req Finish transaction message.
 */
@SuppressWarnings({"CatchGenericClass"})
private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
        ClassLoader ldr = ctx.deploy().globalLoader();

        if (req.commit()) {
            // If lock was acquired explicitly.
            if (tx == null) {
                // Create transaction and add entries.
                tx = ctx.tm().onCreated(
                    new GridReplicatedTxRemote<K, V>(
                        ldr,
                        nodeId,
                        req.threadId(),
                        req.version(),
                        req.commitVersion(),
                        PESSIMISTIC,
                        READ_COMMITTED,
                        req.isInvalidate(),
                        /*timeout*/0,
                        /*read entries*/null,
                        req.writes(),
                        ctx));

                if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + req);
            }
            else {
                boolean set = tx.commitVersion(req.commitVersion());

                assert set;
            }

            Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

            if (!F.isEmpty(writeEntries)) {
                // In OPTIMISTIC mode, we get the values at PREPARE stage.
                assert tx.concurrency() == PESSIMISTIC;

                for (GridCacheTxEntry<K, V> entry : writeEntries) {
                    // Unmarshal write entries.
                    entry.unmarshal(ctx, ldr);

                    if (log.isDebugEnabled())
                        log.debug("Unmarshalled transaction entry from pessimistic transaction [key=" + entry.key() +
                            ", value=" + entry.value() + ", tx=" + tx + ']');

                    if (!tx.setWriteValue(entry))
                        U.warn(log, "Received entry to commit that was not present in transaction [entry=" + entry +
                            ", tx=" + tx + ']');
                }
            }

            // Add completed versions.
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            if (tx.pessimistic())
                tx.prepare();

            tx.commit();
        }
        else if (tx != null) {
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            tx.rollback();
        }

        if (req.replyRequired()) {
            GridCacheMessage<K, V> res = new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

            try {
                ctx.io().send(nodeId, res);
            }
            catch (Throwable e) {
                // Double-check.
                if (ctx.discovery().node(nodeId) == null) {
                    if (log.isDebugEnabled())
                        log.debug("Node left while sending finish response [nodeId=" + nodeId + ", res=" + res + ']');
                }
                else
                    U.error(log, "Failed to send finish response to node [nodeId=" + nodeId + ", res=" + res + ']', e);
            }
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Attempted to start a completed transaction (will ignore): " + e);
    }
    catch (Throwable e) {
        U.error(log, "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']', e);

        if (tx != null)
            tx.rollback();
    }
}
/**
 * Synchronous sequence update operation. Will add given amount to the sequence value.
 *
 * @param l Increment amount.
 * @param updateCall Cache call that will update sequence reservation count in accordance with {@code l}.
 * @param updated If {@code true}, will return sequence value after update, otherwise will return
 *      sequence value prior to update.
 * @return Sequence value.
 * @throws GridException If update failed.
 */
private long internalUpdate(long l, @Nullable Callable<Long> updateCall, boolean updated) throws GridException {
    checkRemoved();

    assert l > 0;

    lock.lock();

    try {
        // If reserved range isn't exhausted.
        if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return updated ? locVal : curVal;
        }
    }
    finally {
        lock.unlock();
    }

    if (updateCall == null)
        updateCall = internalUpdate(l, updated);

    while (true) {
        if (updateGuard.compareAndSet(false, true)) {
            try {
                // This call must be outside lock.
                return CU.outTx(updateCall, ctx);
            }
            finally {
                lock.lock();

                try {
                    updateGuard.set(false);

                    cond.signalAll();
                }
                finally {
                    lock.unlock();
                }
            }
        }
        else {
            lock.lock();

            try {
                while (locVal >= upBound && updateGuard.get()) {
                    try {
                        cond.await(500, MILLISECONDS);
                    }
                    catch (InterruptedException e) {
                        throw new GridInterruptedException(e);
                    }
                }

                checkRemoved();

                // If reserved range isn't exhausted.
                if (locVal + l <= upBound) {
                    long curVal = locVal;

                    locVal += l;

                    return updated ? locVal : curVal;
                }
            }
            finally {
                lock.unlock();
            }
        }
    }
}
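/*
 * internalUpdate() above serves increments from a locally reserved range and only performs the
 * (transactional) reservation via CU.outTx() when the range is exhausted, while updateGuard
 * ensures a single thread does the reservation and other threads wait. A minimal standalone
 * sketch of that reserve-a-range pattern follows; RangeReserver, BATCH and nextValue() are
 * hypothetical and the "reservation" is just an in-memory increment, not the GridGain code.
 */
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class RangeReserver {
    private static final long BATCH = 1000;

    private final ReentrantLock lock = new ReentrantLock();
    private final Condition cond = lock.newCondition();
    private final AtomicBoolean updateGuard = new AtomicBoolean();

    private long locVal;   // Next value to hand out (guarded by lock).
    private long upBound;  // Exclusive upper bound of the reserved range (guarded by lock).

    public long nextValue() throws InterruptedException {
        while (true) {
            lock.lock();

            try {
                // Serve from the locally reserved range if it is not exhausted.
                if (locVal < upBound)
                    return locVal++;
            }
            finally {
                lock.unlock();
            }

            if (updateGuard.compareAndSet(false, true)) {
                long cur;

                lock.lock();

                try {
                    cur = upBound;
                }
                finally {
                    lock.unlock();
                }

                // Stand-in for the remote/transactional reservation; in the method above this is
                // the CU.outTx(updateCall, ctx) call and is deliberately kept outside the lock.
                long newBound = cur + BATCH;

                lock.lock();

                try {
                    upBound = newBound;

                    updateGuard.set(false);

                    cond.signalAll();
                }
                finally {
                    lock.unlock();
                }
            }
            else {
                lock.lock();

                try {
                    // Another thread is reserving a new range; wait until it publishes it.
                    while (locVal >= upBound && updateGuard.get())
                        cond.await();
                }
                finally {
                    lock.unlock();
                }
            }
        }
    }
}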
/** {@inheritDoc} */
@Override public int countDown() throws GridException {
    return CU.outTx(new CountDownCallable(1), ctx);
}
/**
 * Removes locks regardless of whether they are owned or not for given version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" + ver +
                            ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to remove locks for keys: " + keys, ex);
    }
}
/**
 * @param e Transaction entry.
 * @return {@code True} if entry is locally mapped as a primary or backup node.
 */
protected boolean isNearLocallyMapped(GridCacheEntryEx<K, V> e) {
    return F.contains(ctx.affinity(e.key(), CU.allNodes(ctx)), ctx.localNode());
}
/** {@inheritDoc} */
@Override public void countDownAll() throws GridException {
    CU.outTx(new CountDownCallable(0), ctx);
}
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes) [lock=" + rmv +
                                    ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock keys: " + keys, ex);
    }
}
/**
 * @param obj Object to marshal.
 * @return Marshalled byte array.
 * @throws GridException If marshalling failed.
 */
private byte[] marshal(Object obj) throws GridException {
    return CU.marshal(cctx, obj).getEntireArray();
}
/** {@inheritDoc} */
@Override public void start0() throws GridException {
    spaceName = CU.swapSpaceName(cctx);

    swapMgr = cctx.gridSwap();
}
/**
 * @param nodeId Reader to add.
 * @param msgId Message ID.
 * @return Future for all relevant transactions that were active at the time of adding reader,
 *      or {@code null} if there is nothing to wait for.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable public GridFuture<Boolean> addReader(UUID nodeId, long msgId) throws GridCacheEntryRemovedException {
    // Don't add local node as reader.
    if (cctx.nodeId().equals(nodeId))
        return null;

    GridNode node = cctx.discovery().node(nodeId);

    // If remote node has no near cache, don't add it.
    if (node == null || !U.hasNearCache(node, cctx.dht().near().name()))
        return null;

    // If remote node is a primary or backup node, don't add it as a reader.
    if (U.nodeIds(cctx.affinity(partition(), CU.allNodes(cctx))).contains(nodeId))
        return null;

    boolean ret = false;

    GridCacheMultiTxFuture<K, V> txFut;

    Collection<GridCacheMvccCandidate<K>> cands = null;

    synchronized (mux) {
        checkObsolete();

        txFut = this.txFut;

        ReaderId reader = readerId(nodeId);

        if (reader == null) {
            reader = new ReaderId(nodeId, msgId);

            readers = new LinkedList<ReaderId>(readers);

            readers.add(reader);

            // Seal.
            readers = Collections.unmodifiableList(readers);

            txFut = this.txFut = new GridCacheMultiTxFuture<K, V>(cctx);

            cands = localCandidates();

            ret = true;
        }
        else {
            long id = reader.messageId();

            if (id < msgId)
                reader.messageId(msgId);
        }
    }

    if (ret) {
        assert txFut != null;

        if (!F.isEmpty(cands)) {
            for (GridCacheMvccCandidate<K> c : cands) {
                GridCacheTxEx<K, V> tx = cctx.tm().<GridCacheTxEx<K, V>>tx(c.version());

                if (tx != null) {
                    assert tx.local();

                    txFut.addTx(tx);
                }
            }
        }

        txFut.init();

        if (!txFut.isDone()) {
            txFut.listenAsync(new CI1<GridFuture<?>>() {
                @Override public void apply(GridFuture<?> f) {
                    synchronized (mux) {
                        // Release memory.
                        GridDhtCacheEntry.this.txFut = null;
                    }
                }
            });
        }
        else
            // Release memory.
            txFut = this.txFut = null;
    }

    return txFut;
}
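/*
 * addReader() above updates the reader list copy-on-write style: under the mutex it copies the
 * current list, adds the new reader, and re-publishes a sealed (unmodifiable) snapshot, so
 * concurrent code can iterate an old snapshot without holding the lock. A minimal standalone
 * sketch of that idiom follows; ReaderRegistry is hypothetical and the volatile field is an
 * assumption made so reads can stay lock-free in this sketch.
 */
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;

public class ReaderRegistry {
    private final Object mux = new Object();

    // Always an unmodifiable snapshot; safe to hand out and iterate without the mutex.
    private volatile List<UUID> readers = Collections.emptyList();

    public boolean addReader(UUID nodeId) {
        synchronized (mux) {
            if (readers.contains(nodeId))
                return false;

            List<UUID> cp = new LinkedList<UUID>(readers);

            cp.add(nodeId);

            // Seal the new snapshot before publishing it.
            readers = Collections.unmodifiableList(cp);

            return true;
        }
    }

    public List<UUID> readers() {
        return readers;
    }
}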