/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    super.readExternal(in);

    threadId = in.readLong();

    commit = in.readBoolean();
    invalidate = in.readBoolean();
    reply = in.readBoolean();

    futId = U.readGridUuid(in);
    commitVer = CU.readVersion(in);
    baseVer = CU.readVersion(in);

    writeEntries = U.readList(in);
}
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    super.writeExternal(out);

    out.writeLong(threadId);

    out.writeBoolean(commit);
    out.writeBoolean(invalidate);
    out.writeBoolean(reply);

    U.writeGridUuid(out, futId);
    CU.writeVersion(out, commitVer);
    CU.writeVersion(out, baseVer);

    U.writeCollection(out, writeEntries);
}
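/*
 * Editor's sketch: the read/write pair above relies on the Externalizable
 * contract that fields are read back in exactly the order they were written.
 * Below is a minimal, self-contained illustration of that round-trip pattern;
 * SampleMessage and its fields are hypothetical, not GridGain API.
 */
import java.io.*;

public class SampleMessage implements Externalizable {
    private long threadId;
    private boolean commit;

    /** Public no-arg constructor required by Externalizable. */
    public SampleMessage() {
        // No-op.
    }

    @Override public void writeExternal(ObjectOutput out) throws IOException {
        out.writeLong(threadId);  // 1st field on the wire.
        out.writeBoolean(commit); // 2nd field on the wire.
    }

    @Override public void readExternal(ObjectInput in) throws IOException {
        // Mirror writeExternal exactly: same fields, same order.
        threadId = in.readLong();
        commit = in.readBoolean();
    }
}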
/** {@inheritDoc} */
@Override public GridFuture<Boolean> loadMissing(boolean async, final Collection<? extends K> keys,
    final GridInClosure2<K, V> closure) {
    GridFuture<Map<K, V>> f = cctx.near().txLoadAsync(this, keys, CU.<K, V>empty());

    return new GridEmbeddedFuture<Boolean, Map<K, V>>(
        cctx.kernalContext(),
        f,
        new C2<Map<K, V>, Exception, Boolean>() {
            @Override public Boolean apply(Map<K, V> map, Exception e) {
                if (e != null) {
                    setRollbackOnly();

                    throw new GridClosureException(e);
                }

                // Must loop through keys, not map entries,
                // as map entries may not have all the keys.
                for (K key : keys)
                    closure.apply(key, map.get(key));

                return true;
            }
        });
}
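/*
 * Editor's sketch: the embedded future above chains a continuation onto the
 * async load and deliberately iterates the *requested* keys, since the loaded
 * map may not contain them all. The same shape with java.util.concurrent only;
 * loadAsync and the closure are hypothetical stand-ins.
 */
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class LoadMissingSketch {
    static CompletableFuture<Boolean> loadMissing(
        Collection<String> keys,
        CompletableFuture<Map<String, Integer>> loadAsync,
        BiConsumer<String, Integer> closure
    ) {
        return loadAsync.thenApply(map -> {
            // Loop through keys, not map entries: absent keys still get a
            // callback (with a null value), exactly like the method above.
            for (String key : keys)
                closure.accept(key, map.get(key));

            return true;
        });
    }
}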
/** {@inheritDoc} */ @SuppressWarnings("TypeMayBeWeakened") @Nullable private Collection<byte[]> marshalFieldsCollection( @Nullable Collection<Object> col, GridCacheContext<K, V> ctx) throws GridException { assert ctx != null; if (col == null) return null; Collection<List<Object>> col0 = new ArrayList<>(col.size()); for (Object o : col) { List<GridIndexingEntity<?>> list = (List<GridIndexingEntity<?>>) o; List<Object> list0 = new ArrayList<>(list.size()); for (GridIndexingEntity<?> ent : list) { if (ent.bytes() != null) list0.add(ent.bytes()); else { if (ctx.deploymentEnabled()) prepareObject(ent.value(), ctx); list0.add(CU.marshal(ctx, ent.value())); } } col0.add(list0); } return marshalCollection(col0, ctx); }
/** {@inheritDoc} */
@Override public Integer call() throws Exception {
    GridCacheTx tx = CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ);

    try {
        GridCacheCountDownLatchValue latchVal = latchView.get(key);

        if (latchVal == null) {
            if (log.isDebugEnabled())
                log.debug("Failed to find count down latch with given name: " + name);

            assert cnt == 0;

            return cnt;
        }

        int retVal;

        if (val > 0) {
            retVal = latchVal.get() - val;

            if (retVal < 0)
                retVal = 0;
        }
        else
            retVal = 0;

        latchVal.set(retVal);

        latchView.put(key, latchVal);

        tx.commit();

        return retVal;
    }
    finally {
        tx.end();
    }
}
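/*
 * Editor's sketch: the call above is a transactional read-modify-write that
 * clamps the latch count at zero (a non-positive argument drains it
 * completely). The same arithmetic, minus the transaction, in plain Java;
 * CountDownSketch is hypothetical.
 */
import java.util.concurrent.atomic.AtomicInteger;

public class CountDownSketch {
    private final AtomicInteger cnt = new AtomicInteger(10);

    /** Counts down by {@code val} if positive, otherwise drains to zero; never goes below zero. */
    int countDown(int val) {
        return cnt.updateAndGet(cur -> val > 0 ? Math.max(cur - val, 0) : 0);
    }
}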
/**
 * @param entry Transaction entry.
 * @param nodes Nodes.
 */
private void map(GridCacheTxEntry<K, V> entry, Collection<GridRichNode> nodes) {
    GridRichNode primary = CU.primary0(cctx.affinity(entry.key(), nodes));

    GridDistributedTxMapping<K, V> t = mappings.get(primary.id());

    if (t == null)
        mappings.put(primary.id(), t = new GridDistributedTxMapping<K, V>(primary));

    t.add(entry);
}
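/*
 * Editor's sketch: map() above is the classic "group by primary node" idiom —
 * look up the bucket for the node ID, create it lazily, append the entry. A
 * plain-Java equivalent (UUID stands in for the node ID, String for the entry):
 */
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class TxMappingSketch {
    private final Map<UUID, List<String>> mappings = new HashMap<>();

    void map(String entry, UUID primaryId) {
        // computeIfAbsent collapses the get/null-check/put dance into one call.
        mappings.computeIfAbsent(primaryId, id -> new ArrayList<>()).add(entry);
    }
}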
/**
 * Peeks only into the near cache, without looking into the DHT cache.
 *
 * @param key Key.
 * @return Peeked value.
 */
@Nullable public V peekNearOnly(K key) {
    try {
        return peek0(true, key, SMART, CU.<K, V>empty());
    }
    catch (GridCacheFilterFailedException ignored) {
        if (log.isDebugEnabled())
            log.debug("Filter validation failed for key: " + key);

        return null;
    }
}
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    GridUtils.writeUuid(out, nodeId);

    CU.writeVersion(out, ver);

    out.writeLong(timeout);
    out.writeLong(threadId);
    out.writeLong(id);

    out.writeShort(flags());
}
/** {@inheritDoc} */
@Override public void prepareMarshal(GridCacheContext<K, V> ctx) throws GridException {
    super.prepareMarshal(ctx);

    if (map != null) {
        if (ctx.deploymentEnabled()) {
            for (K key : map.keySet())
                prepareObject(key, ctx);
        }

        mapBytes = CU.marshal(ctx, map);
    }
}
/** {@inheritDoc} */
@Override public boolean compareAndSet(T expVal, T newVal, S expStamp, S newStamp) throws GridException {
    checkRemoved();

    return CU.outTx(
        internalCompareAndSet(
            F0.equalTo(expVal),
            wrapperClosure(newVal),
            F0.equalTo(expStamp),
            wrapperClosure(newStamp)),
        ctx);
}
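/*
 * Editor's sketch: this is a stamped compare-and-set — both the expected value
 * and the expected stamp must match before both are replaced atomically. The
 * JDK ships the same primitive as AtomicStampedReference (note it compares
 * references by identity; the interned string literals below make that work):
 */
import java.util.concurrent.atomic.AtomicStampedReference;

public class StampedCasSketch {
    public static void main(String[] args) {
        AtomicStampedReference<String> ref = new AtomicStampedReference<>("v1", 1);

        // Succeeds only if the current reference is "v1" AND the current stamp is 1.
        boolean swapped = ref.compareAndSet("v1", "v2", 1, 2);

        System.out.println(swapped + " -> " + ref.getReference() + '/' + ref.getStamp());
    }
}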
/**
 * @param reads Read entries.
 * @param writes Write entries.
 */
@SuppressWarnings({"unchecked"})
private void prepare(Iterable<GridCacheTxEntry<K, V>> reads, Iterable<GridCacheTxEntry<K, V>> writes) {
    Collection<GridRichNode> nodes = CU.allNodes(cctx);

    // Assign keys to primary nodes.
    for (GridCacheTxEntry<K, V> read : reads)
        map(read, nodes);

    for (GridCacheTxEntry<K, V> write : writes)
        map(write, nodes);

    // Create mini futures.
    for (GridDistributedTxMapping<K, V> m : mappings.values())
        finish(m);
}
/** {@inheritDoc} */
@Override public void remove() {
    if (currEntry == null)
        throw new IllegalStateException();

    assert currIter != null;

    currIter.remove();

    try {
        GridNearCache.this.remove(currEntry.getKey(), CU.<K, V>empty());
    }
    catch (GridException e) {
        throw new GridRuntimeException(e);
    }
}
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param failedNodeId ID of the failed node that started the transaction.
 */
@SuppressWarnings("ConstantConditions")
public GridCachePessimisticCheckCommittedTxFuture(GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx,
    UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    // Log under this future's own class (was a copy-paste from the optimistic future).
    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion()))
        nodes.put(node.id(), node);
}
/** @throws GridException If operation failed. */
private void initializeLatch() throws GridException {
    if (initGuard.compareAndSet(false, true)) {
        try {
            internalLatch = CU.outTx(
                new Callable<CountDownLatch>() {
                    @Override public CountDownLatch call() throws Exception {
                        GridCacheTx tx = CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ);

                        try {
                            GridCacheCountDownLatchValue val = latchView.get(key);

                            if (val == null) {
                                if (log.isDebugEnabled())
                                    log.debug("Failed to find count down latch with given name: " + name);

                                assert cnt == 0;

                                return new CountDownLatch(cnt);
                            }

                            tx.commit();

                            return new CountDownLatch(val.get());
                        }
                        finally {
                            tx.end();
                        }
                    }
                },
                ctx);

            if (log.isDebugEnabled())
                log.debug("Initialized internal latch: " + internalLatch);
        }
        finally {
            initLatch.countDown();
        }
    }
    else {
        try {
            initLatch.await();
        }
        catch (InterruptedException ignored) {
            throw new GridException("Thread has been interrupted.");
        }

        if (internalLatch == null)
            throw new GridException("Internal latch has not been properly initialized.");
    }
}
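/*
 * Editor's sketch: initializeLatch() uses the "init guard" idiom — one thread
 * wins the AtomicBoolean CAS and initializes, everyone else parks on a
 * CountDownLatch until the winner finishes (successfully or not). A generic,
 * hypothetical version of just that idiom:
 */
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class InitGuardSketch {
    private final AtomicBoolean initGuard = new AtomicBoolean();
    private final CountDownLatch initLatch = new CountDownLatch(1);

    private volatile Object resource;

    Object get() throws InterruptedException {
        if (initGuard.compareAndSet(false, true)) {
            try {
                resource = new Object(); // Expensive one-time initialization.
            }
            finally {
                initLatch.countDown(); // Release waiters even if init failed.
            }
        }
        else
            initLatch.await(); // Losers wait; they must re-check the result.

        return resource;
    }
}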
/**
 * @param cctx Cache context.
 * @param prj Projection (optional).
 * @return Collection of data nodes in provided projection (if any).
 */
private static Collection<GridNode> nodes(final GridCacheContext<?, ?> cctx, @Nullable final GridProjection prj) {
    assert cctx != null;

    return F.view(
        CU.allNodes(cctx),
        new P1<GridNode>() {
            @Override public boolean apply(GridNode n) {
                GridCacheDistributionMode mode = U.distributionMode(n, cctx.name());

                return (mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED) &&
                    (prj == null || prj.node(n.id()) != null);
            }
        });
}
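/*
 * Editor's sketch: nodes() composes two predicates — "runs the cache as a data
 * node" and "belongs to the optional projection" (null means unrestricted).
 * A hypothetical plain-Java rendering of the same filter:
 */
import java.util.ArrayList;
import java.util.Collection;
import java.util.Set;
import java.util.UUID;

public class NodeFilterSketch {
    enum Mode { PARTITIONED_ONLY, NEAR_PARTITIONED, CLIENT_ONLY }

    static final class Node {
        final UUID id;
        final Mode mode;

        Node(UUID id, Mode mode) {
            this.id = id;
            this.mode = mode;
        }
    }

    static Collection<Node> dataNodes(Collection<Node> all, Set<UUID> prj) {
        Collection<Node> res = new ArrayList<>();

        for (Node n : all) {
            boolean dataNode = n.mode == Mode.PARTITIONED_ONLY || n.mode == Mode.NEAR_PARTITIONED;

            if (dataNode && (prj == null || prj.contains(n.id)))
                res.add(n);
        }

        return res;
    }
}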
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    super.writeExternal(out);

    assert futId != null;
    assert miniId != null;
    assert ver != null;

    U.writeGridUuid(out, futId);
    U.writeGridUuid(out, miniId);
    U.writeCollection(out, entries);
    U.writeIntCollection(out, invalidParts);
    CU.writeVersion(out, ver);

    out.writeObject(err);
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    super.readExternal(in);

    topVer = in.readLong();

    implicitTx = in.readBoolean();
    implicitSingleTx = in.readBoolean();
    syncCommit = in.readBoolean();
    syncRollback = in.readBoolean();

    filterBytes = (byte[][])in.readObject();

    dhtVers = U.readArray(in, CU.versionArrayFactory());

    miniId = U.readGridUuid(in);

    assert miniId != null;
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    nodeId = GridUtils.readUuid(in);

    ver = CU.readVersion(in);

    timeout = in.readLong();
    threadId = in.readLong();
    id = in.readLong();

    short flags = in.readShort();

    mask(OWNER, OWNER.get(flags));
    mask(USED, USED.get(flags));
    mask(TX, TX.get(flags));

    ts = U.currentTimeMillis();
}
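/*
 * Editor's sketch: the writeExternal/readExternal pair above packs several
 * boolean flags into a single short on the wire and unpacks them with masks on
 * read. A minimal, hypothetical version of that bit-flag round-trip:
 */
public class FlagsSketch {
    private static final short OWNER = 1 << 0;
    private static final short USED = 1 << 1;
    private static final short TX = 1 << 2;

    /** Packs three booleans into one short for serialization. */
    static short pack(boolean owner, boolean used, boolean tx) {
        short flags = 0;

        if (owner)
            flags |= OWNER;
        if (used)
            flags |= USED;
        if (tx)
            flags |= TX;

        return flags;
    }

    /** Reads one flag back; must mirror pack(). */
    static boolean get(short flags, short mask) {
        return (flags & mask) != 0;
    }
}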
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    super.readExternal(in);

    futId = U.readGridUuid(in);
    miniId = U.readGridUuid(in);
    entries = U.readCollection(in);
    invalidParts = U.readIntSet(in);
    ver = CU.readVersion(in);

    err = (Throwable)in.readObject();

    if (invalidParts == null)
        invalidParts = Collections.emptyList();

    assert futId != null;
    assert miniId != null;
    assert ver != null;
}
/**
 * @param cctx Registry.
 * @param keys Keys to lock.
 * @param tx Transaction.
 * @param read Read flag.
 * @param retval Flag to return value or not.
 * @param timeout Lock acquisition timeout.
 * @param filter Filter.
 */
public GridNearLockFuture(GridCacheContext<K, V> cctx, Collection<? extends K> keys,
    @Nullable GridNearTxLocal<K, V> tx, boolean read, boolean retval, long timeout,
    GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
        timeoutObj = new LockTimeoutObject();

        cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
}
/**
 * Removes locks for the given version and keys, regardless of whether they are currently owned.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" +
                            ver + ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(),
                        Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes)" +
                                    " [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
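/*
 * Editor's sketch: both unlock paths above build at most one request per
 * primary node and size each batch with ceil(totalKeys / nodeCount) as a
 * capacity hint. The batching idiom in isolation; the hash-based "affinity"
 * and all names are hypothetical.
 */
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class UnlockBatchSketch {
    /** Groups keys into one batch per primary node; {@code nodes} must be non-empty. */
    static Map<UUID, List<String>> batchByPrimary(Collection<String> keys, List<UUID> nodes) {
        // Capacity hint: expected keys per node if they spread evenly.
        int keyCnt = (int)Math.ceil((double)keys.size() / nodes.size());

        Map<UUID, List<String>> batches = new HashMap<>(nodes.size());

        for (String key : keys) {
            // Stand-in for affinity: pick a primary node by key hash.
            UUID primary = nodes.get(Math.floorMod(key.hashCode(), nodes.size()));

            batches.computeIfAbsent(primary, id -> new ArrayList<>(keyCnt)).add(key);
        }

        return batches;
    }
}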
/**
 * @param e Transaction entry.
 * @return {@code True} if entry is locally mapped as a primary or backup node.
 */
protected boolean isNearLocallyMapped(GridCacheEntryEx<K, V> e) {
    return F.contains(ctx.affinity(e.key(), CU.allNodes(ctx)), ctx.localNode());
}
/** {@inheritDoc} */
@Override public T get() throws GridException {
    checkRemoved();

    return CU.outTx(getCall, ctx);
}
/** {@inheritDoc} */
@Override public Map<GridRichNode, Collection<K>> mapKeysToNodes(Collection<? extends K> keys) {
    return CU.mapKeysToNodes(ctx, keys);
}
/** @return Near entries. */
public Set<GridCacheEntry<K, V>> nearEntries() {
    return super.entrySet(CU.<K, V>empty());
}
/**
 * @param nodeId Sender node ID.
 * @param req Finish transaction message.
 */
@SuppressWarnings({"CatchGenericClass"})
private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
        ClassLoader ldr = ctx.deploy().globalLoader();

        if (req.commit()) {
            // If lock was acquired explicitly.
            if (tx == null) {
                // Create transaction and add entries.
                tx = ctx.tm().onCreated(
                    new GridReplicatedTxRemote<K, V>(
                        ldr,
                        nodeId,
                        req.threadId(),
                        req.version(),
                        req.commitVersion(),
                        PESSIMISTIC,
                        READ_COMMITTED,
                        req.isInvalidate(),
                        /*timeout*/0,
                        /*read entries*/null,
                        req.writes(),
                        ctx));

                if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + req);
            }
            else {
                boolean set = tx.commitVersion(req.commitVersion());

                assert set;
            }

            Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

            if (!F.isEmpty(writeEntries)) {
                // In OPTIMISTIC mode, we get the values at PREPARE stage.
                assert tx.concurrency() == PESSIMISTIC;

                for (GridCacheTxEntry<K, V> entry : writeEntries) {
                    // Unmarshal write entries.
                    entry.unmarshal(ctx, ldr);

                    if (log.isDebugEnabled())
                        log.debug("Unmarshalled transaction entry from pessimistic transaction [key=" +
                            entry.key() + ", value=" + entry.value() + ", tx=" + tx + ']');

                    if (!tx.setWriteValue(entry))
                        U.warn(log, "Received entry to commit that was not present in transaction [entry=" +
                            entry + ", tx=" + tx + ']');
                }
            }

            // Add completed versions.
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            if (tx.pessimistic())
                tx.prepare();

            tx.commit();
        }
        else if (tx != null) {
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            tx.rollback();
        }

        if (req.replyRequired()) {
            GridCacheMessage<K, V> res = new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

            try {
                ctx.io().send(nodeId, res);
            }
            catch (Throwable e) {
                // Double-check.
                if (ctx.discovery().node(nodeId) == null) {
                    if (log.isDebugEnabled())
                        log.debug("Node left while sending finish response [nodeId=" + nodeId +
                            ", res=" + res + ']');
                }
                else
                    U.error(log, "Failed to send finish response to node [nodeId=" + nodeId +
                        ", res=" + res + ']', e);
            }
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Attempted to start a completed transaction (will ignore): " + e);
    }
    catch (Throwable e) {
        U.error(log, "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']', e);

        if (tx != null)
            tx.rollback();
    }
}
/**
 * Synchronous sequence update operation. Will add given amount to the sequence value.
 *
 * @param l Increment amount.
 * @param updateCall Cache call that will update sequence reservation count in accordance with {@code l}.
 * @param updated If {@code true}, will return sequence value after update, otherwise will return
 *      sequence value prior to update.
 * @return Sequence value.
 * @throws GridException If update failed.
 */
private long internalUpdate(long l, @Nullable Callable<Long> updateCall, boolean updated) throws GridException {
    checkRemoved();

    assert l > 0;

    lock.lock();

    try {
        // If reserved range isn't exhausted.
        if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return updated ? locVal : curVal;
        }
    }
    finally {
        lock.unlock();
    }

    if (updateCall == null)
        updateCall = internalUpdate(l, updated);

    while (true) {
        if (updateGuard.compareAndSet(false, true)) {
            try {
                // This call must be outside lock.
                return CU.outTx(updateCall, ctx);
            }
            finally {
                lock.lock();

                try {
                    updateGuard.set(false);

                    cond.signalAll();
                }
                finally {
                    lock.unlock();
                }
            }
        }
        else {
            lock.lock();

            try {
                while (locVal >= upBound && updateGuard.get()) {
                    try {
                        cond.await(500, MILLISECONDS);
                    }
                    catch (InterruptedException e) {
                        throw new GridInterruptedException(e);
                    }
                }

                checkRemoved();

                // If reserved range isn't exhausted.
                if (locVal + l <= upBound) {
                    long curVal = locVal;

                    locVal += l;

                    return updated ? locVal : curVal;
                }
            }
            finally {
                lock.unlock();
            }
        }
    }
}
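/*
 * Editor's sketch: internalUpdate() serves increments from a locally reserved
 * range and only touches the distributed counter when the range is exhausted
 * (with updateGuard electing a single reserving thread). A compressed,
 * single-JVM sketch of the reserve-range idiom; RESERVE_SIZE and the shared
 * "global" counter are hypothetical stand-ins for the cache-backed value.
 */
import java.util.concurrent.locks.ReentrantLock;

public class SequenceSketch {
    private static final long RESERVE_SIZE = 1000;

    private final ReentrantLock lock = new ReentrantLock();

    private long locVal;  // Next value to hand out locally.
    private long upBound; // Inclusive upper bound of the reserved range.
    private long global;  // Stands in for the shared, cache-backed counter.

    long incrementAndGet() {
        lock.lock();

        try {
            if (locVal + 1 > upBound) {
                // Range exhausted: reserve a fresh block from the shared counter.
                // The real code does this OUTSIDE the local lock, in a transaction,
                // while other threads wait on a condition.
                locVal = global;
                global += RESERVE_SIZE;
                upBound = locVal + RESERVE_SIZE;
            }

            return ++locVal;
        }
        finally {
            lock.unlock();
        }
    }
}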
/** {@inheritDoc} */
@Override public void set(T val) throws GridException {
    checkRemoved();

    CU.outTx(internalSet(val), ctx);
}
/** {@inheritDoc} */
@Override public boolean compareAndSet(T expVal, T newVal) throws GridException {
    checkRemoved();

    return CU.outTx(internalCompareAndSet(wrapperPredicate(expVal), wrapperClosure(newVal)), ctx);
}