/** {@inheritDoc} */
@Override public Map<K, V> peekAll(@Nullable Collection<? extends K> keys,
    @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty())
        return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    if (map.size() + skipped.size() != keys.size()) {
        map.putAll(dht.peekAll(F.view(keys, new P1<K>() {
            @Override public boolean apply(K k) {
                return !map.containsKey(k) && !skipped.contains(k);
            }
        }), filter));
    }

    return map;
}
/** {@inheritDoc} */
@Override public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) {
    if (F.isEmpty(lsnr)) {
        synchronized (mux) {
            lsnrs.clear();
        }
    }
    else {
        synchronized (mux) {
            lsnrs.removeAll(F.asList(lsnr));
        }
    }
}
/** {@inheritDoc} */
@Override public GridDeployment getDeployment(final GridUuid ldrId) {
    synchronized (mux) {
        return F.find(F.flat(cache.values()), null, new P1<SharedDeployment>() {
            @Override public boolean apply(SharedDeployment d) {
                return d.classLoaderId().equals(ldrId);
            }
        });
    }
}
/** {@inheritDoc} */
@Override public V unswap(K key) throws GridException {
    ctx.denyOnFlags(F.asList(READ, SKIP_SWAP));

    // Unswap only from DHT. Near cache does not have swap storage.
    return dht.unswap(key);
}
/**
 * @param mapping Mapping to order.
 * @param committedVers Committed versions.
 * @param rolledbackVers Rolled back versions.
 */
void orderCompleted(GridDistributedTxMapping<K, V> mapping, Collection<GridCacheVersion> committedVers,
    Collection<GridCacheVersion> rolledbackVers) {
    for (GridCacheTxEntry<K, V> txEntry : F.concat(false, mapping.reads(), mapping.writes())) {
        while (true) {
            GridDistributedCacheEntry<K, V> entry = (GridDistributedCacheEntry<K, V>)txEntry.cached();

            try {
                // Handle explicit locks.
                GridCacheVersion base = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;

                entry.doneRemote(xidVer, base, committedVers, rolledbackVers);

                if (ec())
                    entry.recheck();

                break;
            }
            catch (GridCacheEntryRemovedException ignored) {
                assert entry.obsoleteVersion() != null;

                if (log.isDebugEnabled())
                    log.debug("Replacing obsolete entry in remote transaction [entry=" + entry +
                        ", tx=" + this + ']');

                // Replace the entry.
                txEntry.cached(cctx.cache().entryEx(txEntry.key()), entry.keyBytes());
            }
        }
    }
}
/** {@inheritDoc} */
@Override public Map<K, V> peekAll(@Nullable Collection<? extends K> keys,
    @Nullable Collection<GridCachePeekMode> modes) throws GridException {
    if (keys == null || keys.isEmpty())
        return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = !modes.contains(PARTITIONED_ONLY) ?
        peekAll0(keys, modes, ctx.tm().localTxx(), skipped) : new GridLeanMap<K, V>(0);

    if (map.size() != keys.size() && !modes.contains(NEAR_ONLY)) {
        map.putAll(dht.peekAll(F.view(keys, new P1<K>() {
            @Override public boolean apply(K k) {
                return !map.containsKey(k) && !skipped.contains(k);
            }
        }), modes));
    }

    return map;
}
/** {@inheritDoc} */
@Override public <T> T affinityKey() {
    try {
        return (T)job.getDeployment().annotatedValue(job.getJob(), GridCacheAffinityMapped.class);
    }
    catch (GridException e) {
        throw F.wrap(e);
    }
}
/** {@inheritDoc} */
@Override public String cacheName() {
    try {
        return (String)job.getDeployment().annotatedValue(job.getJob(), GridCacheName.class);
    }
    catch (GridException e) {
        throw F.wrap(e);
    }
}
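// Illustrative sketch (not part of the original source): the two accessors above read values off
// elements of the user's job annotated with GridCacheAffinityMapped and GridCacheName, resolved via
// GridDeployment.annotatedValue(). A hypothetical job might expose them like this (the field names
// and the field-level placement are assumptions; consult the annotation javadocs for allowed targets):
//
//     private static class ExampleJob implements GridJob {
//         @GridCacheAffinityMapped
//         private final String affKey = "orderId-42"; // Routes the job to the node owning this key.
//
//         @GridCacheName
//         private final String cacheName = "partitioned"; // Cache the affinity key belongs to.
//
//         @Override public Object execute() { return null; }
//
//         @Override public void cancel() { /* No-op. */ }
//     }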
/** @return Involved nodes. */
@Override public Collection<? extends GridNode> nodes() {
    return F.viewReadOnly(futures(), new GridClosure<GridFuture<?>, GridRichNode>() {
        @Nullable @Override public GridRichNode apply(GridFuture<?> f) {
            if (isMini(f))
                return ((MiniFuture)f).node();

            return cctx.rich().rich(cctx.discovery().localNode());
        }
    });
}
/** {@inheritDoc} */
@Override public Iterator<GridCacheEntry<K, V>> iterator() {
    return new EntryIterator(nearSet.iterator(),
        F.iterator0(dhtSet, false, new P1<GridCacheEntry<K, V>>() {
            @Override public boolean apply(GridCacheEntry<K, V> e) {
                return !GridNearCache.super.containsKey(e.getKey(), null);
            }
        }));
}
/** {@inheritDoc} */
@Override public GridFuture<Map<K, V>> getAllAsync(@Nullable Collection<? extends K> keys,
    @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    ctx.denyOnFlag(LOCAL);

    if (F.isEmpty(keys))
        return new GridFinishedFuture<Map<K, V>>(ctx.kernalContext(), Collections.<K, V>emptyMap());

    GridCacheTxLocalAdapter<K, V> tx = ctx.tm().threadLocalTx();

    if (tx != null && !tx.implicit())
        return ctx.wrapCloneMap(tx.getAllAsync(keys, filter));

    return loadAsync(keys, false, filter);
}
/** @param mappings Mappings. */
void addEntryMapping(@Nullable Map<UUID, GridDistributedTxMapping<K, V>> mappings) {
    if (!F.isEmpty(mappings)) {
        this.mappings.putAll(mappings);

        if (log.isDebugEnabled())
            log.debug("Added mappings to transaction [locId=" + cctx.nodeId() + ", mappings=" + mappings +
                ", tx=" + this + ']');
    }
}
/**
 * @param ldr Loader.
 * @param nodeId Sender node ID.
 * @param req Request.
 * @return Remote transaction.
 * @throws GridException If failed.
 */
@Nullable public GridNearTxRemote<K, V> startRemoteTx(ClassLoader ldr, UUID nodeId,
    GridDhtTxPrepareRequest<K, V> req) throws GridException {
    if (!F.isEmpty(req.nearWrites())) {
        GridNearTxRemote<K, V> tx = new GridNearTxRemote<K, V>(
            ldr,
            nodeId,
            req.nearNodeId(),
            req.threadId(),
            req.version(),
            req.commitVersion(),
            req.concurrency(),
            req.isolation(),
            req.isInvalidate(),
            req.timeout(),
            req.nearWrites(),
            ctx);

        if (!tx.empty()) {
            tx = ctx.tm().onCreated(tx);

            if (tx == null || !ctx.tm().onStarted(tx))
                throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + tx);

            // Prepare prior to reordering, so the pending locks added
            // in prepare phase will get properly ordered as well.
            tx.prepare();

            // Add remote candidates and reorder completed and uncompleted versions.
            tx.addRemoteCandidates(req.candidatesByKey(), req.committedVersions(), req.rolledbackVersions());

            if (req.concurrency() == EVENTUALLY_CONSISTENT) {
                if (log.isDebugEnabled())
                    log.debug("Committing transaction during remote prepare: " + tx);

                tx.commit();

                if (log.isDebugEnabled())
                    log.debug("Committed transaction during remote prepare: " + tx);
            }
        }

        return tx;
    }

    return null;
}
/**
 * @param keys Keys to load.
 * @param reload Reload flag.
 * @param filter Filter.
 * @return Loaded values.
 */
public GridFuture<Map<K, V>> loadAsync(@Nullable Collection<? extends K> keys, boolean reload,
    @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (F.isEmpty(keys))
        return new GridFinishedFuture<Map<K, V>>(ctx.kernalContext(), Collections.<K, V>emptyMap());

    GridNearGetFuture<K, V> fut = new GridNearGetFuture<K, V>(ctx, keys, reload, null, filter);

    // Register future for responses.
    ctx.mvcc().addFuture(fut);

    fut.init();

    return ctx.wrapCloneMap(fut);
}
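// Usage sketch (hypothetical caller, not present in the original file): loadAsync() registers the
// get-future with the MVCC manager before initializing it, so responses that arrive early can still
// be routed to it. A caller would typically block on the returned future:
//
//     GridFuture<Map<K, V>> fut = loadAsync(keys, /*reload*/false, filter);
//     Map<K, V> loaded = fut.get(); // Waits for values fetched from the primary (DHT) nodes.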
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param commit Commit flag.
 */
public GridNearTxFinishFuture(GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    assert cctx != null;

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
}
/**
 * @param e Transaction entry.
 * @return {@code True} if the entry's key is mapped to the local node as a primary or backup node.
 */
protected boolean isNearLocallyMapped(GridCacheEntryEx<K, V> e) {
    return F.contains(ctx.affinity(e.key(), CU.allNodes(ctx)), ctx.localNode());
}
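// Clarifying sketch (assumption-based, not part of the original source): the check above asks whether
// the local node appears in the affinity node list computed for the entry's key, i.e. whether this
// node is a primary or backup owner of that key. Roughly equivalent form:
//
//     Collection<GridRichNode> owners = ctx.affinity(e.key(), CU.allNodes(ctx));
//     boolean nearLocallyMapped = owners.contains(ctx.localNode());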
/** {@inheritDoc} */
@Override public int size() {
    return F.size(iterator());
}
/**
 * Records all undeployed tasks.
 *
 * @param nodeId Left node ID.
 * @param undeployed Undeployed deployments.
 */
private void recordUndeployed(@Nullable UUID nodeId, Collection<SharedDeployment> undeployed) {
    if (!F.isEmpty(undeployed))
        for (SharedDeployment d : undeployed)
            d.recordUndeployed(nodeId);
}
/** {@inheritDoc} */
@Override public Collection<V> primaryValues(GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    return new GridCacheValueCollection<K, V>(ctx, primaryEntrySet(filter),
        ctx.vararg(F.<K, V>cacheHasPeekValue()));
}
/**
 * @param nodeId Sender node ID.
 * @param req Finish transaction message.
 */
@SuppressWarnings({"CatchGenericClass"})
private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
        ClassLoader ldr = ctx.deploy().globalLoader();

        if (req.commit()) {
            // If lock was acquired explicitly.
            if (tx == null) {
                // Create transaction and add entries.
                tx = ctx.tm().onCreated(
                    new GridReplicatedTxRemote<K, V>(
                        ldr,
                        nodeId,
                        req.threadId(),
                        req.version(),
                        req.commitVersion(),
                        PESSIMISTIC,
                        READ_COMMITTED,
                        req.isInvalidate(),
                        /*timeout*/0,
                        /*read entries*/null,
                        req.writes(),
                        ctx));

                if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException("Attempt to start a completed " +
                        "transaction: " + req);
            }
            else {
                boolean set = tx.commitVersion(req.commitVersion());

                assert set;
            }

            Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

            if (!F.isEmpty(writeEntries)) {
                // In OPTIMISTIC mode, we get the values at PREPARE stage.
                assert tx.concurrency() == PESSIMISTIC;

                for (GridCacheTxEntry<K, V> entry : writeEntries) {
                    // Unmarshal write entries.
                    entry.unmarshal(ctx, ldr);

                    if (log.isDebugEnabled())
                        log.debug("Unmarshalled transaction entry from pessimistic transaction [key=" +
                            entry.key() + ", value=" + entry.value() + ", tx=" + tx + ']');

                    if (!tx.setWriteValue(entry))
                        U.warn(log, "Received entry to commit that was not present in transaction [entry=" +
                            entry + ", tx=" + tx + ']');
                }
            }

            // Add completed versions.
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            if (tx.pessimistic())
                tx.prepare();

            tx.commit();
        }
        else if (tx != null) {
            tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

            tx.rollback();
        }

        if (req.replyRequired()) {
            GridCacheMessage<K, V> res = new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

            try {
                ctx.io().send(nodeId, res);
            }
            catch (Throwable e) {
                // Double-check.
                if (ctx.discovery().node(nodeId) == null) {
                    if (log.isDebugEnabled())
                        log.debug("Node left while sending finish response [nodeId=" + nodeId +
                            ", res=" + res + ']');
                }
                else
                    U.error(log, "Failed to send finish response to node [nodeId=" + nodeId +
                        ", res=" + res + ']', e);
            }
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Attempted to start a completed transaction (will ignore): " + e);
    }
    catch (Throwable e) {
        U.error(log, "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']', e);

        if (tx != null)
            tx.rollback();
    }
}
/**
 * Removes locks for the given version and keys regardless of whether they are owned or not.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" +
                            ver + ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/**
 * Removes obsolete deployments in case of redeploy.
 *
 * @param meta Request metadata.
 * @return Tuple in which the first value indicates whether deployment may proceed and the second
 *      holds the newly created shared deployment, if any.
 */
private GridTuple2<Boolean, SharedDeployment> checkRedeploy(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    SharedDeployment newDep = null;

    for (List<SharedDeployment> deps : cache.values()) {
        for (SharedDeployment dep : deps) {
            if (!dep.isUndeployed() && !dep.isPendingUndeploy()) {
                long undeployTimeout = ctx.config().getNetworkTimeout();

                SharedDeployment doomed = null;

                // Only check deployments with no participants.
                if (!dep.hasParticipants()) {
                    // In case of SHARED deployment it is possible to get here if
                    // unmarshalling happens during undeploy. In this case, we
                    // simply don't do anything.
                    if (dep.deployMode() == CONTINUOUS) {
                        if (dep.existingDeployedClass(meta.className()) != null) {
                            // Change from shared deploy to shared undeploy or user version change.
                            // Simply remove all deployments with no participating nodes.
                            if (meta.deploymentMode() == SHARED || !meta.userVersion().equals(dep.userVersion()))
                                doomed = dep;
                        }
                    }
                }
                // If there are participants, we undeploy if class loader ID on some node changed.
                else if (dep.existingDeployedClass(meta.className()) != null) {
                    GridTuple2<GridUuid, Long> ldr = dep.getClassLoaderId(meta.senderNodeId());

                    if (ldr != null) {
                        if (!ldr.get1().equals(meta.classLoaderId())) {
                            // If deployed sequence number is less, then schedule for undeployment.
                            if (ldr.get2() < meta.sequenceNumber()) {
                                if (log.isDebugEnabled())
                                    log.debug("Received request for a class with newer sequence number " +
                                        "(will schedule current class for undeployment) [newSeq=" +
                                        meta.sequenceNumber() + ", oldSeq=" + ldr.get2() +
                                        ", senderNodeId=" + meta.senderNodeId() + ", newClsLdrId=" +
                                        meta.classLoaderId() + ", oldClsLdrId=" + ldr.get1() + ']');

                                doomed = dep;
                            }
                            else if (ldr.get2() > meta.sequenceNumber()) {
                                long time = System.currentTimeMillis() - dep.timestamp();

                                if (newDep == null && time < ctx.config().getNetworkTimeout()) {
                                    // Set undeployTimeout, so the class will be scheduled
                                    // for undeployment.
                                    undeployTimeout = ctx.config().getNetworkTimeout() - time;

                                    if (log.isDebugEnabled())
                                        log.debug("Received execution request for a stale class (will deploy and " +
                                            "schedule undeployment in " + undeployTimeout + "ms) " +
                                            "[curSeq=" + ldr.get2() + ", staleSeq=" + meta.sequenceNumber() +
                                            ", cls=" + meta.className() + ", senderNodeId=" + meta.senderNodeId() +
                                            ", curLdrId=" + ldr.get1() + ", staleLdrId=" + meta.classLoaderId() + ']');

                                    // We got the redeployed class before the old one.
                                    // Simply create a temporary deployment for the sender node,
                                    // and schedule undeploy for it.
                                    newDep = createNewDeployment(meta, false);

                                    doomed = newDep;
                                }
                                else {
                                    U.warn(log, "Received execution request for a class that has been redeployed " +
                                        "(will ignore): " + meta.alias());

                                    if (log.isDebugEnabled())
                                        log.debug("Received execution request for a class that has been redeployed " +
                                            "(will ignore) [alias=" + meta.alias() + ", dep=" + dep + ']');

                                    return F.t(false, null);
                                }
                            }
                            else {
                                U.error(log, "Sequence number does not correspond to class loader ID [seqNum=" +
                                    meta.sequenceNumber() + ", dep=" + dep + ']');

                                return F.t(false, null);
                            }
                        }
                    }
                }

                if (doomed != null) {
                    doomed.onUndeployScheduled();

                    if (log.isDebugEnabled())
                        log.debug("Deployment was scheduled for undeploy: " + doomed);

                    // Lifespan time.
                    final long endTime = System.currentTimeMillis() + undeployTimeout;

                    // Deployment to undeploy.
                    final SharedDeployment undep = doomed;

                    ctx.timeout().addTimeoutObject(new GridTimeoutObject() {
                        @Override public GridUuid timeoutId() {
                            return undep.classLoaderId();
                        }

                        @Override public long endTime() {
                            return endTime < 0 ? Long.MAX_VALUE : endTime;
                        }

                        @Override public void onTimeout() {
                            boolean removed = false;

                            // Hot redeployment.
                            synchronized (mux) {
                                assert undep.isPendingUndeploy();

                                if (!undep.isUndeployed()) {
                                    undep.undeploy();

                                    undep.onRemoved();

                                    removed = true;

                                    Collection<SharedDeployment> deps = cache.get(undep.userVersion());

                                    if (deps != null) {
                                        for (Iterator<SharedDeployment> i = deps.iterator(); i.hasNext();)
                                            if (i.next() == undep)
                                                i.remove();

                                        if (deps.isEmpty())
                                            cache.remove(undep.userVersion());
                                    }

                                    if (log.isInfoEnabled())
                                        log.info("Undeployed class loader due to deployment mode change, " +
                                            "user version change, or hot redeployment: " + undep);
                                }
                            }

                            // Outside synchronization.
                            if (removed)
                                undep.recordUndeployed(null);
                        }
                    });
                }
            }
        }
    }

    if (newDep != null) {
        List<SharedDeployment> list = F.addIfAbsent(cache, meta.userVersion(), F.<SharedDeployment>newList());

        assert list != null;

        list.add(newDep);
    }

    return F.t(true, newDep);
}
/**
 * @param depMode Deployment mode.
 * @param ldr Class loader to deploy.
 * @param cls Class.
 * @param alias Class alias.
 * @return Deployment.
 */
@SuppressWarnings({"ConstantConditions"})
private GridDeployment deploy(GridDeploymentMode depMode, ClassLoader ldr, Class<?> cls, String alias) {
    assert Thread.holdsLock(mux);

    LinkedList<GridDeployment> cachedDeps = null;

    GridDeployment dep = null;

    // Find existing class loader info.
    for (LinkedList<GridDeployment> deps : cache.values()) {
        for (GridDeployment d : deps) {
            if (d.classLoader() == ldr) {
                // Cache class and alias.
                d.addDeployedClass(cls, alias);

                cachedDeps = deps;

                dep = d;

                break;
            }
        }

        if (cachedDeps != null) {
            break;
        }
    }

    if (cachedDeps != null) {
        assert dep != null;

        cache.put(alias, cachedDeps);

        if (!cls.getName().equals(alias)) {
            // Cache by class name as well.
            cache.put(cls.getName(), cachedDeps);
        }

        return dep;
    }

    GridUuid ldrId = GridUuid.randomUuid();

    long seqNum = seq.incrementAndGet();

    String userVer = getUserVersion(ldr);

    dep = new GridDeployment(depMode, ldr, ldrId, seqNum, userVer, cls.getName(), true);

    dep.addDeployedClass(cls, alias);

    LinkedList<GridDeployment> deps = F.addIfAbsent(cache, alias, F.<GridDeployment>newLinkedList());

    if (!deps.isEmpty()) {
        for (GridDeployment d : deps) {
            if (!d.isUndeployed()) {
                U.error(log, "Found more than one active deployment for the same resource " +
                    "[cls=" + cls + ", depMode=" + depMode + ", dep=" + d + ']');

                return null;
            }
        }
    }

    // Add at the beginning of the list for future fast access.
    deps.addFirst(dep);

    if (!cls.getName().equals(alias)) {
        // Cache by class name as well.
        cache.put(cls.getName(), deps);
    }

    if (log.isDebugEnabled()) {
        log.debug("Created new deployment: " + dep);
    }

    return dep;
}
/**
 * Creates and caches new deployment.
 *
 * @param meta Deployment metadata.
 * @param isCache Whether or not to cache.
 * @return New deployment.
 */
private SharedDeployment createNewDeployment(GridDeploymentMetadata meta, boolean isCache) {
    assert Thread.holdsLock(mux);
    assert meta.parentLoader() == null;

    GridUuid ldrId = GridUuid.randomUuid();

    GridDeploymentClassLoader clsLdr;

    if (meta.deploymentMode() == CONTINUOUS || meta.participants() == null) {
        // Create peer class loader.
        // Note that we are passing empty list for local P2P exclude, as it really
        // does not make sense with shared deployment.
        clsLdr = new GridDeploymentClassLoader(
            ldrId,
            meta.userVersion(),
            meta.deploymentMode(),
            false,
            ctx,
            ctxLdr,
            meta.classLoaderId(),
            meta.senderNodeId(),
            meta.sequenceNumber(),
            comm,
            ctx.config().getNetworkTimeout(),
            log,
            ctx.config().getPeerClassLoadingClassPathExclude(),
            ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
            meta.deploymentMode() == CONTINUOUS /* enable class byte cache in CONTINUOUS mode */);

        if (meta.participants() != null)
            for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet())
                clsLdr.register(e.getKey(), e.getValue().get1(), e.getValue().get2());

        if (log.isDebugEnabled())
            log.debug("Created class loader in CONTINUOUS mode or without participants " +
                "[ldr=" + clsLdr + ", meta=" + meta + ']');
    }
    else {
        assert meta.deploymentMode() == SHARED;

        // Create peer class loader.
        // Note that we are passing empty list for local P2P exclude, as it really
        // does not make sense with shared deployment.
        clsLdr = new GridDeploymentClassLoader(
            ldrId,
            meta.userVersion(),
            meta.deploymentMode(),
            false,
            ctx,
            ctxLdr,
            meta.participants(),
            comm,
            ctx.config().getNetworkTimeout(),
            log,
            ctx.config().getPeerClassLoadingClassPathExclude(),
            ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
            false);

        if (log.isDebugEnabled())
            log.debug("Created classloader in SHARED mode with participants " +
                "[ldr=" + clsLdr + ", meta=" + meta + ']');
    }

    // Give this deployment a unique class loader ID to emphasize that this
    // ID is unique to this shared deployment and is not the ID of the loader
    // on the sender node.
    SharedDeployment dep = new SharedDeployment(meta.deploymentMode(), clsLdr, ldrId, -1,
        meta.userVersion(), meta.alias());

    if (log.isDebugEnabled())
        log.debug("Created new deployment: " + dep);

    if (isCache) {
        List<SharedDeployment> deps = F.addIfAbsent(cache, meta.userVersion(),
            new LinkedList<SharedDeployment>());

        assert deps != null;

        deps.add(dep);

        if (log.isDebugEnabled())
            log.debug("Added deployment to cache: " + cache);
    }

    return dep;
}
/** {@inheritDoc} */
@Override public Collection<UUID> getTopology() throws GridException {
    return F.nodeIds(ctx.topology().getTopology(this, ctx.discovery().allNodes()));
}