/**
 * @param rmtReducer Optional reducer.
 * @param rmtTransform Optional transformer.
 * @param args Arguments.
 * @return Future.
 */
@SuppressWarnings("IfMayBeConditional")
private <R> GridCacheQueryFuture<R> execute(@Nullable GridReducer<T, R> rmtReducer,
    @Nullable GridClosure<T, R> rmtTransform, @Nullable Object... args) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(),
            new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.name()));

    if (log.isDebugEnabled())
        log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
        try {
            cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
            cctx.deploy().registerClasses(args);
        }
        catch (GridException e) {
            return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
        }
    }

    if (subjId == null)
        subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean = new GridCacheQueryBean(this, (GridReducer<Object, Object>)rmtReducer,
        (GridClosure<Object, Object>)rmtTransform, args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
        return (GridCacheQueryFuture<R>)(loc ? qryMgr.queryFieldsLocal(bean) :
            qryMgr.queryFieldsDistributed(bean, nodes));
    else
        return (GridCacheQueryFuture<R>)(loc ? qryMgr.queryLocal(bean) :
            qryMgr.queryDistributed(bean, nodes));
}
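// Illustrative sketch (assumption, not from the original source): public query entry points
// would typically delegate to the private execute(...) above, passing at most one of the
// optional closures. The wrapper below is hypothetical and only shows that delegation.
public <R> GridCacheQueryFuture<R> execute(GridReducer<T, R> rmtReducer, @Nullable Object... args) {
    // Reduce on remote nodes, no per-entry transformation.
    return execute(rmtReducer, null, args);
}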
/** {@inheritDoc} */
@Override public void close() throws GridException {
    closeLock.lock();

    try {
        if (routineId == null)
            throw new IllegalStateException("Can't cancel query that was not executed.");

        ctx.kernalContext().continuous().stopRoutine(routineId).get();
    }
    finally {
        closeLock.unlock();
    }
}
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param failedNodeId ID of the failed node that started the transaction.
 */
@SuppressWarnings("ConstantConditions")
public GridCachePessimisticCheckCommittedTxFuture(GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx,
    UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion()))
        nodes.put(node.id(), node);
}
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param commit Commit flag.
 */
public GridNearTxFinishFuture(GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    assert cctx != null;

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
}
/**
 * @param cctx Registry.
 * @param keys Keys to lock.
 * @param tx Transaction.
 * @param read Read flag.
 * @param retval Flag to return value or not.
 * @param timeout Lock acquisition timeout.
 * @param filter Filter.
 */
public GridNearLockFuture(GridCacheContext<K, V> cctx, Collection<? extends K> keys,
    @Nullable GridNearTxLocal<K, V> tx, boolean read, boolean retval, long timeout,
    GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
        timeoutObj = new LockTimeoutObject();

        cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
}
/** {@inheritDoc} */
@Override public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
        throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null)
        prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
        throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
        Collection<GridNode> nodes = prj.nodes();

        GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

        assert node != null;

        if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
            if (node.id().equals(ctx.localNodeId()))
                U.warn(log, "Continuous query for " + mode + " cache can be run only on local node. " +
                    "Will execute query locally: " + this);
            else
                U.warn(log, "Continuous query for " + mode + " cache can be run only on single node. " +
                    "Will execute query on remote node [qry=" + this + ", node=" + node + ']');
        }

        prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
        if (routineId != null)
            throw new IllegalStateException("Continuous query can't be executed twice.");

        guard.block();

        GridContinuousHandler hnd = new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb, filter, prjPred);

        routineId = ctx.kernalContext().continuous().startRoutine(hnd, bufSize, timeInterval,
            autoUnsubscribe, prj.predicate()).get();
    }
    finally {
        closeLock.unlock();
    }
}
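// Illustrative usage sketch (assumption, not from the original source): once the mandatory local
// callback has been set, a caller starts the continuous query exactly once via execute(...) and
// later stops it via close() shown earlier. Executing twice, or closing a query that was never
// executed, throws IllegalStateException. The projection below is hypothetical.
//
//     qry.execute(grid.forCache("partitioned")); // Registers the continuous routine.
//     ...
//     qry.close();                               // Stops the routine via the continuous processor.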
/**
 * Asynchronous sequence update operation. Will add given amount to the sequence value.
 *
 * @param l Increment amount.
 * @param updateCall Cache call that will update sequence reservation count in accordance with {@code l}.
 * @param updated If {@code true}, will return sequence value after update, otherwise will return
 *      sequence value prior to update.
 * @return Future indicating sequence value.
 * @throws GridException If update failed.
 */
private GridFuture<Long> internalUpdateAsync(long l, @Nullable Callable<Long> updateCall, boolean updated)
    throws GridException {
    checkRemoved();

    A.ensure(l > 0, "Parameter must not be less than 1: " + l);

    lock.lock();

    try {
        // If reserved range isn't exhausted.
        if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return new GridFinishedFuture<Long>(ctx.kernalContext(), updated ? locVal : curVal);
        }
    }
    finally {
        lock.unlock();
    }

    if (updateCall == null)
        updateCall = internalUpdate(l, updated);

    while (true) {
        if (updateGuard.compareAndSet(false, true)) {
            try {
                // This call must be outside lock.
                return ctx.closures().callLocalSafe(updateCall, true);
            }
            finally {
                lock.lock();

                try {
                    updateGuard.set(false);

                    cond.signalAll();
                }
                finally {
                    lock.unlock();
                }
            }
        }
        else {
            lock.lock();

            try {
                while (locVal >= upBound && updateGuard.get()) {
                    try {
                        cond.await(500, MILLISECONDS);
                    }
                    catch (InterruptedException e) {
                        throw new GridInterruptedException(e);
                    }
                }

                checkRemoved();

                // If reserved range isn't exhausted.
                if (locVal + l <= upBound) {
                    long curVal = locVal;

                    locVal += l;

                    return new GridFinishedFuture<Long>(ctx.kernalContext(), updated ? locVal : curVal);
                }
            }
            finally {
                lock.unlock();
            }
        }
    }
}
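// Illustrative sketch (assumption, not from the original class): the 'updated' flag lets both
// "add and get" and "get and add" semantics share internalUpdateAsync(...). The wrapper names
// below are hypothetical.
public GridFuture<Long> addAndGetAsync(long l) throws GridException {
    return internalUpdateAsync(l, null, true);  // Resolves to the value after the increment.
}

public GridFuture<Long> getAndAddAsync(long l) throws GridException {
    return internalUpdateAsync(l, null, false); // Resolves to the value before the increment.
}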
/**
 * Gets next near lock mapping and either acquires dht locks locally or sends near lock request to
 * remote primary node.
 *
 * @param mappings Queue of mappings.
 * @throws GridException If mapping cannot be completed.
 */
private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings)
    throws GridException {
    GridNearLockMapping<K, V> map = mappings.poll();

    // If there are no more mappings to process, complete the future.
    if (map == null)
        return;

    final GridNearLockRequest<K, V> req = map.request();
    final Collection<K> mappedKeys = map.distributedKeys();
    final GridNode node = map.node();

    if (filter != null && filter.length != 0)
        req.filter(filter, cctx);

    if (node.isLocal()) {
        req.miniId(GridUuid.randomUuid());

        if (log.isDebugEnabled())
            log.debug("Before locally locking near request: " + req);

        GridFuture<GridNearLockResponse<K, V>> fut;

        if (CU.DHT_ENABLED)
            fut = dht().lockAllAsync(cctx.localNode(), req, filter);
        else {
            // Create dummy values for testing.
            GridNearLockResponse<K, V> res = new GridNearLockResponse<>(lockVer, futId, null, false, 1, null);

            res.addValueBytes(null, null, true, lockVer, lockVer, cctx);

            fut = new GridFinishedFuture<>(ctx, res);
        }

        // Add new future.
        add(new GridEmbeddedFuture<>(
            cctx.kernalContext(),
            fut,
            new C2<GridNearLockResponse<K, V>, Exception, Boolean>() {
                @Override public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
                    if (CU.isLockTimeoutOrCancelled(e) ||
                        (res != null && CU.isLockTimeoutOrCancelled(res.error())))
                        return false;

                    if (e != null) {
                        onError(e);

                        return false;
                    }

                    if (res == null) {
                        onError(new GridException("Lock response is null for future: " + this));

                        return false;
                    }

                    if (res.error() != null) {
                        onError(res.error());

                        return false;
                    }

                    if (log.isDebugEnabled())
                        log.debug("Acquired lock for local DHT mapping [locId=" + cctx.nodeId() +
                            ", mappedKeys=" + mappedKeys + ", fut=" + GridNearLockFuture.this + ']');

                    try {
                        int i = 0;

                        for (K k : mappedKeys) {
                            while (true) {
                                GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, req.topologyVersion());

                                try {
                                    GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key());

                                    boolean hasBytes = entry.hasValue();
                                    V oldVal = entry.rawGet();
                                    V newVal = res.value(i);
                                    byte[] newBytes = res.valueBytes(i);

                                    GridCacheVersion dhtVer = res.dhtVersion(i);
                                    GridCacheVersion mappedVer = res.mappedVersion(i);

                                    // On local node don't record twice if DHT cache already recorded.
                                    boolean record = retval && oldValTup != null && oldValTup.get1().equals(dhtVer);

                                    if (newVal == null) {
                                        if (oldValTup != null) {
                                            if (oldValTup.get1().equals(dhtVer)) {
                                                newVal = oldValTup.get2();
                                                newBytes = oldValTup.get3();
                                            }

                                            oldVal = oldValTup.get2();
                                        }
                                    }

                                    // Lock is held at this point, so we can set the
                                    // returned value if any.
                                    entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

                                    entry.readyNearLock(lockVer, mappedVer, res.committedVersions(),
                                        res.rolledbackVersions(), res.pending());

                                    if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                                        boolean pass = res.filterResult(i);

                                        tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
                                    }

                                    if (record) {
                                        if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                                            cctx.events().addEvent(entry.partition(), entry.key(), tx, null,
                                                EVT_CACHE_OBJECT_READ, newVal, newVal != null, oldVal, hasBytes,
                                                CU.subjectId(tx, cctx));

                                        cctx.cache().metrics0().onRead(oldVal != null);
                                    }

                                    if (log.isDebugEnabled())
                                        log.debug("Processed response for entry [res=" + res +
                                            ", entry=" + entry + ']');

                                    break; // Inner while loop.
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    if (log.isDebugEnabled())
                                        log.debug("Failed to add candidates because entry was " +
                                            "removed (will renew).");

                                    // Replace old entry with new one.
                                    entries.set(i,
                                        (GridDistributedCacheEntry<K, V>)cctx.cache().entryEx(entry.key()));
                                }
                            }

                            i++; // Increment outside of while loop.
                        }

                        // Proceed and add new future (if any) before completing embedded future.
                        proceedMapping(mappings);
                    }
                    catch (GridException ex) {
                        onError(ex);

                        return false;
                    }

                    return true;
                }
            }));
    }
    else {
        final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings);

        req.miniId(fut.futureId());

        add(fut); // Append new future.

        GridFuture<?> txSync = null;

        if (inTx())
            txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

        if (txSync == null || txSync.isDone()) {
            try {
                if (log.isDebugEnabled())
                    log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                cctx.io().send(node, req);
            }
            catch (GridTopologyException ex) {
                assert fut != null;

                fut.onResult(ex);
            }
        }
        else {
            txSync.listenAsync(new CI1<GridFuture<?>>() {
                @Override public void apply(GridFuture<?> t) {
                    try {
                        if (log.isDebugEnabled())
                            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                        cctx.io().send(node, req);
                    }
                    catch (GridTopologyException ex) {
                        assert fut != null;

                        fut.onResult(ex);
                    }
                    catch (GridException e) {
                        onError(e);
                    }
                }
            });
        }
    }
}
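// Illustrative sketch (assumption): mappings are processed one at a time. For the remote branch,
// the MiniFuture receives the mappings queue in its constructor above, so its response callback
// would typically re-invoke proceedMapping(...) to chain to the next near lock request only after
// the previous one has completed, e.g.:
//
//     void onResult(GridNearLockResponse<K, V> res) {
//         ...                       // Apply the response to the mapped entries.
//         proceedMapping(mappings); // Chain to the next mapping in the queue.
//     }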