Example #1
  /**
   * Completeness callback.
   *
   * @param success {@code True} if lock was acquired.
   * @param distribute {@code True} if lock removal should be distributed in case of failure.
   * @return {@code True} if complete by this operation.
   */
  private boolean onComplete(boolean success, boolean distribute) {
    if (log.isDebugEnabled())
      log.debug(
          "Received onComplete(..) callback [success="
              + success
              + ", distribute="
              + distribute
              + ", fut="
              + this
              + ']');

    if (!success) undoLocks(distribute);

    if (tx != null) cctx.tm().txContext(tx);

    if (super.onDone(success, err.get())) {
      if (log.isDebugEnabled()) log.debug("Completing future: " + this);

      // Clean up.
      cctx.mvcc().removeFuture(this);

      if (timeoutObj != null) cctx.time().removeTimeoutObject(timeoutObj);

      return true;
    }

    return false;
  }
Example #2
  /**
   * @param key Key to map.
   * @param mapping Current mapping, or {@code null} to create a new one.
   * @param topVer Topology version.
   * @return Near lock mapping.
   * @throws GridException If mapping for key failed.
   */
  private GridNearLockMapping<K, V> map(
      K key, @Nullable GridNearLockMapping<K, V> mapping, long topVer) throws GridException {
    assert mapping == null || mapping.node() != null;

    GridNode primary = cctx.affinity().primary(key, topVer);

    if (cctx.discovery().node(primary.id()) == null)
      // If primary node left the grid before lock acquisition, fail the whole future.
      throw newTopologyException(null, primary.id());

    if (inTx() && tx.groupLock() && !primary.isLocal())
      throw new GridException(
          "Failed to start group lock transaction (local node is not primary for "
              + " key) [key="
              + key
              + ", primaryNodeId="
              + primary.id()
              + ']');

    if (mapping == null || !primary.id().equals(mapping.node().id()))
      mapping = new GridNearLockMapping<>(primary, key);
    else mapping.addKey(key);

    return mapping;
  }
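The reuse-or-create logic above batches consecutive keys that share a primary node into a single near-lock mapping. A minimal standalone sketch of the same grouping idea, using plain Java collections and a hypothetical primaryFor affinity function (not the GridGain API):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

final class AffinityBatcher {
  /** Groups keys into per-primary-node batches; primaryFor is a hypothetical affinity function. */
  static <K, N> Map<N, List<K>> batchByPrimary(Iterable<K> keys, Function<K, N> primaryFor) {
    Map<N, List<K>> batches = new LinkedHashMap<>();

    for (K key : keys)
      batches.computeIfAbsent(primaryFor.apply(key), n -> new ArrayList<>()).add(key);

    return batches;
  }
}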
  /** @return Nodes to execute on. */
  private Collection<GridNode> nodes() {
    GridCacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
      case LOCAL:
        if (prj != null)
          U.warn(
              log,
              "Ignoring query projection because it's executed over LOCAL cache "
                  + "(only local node will be queried): "
                  + this);

        return Collections.singletonList(cctx.localNode());

      case REPLICATED:
        if (prj != null) return nodes(cctx, prj);

        GridCacheDistributionMode mode = cctx.config().getDistributionMode();

        return mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED
            ? Collections.singletonList(cctx.localNode())
            : Collections.singletonList(F.rand(nodes(cctx, null)));

      case PARTITIONED:
        return nodes(cctx, prj);

      default:
        throw new IllegalStateException("Unknown cache mode: " + cacheMode);
    }
  }
  /** {@inheritDoc} */
  @Override
  public boolean onDone(GridCacheTx tx, Throwable err) {
    if ((initialized() || err != null) && super.onDone(tx, err)) {
      if (error() instanceof GridCacheTxHeuristicException) {
        long topVer = this.tx.topologyVersion();

        for (GridCacheTxEntry<K, V> e : this.tx.writeMap().values()) {
          try {
            if (e.op() != NOOP && !cctx.affinity().localNode(e.key(), topVer)) {
              GridCacheEntryEx<K, V> cacheEntry = cctx.cache().peekEx(e.key());

              if (cacheEntry != null) cacheEntry.invalidate(null, this.tx.xidVersion());
            }
          } catch (Throwable t) {
            U.error(log, "Failed to invalidate entry.", t);

            if (t instanceof Error) throw (Error) t;
          }
        }
      }

      // Don't forget to clean up.
      cctx.mvcc().removeFuture(this);

      return true;
    }

    return false;
  }
  /**
   * @param rmtReducer Optional reducer.
   * @param rmtTransform Optional transformer.
   * @param args Arguments.
   * @return Future.
   */
  @SuppressWarnings("IfMayBeConditional")
  private <R> GridCacheQueryFuture<R> execute(
      @Nullable GridReducer<T, R> rmtReducer,
      @Nullable GridClosure<T, R> rmtTransform,
      @Nullable Object... args) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
      return new GridCacheQueryErrorFuture<>(
          cctx.kernalContext(),
          new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.name()));

    if (log.isDebugEnabled())
      log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
      try {
        cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
        cctx.deploy().registerClasses(args);
      } catch (GridException e) {
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
      }
    }

    if (subjId == null) subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean =
        new GridCacheQueryBean(
            this,
            (GridReducer<Object, Object>) rmtReducer,
            (GridClosure<Object, Object>) rmtTransform,
            args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryFieldsLocal(bean) : qryMgr.queryFieldsDistributed(bean, nodes));
    else
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
  }
  /**
   * Constructor.
   *
   * @param name Latch name.
   * @param cnt Current count.
   * @param initCnt Initial count.
   * @param autoDel Auto delete flag.
   * @param key Latch key.
   * @param latchView Latch projection.
   * @param ctx Cache context.
   */
  public GridCacheCountDownLatchImpl(
      String name,
      int cnt,
      int initCnt,
      boolean autoDel,
      GridCacheInternalKey key,
      GridCacheProjection<GridCacheInternalKey, GridCacheCountDownLatchValue> latchView,
      GridCacheContext ctx) {
    assert name != null;
    assert cnt >= 0;
    assert initCnt >= 0;
    assert key != null;
    assert latchView != null;
    assert ctx != null;

    this.name = name;
    this.cnt = cnt;
    this.initCnt = initCnt;
    this.autoDel = autoDel;
    this.key = key;
    this.latchView = latchView;
    this.ctx = ctx;

    log = ctx.gridConfig().getGridLogger().getLogger(getClass());
  }
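For context, a hedged usage sketch of the latch this constructor backs. It assumes the GridGain 6.x data-structures facade (grid.cache(...).dataStructures().countDownLatch(name, cnt, autoDel, create)); method names may differ across versions:

import org.gridgain.grid.Grid;
import org.gridgain.grid.GridException;
import org.gridgain.grid.cache.datastructures.GridCacheCountDownLatch;

final class LatchSketch {
  /** Blocks until 'participants' nodes have all reached this synchronization point. */
  static void syncPoint(Grid grid, int participants) throws GridException {
    // Assumption: countDownLatch(name, cnt, autoDel, create) creates the latch on first use.
    GridCacheCountDownLatch latch =
        grid.cache("partitioned").dataStructures().countDownLatch("sync", participants, true, true);

    latch.countDown(); // Each participant decrements once...
    latch.await();     // ...then waits for the distributed count to reach zero.
  }
}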
  /**
   * @param cctx Context.
   * @param type Query type.
   * @param prjPred Cache projection filter.
   * @param clsName Class name.
   * @param clause Clause.
   * @param filter Scan filter.
   * @param incMeta Include metadata flag.
   * @param keepPortable Keep portable flag.
   */
  public GridCacheQueryAdapter(
      GridCacheContext<?, ?> cctx,
      GridCacheQueryType type,
      @Nullable GridPredicate<GridCacheEntry<Object, Object>> prjPred,
      @Nullable String clsName,
      @Nullable String clause,
      @Nullable GridBiPredicate<Object, Object> filter,
      boolean incMeta,
      boolean keepPortable) {
    assert cctx != null;
    assert type != null;

    this.cctx = cctx;
    this.type = type;
    this.clsName = clsName;
    this.clause = clause;
    this.prjPred = prjPred;
    this.filter = filter;
    this.incMeta = incMeta;
    this.keepPortable = keepPortable;

    log = cctx.logger(getClass());

    pageSize = DFLT_PAGE_SIZE;
    timeout = 0;
    keepAll = true;
    incBackups = false;
    dedup = false;
    prj = null;

    metrics = new GridCacheQueryMetricsAdapter();
  }
Example #8
  /**
   * @param ctx Cache context.
   * @param key Cache key.
   * @param hash Key hash value.
   * @param val Entry value.
   * @param next Next entry in the linked list.
   * @param ttl Time to live.
   */
  public GridDhtCacheEntry(
      GridCacheContext<K, V> ctx, K key, int hash, V val, GridCacheMapEntry<K, V> next, long ttl) {
    super(ctx, key, hash, val, next, ttl);

    // Record this entry with partition.
    locPart = ctx.dht().topology().onAdded(this);
  }
Example #9
  /** {@inheritDoc} */
  @Override
  public GridCacheEntry<K, V> wrap(boolean prjAware) {
    GridCacheContext<K, V> nearCtx = cctx.dht().near().context();

    GridCacheProjectionImpl<K, V> prjPerCall = nearCtx.projectionPerCall();

    if (prjPerCall != null && prjAware)
      return new GridPartitionedCacheEntryImpl<K, V>(prjPerCall, nearCtx, key, this);

    GridCacheEntryImpl<K, V> wrapper = this.wrapper;

    if (wrapper == null)
      this.wrapper = wrapper = new GridPartitionedCacheEntryImpl<K, V>(null, nearCtx, key, this);

    return wrapper;
  }
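wrap() uses an unsynchronized read-then-write of the wrapper field: under contention two threads may each build a wrapper, but the copies are interchangeable, so losing the race only costs an allocation. A minimal sketch of the pattern with standard Java; unlike the original it marks the field volatile, which is the safe choice unless the wrapper type is effectively immutable:

import java.util.function.Supplier;

final class LazyWrapper<T> {
  // Benign-race lazy init: duplicate creation is tolerated because every copy is equivalent.
  // volatile guarantees safe publication even if T has non-final fields.
  private volatile T wrapper;

  T get(Supplier<T> factory) {
    T w = wrapper;

    if (w == null)
      wrapper = w = factory.get();

    return w;
  }
}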
Example #10
  /**
   * @param ctx Cache registry.
   * @param implicit Implicit flag.
   * @param implicitSingle Implicit with one key flag.
   * @param concurrency Concurrency.
   * @param isolation Isolation.
   * @param timeout Timeout.
   * @param invalidate Invalidation policy.
   * @param syncCommit Synchronous commit flag.
   * @param syncRollback Synchronous rollback flag.
   * @param swapEnabled Whether to use swap storage.
   * @param storeEnabled Whether to use read/write through.
   */
  GridNearTxLocal(
      GridCacheContext<K, V> ctx,
      boolean implicit,
      boolean implicitSingle,
      GridCacheTxConcurrency concurrency,
      GridCacheTxIsolation isolation,
      long timeout,
      boolean invalidate,
      boolean syncCommit,
      boolean syncRollback,
      boolean swapEnabled,
      boolean storeEnabled) {
    super(
        ctx,
        ctx.versions().next(),
        implicit,
        implicitSingle,
        concurrency,
        isolation,
        timeout,
        invalidate,
        swapEnabled,
        storeEnabled);

    assert ctx != null;

    this.syncCommit = syncCommit;
    this.syncRollback = syncRollback;
  }
  /** @param m Mapping. */
  @SuppressWarnings({"unchecked"})
  private void finish(GridDistributedTxMapping<K, V> m) {
    GridRichNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest<K, V> req =
        new GridNearTxFinishRequest<K, V>(
            futId,
            tx.xidVersion(),
            tx.commitVersion(),
            tx.threadId(),
            commit,
            tx.isInvalidate(),
            m.explicitLock(),
            tx.topologyVersion(),
            null,
            null,
            null,
            commit && tx.pessimistic() ? m.writes() : null,
            tx.syncCommit() && commit || tx.syncRollback() && !commit);

    // If this is the primary node for the keys.
    if (n.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (CU.DHT_ENABLED) {
        GridFuture<GridCacheTx> fut =
            commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req);

        // Add new future.
        add(fut);
      } else
        // Add done future for testing.
        add(new GridFinishedFuture<GridCacheTx>(ctx));
    } else {
      MiniFuture fut = new MiniFuture(m);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      try {
        cctx.io().send(n, req);

        // If we don't wait for result, then mark future as done.
        if (!isSync() && !m.explicitLock()) fut.onDone();
      } catch (GridTopologyException e) {
        // Remove previous mapping.
        mappings.remove(m.node().id());

        fut.onResult(e);
      } catch (GridException e) {
        // Fail the whole thing.
        fut.onResult(e);
      }
    }
  }
  /**
   * @param entry Transaction entry.
   * @param nodes Nodes.
   */
  private void map(GridCacheTxEntry<K, V> entry, Collection<GridRichNode> nodes) {
    GridRichNode primary = CU.primary0(cctx.affinity(entry.key(), nodes));

    GridDistributedTxMapping<K, V> t = mappings.get(primary.id());

    if (t == null) mappings.put(primary.id(), t = new GridDistributedTxMapping<K, V>(primary));

    t.add(entry);
  }
  /** {@inheritDoc} */
  @Override
  public boolean onDone(GridCacheTx tx, Throwable err) {
    if (initialized() && super.onDone(tx, err)) {
      // Don't forget to clean up.
      cctx.mvcc().removeFuture(this);

      return true;
    }

    return false;
  }
Example #14
  /**
   * Undoes all locks.
   *
   * @param dist If {@code true}, then remove locks from remote nodes as well.
   */
  private void undoLocks(boolean dist) {
    // Transactions will undo locks during rollback.
    if (dist && tx == null) cctx.nearTx().removeLocks(lockVer, keys);
    else {
      if (tx != null) {
        if (tx.setRollbackOnly()) {
          if (log.isDebugEnabled())
            log.debug(
                "Marked transaction as rollback only because locks could not be acquired: " + tx);
        } else if (log.isDebugEnabled())
          log.debug(
              "Transaction was not marked rollback-only while locks were not acquired: " + tx);
      }

      for (GridCacheEntryEx<K, V> e : entriesCopy()) {
        try {
          e.removeLock(lockVer);
        } catch (GridCacheEntryRemovedException ignored) {
          while (true) {
            try {
              e = cctx.cache().peekEx(e.key());

              if (e != null) e.removeLock(lockVer);

              break;
            } catch (GridCacheEntryRemovedException ignore) {
              if (log.isDebugEnabled())
                log.debug(
                    "Attempted to remove lock on removed entry (will retry) [ver="
                        + lockVer
                        + ", entry="
                        + e
                        + ']');
            }
          }
        }
      }
    }

    cctx.mvcc().recheckPendingLocks();
  }
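The inner while (true) loop is the codebase's standard remedy for GridCacheEntryRemovedException: re-read the entry from the cache and retry until the lock is removed or the entry is truly gone. A self-contained sketch of that retry shape, with hypothetical Entry/Cache types standing in for the GridGain ones:

// Hypothetical stand-ins for GridCacheEntryEx / the cache peek API.
class EntryRemovedException extends Exception {}

interface Entry {
  Object key();
  void removeLock(long ver) throws EntryRemovedException;
}

interface Cache {
  Entry peek(Object key); // Returns null if the entry no longer exists.
}

final class LockUndoSketch {
  static void removeLock(Cache cache, Entry e, long ver) {
    while (true) {
      try {
        if (e != null)
          e.removeLock(ver);

        return; // Lock removed, or entry is gone entirely.
      } catch (EntryRemovedException ignored) {
        e = cache.peek(e.key()); // Concurrently removed: re-read and retry.
      }
    }
  }
}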
  /** {@inheritDoc} */
  @Override
  public GridFuture<Boolean> awaitAsync(final long timeout, final TimeUnit unit) {
    return ctx.closures()
        .callLocalSafe(
            new Callable<Boolean>() {
              @Override
              public Boolean call() throws Exception {
                return await(timeout, unit);
              }
            },
            true);
  }
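callLocalSafe here simply offloads the blocking await(timeout, unit) onto the kernal's closure pool and returns a future for it. A rough standard-library equivalent of that shape (the boolean "system pool" flag has no direct analogue; we just pick an executor):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

final class AwaitAsyncSketch {
  /** Runs the blocking await on 'exec' and returns a future, mirroring awaitAsync above. */
  static Future<Boolean> awaitAsync(
      ExecutorService exec, CountDownLatch latch, long timeout, TimeUnit unit) {
    return exec.submit(() -> latch.await(timeout, unit));
  }
}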
  /**
   * @param ctx Cache context.
   * @param topic Topic for ordered messages.
   * @param prjPred Projection predicate.
   */
  GridCacheContinuousQueryAdapter(
      GridCacheContext<K, V> ctx,
      Object topic,
      @Nullable GridPredicate<GridCacheEntry<K, V>> prjPred) {
    assert ctx != null;
    assert topic != null;

    this.ctx = ctx;
    this.topic = topic;
    this.prjPred = prjPred;

    log = ctx.logger(getClass());
  }
  /**
   * Default constructor.
   *
   * @param name Sequence name.
   * @param key Sequence key.
   * @param seqView Sequence projection.
   * @param ctx Cache context.
   * @param locVal Local counter.
   * @param upBound Upper bound.
   */
  public GridCacheAtomicSequenceImpl(
      String name,
      GridCacheInternalStorableKey key,
      GridCacheProjection<GridCacheInternalStorableKey, GridCacheAtomicSequenceValue> seqView,
      GridCacheContext ctx,
      long locVal,
      long upBound) {
    assert key != null;
    assert seqView != null;
    assert ctx != null;
    assert locVal <= upBound;

    batchSize = ctx.config().getAtomicSequenceReserveSize();
    this.ctx = ctx;
    this.key = key;
    this.seqView = seqView;
    this.upBound = upBound;
    this.locVal = locVal;
    this.name = name;

    log = ctx.gridConfig().getGridLogger().getLogger(getClass());
  }
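locVal, upBound, and the reserve size together implement batched ID allocation: one shared cache update reserves a whole range, and increments inside the range are served locally. A hedged local sketch of that idea; the reserveRange callback stands in for the transactional cache update and this is not the GridGain implementation:

import java.util.function.LongUnaryOperator;

final class ReservedSequenceSketch {
  private final int batchSize;
  private final LongUnaryOperator reserveRange; // Hypothetical: atomically reserves a range, returns its start.

  private long locVal;  // Next value to hand out locally.
  private long upBound; // Exclusive end of the currently reserved range.

  ReservedSequenceSketch(int batchSize, LongUnaryOperator reserveRange) {
    this.batchSize = batchSize;
    this.reserveRange = reserveRange;
    // Start with an empty range so the first next() call performs a reservation.
    locVal = upBound = 0;
  }

  synchronized long next() {
    if (locVal >= upBound) {
      locVal = reserveRange.applyAsLong(batchSize); // One shared update per batchSize values.
      upBound = locVal + batchSize;
    }

    return locVal++;
  }
}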
  /** {@inheritDoc} */
  @Override
  public void close() throws GridException {
    closeLock.lock();

    try {
      if (routineId == null)
        throw new IllegalStateException("Can't cancel query that was not executed.");

      ctx.kernalContext().continuous().stopRoutine(routineId).get();
    } finally {
      closeLock.unlock();
    }
  }
Example #19
  /**
   * @param cctx Registry.
   * @param keys Keys to lock.
   * @param tx Transaction.
   * @param read Read flag.
   * @param retval Flag to return value or not.
   * @param timeout Lock acquisition timeout.
   * @param filter Filter.
   */
  public GridNearLockFuture(
      GridCacheContext<K, V> cctx,
      Collection<? extends K> keys,
      @Nullable GridNearTxLocal<K, V> tx,
      boolean read,
      boolean retval,
      long timeout,
      GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());
    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
      timeoutObj = new LockTimeoutObject();

      cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
  }
Example #20
  /**
   * Acquires the topology future and checks its completeness under the read lock. If it is not
   * complete, asynchronously waits for its completion and then tries again.
   */
  void mapOnTopology() {
    // We must acquire topology snapshot from the topology version future.
    try {
      cctx.topology().readLock();

      try {
        GridDhtTopologyFuture fut = cctx.topologyVersionFuture();

        if (fut.isDone()) {
          GridDiscoveryTopologySnapshot snapshot = fut.topologySnapshot();

          if (tx != null) {
            tx.topologyVersion(snapshot.topologyVersion());
            tx.topologySnapshot(snapshot);
          }

          topSnapshot.compareAndSet(null, snapshot);

          map(keys);

          markInitialized();
        } else {
          fut.listenAsync(
              new CI1<GridFuture<Long>>() {
                @Override
                public void apply(GridFuture<Long> t) {
                  mapOnTopology();
                }
              });
        }
      } finally {
        cctx.topology().readUnlock();
      }
    } catch (GridException e) {
      onDone(e);
    }
  }
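The else branch registers a listener that re-invokes mapOnTopology(), turning "wait for topology" into retry-when-ready rather than blocking under the read lock. The same control shape with a standard CompletableFuture, as a sketch:

import java.util.concurrent.CompletableFuture;

final class TopologyRetrySketch {
  // If the topology future is already complete, map immediately; otherwise retry the
  // whole method from the future's callback (the real code re-takes its read lock here).
  static void mapOnTopology(CompletableFuture<Long> topVerFut, Runnable mapKeys) {
    if (topVerFut.isDone())
      mapKeys.run();
    else
      topVerFut.whenComplete((topVer, err) -> mapOnTopology(topVerFut, mapKeys));
  }
}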
  /**
   * @param cctx Context.
   * @param tx Transaction.
   * @param failedNodeId ID of the failed node that started the transaction.
   */
  @SuppressWarnings("ConstantConditions")
  public GridCachePessimisticCheckCommittedTxFuture(
      GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx, UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion())) nodes.put(node.id(), node);
  }
  /** {@inheritDoc} */
  @Override
  public GridFuture<?> awaitAsync() {
    return ctx.closures()
        .callLocalSafe(
            new Callable<Object>() {
              @Nullable
              @Override
              public Object call() throws Exception {
                await();

                return null;
              }
            },
            true);
  }
  /** {@inheritDoc} */
  @Override
  public boolean onDone(@Nullable GridCacheCommittedTxInfo<K, V> res, @Nullable Throwable err) {
    if (super.onDone(res, err)) {
      cctx.mvcc().removeFuture(this);

      if (log.isDebugEnabled())
        log.debug(
            "Completing check committed tx future for transaction [tx="
                + tx
                + ", res="
                + res
                + ", err="
                + err
                + ']');

      if (err == null) cctx.tm().finishPessimisticTxOnRecovery(tx, res);
      else {
        if (log.isDebugEnabled())
          log.debug(
              "Failed to check prepared transactions, "
                  + "invalidating transaction [err="
                  + err
                  + ", tx="
                  + tx
                  + ']');

        if (nearCheck) return true;

        cctx.tm().salvageTx(tx);
      }

      return true;
    }

    return false;
  }
  /**
   * @param cctx Context.
   * @param tx Transaction.
   * @param commit Commit flag.
   */
  public GridNearTxFinishFuture(
      GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    assert cctx != null;

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
  }
  /**
   * Default constructor.
   *
   * @param name Atomic reference name.
   * @param key Atomic reference key.
   * @param atomicView Atomic projection.
   * @param ctx Cache context.
   */
  public GridCacheAtomicReferenceImpl(
      String name,
      GridCacheInternalKey key,
      GridCacheProjection<GridCacheInternalKey, GridCacheAtomicReferenceValue<T>> atomicView,
      GridCacheContext ctx) {
    assert key != null;
    assert atomicView != null;
    assert ctx != null;
    assert name != null;

    this.ctx = ctx;
    this.key = key;
    this.atomicView = atomicView;
    this.name = name;

    log = ctx.gridConfig().getGridLogger().getLogger(getClass());
  }
Example #26
  /**
   * @param cached Entry to check.
   * @return {@code True} if filter passed.
   */
  private boolean filter(GridCacheEntryEx<K, V> cached) {
    try {
      if (!cctx.isAll(cached, filter)) {
        if (log.isDebugEnabled())
          log.debug("Filter didn't pass for entry (will fail lock): " + cached);

        onFailed(true);

        return false;
      }

      return true;
    } catch (GridException e) {
      onError(e);

      return false;
    }
  }
Example #27
  /** @return {@code True} if locks have been acquired. */
  private boolean checkLocks() {
    if (!isDone() && initialized() && !hasPending()) {
      for (int i = 0; i < entries.size(); i++) {
        while (true) {
          GridCacheEntryEx<K, V> cached = entries.get(i);

          try {
            if (!locked(cached)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Lock is still not acquired for entry (will keep waiting) [entry="
                        + cached
                        + ", fut="
                        + this
                        + ']');

              return false;
            }

            break;
          }
          // Possible in concurrent cases, when owner is changed after locks
          // have been released or cancelled.
          catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in onOwnerChanged method (will retry): " + cached);

            // Replace old entry with new one.
            entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(cached.key()));
          }
        }
      }

      if (log.isDebugEnabled())
        log.debug("Local lock acquired for entries [fut=" + this + ", entries=" + entries + "]");

      onComplete(true, true);

      return true;
    }

    return false;
  }
Example #28
  /**
   * Basically, future mapping consists of two parts. First, we must determine the topology
   * version this future will map on. If locking is performed within a user transaction, we must
   * continue to map keys on the same topology version on which the transaction started. If the
   * topology version is undefined, we get the current topology future and wait until it completes
   * so the topology is ready to use.
   *
   * <p>During the second part we map keys to primary nodes using the topology snapshot obtained
   * during the first part. Note that if a primary node leaves the grid, the future will fail and
   * the transaction will be rolled back.
   */
  void map() {
    // Obtain the topology version to use.
    GridDiscoveryTopologySnapshot snapshot =
        tx != null
            ? tx.topologySnapshot()
            : cctx.mvcc().lastExplicitLockTopologySnapshot(Thread.currentThread().getId());

    if (snapshot != null) {
      // Continue mapping on the same topology version as it was before.
      topSnapshot.compareAndSet(null, snapshot);

      map(keys);

      markInitialized();

      return;
    }

    // Must get topology snapshot and map on that version.
    mapOnTopology();
  }
  /**
   * @param res Query result.
   * @param err Error or {@code null} if query executed successfully.
   * @param startTime Start time.
   * @param duration Duration.
   */
  public void onExecuted(Object res, Throwable err, long startTime, long duration) {
    boolean fail = err != null;

    // Update own metrics.
    metrics.onQueryExecute(duration, fail);

    // Update metrics in query manager.
    cctx.queries().onMetricsUpdate(duration, fail);

    if (log.isDebugEnabled())
      log.debug(
          "Query execution finished [qry="
              + this
              + ", startTime="
              + startTime
              + ", duration="
              + duration
              + ", fail="
              + fail
              + ", res="
              + res
              + ']');
  }
  /** {@inheritDoc} */
  @Override
  public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
      throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null) prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
      throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
      Collection<GridNode> nodes = prj.nodes();

      GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

      assert node != null;

      if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
        if (node.id().equals(ctx.localNodeId()))
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on local node. "
                  + "Will execute query locally: "
                  + this);
        else
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on single node. "
                  + "Will execute query on remote node [qry="
                  + this
                  + ", node="
                  + node
                  + ']');
      }

      prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
      if (routineId != null)
        throw new IllegalStateException("Continuous query can't be executed twice.");

      guard.block();

      GridContinuousHandler hnd =
          new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb, filter, prjPred);

      routineId =
          ctx.kernalContext()
              .continuous()
              .startRoutine(hnd, bufSize, timeInterval, autoUnsubscribe, prj.predicate())
              .get();
    } finally {
      closeLock.unlock();
    }
  }