/**
   * @param rmtReducer Optional reducer to apply on remote nodes.
   * @param rmtTransform Optional transformer to apply to queried entries on remote nodes.
   * @param args Optional query arguments.
   * @return Query execution future.
   */
  @SuppressWarnings({"IfMayBeConditional", "unchecked"})
  private <R> CacheQueryFuture<R> execute(
      @Nullable IgniteReducer<T, R> rmtReducer,
      @Nullable IgniteClosure<T, R> rmtTransform,
      @Nullable Object... args) {
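    // Determine the set of nodes the query maps to.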
    Collection<ClusterNode> nodes = nodes();

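    // Make sure the current subject is allowed to read from the cache.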
    cctx.checkSecurity(SecurityPermission.CACHE_READ);

    if (nodes.isEmpty())
      return new GridCacheQueryErrorFuture<>(
          cctx.kernalContext(), new ClusterGroupEmptyCheckedException());

    if (log.isDebugEnabled())
      log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

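    // When deployment is enabled, register the filter, reducer, transformer and argument
    // classes so remote nodes can peer-load them.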
    if (cctx.deploymentEnabled()) {
      try {
        cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
        cctx.deploy().registerClasses(args);
      } catch (IgniteCheckedException e) {
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
      }
    }

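    // Default the subject ID to the local node ID if none was supplied.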
    if (subjId == null) subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    final GridCacheQueryBean bean =
        new GridCacheQueryBean(
            this,
            (IgniteReducer<Object, Object>) rmtReducer,
            (IgniteClosure<Object, Object>) rmtTransform,
            args);

    final GridCacheQueryManager qryMgr = cctx.queries();

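    // The query is local when it maps to exactly one node and that node is the local one.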
    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

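    // Dispatch based on query type. A SCAN over a specific partition with several candidate
    // nodes goes through the fallback future so it can be retried on another node if needed.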
    if (type == SQL_FIELDS || type == SPI)
      return (CacheQueryFuture<R>)
          (loc ? qryMgr.queryFieldsLocal(bean) : qryMgr.queryFieldsDistributed(bean, nodes));
    else if (type == SCAN && part != null && nodes.size() > 1)
      return new CacheQueryFallbackFuture<>(nodes, bean, qryMgr);
    else
      return (CacheQueryFuture<R>)
          (loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
  }
  /**
   * @param cctx Cache context.
   * @param id Partition ID.
   */
  @SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor")
  GridDhtLocalPartition(GridCacheContext cctx, int id) {
    assert cctx != null;

    this.id = id;
    this.cctx = cctx;

    log = U.logger(cctx.kernalContext(), logRef, this);

    rent =
        new GridFutureAdapter<Object>() {
          @Override
          public String toString() {
            return "PartitionRentFuture [part=" + GridDhtLocalPartition.this + ", map=" + map + ']';
          }
        };

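    // Initial map capacity is this partition's share of the configured start size.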
    map = new ConcurrentHashMap8<>(cctx.config().getStartSize() / cctx.affinity().partitions());

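    // Removal queue: fixed small size (100) for system caches, otherwise an even share of
    // MAX_DELETE_QUEUE_SIZE per partition, but at least 20.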
    int delQueueSize =
        CU.isSystemCache(cctx.name())
            ? 100
            : Math.max(MAX_DELETE_QUEUE_SIZE / cctx.affinity().partitions(), 20);

    rmvQueue = new GridCircularBuffer<>(U.ceilPow2(delQueueSize));
  }
  /**
   * @param cctx Cache context.
   * @param keys Keys to fetch.
   * @param topVer Topology version.
   * @param readThrough Read through flag.
   * @param forcePrimary If {@code true} then will force network trip to primary node even if called
   *     on backup node.
   * @param subjId Subject ID.
   * @param taskName Task name.
   * @param deserializeBinary Deserialize binary flag.
   * @param expiryPlc Expiry policy.
   * @param skipVals Skip values flag.
   * @param canRemap Flag indicating whether future can be remapped on a newer topology version.
   * @param needVer If {@code true} returns values as tuples containing value and version.
   * @param keepCacheObjects Keep cache objects flag.
   */
  public GridPartitionedGetFuture(
      GridCacheContext<K, V> cctx,
      Collection<KeyCacheObject> keys,
      AffinityTopologyVersion topVer,
      boolean readThrough,
      boolean forcePrimary,
      @Nullable UUID subjId,
      String taskName,
      boolean deserializeBinary,
      @Nullable IgniteCacheExpiryPolicy expiryPlc,
      boolean skipVals,
      boolean canRemap,
      boolean needVer,
      boolean keepCacheObjects) {
    super(
        cctx,
        keys,
        readThrough,
        forcePrimary,
        subjId,
        taskName,
        deserializeBinary,
        expiryPlc,
        skipVals,
        canRemap,
        needVer,
        keepCacheObjects);

    this.topVer = topVer;

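    // Lazily initialize the class-wide logger shared through logRef.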
    if (log == null) log = U.logger(cctx.kernalContext(), logRef, GridPartitionedGetFuture.class);
  }
  /**
   * @param cctx Cache context.
   * @param nodeId ID of the node that started routine.
   * @param entry Entry.
   * @throws IgniteCheckedException In case of error.
   */
  private void prepareEntry(GridCacheContext cctx, UUID nodeId, CacheContinuousQueryEntry entry)
      throws IgniteCheckedException {
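    // The entry is marshalled in either case; additional deployment preparation happens only
    // when peer class loading is enabled and the originating node is still in topology.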
    if (cctx.kernalContext().config().isPeerClassLoadingEnabled()
        && cctx.discovery().node(nodeId) != null) {
      entry.prepareMarshal(cctx);

      cctx.deploy().prepare(entry);
    } else entry.prepareMarshal(cctx);
  }
  /**
   * @param cctx Cache context.
   * @param completionCb Callback to invoke when future is completed.
   * @param writeVer Write version.
   * @param updateReq Update request.
   * @param updateRes Update response.
   */
  public GridDhtAtomicUpdateFuture(
      GridCacheContext cctx,
      CI2<GridNearAtomicUpdateRequest, GridNearAtomicUpdateResponse> completionCb,
      GridCacheVersion writeVer,
      GridNearAtomicUpdateRequest updateReq,
      GridNearAtomicUpdateResponse updateRes) {
    this.cctx = cctx;
    this.writeVer = writeVer;

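    // Generate the version for this future from the request's topology version.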
    futVer = cctx.versions().next(updateReq.topologyVersion());
    this.updateReq = updateReq;
    this.completionCb = completionCb;
    this.updateRes = updateRes;

    if (log == null) log = U.logger(cctx.kernalContext(), logRef, GridDhtAtomicUpdateFuture.class);

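    // Pre-size the key list and per-node mappings for the number of keys in the request.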
    keys = new ArrayList<>(updateReq.keys().size());
    mappings = U.newHashMap(updateReq.keys().size());

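    // Topology counts as locked when the request pinned it, or for fast-map updates not issued
    // by a client; only an unlocked topology requires waiting for exchange.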
    boolean topLocked =
        updateReq.topologyLocked() || (updateReq.fastMap() && !updateReq.clientRequest());

    waitForExchange = !topLocked;
  }
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Override
  public void notifyCallback(
      UUID nodeId, UUID routineId, Collection<?> objs, GridKernalContext ctx) {
    assert nodeId != null;
    assert routineId != null;
    assert objs != null;
    assert ctx != null;

    Collection<CacheContinuousQueryEntry> entries = (Collection<CacheContinuousQueryEntry>) objs;

    final GridCacheContext cctx = cacheContext(ctx);

    for (CacheContinuousQueryEntry e : entries) {
      GridCacheDeploymentManager depMgr = cctx.deploy();

      ClassLoader ldr = depMgr.globalLoader();

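      // With peer class loading enabled, record the entry's deployment info as the
      // p2p class-loading context for the sending node.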
      if (ctx.config().isPeerClassLoadingEnabled()) {
        GridDeploymentInfo depInfo = e.deployInfo();

        if (depInfo != null) {
          depMgr.p2pContext(
              nodeId,
              depInfo.classLoaderId(),
              depInfo.userVersion(),
              depInfo.deployMode(),
              depInfo.participants(),
              depInfo.localDeploymentOwner());
        }
      }

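      // Unmarshal the entry using the resolved class loader; failures are logged and
      // processing continues.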
      try {
        e.unmarshal(cctx, ldr);
      } catch (IgniteCheckedException ex) {
        U.error(ctx.log(getClass()), "Failed to unmarshal entry.", ex);
      }
    }

    final IgniteCache cache = cctx.kernalContext().cache().jcache(cctx.name());

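    // handleEvent() turns each raw entry into zero or more entries to deliver; collect them all.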
    Collection<CacheContinuousQueryEntry> entries0 = new ArrayList<>();

    for (CacheContinuousQueryEntry e : entries) entries0.addAll(handleEvent(ctx, e));

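    // Lazily expose only non-filtered entries, wrapped as cache entry events for the local listener.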
    Iterable<CacheEntryEvent<? extends K, ? extends V>> evts =
        F.viewReadOnly(
            entries0,
            new C1<CacheContinuousQueryEntry, CacheEntryEvent<? extends K, ? extends V>>() {
              @Override
              public CacheEntryEvent<? extends K, ? extends V> apply(CacheContinuousQueryEntry e) {
                return new CacheContinuousQueryEvent<>(cache, cctx, e);
              }
            },
            new IgnitePredicate<CacheContinuousQueryEntry>() {
              @Override
              public boolean apply(CacheContinuousQueryEntry entry) {
                return !entry.isFiltered();
              }
            });

    locLsnr.onUpdated(evts);
  }