  /** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    if (map.size() + skipped.size() != keys.size()) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              filter));
    }

    return map;
  }
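  // Illustrative usage sketch, not part of this class: "cache" stands for an
  // assumed GridCacheProjection<String, Integer> handle. peekAll() consults the
  // near cache first and falls back to DHT only for keys that were neither
  // found nor skipped. F.cacheHasPeekValue() is the same predicate utility used
  // elsewhere in this file.
  //
  //   Map<String, Integer> peeked = cache.peekAll(
  //       F.asList("key1", "key2"),
  //       F.<String, Integer>cacheHasPeekValue());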
  /** {@inheritDoc} */
  @Override
  public V unswap(K key) throws GridException {
    ctx.denyOnFlags(F.asList(READ, SKIP_SWAP));

    // Unswap only from DHT. Near cache does not have swap storage.
    return dht.unswap(key);
  }
  /** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys, @Nullable Collection<GridCachePeekMode> modes)
      throws GridException {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map =
        modes == null || !modes.contains(PARTITIONED_ONLY)
            ? peekAll0(keys, modes, ctx.tm().localTxx(), skipped)
            : new GridLeanMap<K, V>(0);

    if (map.size() != keys.size() && (modes == null || !modes.contains(NEAR_ONLY))) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              modes));
    }

    return map;
  }
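  // Illustrative usage sketch (assumed caller code): peek modes restrict the
  // lookup, e.g. NEAR_ONLY skips the DHT fallback above entirely.
  //
  //   Map<String, Integer> nearOnly = cache.peekAll(
  //       F.asList("key1", "key2"),
  //       F.asList(GridCachePeekMode.NEAR_ONLY));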
  /** {@inheritDoc} */
  @Override
  public Iterator<GridCacheEntry<K, V>> iterator() {
    return new EntryIterator(
        nearSet.iterator(),
        F.iterator0(
            dhtSet,
            false,
            new P1<GridCacheEntry<K, V>>() {
              @Override
              public boolean apply(GridCacheEntry<K, V> e) {
                return !GridNearCache.super.containsKey(e.getKey(), null);
              }
            }));
  }
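  // Illustrative iteration sketch (assumed caller code): the composite iterator
  // walks near entries first, then DHT entries whose keys are not already
  // present in the near cache, so each key is visited at most once.
  //
  //   for (GridCacheEntry<String, Integer> e : cache)
  //       System.out.println(e.getKey() + " -> " + e.peek());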
  /** {@inheritDoc} */
  @Override
  public GridFuture<Map<K, V>> getAllAsync(
      @Nullable Collection<? extends K> keys,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    ctx.denyOnFlag(LOCAL);

    if (F.isEmpty(keys))
      return new GridFinishedFuture<Map<K, V>>(ctx.kernalContext(), Collections.<K, V>emptyMap());

    GridCacheTxLocalAdapter<K, V> tx = ctx.tm().threadLocalTx();

    if (tx != null && !tx.implicit()) return ctx.wrapCloneMap(tx.getAllAsync(keys, filter));

    return loadAsync(keys, false, filter);
  }
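  // Illustrative usage sketch (assumed caller code): the returned future either
  // joins an explicit ongoing transaction or loads values through the near
  // cache; get() blocks until the values arrive.
  //
  //   GridFuture<Map<String, Integer>> fut = cache.getAllAsync(F.asList("key1", "key2"));
  //   Map<String, Integer> vals = fut.get();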
  /**
   * @param ldr Loader.
   * @param nodeId Sender node ID.
   * @param req Request.
   * @return Remote transaction, or {@code null} if the request contains no near writes.
   * @throws GridException If failed.
   */
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTx(
      ClassLoader ldr, UUID nodeId, GridDhtTxPrepareRequest<K, V> req) throws GridException {
    if (!F.isEmpty(req.nearWrites())) {
      GridNearTxRemote<K, V> tx =
          new GridNearTxRemote<K, V>(
              ldr,
              nodeId,
              req.nearNodeId(),
              req.threadId(),
              req.version(),
              req.commitVersion(),
              req.concurrency(),
              req.isolation(),
              req.isInvalidate(),
              req.timeout(),
              req.nearWrites(),
              ctx);

      if (!tx.empty()) {
        tx = ctx.tm().onCreated(tx);

        if (tx == null || !ctx.tm().onStarted(tx))
          throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + tx);

        // Prepare prior to reordering, so the pending locks added
        // in prepare phase will get properly ordered as well.
        tx.prepare();

        // Add remote candidates and reorder completed and uncompleted versions.
        tx.addRemoteCandidates(
            req.candidatesByKey(), req.committedVersions(), req.rolledbackVersions());

        if (req.concurrency() == EVENTUALLY_CONSISTENT) {
          if (log.isDebugEnabled())
            log.debug("Committing transaction during remote prepare: " + tx);

          tx.commit();

          if (log.isDebugEnabled()) log.debug("Committed transaction during remote prepare: " + tx);
        }
      }

      return tx;
    }

    return null;
  }
  /**
   * @param keys Keys to load.
   * @param reload Reload flag.
   * @param filter Filter.
   * @return Future with loaded values.
   */
  public GridFuture<Map<K, V>> loadAsync(
      @Nullable Collection<? extends K> keys,
      boolean reload,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (F.isEmpty(keys))
      return new GridFinishedFuture<Map<K, V>>(ctx.kernalContext(), Collections.<K, V>emptyMap());

    GridNearGetFuture<K, V> fut = new GridNearGetFuture<K, V>(ctx, keys, reload, null, filter);

    // Register future for responses.
    ctx.mvcc().addFuture(fut);

    fut.init();

    return ctx.wrapCloneMap(fut);
  }
  /**
   * Removes locks regardless of whether they are owned or not for given version and keys.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    try {
      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

      for (K key : keys) {
        // Send request to remove from remote nodes.
        GridNearUnlockRequest<K, V> req = null;

        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                if (affNodes == null) {
                  affNodes = CU.allNodes(ctx, cand.topologyVersion());

                  // Rough per-node batch size: assume keys spread evenly across affinity nodes.
                  keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                  map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                }

                GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                if (!primary.isLocal()) {
                  req = map.get(primary);

                  if (req == null) {
                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                    req.version(ver);
                  }
                }

                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  if (primary.isLocal()) {
                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                    assert req == null;

                    continue;
                  }

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (map == null || map.isEmpty()) return;

      Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
      Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

      for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (!req.keyBytes().isEmpty()) {
          req.completedVersions(committed, rolledback);

          // We don't wait for reply to this message.
          ctx.io().send(n, req);
        }
      }
    } catch (GridException ex) {
      U.error(log, "Failed to remove locks for keys: " + keys, ex);
    }
  }
  /**
   * @param e Transaction entry.
   * @return {@code True} if the entry is mapped to this node as a primary or backup node.
   */
  protected boolean isNearLocallyMapped(GridCacheEntryEx<K, V> e) {
    return F.contains(ctx.affinity(e.key(), CU.allNodes(ctx)), ctx.localNode());
  }
  /** {@inheritDoc} */
  @Override
  public int size() {
    return F.size(iterator());
  }
  /** {@inheritDoc} */
  @Override
  public Collection<V> primaryValues(GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    return new GridCacheValueCollection<K, V>(
        ctx, primaryEntrySet(filter), ctx.vararg(F.<K, V>cacheHasPeekValue()));
  }
  /**
   * @param nodeId Sender node ID.
   * @param req Finish transaction message.
   */
  @SuppressWarnings({"CatchGenericClass"})
  private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) {
    assert nodeId != null;
    assert req != null;

    GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version());

    try {
      ClassLoader ldr = ctx.deploy().globalLoader();

      if (req.commit()) {
        // If lock was acquired explicitly.
        if (tx == null) {
          // Create transaction and add entries.
          tx =
              ctx.tm()
                  .onCreated(
                      new GridReplicatedTxRemote<K, V>(
                          ldr,
                          nodeId,
                          req.threadId(),
                          req.version(),
                          req.commitVersion(),
                          PESSIMISTIC,
                          READ_COMMITTED,
                          req.isInvalidate(),
                          /*timeout */ 0,
                          /*read entries*/ null,
                          req.writes(),
                          ctx));

          if (tx == null || !ctx.tm().onStarted(tx))
            throw new GridCacheTxRollbackException(
                "Attempt to start a completed transaction: " + req);
        } else {
          boolean set = tx.commitVersion(req.commitVersion());

          assert set;
        }

        Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes();

        if (!F.isEmpty(writeEntries)) {
          // In OPTIMISTIC mode, we get the values at PREPARE stage.
          assert tx.concurrency() == PESSIMISTIC;

          for (GridCacheTxEntry<K, V> entry : writeEntries) {
            // Unmarshal write entries.
            entry.unmarshal(ctx, ldr);

            if (log.isDebugEnabled())
              log.debug(
                  "Unmarshalled transaction entry from pessimistic transaction [key="
                      + entry.key()
                      + ", value="
                      + entry.value()
                      + ", tx="
                      + tx
                      + ']');

            if (!tx.setWriteValue(entry))
              U.warn(
                  log,
                  "Received entry to commit that was not present in transaction [entry="
                      + entry
                      + ", tx="
                      + tx
                      + ']');
          }
        }

        // Add completed versions.
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        if (tx.pessimistic()) tx.prepare();

        tx.commit();
      } else if (tx != null) {
        tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions());

        tx.rollback();
      }

      if (req.replyRequired()) {
        GridCacheMessage<K, V> res =
            new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId());

        try {
          ctx.io().send(nodeId, res);
        } catch (Throwable e) {
          // Double-check.
          if (ctx.discovery().node(nodeId) == null) {
            if (log.isDebugEnabled())
              log.debug(
                  "Node left while sending finish response [nodeId="
                      + nodeId
                      + ", res="
                      + res
                      + ']');
          } else
            U.error(
                log,
                "Failed to send finish response to node [nodeId=" + nodeId + ", res=" + res + ']',
                e);
        }
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Attempted to start a completed transaction (will ignore): " + e);
    } catch (Throwable e) {
      U.error(
          log,
          "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']',
          e);

      if (tx != null) tx.rollback();
    }
  }
  /**
   * This method is called to map or split a grid task into multiple grid jobs. This is the first
   * method that gets called when task execution starts.
   *
   * @param subgrid Nodes available for this task execution. Note that the order of nodes is
   *     guaranteed to be randomized by the container, so simply iterating through grid nodes picks
   *     them in random order, which over time should result in all nodes being used equally.
   * @param data Task execution argument. Can be {@code null}. This is the same argument as the one
   *     passed into {@code Grid#execute(...)} methods.
   * @return Map of grid jobs assigned to subgrid nodes. If {@code null} or an empty map is
   *     returned, an exception will be thrown, unless {@link GridComputeTaskContinuousMapper} is
   *     injected into the task.
   * @throws GridException If mapping could not complete successfully. This exception will be
   *     rethrown from the {@link GridComputeTaskFuture#get()} method.
   */
  @Override
  public Map<? extends GridComputeJob, GridNode> map(
      List<GridNode> subgrid, @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to wanted node. Otherwise, take the first one.
    GridNode targetNode =
        F.find(
            subgrid,
            subgrid.get(0),
            new GridPredicate<GridNode>() {
              @Override
              public boolean apply(GridNode e) {
                return preferredNode.equals(e.id());
              }
            });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
          @GridLoggerResource private GridLogger log;

          @GridInstanceResource private Grid grid;

          @Override
          public Object execute() throws GridException {
            assert data != null;

            log.info("Going to put data: " + data.size());

            GridCache<Object, Object> cache = grid.cache(cacheName);

            assert cache != null;

            Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

            for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
              T2<Integer, Collection<Integer>> pair = entry.getValue();

              Object affKey = pair.get1();

              // Group-lock transaction: one lock is taken for the whole partition,
              // so the puts below do not acquire per-key locks.
              try (GridCacheTx tx =
                  cache.txStartPartition(
                      cache.affinity().partition(affKey),
                      optimistic ? OPTIMISTIC : PESSIMISTIC,
                      REPEATABLE_READ,
                      0,
                      pair.get2().size())) {
                for (Integer val : pair.get2()) cache.put(val, val);

                tx.commit();
              }
            }

            log.info("Finished put data: " + data.size());

            return data;
          }

          /**
           * Groups values by partitions.
           *
           * @param data Data to put.
           * @return Grouped map.
           */
          private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
            GridCache<Object, Object> cache = grid.cache(cacheName);

            Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

            for (Integer val : data) {
              int part = cache.affinity().partition(val);

              T2<Integer, Collection<Integer>> tup = res.get(part);

              if (tup == null) {
                tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                res.put(part, tup);
              }

              tup.get2().add(val);
            }

            return res;
          }
        },
        targetNode);
  }
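  // Illustrative execution sketch (hypothetical names: "DataPutTask" for this
  // task's concrete class, "grid" for an assumed Grid instance; assumes the
  // task reduces to the single job's result):
  //
  //   GridComputeTaskFuture<Collection<Integer>> fut =
  //       grid.compute().execute(DataPutTask.class, F.asList(1, 2, 3));
  //
  //   fut.get(); // Wait for all puts to commit on the preferred node.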
  @Override
  protected GridTuple2<GridCacheContext, String> initialValue() {
    return F.t2();
  }