/** @throws Exception If failed. */
  public void testAffinityPut() throws Exception {
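    // Wait at least one topology refresh interval (doubled for safety) so the client
    // sees the full topology before the test makes any assertions about it.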
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
      String key = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

      assertEquals(primaryNodeId, partitioned.affinity(key));

      // Must go to the primary node only. Since the backup count is 0, the value must be
      // present on the primary node only.
      partitioned.put(key, "val" + key);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val);
        else assertNull(val);
      }
    }

    // Now check that, in pinned mode, the value is also visible in the near cache of the pinned node.
    for (int i = 100; i < 200; i++) {
      String pinnedKey = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

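      // Pin the put to a node other than the primary so the value also ends up
      // in that node's near cache.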
      UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

      GridClientNode node = compute.node(pinnedNodeId);

      partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

        if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
          assertEquals("val" + pinnedKey, val);
        else assertNull(val);
      }
    }
  }

  /** @throws Exception If failed. */
  public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr =
        new GridClientTopologyListener() {
          @Override
          public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
          }

          @Override
          public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
          }
        };

    client.addTopologyListener(lsnr);

    try {
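      // Start an extra node; the listener should report it within two topology refresh intervals.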
      Grid g = startGrid(NODES_CNT + 1);

      UUID id = g.localNode().id();

      assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

      assertEquals(1, added.size());
      assertEquals(id, F.first(added));

      stopGrid(NODES_CNT + 1);

      assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

      assertEquals(1, rmvd.size());
      assertEquals(id, F.first(rmvd));
    } finally {
      client.removeTopologyListener(lsnr);

      stopGrid(NODES_CNT + 1);
    }
  }

  /**
   * This method is called to map or split grid task into multiple grid jobs. This is the first
   * method that gets called when task execution starts.
   *
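   * <p>A rough usage sketch follows, for illustration only: the task class name {@code
   * PreferredNodePutTask}, its constructor arguments and the {@code keys} collection are
   * assumptions made up for this example, and it presumes a GridGain-style compute facade where
   * {@code grid.compute().execute(task, arg)} returns a {@link GridComputeTaskFuture}.
   *
   * <pre>{@code
   * // Hypothetical invocation of a task like this one (all names are illustrative).
   * Collection<Integer> keys = Arrays.asList(1, 2, 3, 4, 5);
   *
   * GridComputeTaskFuture<Collection<Integer>> fut =
   *     grid.compute().execute(new PreferredNodePutTask(preferredNode, cacheName, true), keys);
   *
   * Collection<Integer> res = fut.get(); // Rethrows GridException if map() or a job failed.
   * }</pre>
   *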
   * @param subgrid Nodes available for this task execution. Note that the order of nodes is
   *     guaranteed to be randomized by the container. This ensures that every time you simply
   *     iterate through the grid nodes, the order of nodes will be random, which over time should
   *     result in all nodes being used equally.
   * @param data Task execution argument. Can be {@code null}. This is the same argument as the one
   *     passed into {@code Grid#execute(...)} methods.
   * @return Map of grid jobs assigned to subgrid nodes. If {@code null} or an empty map is
   *     returned and no {@link GridComputeTaskContinuousMapper} is injected into the task, an
   *     exception will be thrown.
   * @throws GridException If mapping could not complete successfully. This exception will be
   *     thrown out of the {@link GridComputeTaskFuture#get()} method.
   */
  @Override
  public Map<? extends GridComputeJob, GridNode> map(
      List<GridNode> subgrid, @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to the preferred node; otherwise, take the first node in the subgrid.
    GridNode targetNode =
        F.find(
            subgrid,
            subgrid.get(0),
            new GridPredicate<GridNode>() {
              @Override
              public boolean apply(GridNode e) {
                return preferredNode.equals(e.id());
              }
            });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
          @GridLoggerResource private GridLogger log;

          @GridInstanceResource private Grid grid;

          @Override
          public Object execute() throws GridException {
            log.info("Going to put data: " + data.size());

            GridCache<Object, Object> cache = grid.cache(cacheName);

            assert cache != null;

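            // Group values by partition so that each partition is written under a single
            // group-lock transaction.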
            Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

            for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
              T2<Integer, Collection<Integer>> pair = entry.getValue();

              Object affKey = pair.get1();

              // Start a group-lock transaction that locks the whole partition owning 'affKey'.
              try (GridCacheTx tx =
                  cache.txStartPartition(
                      cache.affinity().partition(affKey),
                      optimistic ? OPTIMISTIC : PESSIMISTIC,
                      REPEATABLE_READ,
                      0,
                      pair.get2().size())) {
                for (Integer val : pair.get2()) cache.put(val, val);

                tx.commit();
              }
            }

            log.info("Finished put data: " + data.size());

            return data;
          }

          /**
            * Groups values by cache partition.
            *
            * @param data Data to put.
            * @return Map from partition number to a pair of the partition's affinity key and the
            *     values that belong to that partition.
           */
          private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
            GridCache<Object, Object> cache = grid.cache(cacheName);

            Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

            for (Integer val : data) {
              int part = cache.affinity().partition(val);

              T2<Integer, Collection<Integer>> tup = res.get(part);

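              // The first value seen in a partition becomes the affinity key for its group-lock tx.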
              if (tup == null) {
                tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                res.put(part, tup);
              }

              tup.get2().add(val);
            }

            return res;
          }
        },
        targetNode);
  }