/**
   * Validates that both streams produce the same output.
   *
   * @param expIn Expected input stream.
   * @param actIn Actual input stream.
   * @param expSize Expected size of the streams, or {@code null} if the size is not checked.
   * @param seek Seek offset to use for position-based reading, or {@code null} to use simple
   *     continuous reading.
   * @throws IOException In case of any IO exception.
   */
  private void assertEqualStreams(
      InputStream expIn, GridGgfsInputStream actIn, @Nullable Long expSize, @Nullable Long seek)
      throws IOException {
    if (seek != null) expIn.skip(seek);

    int bufSize = 2345;
    byte[] buf1 = new byte[bufSize];
    byte[] buf2 = new byte[bufSize];
    long pos = 0;

    long start = System.currentTimeMillis();

    while (true) {
      int read = expSize == null ? bufSize : (int) Math.min(bufSize, expSize - pos);

      int i1;

      // Seek parity alternates between positioned read() and readFully() to cover both APIs.
      if (seek == null) i1 = actIn.read(buf1, 0, read);
      else if (seek % 2 == 0) i1 = actIn.read(pos + seek, buf1, 0, read);
      else {
        i1 = read;

        actIn.readFully(pos + seek, buf1, 0, read);
      }

      // Read exactly as many bytes as the actual stream produced; on EOF of the actual
      // stream, expect EOF on the expected stream as well (probing a single byte).
      int i2 = i1 == -1 ? (expIn.read() == -1 ? -1 : 0) : expIn.read(buf2, 0, i1);

      if (i1 != i2) {
        fail(
            "Expects the same data [read="
                + read
                + ", pos="
                + pos
                + ", seek="
                + seek
                + ", i1="
                + i1
                + ", i2="
                + i2
                + ']');
      }

      if (i1 == -1) break; // EOF

      // i1 == bufSize => compare whole buffers.
      // i1 <  bufSize => only part of the buffers was overwritten; the rest is still equal
      // from the previous iteration, so whole buffers can be compared anyway.
      assertTrue(
          "Expects the same data [read="
              + read
              + ", pos="
              + pos
              + ", seek="
              + seek
              + ", i1="
              + i1
              + ", i2="
              + i2
              + ']',
          Arrays.equals(buf1, buf2));

      if (read == 0) break; // Nothing more to read.

      pos += i1;
    }

    if (expSize != null) assertEquals(expSize.longValue(), pos);

    long time = System.currentTimeMillis() - start;

    if (time != 0 && expSize != null && log.isInfoEnabled()) {
      log.info(
          String.format(
              "Streams were compared in continuous reading [size=%7d, rate=%3.1f MB/sec]",
              expSize, expSize * 1000. / time / 1024 / 1024));
    }
  }
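
  /**
   * Usage sketch (illustrative, not part of the original suite): how {@code assertEqualStreams}
   * is typically driven, comparing a GGFS file against the in-memory data it was written from.
   * The path and data are hypothetical; requires {@code java.io.ByteArrayInputStream}.
   */
  private void exampleStreamComparison(GridGgfs fs, GridGgfsPath path, byte[] expData)
      throws Exception {
    // Continuous reading of the whole file.
    try (GridGgfsInputStream in = fs.open(path)) {
      assertEqualStreams(new ByteArrayInputStream(expData), in, (long) expData.length, null);
    }

    // Position-based reading from a fixed offset; the expected stream is skipped by the
    // same offset inside assertEqualStreams, so the expected size shrinks accordingly.
    long seek = 128;

    try (GridGgfsInputStream in = fs.open(path)) {
      assertEqualStreams(new ByteArrayInputStream(expData), in, expData.length - seek, seek);
    }
  }
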
  /** @return Client configuration for the test. */
  protected GridClientConfiguration clientConfiguration() throws GridClientException {
    GridClientConfiguration cfg = new GridClientConfiguration();

    cfg.setBalancer(getBalancer());

    cfg.setTopologyRefreshFrequency(TOP_REFRESH_FREQ);

    cfg.setProtocol(protocol());
    cfg.setServers(Arrays.asList(serverAddress()));
    cfg.setSslContextFactory(sslContextFactory());

    // Data configuration with no name set refers to the default cache.
    GridClientDataConfiguration loc = new GridClientDataConfiguration();

    GridClientDataConfiguration partitioned = new GridClientDataConfiguration();

    partitioned.setName(PARTITIONED_CACHE_NAME);
    partitioned.setAffinity(new GridClientPartitionAffinity());

    GridClientDataConfiguration replicated = new GridClientDataConfiguration();
    replicated.setName(REPLICATED_CACHE_NAME);

    GridClientDataConfiguration replicatedAsync = new GridClientDataConfiguration();
    replicatedAsync.setName(REPLICATED_ASYNC_CACHE_NAME);

    cfg.setDataConfigurations(Arrays.asList(loc, partitioned, replicated, replicatedAsync));

    return cfg;
  }
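
  /**
   * Usage sketch (illustrative): the configuration above is normally consumed through
   * {@code GridClientFactory.start(...)}, which returns a client that should be stopped
   * via {@code GridClientFactory} when the test finishes.
   */
  protected GridClient startTestClient() throws GridClientException {
    return GridClientFactory.start(clientConfiguration());
  }
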
  /** @throws Exception If failed. */
  public void testClientAffinity() throws Exception {
    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    Collection<Object> keys = new ArrayList<>();

    keys.addAll(Arrays.asList(Boolean.TRUE, Boolean.FALSE, 1, Integer.MAX_VALUE));

    Random rnd = new Random();
    StringBuilder sb = new StringBuilder();

    // Generate some random strings.
    for (int i = 0; i < 100; i++) {
      sb.setLength(0);

      for (int j = 0; j < 255; j++)
        // Only printable ASCII symbols for test.
        sb.append((char) (rnd.nextInt(0x7f - 0x20) + 0x20));

      keys.add(sb.toString());
    }

    // Generate some more keys to achieve better coverage.
    for (int i = 0; i < 100; i++) keys.add(UUID.randomUUID());

    for (Object key : keys) {
      UUID nodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      UUID clientNodeId = partitioned.affinity(key);

      assertEquals(
          "Invalid affinity mapping for REST response for key: " + key, nodeId, clientNodeId);
    }
  }
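
  /**
   * Helper sketch (a hypothetical refactoring of the loop above): asserts that the client-side
   * affinity for a single key matches the server-side key-to-node mapping.
   */
  private void assertAffinityMatches(GridClientData cache, Object key) throws Exception {
    UUID expNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

    assertEquals("Invalid client affinity mapping for key: " + key, expNodeId, cache.affinity(key));
  }
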
  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    // Disable the fragmentizer so the written file map (and block colocation) stays intact
    // for the assertions below.
    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this, the first two blocks should be colocated with grid 0 and the last block
      // colocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertEquals(0, ranges.get(0).startOffset());
      assertEquals(2 * CFG_BLOCK_SIZE - 1, ranges.get(0).endOffset());

      assertEquals(2 * CFG_BLOCK_SIZE, ranges.get(1).startOffset());
      assertEquals(3 * CFG_BLOCK_SIZE - 1, ranges.get(1).endOffset());

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      fs.delete(path, true);
    }

    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }
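
  /**
   * Helper sketch (hypothetical): factors out the duplicated "write 1.5 blocks filled with a
   * single byte value" step used twice in the test above.
   */
  private void writePattern(GridGgfsOutputStream out, byte val) throws IOException {
    byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2]; // 1.5 blocks.

    Arrays.fill(data, val);

    out.write(data);
  }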