/**
   * Validate streams generate the same output.
   *
   * @param expIn Expected input stream.
   * @param actIn Actual input stream.
   * @param expSize Expected size of the streams or {@code null} if the size is unknown.
   * @param seek Seek position to use for async position-based reading (even values exercise
   *     {@code read}, odd values {@code readFully}) or {@code null} to use simple continuous
   *     reading.
   * @throws IOException In case of any IO exception.
   */
  private void assertEqualStreams(
      InputStream expIn, GridGgfsInputStream actIn, @Nullable Long expSize, @Nullable Long seek)
      throws IOException {
    if (seek != null) expIn.skip(seek);

    int bufSize = 2345;
    byte[] buf1 = new byte[bufSize];
    byte[] buf2 = new byte[bufSize];
    long pos = 0;

    long start = System.currentTimeMillis();

    while (true) {
      // Guard against NPE: 'expSize' is nullable, so fall back to a full buffer read.
      int read = expSize == null ? bufSize : (int) Math.min(bufSize, expSize - pos);

      int i1;

      if (seek == null) i1 = actIn.read(buf1, 0, read);
      else if (seek % 2 == 0) i1 = actIn.read(pos + seek, buf1, 0, read);
      else {
        i1 = read;

        actIn.readFully(pos + seek, buf1, 0, read);
      }

      // If 'actIn' hit EOF, probe 'expIn' for EOF as well so the results can match; otherwise
      // read at most min(i1, read) bytes so both reads cover the same range.
      int i2 = i1 == -1 ? expIn.read(buf2, 0, 1) : expIn.read(buf2, 0, Math.min(i1, read));

      if (i1 != i2) {
        fail(
            "Expects the same data [read="
                + read
                + ", pos="
                + pos
                + ", seek="
                + seek
                + ", i1="
                + i1
                + ", i2="
                + i2
                + ']');
      }

      if (i1 == -1) break; // EOF

      // i1 == bufSize => compare whole buffers.
      // i1 <  bufSize => effectively compare only the first 'i1' bytes: the rest of both
      // buffers is unchanged (and equal) from the previous iteration, so a full compare is
      // still valid.
      assertTrue(
          "Expects the same data [read="
              + read
              + ", pos="
              + pos
              + ", seek="
              + seek
              + ", i1="
              + i1
              + ", i2="
              + i2
              + ']',
          Arrays.equals(buf1, buf2));

      if (read == 0) break; // Nothing more to read.

      pos += i1;
    }

    if (expSize != null) assertEquals(expSize.longValue(), pos);

    long time = System.currentTimeMillis() - start;

    // Rate is only computable when the expected size is known and measurable time elapsed.
    if (expSize != null && time != 0 && log.isInfoEnabled()) {
      log.info(
          String.format(
              "Streams were compared [size=%7d, rate=%3.1f MB/sec]",
              expSize, expSize * 1000. / time / 1024 / 1024));
    }
  }
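
  /**
   * Hypothetical usage sketch for {@link #assertEqualStreams}, not part of the original test
   * flow: compares the same file content in all three read modes, i.e. continuous
   * ({@code seek == null}), position-based {@code read} (even seek) and position-based
   * {@code readFully} (odd seek). Assumes {@code java.io.ByteArrayInputStream} is imported
   * and that {@code data} (a few bytes or more) was previously written to {@code path}.
   *
   * @param fs File system to read from.
   * @param path Path previously written with {@code data}.
   * @param data Expected file content.
   * @throws Exception If failed.
   */
  private void checkAllReadModes(GridGgfs fs, GridGgfsPath path, byte[] data) throws Exception {
    // Continuous reading from the start of the file.
    try (GridGgfsInputStream in = fs.open(path)) {
      assertEqualStreams(new ByteArrayInputStream(data), in, (long) data.length, null);
    }

    // Position-based read(pos, buf, off, len): even seek, 2 bytes skipped on both sides.
    try (GridGgfsInputStream in = fs.open(path)) {
      assertEqualStreams(new ByteArrayInputStream(data), in, (long) data.length - 2, 2L);
    }

    // Position-based readFully(pos, buf, off, len): odd seek, 1 byte skipped on both sides.
    try (GridGgfsInputStream in = fs.open(path)) {
      assertEqualStreams(new ByteArrayInputStream(data), in, (long) data.length - 1, 1L);
    }
  }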
 /**
  * @param g Grid.
  * @return Non-system caches.
  */
 private Collection<GridCacheConfiguration> caches(Grid g) {
   return F.view(
       Arrays.asList(g.configuration().getCacheConfiguration()),
       new GridPredicate<GridCacheConfiguration>() {
         @Override
         public boolean apply(GridCacheConfiguration c) {
           return c.getName() == null || !c.getName().equals(CU.UTILITY_CACHE_NAME);
         }
       });
 }
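
  /**
   * Plain-loop equivalent of {@link #caches(Grid)}, shown for clarity only and unused by the
   * tests (hypothetical name): keeps every cache configuration except the internal utility
   * cache. Assumes {@code java.util.ArrayList} is imported.
   *
   * @param g Grid.
   * @return Non-system caches.
   */
  private Collection<GridCacheConfiguration> cachesPlain(Grid g) {
    Collection<GridCacheConfiguration> res = new ArrayList<>();

    for (GridCacheConfiguration c : g.configuration().getCacheConfiguration())
      // A null name can never denote the utility cache, so such caches are always kept.
      if (c.getName() == null || !c.getName().equals(CU.UTILITY_CACHE_NAME)) res.add(c);

    return res;
  }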
 /**
   * Constructs a peer-deploy-aware helper for the data loader.
  *
  * @param objs Collection of objects to detect deploy class and class loader.
  */
 private DataLoaderPda(Object... objs) {
   this.objs = Arrays.asList(objs);
 }
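
  // Hypothetical usage sketch: the varargs capture sample objects (e.g. a cache key, a value
  // and an updater) from which peer deployment can later derive a deploy class and class
  // loader:
  //
  //   DataLoaderPda pda = new DataLoaderPda(key, val, updater);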
  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

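      // PROP_PREFER_LOCAL_WRITES below asks GGFS to keep written blocks on the local node,
      // which is what the colocation assertions later in this test rely on.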
      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this we should have the first two blocks colocated with grid 0 and the last block
      // colocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs0.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertEquals(0, ranges.get(0).startOffset());
      assertEquals(2 * CFG_BLOCK_SIZE - 1, ranges.get(0).endOffset());

      assertEquals(2 * CFG_BLOCK_SIZE, ranges.get(1).startOffset());
      assertEquals(3 * CFG_BLOCK_SIZE - 1, ranges.get(1).endOffset());
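
      // Offsets are inclusive: [0, 2 * CFG_BLOCK_SIZE - 1] and [2 * CFG_BLOCK_SIZE,
      // 3 * CFG_BLOCK_SIZE - 1] together cover exactly the three blocks written above.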

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      // The 'fs0' local is out of scope in this finally block, so go through 'impl'.
      impl.delete(path, true);
    }

    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }