/** @throws Exception If failed. */
  public void testCreateFileColocated() throws Exception {
    GridGgfsPath path = new GridGgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    GridUuid affKey;

    long idx = 0;

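    // Probe sequential local IDs until the resulting affinity key maps to the local node.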
    while (true) {
      affKey = new GridUuid(uuid, idx);

      if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id()))
        break;

      idx++;
    }

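    // Create the file with the chosen affinity key so all blocks are colocated on the local node.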
    try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
      // Write 15 MB, should be enough to test block distribution.
      for (int i = 0; i < 15; i++) out.write(new byte[1024 * 1024]);
    }

    GridGgfsFile info = fs.info(path);

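    // Affinity for the whole file should resolve to a single block location.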
    Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
  }

  /** {@inheritDoc} */
  @Override
  protected void beforeTest() throws Exception {
    if (NODES_CNT <= 0) return;

    // Initialize FS.
    fs = grid(0).ggfs("ggfs");

    // Cleanup FS.
    fs.format().get();
  }

  /**
   * Test file creation.
   *
   * @throws Exception In case of exception.
   */
  public void testCreateFile() throws Exception {
    GridGgfsPath root = new GridGgfsPath("/");
    GridGgfsPath path = new GridGgfsPath("/asdf");

    long max = 100L * CFG_BLOCK_SIZE / WRITING_THREADS_CNT;

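    // Grow the file size roughly geometrically (x1.5 per iteration) from 0 up to max.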
    for (long size = 0; size <= max; size = size * 15 / 10 + 1) {
      assertEquals(Collections.<GridGgfsPath>emptyList(), fs.listPaths(root));

      testCreateFile(path, size, new Random().nextInt());
    }
  }

  /**
   * Test file creation.
   *
   * @param path Path to file to store.
   * @param size Size of file to store.
   * @param salt Salt for file content generation.
   * @throws Exception In case of any exception.
   */
  private void testCreateFile(final GridGgfsPath path, final long size, final int salt)
      throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');

    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<GridGgfsPath> cleanUp = new ConcurrentLinkedQueue<>();

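    // Create and write files concurrently from WRITING_THREADS_CNT threads.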
    long time =
        runMultiThreaded(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                int id = cnt.incrementAndGet();

                GridGgfsPath f = new GridGgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));

                try (GridGgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                  assertNotNull(out);

                  cleanUp.add(f); // Add all created into cleanup list.

                  U.copy(new GridGgfsTestInputStream(size, salt), out);
                }

                return null;
              }
            },
            WRITING_THREADS_CNT,
            "perform-multi-thread-writing");

    if (time > 0) {
      double rate = size * 1000. / time / 1024 / 1024;

      info(
          String.format(
              "Write file [path=%s, size=%d kB, rate=%2.1f MB/s]",
              path, WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }

    info("Read and validate saved file: " + path);

    final InputStream expIn = new GridGgfsTestInputStream(size, salt);
    final GridGgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);

    // Validate continuous reading of whole file.
    assertEqualStreams(expIn, actIn, size, null);

    // Validate random seek and reading.
    final Random rnd = new Random();

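    // Each reader thread skips to a random offset and validates a random-length range.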
    runMultiThreaded(
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));

            assertEqualStreams(new GridGgfsTestInputStream(size, salt), actIn, range, skip);

            return null;
          }
        },
        READING_THREADS_CNT,
        "validate-multi-thread-reading");

    expIn.close();
    actIn.close();

    info("Get stored file info: " + path);

    GridGgfsFile desc = fs.info(path);

    info("Validate stored file info: " + desc);

    assertNotNull(desc);

    if (log.isDebugEnabled()) log.debug("File descriptor: " + desc);

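    // A non-empty file must report at least one block location.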
    Collection<GridGgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());

    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());

    int blockSize = desc.blockSize();

    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    // assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    // assertEquals("Permission sticky bit marks this is file", false,
    // desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());

    info("Cleanup files: " + cleanUp);

    for (GridGgfsPath f : cleanUp) {
      fs.delete(f, true);
      assertNull(fs.info(f));
    }
  }

  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

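    // Disable the fragmentizer so blocks stay on the nodes that originally wrote them.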
    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

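      // Append from another node; with PREFER_LOCAL_WRITES the appended blocks should land on grid 1.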
      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this we should have the first two blocks colocated with grid 0 and the last block
      // colocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertEquals(0, ranges.get(0).startOffset());
      assertEquals(2 * CFG_BLOCK_SIZE - 1, ranges.get(0).endOffset());

      assertEquals(2 * CFG_BLOCK_SIZE, ranges.get(1).startOffset());
      assertEquals(3 * CFG_BLOCK_SIZE - 1, ranges.get(1).endOffset());

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

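      // Data cache must still hold blocks before the file is deleted.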
      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      fs.delete(path, true);
    }

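    // After delete, data blocks should eventually be purged from all nodes.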
    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }
  /** {@inheritDoc} */
  @Nullable
  @Override
  public final Map<? extends GridComputeJob, GridNode> map(
      List<GridNode> subgrid, @Nullable GridGgfsTaskArgs<T> args) throws GridException {
    assert grid != null;
    assert args != null;

    GridGgfs ggfs = grid.ggfs(args.ggfsName());
    GridGgfsProcessorAdapter ggfsProc = ((GridKernal) grid).context().ggfs();

    Map<GridComputeJob, GridNode> splitMap = new HashMap<>();

    Map<UUID, GridNode> nodes = mapSubgrid(subgrid);

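    // Create one compute job per affinity block location of every input path.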
    for (GridGgfsPath path : args.paths()) {
      GridGgfsFile file = ggfs.info(path);

      if (file == null) {
        if (args.skipNonExistentFiles())
          continue;

        throw new GridException("Failed to process GGFS file because it doesn't exist: " + path);
      }

      Collection<GridGgfsBlockLocation> aff =
          ggfs.affinity(path, 0, file.length(), args.maxRangeLength());

      long totalLen = 0;

      for (GridGgfsBlockLocation loc : aff) {
        GridNode node = null;

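        // Pick the first affinity node of this block that is present in the subgrid.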
        for (UUID nodeId : loc.nodeIds()) {
          node = nodes.get(nodeId);

          if (node != null) break;
        }

        if (node == null)
          throw new GridException(
              "Failed to find any of block affinity nodes in subgrid [loc="
                  + loc
                  + ", subgrid="
                  + subgrid
                  + ']');

        GridGgfsJob job =
            createJob(path, new GridGgfsFileRange(file.path(), loc.start(), loc.length()), args);

        if (job != null) {
          GridComputeJob jobImpl =
              ggfsProc.createJob(
                  job, ggfs.name(), file.path(), loc.start(), loc.length(), args.recordResolver());

          splitMap.put(jobImpl, node);
        }

        totalLen += loc.length();
      }

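      // Together, the block locations must cover the entire file.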
      assert totalLen == file.length();
    }

    return splitMap;
  }