Example #1
  /**
   * Maps subgrid nodes by their node IDs.
   *
   * @param subgrid Subgrid.
   * @return Map from node ID to node.
   */
  private Map<UUID, GridNode> mapSubgrid(Collection<GridNode> subgrid) {
    Map<UUID, GridNode> res = U.newHashMap(subgrid.size());

    for (GridNode node : subgrid) res.put(node.id(), node);

    return res;
  }

  /**
   * Tests that the IPC endpoint cache reuses a single IO instance across file
   * systems and stops it once no file system uses it.
   *
   * @throws Exception If failed.
   */
  @SuppressWarnings("unchecked")
  public void testIpcCache() throws Exception {
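    // Access GridGgfsHadoopIpcIo internals via reflection: the static IPC
    // endpoint cache and the per-IO active-reference counter.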
    Field cacheField = GridGgfsHadoopIpcIo.class.getDeclaredField("ipcCache");

    cacheField.setAccessible(true);

    Field activeCntField = GridGgfsHadoopIpcIo.class.getDeclaredField("activeCnt");

    activeCntField.setAccessible(true);

    Map<String, GridGgfsHadoopIpcIo> cache =
        (Map<String, GridGgfsHadoopIpcIo>) cacheField.get(null);

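    // File system authority; nothing after '@' selects the default IPC
    // endpoint (port 10500 by default).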
    String name = "ggfs:" + getTestGridName(0) + "@";

    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveGridGainUrl(HADOOP_FS_CFG));
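    // Disable Hadoop's FileSystem cache and the embedded GGFS mode so that
    // every FileSystem.get() creates a new instance talking over the shared
    // IPC IO.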
    cfg.setBoolean("fs.ggfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(GridGgfsHadoopUtils.PARAM_GGFS_ENDPOINT_NO_EMBED, name), true);

    // Ensure that existing IO is reused.
    FileSystem fs1 = FileSystem.get(new URI("ggfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    GridGgfsHadoopIpcIo io = null;

    System.out.println("CACHE: " + cache);

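    // Locate the cached IO instance bound to the default IPC port.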
    for (String key : cache.keySet()) {
      if (key.contains("10500")) {
        io = cache.get(key);

        break;
      }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not
    // stopped.
    FileSystem fs2 = FileSystem.get(new URI("ggfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger) activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    Field stopField = GridGgfsHadoopIpcIo.class.getDeclaredField("stopping");

    stopField.setAccessible(true);

    assert !(Boolean) stopField.get(io);

    // Ensure that IO is stopped when nobody needs it anymore.
    fs1.close();

    assert cache.isEmpty();

    assert (Boolean) stopField.get(io);
  }
Example #3
  /** {@inheritDoc} */
  @Nullable
  @Override
  public final Map<? extends GridComputeJob, GridNode> map(
      List<GridNode> subgrid, @Nullable GridGgfsTaskArgs<T> args) throws GridException {
    assert grid != null;
    assert args != null;

    GridGgfs ggfs = grid.ggfs(args.ggfsName());
    GridGgfsProcessorAdapter ggfsProc = ((GridKernal) grid).context().ggfs();

    Map<GridComputeJob, GridNode> splitMap = new HashMap<>();

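    // Index subgrid nodes by ID for quick affinity lookups below.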
    Map<UUID, GridNode> nodes = mapSubgrid(subgrid);

    for (GridGgfsPath path : args.paths()) {
      GridGgfsFile file = ggfs.info(path);

      if (file == null) {
        if (args.skipNonExistentFiles())
          continue;

        throw new GridException("Failed to process GGFS file because it doesn't exist: " + path);
      }

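      // Get block locations for the whole file, split into ranges of at most
      // args.maxRangeLength() bytes each.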
      Collection<GridGgfsBlockLocation> aff =
          ggfs.affinity(path, 0, file.length(), args.maxRangeLength());

      long totalLen = 0;

      for (GridGgfsBlockLocation loc : aff) {
        GridNode node = null;

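        // Pick the first node from the block's affinity list that is present
        // in the subgrid.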
        for (UUID nodeId : loc.nodeIds()) {
          node = nodes.get(nodeId);

          if (node != null) break;
        }

        if (node == null)
          throw new GridException(
              "Failed to find any of the block affinity nodes in subgrid [loc="
                  + loc
                  + ", subgrid="
                  + subgrid
                  + ']');

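        // Create a user job for this range; a null job means the range yields
        // no work.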
        GridGgfsJob job =
            createJob(path, new GridGgfsFileRange(file.path(), loc.start(), loc.length()), args);

        if (job != null) {
          GridComputeJob jobImpl =
              ggfsProc.createJob(
                  job, ggfs.name(), file.path(), loc.start(), loc.length(), args.recordResolver());

          splitMap.put(jobImpl, node);
        }

        totalLen += loc.length();
      }

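      // Sanity check: the affinity ranges must cover the entire file.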
      assert totalLen == file.length();
    }

    return splitMap;
  }