Example #1
  /**
   * Test that getDU is able to handle cycles caused by symbolic links and that directory sizes
   * are not added to the final calculated size
   *
   * @throws IOException
   */
  @Test(timeout = 30000)
  public void testGetDU() throws Exception {
    setupDirs();

    long du = FileUtil.getDU(TEST_DIR);
    // Only two files (in partitioned).  Each has 3 characters + system-specific
    // line separator.
    final long expected = 2 * (3 + System.getProperty("line.separator").length());
    Assert.assertEquals(expected, du);

    // target file does not exist:
    final File doesNotExist = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
    long duDoesNotExist = FileUtil.getDU(doesNotExist);
    assertEquals(0, duDoesNotExist);

    // target file is not a directory:
    File notADirectory = new File(partitioned, "part-r-00000");
    long duNotADirectoryActual = FileUtil.getDU(notADirectory);
    long duNotADirectoryExpected = 3 + System.getProperty("line.separator").length();
    assertEquals(duNotADirectoryExpected, duNotADirectoryActual);

    try {
      // one of target files is not accessible, but the containing directory
      // is accessible:
      try {
        FileUtil.chmod(notADirectory.getAbsolutePath(), "0000");
      } catch (InterruptedException ie) {
        // should never happen since that method never throws InterruptedException.
        assertNull(ie);
      }
      assertFalse(FileUtil.canRead(notADirectory));
      final long du3 = FileUtil.getDU(partitioned);
      assertEquals(expected, du3);

      // some target files and containing directory are not accessible:
      try {
        FileUtil.chmod(partitioned.getAbsolutePath(), "0000");
      } catch (InterruptedException ie) {
        // should never happen since that method never throws InterruptedException.
        assertNull(ie);
      }
      assertFalse(FileUtil.canRead(partitioned));
      final long du4 = FileUtil.getDU(partitioned);
      assertEquals(0, du4);
    } finally {
      // Restore the permissions so that we can delete the folder
      // in @After method:
      FileUtil.chmod(partitioned.getAbsolutePath(), "0777", true /*recursive*/);
    }
  }
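
The piece worth isolating from this test is the revoke-then-restore use of FileUtil.chmod around FileUtil.getDU: permissions are dropped to exercise the unreadable-file and unreadable-directory paths, and the finally block restores them so the tree can still be deleted. Below is a minimal standalone sketch of that pattern; the temp-directory name, file name, and file contents are illustrative assumptions, not part of the test above.

  import java.io.File;
  import java.io.FileWriter;
  import org.apache.hadoop.fs.FileUtil;

  public class GetDUSketch {
    public static void main(String[] args) throws Exception {
      File dir = new File(System.getProperty("java.io.tmpdir"), "du-sketch");   // hypothetical location
      dir.mkdirs();
      File data = new File(dir, "data.txt");
      try (FileWriter w = new FileWriter(data)) {
        w.write("abc" + System.getProperty("line.separator"));                  // 3 chars + line separator, as in the test
      }
      System.out.println("du with readable file: " + FileUtil.getDU(dir));
      try {
        FileUtil.chmod(data.getAbsolutePath(), "0000");                         // make the file unreadable
        System.out.println("du with unreadable file: " + FileUtil.getDU(dir));
      } finally {
        FileUtil.chmod(dir.getAbsolutePath(), "0777", true /*recursive*/);      // restore so the tree can be deleted
      }
    }
  }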
Example #2
 @Before
 public void setup() throws Exception {
   FileContext files = FileContext.getLocalFSFileContext();
   Path workSpacePath = new Path(workSpace.getAbsolutePath());
   files.mkdir(workSpacePath, null, true);
   FileUtil.chmod(workSpace.getAbsolutePath(), "777");
   File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
   files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"), false);
   File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
   files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"), false);
   String exec_path = System.getProperty("container-executor.path");
   if (exec_path != null && !exec_path.isEmpty()) {
     Configuration conf = new Configuration(false);
     LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH + "=" + exec_path);
     conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
     exec = new LinuxContainerExecutor();
     exec.setConf(conf);
     conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
     conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
     dirsHandler = new LocalDirsHandlerService();
     dirsHandler.init(conf);
   }
   appSubmitter = System.getProperty("application.submitter");
   if (appSubmitter == null || appSubmitter.isEmpty()) {
     appSubmitter = "nobody";
   }
 }
Example #3
  /**
   * Start the child process to handle the task for us.
   *
   * @param conf the task's configuration
   * @param recordReader the fake record reader to update progress with
   * @param output the collector to send output to
   * @param reporter the reporter for the task
   * @param outputKeyClass the class of the output keys
   * @param outputValueClass the class of the output values
   * @throws IOException
   * @throws InterruptedException
   */
  Application(
      JobConf conf,
      RecordReader<FloatWritable, NullWritable> recordReader,
      OutputCollector<K2, V2> output,
      Reporter reporter,
      Class<? extends K2> outputKeyClass,
      Class<? extends V2> outputValueClass)
      throws IOException, InterruptedException {
    serverSocket = new ServerSocket(0);
    Map<String, String> env = new HashMap<String, String>();
    // add TMPDIR environment variable with the value of java.io.tmpdir
    env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
    env.put("hadoop.pipes.command.port", Integer.toString(serverSocket.getLocalPort()));
    List<String> cmd = new ArrayList<String>();
    String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
    FileUtil.chmod(executable, "a+x");
    cmd.add(executable);
    // wrap the command in a stdout/stderr capture
    TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
    File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
    long logLength = TaskLog.getTaskLogLength(conf);
    cmd = TaskLog.captureOutAndError(cmd, stdout, stderr, logLength);

    process = runClient(cmd, env);
    clientSocket = serverSocket.accept();
    handler = new OutputHandler<K2, V2>(output, reporter, recordReader);
    K2 outputKey = (K2) ReflectionUtils.newInstance(outputKeyClass, conf);
    V2 outputValue = (V2) ReflectionUtils.newInstance(outputValueClass, conf);
    downlink =
        new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, outputKey, outputValue, conf);
    downlink.start();
    downlink.setJobConf(conf);
  }
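
For this page's topic, the key line in the constructor is FileUtil.chmod(executable, "a+x"): the binary localized through the DistributedCache is not executable on arrival and has to be marked executable before it can be launched. A stripped-down, hypothetical sketch of just that step; the port handshake, TaskLog capture, and runClient wiring are deliberately left out.

  import java.io.IOException;
  import org.apache.hadoop.fs.FileUtil;

  public class ExecSketch {
    // Mark a freshly localized binary as executable, then launch it.
    static Process launch(String executable) throws IOException, InterruptedException {
      FileUtil.chmod(executable, "a+x");          // symbolic mode, as in chmod(1)
      return new ProcessBuilder(executable)
          .redirectErrorStream(true)              // fold stderr into stdout instead of TaskLog capture
          .start();
    }
  }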
Example #4
  // The method which actually copies the cache files locally, unjars/unzips them,
  // and chmods the resulting files.
  private static Path localizeCache(
      Configuration conf, URI cache, long confFileStamp, CacheStatus cacheStatus, boolean isArchive)
      throws IOException {
    FileSystem fs = getFileSystem(cache, conf);
    FileSystem localFs = FileSystem.getLocal(conf);
    Path parchive = null;

    if (isArchive) {
      parchive =
          new Path(
              cacheStatus.localizedLoadPath, new Path(cacheStatus.localizedLoadPath.getName()));
    } else {
      parchive = cacheStatus.localizedLoadPath;
    }
    if (!localFs.mkdirs(parchive.getParent())) {
      throw new IOException(
          "Mkdirs failed to create directory " + cacheStatus.localizedLoadPath.toString());
    }
    String cacheId = cache.getPath();

    fs.copyToLocalFile(new Path(cacheId), parchive);
    if (isArchive) {
      String tmpArchive = parchive.toString().toLowerCase();
      File srcFile = new File(parchive.toString());
      File destDir = new File(parchive.getParent().toString());
      if (tmpArchive.endsWith(".jar")) {
        RunJar.unJar(srcFile, destDir);
      } else if (tmpArchive.endsWith(".zip")) {
        FileUtil.unZip(srcFile, destDir);
      } else if (isTarFile(tmpArchive)) {
        FileUtil.unTar(srcFile, destDir);
      }
      // else do nothing and leave the copied file in the directory as it is
    }
    long cacheSize = FileUtil.getDU(new File(parchive.getParent().toString()));
    cacheStatus.size = cacheSize;
    addCacheInfoUpdate(cacheStatus);

    // do chmod here
    try {
      // Setting recursive permission to grant everyone read and execute
      Path localDir = new Path(cacheStatus.localizedBaseDir, cacheStatus.uniqueParentDir);
      LOG.info("Doing chmod on localdir :" + localDir);
      FileUtil.chmod(localDir.toString(), "ugo+rx", true);
    } catch (InterruptedException e) {
      LOG.warn("Exception in chmod" + e.toString());
    }

    // update cacheStatus to reflect the newly cached file
    cacheStatus.mtime = getTimestamp(conf, cache);
    return cacheStatus.localizedLoadPath;
  }
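
Two FileUtil calls do the bookkeeping at the end of localization: getDU measures how much local disk the unpacked cache entry occupies, and a recursive chmod "ugo+rx" makes the whole tree readable and traversable by every user. A compressed sketch of that tail end, with hypothetical names; unlike the method above, it simply propagates InterruptedException instead of logging it.

  import java.io.File;
  import java.io.IOException;
  import org.apache.hadoop.fs.FileUtil;

  public class LocalizeSketch {
    // After a cache entry has been copied and unpacked, record its footprint and
    // open the whole tree for read/execute.
    static long finishLocalization(File localDir) throws IOException, InterruptedException {
      long size = FileUtil.getDU(localDir);                                     // disk usage of the unpacked tree
      FileUtil.chmod(localDir.getAbsolutePath(), "ugo+rx", true /*recursive*/);
      return size;
    }
  }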
Example #5
  private String downloadResource(String value, boolean convertToUnix) {
    if (canDownloadResource(value)) {
      getConsole().printInfo("converting to local " + value);
      File resourceDir = new File(getConf().getVar(HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
      String destinationName = new Path(value).getName();
      File destinationFile = new File(resourceDir, destinationName);
      if (resourceDir.exists() && !resourceDir.isDirectory()) {
        throw new RuntimeException(
            "The resource directory is not a directory, resourceDir is set to" + resourceDir);
      }
      if (!resourceDir.exists() && !resourceDir.mkdirs()) {
        throw new RuntimeException("Couldn't create directory " + resourceDir);
      }
      try {
        FileSystem fs = FileSystem.get(new URI(value), conf);
        fs.copyToLocalFile(new Path(value), new Path(destinationFile.getCanonicalPath()));
        value = destinationFile.getCanonicalPath();

        // add "execute" permission to downloaded resource file (needed when loading dll file)
        FileUtil.chmod(value, "ugo+rx", true);
        if (convertToUnix && DosToUnix.isWindowsScript(destinationFile)) {
          try {
            DosToUnix.convertWindowsScriptToUnix(destinationFile);
          } catch (Exception e) {
            throw new RuntimeException(
                "Caught exception while converting file "
                    + destinationFile
                    + " to unix line endings",
                e);
          }
        }
      } catch (Exception e) {
        throw new RuntimeException("Failed to read external resource " + value, e);
      }
    }
    return value;
  }
Example #6
 static void createPrivateTempFile(Path p) throws IOException, InterruptedException {
   createTempFile(p);
   FileUtil.chmod(p.toString(), "0770", true);
 }
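
The helper makes staging files group-private: octal mode "0770" grants full access to the owner and group and nothing to other users. A tiny standalone sketch of the equivalent call on a plain file; the file name is a placeholder, and the recursive flag is kept only to mirror the helper above.

  import java.io.File;
  import org.apache.hadoop.fs.FileUtil;

  public class PrivateFileSketch {
    public static void main(String[] args) throws Exception {
      File f = new File(System.getProperty("java.io.tmpdir"), "private-staging-file");  // placeholder name
      f.createNewFile();
      // "0770": read/write/execute for owner and group, no access for others
      FileUtil.chmod(f.getAbsolutePath(), "0770", true);
    }
  }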
Example #7
  @SuppressWarnings({"unchecked", "deprecation"})
  @Test
  public void testNameNodeMXBeanInfo() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;

    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();

      FSNamesystem fsn = cluster.getNameNode().namesystem;

      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      // get attribute "ClusterId"
      String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
      assertEquals(fsn.getClusterId(), clusterId);
      // get attribute "BlockPoolId"
      String blockpoolId = (String) mbs.getAttribute(mxbeanName, "BlockPoolId");
      assertEquals(fsn.getBlockPoolId(), blockpoolId);
      // get attribute "Version"
      String version = (String) mbs.getAttribute(mxbeanName, "Version");
      assertEquals(fsn.getVersion(), version);
      assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision()));
      // get attribute "Used"
      Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
      assertEquals(fsn.getUsed(), used.longValue());
      // get attribute "Total"
      Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
      assertEquals(fsn.getTotal(), total.longValue());
      // get attribute "safemode"
      String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
      assertEquals(fsn.getSafemode(), safemode);
      // get attribute nondfs
      Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
      assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
      // get attribute percentremaining
      Float percentremaining = (Float) (mbs.getAttribute(mxbeanName, "PercentRemaining"));
      assertEquals(fsn.getPercentRemaining(), percentremaining.floatValue(), DELTA);
      // get attribute Totalblocks
      Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
      assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
      // get attribute alivenodeinfo
      String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
      Map<String, Map<String, Object>> liveNodes =
          (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
      assertTrue(liveNodes.size() > 0);
      for (Map<String, Object> liveNode : liveNodes.values()) {
        assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
        assertTrue(((Long) liveNode.get("nonDfsUsedSpace")) > 0);
        assertTrue(liveNode.containsKey("capacity"));
        assertTrue(((Long) liveNode.get("capacity")) > 0);
        assertTrue(liveNode.containsKey("numBlocks"));
        assertTrue(((Long) liveNode.get("numBlocks")) == 0);
      }
      assertEquals(fsn.getLiveNodes(), alivenodeinfo);
      // get attribute deadnodeinfo
      String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes"));
      assertEquals(fsn.getDeadNodes(), deadnodeinfo);
      // get attribute NameDirStatuses
      String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
      assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
      Map<String, Map<String, String>> statusMap =
          (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
      Collection<URI> nameDirUris = cluster.getNameDirs(0);
      for (URI nameDirUri : nameDirUris) {
        File nameDir = new File(nameDirUri);
        System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
        assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
      }
      assertEquals(2, statusMap.get("active").size());
      assertEquals(0, statusMap.get("failed").size());

      // This will cause the first dir to fail.
      File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
      assertEquals(0, FileUtil.chmod(new File(failedNameDir, "current").getAbsolutePath(), "000"));
      cluster.getNameNodeRpc().rollEditLog();

      nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
      statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
      for (URI nameDirUri : nameDirUris) {
        File nameDir = new File(nameDirUri);
        String expectedStatus = nameDir.equals(failedNameDir) ? "failed" : "active";
        System.out.println(
            "Checking for the presence of " + nameDir + " in " + expectedStatus + " name dirs.");
        assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
      }
      assertEquals(1, statusMap.get("active").size());
      assertEquals(1, statusMap.get("failed").size());
    } finally {
      if (cluster != null) {
        for (URI dir : cluster.getNameDirs(0)) {
          FileUtil.chmod(new File(new File(dir), "current").getAbsolutePath(), "755");
        }
        cluster.shutdown();
      }
    }
  }
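
For the purposes of this page, the interesting idiom in the test is the fault injection: chmod "000" on a name directory's current/ subdirectory makes the NameNode report that directory as failed after rollEditLog, and the finally block restores "755" so the cluster can shut down and be cleaned up. A generic, hypothetical sketch of that revoke, act, restore shape:

  import java.io.File;
  import org.apache.hadoop.fs.FileUtil;

  public class FaultInjectionSketch {
    // Temporarily make a directory unusable to force an error path, then restore it
    // so later cleanup can still remove it. Names here are hypothetical.
    static void withUnusableDir(File dir, Runnable action) throws Exception {
      try {
        FileUtil.chmod(dir.getAbsolutePath(), "000");   // simulate a failed storage directory
        action.run();
      } finally {
        FileUtil.chmod(dir.getAbsolutePath(), "755");
      }
    }
  }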