Example #1
  /**
   * Tests that fullyDelete properly deletes (a) a dangling symlink to a file and (b) a dangling
   * symlink to a directory.
   *
   * @throws IOException
   */
  @Test(timeout = 30000)
  public void testFullyDeleteDanglingSymlinks() throws IOException {
    setupDirs();
    // Delete the directory tmp to make tmpDir a dangling link to the directory tmp
    // and to make y a dangling link to the file tmp/x.
    boolean ret = FileUtil.fullyDelete(tmp);
    Assert.assertTrue(ret);
    Assert.assertFalse(tmp.exists());

    // dangling symlink to file
    File link = new File(del, LINK);
    Assert.assertEquals(5, del.list().length);
    // Even though 'y' is a dangling symlink to the file tmp/x, fullyDelete(y)
    // should delete 'y' properly.
    ret = FileUtil.fullyDelete(link);
    Assert.assertTrue(ret);
    Assert.assertEquals(4, del.list().length);

    // dangling symlink to directory
    File linkDir = new File(del, "tmpDir");
    // Even though tmpDir is a dangling symlink to tmp, fullyDelete(tmpDir)
    // should delete tmpDir properly.
    ret = FileUtil.fullyDelete(linkDir);
    Assert.assertTrue(ret);
    Assert.assertEquals(3, del.list().length);
  }
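The test above depends on a setupDirs() helper that is not shown. As a rough, self-contained sketch of the same idea, the snippet below creates a symlink, deletes its target to leave the link dangling, and then removes the dangling link with FileUtil.fullyDelete; the temporary directory layout and class name are invented for illustration.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.hadoop.fs.FileUtil;

public class DanglingSymlinkSketch {
  public static void main(String[] args) throws IOException {
    Path base = Files.createTempDirectory("fullyDelete-demo");
    Path target = Files.createFile(base.resolve("target.txt"));
    Path link = base.resolve("link-to-target");
    Files.createSymbolicLink(link, target);

    // Deleting the target leaves 'link' dangling.
    Files.delete(target);

    // fullyDelete removes the dangling link itself and returns true on success.
    boolean deleted = FileUtil.fullyDelete(link.toFile());
    System.out.println("dangling link deleted: " + deleted);

    // Clean up the temporary directory.
    FileUtil.fullyDelete(base.toFile());
  }
}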
Example #2
  private void cleanupImpl() throws IOException {
    FileUtil.fullyDelete(del, true);
    Assert.assertFalse(del.exists());

    FileUtil.fullyDelete(tmp, true);
    Assert.assertFalse(tmp.exists());

    FileUtil.fullyDelete(partitioned, true);
    Assert.assertFalse(partitioned.exists());
  }
Example #3
 /**
  * Clear and re-create the storage directory.
  *
  * <p>Removes the contents of the current directory and creates an empty directory.
  *
  * <p>This does not fully format the storage directory. It cannot write the version file, since
  * that must be written last, after all other storage-type-dependent files are written. The
  * derived storage class is responsible for setting specific storage values and writing the
  * version file to disk.
  *
  * @throws IOException
  */
 public void clearDirectory() throws IOException {
   File curDir = this.getCurrentDir();
   if (curDir.exists()) {
     if (!FileUtil.fullyDelete(curDir)) {
       throw new IOException("Cannot remove current directory: " + curDir);
     }
   }
   if (!curDir.mkdirs()) {
     throw new IOException("Cannot create directory " + curDir);
   }
 }
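As a hedged, standalone sketch of the clear-and-recreate pattern described in the Javadoc above, the helper below keeps only the FileUtil dependency; the class name, method name, and argument are hypothetical.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public final class DirUtils {
  /** Remove dir (if present) and recreate it as an empty directory. */
  public static void clearAndRecreate(File dir) throws IOException {
    if (dir.exists() && !FileUtil.fullyDelete(dir)) {
      throw new IOException("Cannot remove directory: " + dir);
    }
    if (!dir.mkdirs()) {
      throw new IOException("Cannot create directory " + dir);
    }
  }

  private DirUtils() {}
}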
Example #4
  /** Copy local files to a FileSystem. */
  public static boolean copy(
      File src, FileSystem dstFS, Path dst, boolean deleteSource, Configuration conf)
      throws IOException {
    dst = checkDest(src.getName(), dstFS, dst, false);

    if (src.isDirectory()) {
      if (!dstFS.mkdirs(dst)) {
        return false;
      }
      File[] contents = listFiles(src);
      for (int i = 0; i < contents.length; i++) {
        copy(contents[i], dstFS, new Path(dst, contents[i].getName()), deleteSource, conf);
      }
    } else if (src.isFile()) {
      InputStream in = null;
      OutputStream out = null;
      try {
        in = new FileInputStream(src);
        out = dstFS.create(dst);
        IOUtils.copyBytes(in, out, conf);
      } catch (IOException e) {
        IOUtils.closeStream(out);
        IOUtils.closeStream(in);
        throw e;
      }
    } else {
      throw new IOException(src.toString() + ": No such file or directory");
    }
    if (deleteSource) {
      return FileUtil.fullyDelete(src);
    } else {
      return true;
    }
  }
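A possible caller of the copy(File, FileSystem, Path, boolean, Configuration) overload above might look like the sketch below; the local path, destination path, and class name are placeholders, and the destination file system is simply whatever the Configuration's default points at.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CopyToFileSystemSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem dstFS = FileSystem.get(conf); // whatever fs.defaultFS points at

    File localSrc = new File("/tmp/upload-me");  // hypothetical local source
    Path dst = new Path("/user/demo/uploaded");  // hypothetical destination

    // deleteSource=false keeps the local copy; true would remove it via fullyDelete.
    boolean ok = FileUtil.copy(localSrc, dstFS, dst, false, conf);
    System.out.println("copy succeeded: " + ok);
  }
}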
Example #5
  @Before
  public void setUpNameDirs() throws Exception {
    config = new Configuration();
    String baseDir = System.getProperty("test.build.data", "/tmp");

    hdfsDir = new File(baseDir, "dfs");
    if (hdfsDir.exists()) {
      FileUtil.fullyDelete(hdfsDir);
    }

    hdfsDir.mkdir();
    path1 = new File(hdfsDir, "name1");
    path2 = new File(hdfsDir, "name2");
    path3 = new File(hdfsDir, "name3");

    path1.mkdir();
    path2.mkdir();
    path3.mkdir();

    String nameDir = path1.getPath() + "," + path2.getPath();
    config.set("dfs.name.dir", nameDir);
    config.set("dfs.name.edits.dir", nameDir + "," + path3.getPath());
    config.set("fs.checkpoint.dir", new File(hdfsDir, "secondary").getPath());

    FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
    config.set("dfs.secondary.http.address", "0.0.0.0:0");
    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  }
Example #6
  private void doUntarAndVerify(File tarFile, File untarDir) throws IOException {
    if (untarDir.exists() && !FileUtil.fullyDelete(untarDir)) {
      throw new IOException("Could not delete directory '" + untarDir + "'");
    }
    FileUtil.unTar(tarFile, untarDir);

    String parentDir = untarDir.getCanonicalPath() + Path.SEPARATOR + "name";
    File testFile = new File(parentDir + Path.SEPARATOR + "version");
    Assert.assertTrue(testFile.exists());
    Assert.assertEquals(0, testFile.length());
    String imageDir = parentDir + Path.SEPARATOR + "image";
    testFile = new File(imageDir + Path.SEPARATOR + "fsimage");
    Assert.assertTrue(testFile.exists());
    Assert.assertEquals(157, testFile.length());
    String currentDir = parentDir + Path.SEPARATOR + "current";
    testFile = new File(currentDir + Path.SEPARATOR + "fsimage");
    Assert.assertTrue(testFile.exists());
    Assert.assertEquals(4331, testFile.length());
    testFile = new File(currentDir + Path.SEPARATOR + "edits");
    Assert.assertTrue(testFile.exists());
    Assert.assertEquals(1033, testFile.length());
    testFile = new File(currentDir + Path.SEPARATOR + "fstime");
    Assert.assertTrue(testFile.exists());
    Assert.assertEquals(8, testFile.length());
  }
Example #7
  @Before
  public void createHDFS() {
    try {
      Configuration hdConf = new Configuration();

      File baseDir = new File("./target/hdfs/hdfsTest").getAbsoluteFile();
      FileUtil.fullyDelete(baseDir);
      hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
      MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
      hdfsCluster = builder.build();

      hdfsURI =
          "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";

      hdPath = new org.apache.hadoop.fs.Path("/test");
      hdfs = hdPath.getFileSystem(hdConf);
      FSDataOutputStream stream = hdfs.create(hdPath);
      for (int i = 0; i < 10; i++) {
        stream.write("Hello HDFS\n".getBytes());
      }
      stream.close();

    } catch (Throwable e) {
      e.printStackTrace();
      Assert.fail("Test failed " + e.getMessage());
    }
  }
Example #8
 private void runTestOnTable(Table table)
     throws IOException, InterruptedException, ClassNotFoundException {
   Job job = null;
   try {
     LOG.info("Before map/reduce startup");
     job = new Job(table.getConfiguration(), "process column contents");
     job.setNumReduceTasks(1);
     Scan scan = new Scan();
     scan.addFamily(INPUT_FAMILY);
     TableMapReduceUtil.initTableMapperJob(
         table.getName(),
         scan,
         MultithreadedTableMapper.class,
         ImmutableBytesWritable.class,
         Put.class,
         job);
     MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class);
     MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS);
     TableMapReduceUtil.initTableReducerJob(
         table.getName().getNameAsString(), IdentityTableReducer.class, job);
     FileOutputFormat.setOutputPath(job, new Path("test"));
     LOG.info("Started " + table.getName());
     assertTrue(job.waitForCompletion(true));
     LOG.info("After map/reduce completion");
     // verify map-reduce results
     verify(table.getName());
   } finally {
     table.close();
     if (job != null) {
       FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir")));
     }
   }
 }
Example #9
  private void testMapFileOutputCommitterInternal(int version) throws Exception {
    JobConf conf = new JobConf();
    FileOutputFormat.setOutputPath(conf, outDir);
    conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
    conf.setInt(
        org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
            .FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
        version);
    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
    FileOutputCommitter committer = new FileOutputCommitter();

    // setup
    committer.setupJob(jContext);
    committer.setupTask(tContext);

    // write output
    MapFileOutputFormat theOutputFormat = new MapFileOutputFormat();
    RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(null, conf, partFile, null);
    writeMapFileOutput(theRecordWriter, tContext);

    // do commit
    if (committer.needsTaskCommit(tContext)) {
      committer.commitTask(tContext);
    }
    committer.commitJob(jContext);

    // validate output
    validateMapFileOutputContent(FileSystem.get(conf), outDir);
    FileUtil.fullyDelete(new File(outDir.toString()));
  }
Example #10
  private void testAbortInternal(int version) throws IOException, InterruptedException {
    JobConf conf = new JobConf();
    FileOutputFormat.setOutputPath(conf, outDir);
    conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
    conf.setInt(
        org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
            .FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
        version);
    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
    FileOutputCommitter committer = new FileOutputCommitter();

    // do setup
    committer.setupJob(jContext);
    committer.setupTask(tContext);

    // write output
    TextOutputFormat theOutputFormat = new TextOutputFormat();
    RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(null, conf, partFile, null);
    writeOutput(theRecordWriter, tContext);

    // do abort
    committer.abortTask(tContext);
    File out = new File(outDir.toUri().getPath());
    Path workPath = committer.getWorkPath(tContext, outDir);
    File wp = new File(workPath.toUri().getPath());
    File expectedFile = new File(wp, partFile);
    assertFalse("task temp dir still exists", expectedFile.exists());

    committer.abortJob(jContext, JobStatus.State.FAILED);
    expectedFile = new File(out, FileOutputCommitter.TEMP_DIR_NAME);
    assertFalse("job temp dir still exists", expectedFile.exists());
    assertEquals("Output directory not empty", 0, out.listFiles().length);
    FileUtil.fullyDelete(out);
  }
Example #11
 @Test(timeout = 30000)
 public void testFailFullyDeleteGrantPermissions() throws IOException {
   setupDirsAndNonWritablePermissions();
   boolean ret = FileUtil.fullyDelete(new MyFile(del), true);
   // this time the directories with revoked permissions *should* be deleted:
   validateAndSetWritablePermissions(false, ret);
 }
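The test above uses the two-argument fullyDelete(File, boolean) overload, whose second argument asks fullyDelete to try granting permissions before deleting. A hedged cleanup sketch built on that overload might look like this; the directory path, class name, and helper name are made up.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public final class ForceCleanup {
  /** Delete dir even if some of its entries are not writable. */
  public static void deleteEvenIfReadOnly(File dir) throws IOException {
    // tryGrantPermissions=true: attempt to restore write permissions before deleting.
    if (dir.exists() && !FileUtil.fullyDelete(dir, true)) {
      throw new IOException("Could not delete " + dir);
    }
  }

  public static void main(String[] args) throws IOException {
    deleteEvenIfReadOnly(new File("/tmp/non-writable-test-dir")); // placeholder path
  }
}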
Example #12
  private void testMapOnlyNoOutputInternal(int version) throws Exception {
    JobConf conf = new JobConf();
    // This is not set on purpose. FileOutputFormat.setOutputPath(conf, outDir);
    conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
    conf.setInt(
        org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
            .FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
        version);
    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
    FileOutputCommitter committer = new FileOutputCommitter();

    // setup
    committer.setupJob(jContext);
    committer.setupTask(tContext);

    if (committer.needsTaskCommit(tContext)) {
      // do commit
      committer.commitTask(tContext);
    }
    committer.commitJob(jContext);

    // validate output
    FileUtil.fullyDelete(new File(outDir.toString()));
  }
Example #13
  @Test
  public void testJob5D() throws Exception {
    final String input = "src/test/resources/data/6d_cuboid/";
    final String output = "target/test-output/5d_cuboid";
    final String cubeName = "test_kylin_cube_with_slr_1_new_segment";
    String segmentName = "20130331080000_20131212080000";
    String jobname = "5d_cuboid";
    String level = "2";

    FileUtil.fullyDelete(new File(output));

    String[] args = {
      "-input",
      input,
      "-cubename",
      cubeName,
      "-segmentname",
      segmentName,
      "-output",
      output,
      "-jobname",
      jobname,
      "-level",
      level
    };
    assertEquals("Job failed", 0, ToolRunner.run(conf, new NDCuboidJob(), args));
  }
Example #14
  @BeforeClass
  public static void setup() throws Exception {
    String testDir = System.getProperty("test.data.dir", "./");
    testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();

    warehousedir = new Path(workDir + "/warehouse");

    // Run hive metastore server
    t = new Thread(new RunMS());
    t.start();

    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    Configuration conf = new Configuration(true);
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    fs.mkdirs(warehousedir);

    initializeSetup();
  }
Example #15
  public void testJobRetire() throws Exception {
    MiniMRCluster mr = null;
    try {
      JobConf conf = new JobConf();
      mr = startCluster(conf, 1);

      JobConf jobConf = mr.createJobConf();
      JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker();

      Path inDir = new Path(testDir, "input1");
      Path outDir = new Path(testDir, "output1");

      JobID id1 = validateJobRetire(jobConf, inDir, outDir, jobtracker);

      outDir = new Path(testDir, "output2");
      JobID id2 = validateJobRetire(jobConf, inDir, outDir, jobtracker);

      assertNull("Job not removed from cache", jobtracker.getJobStatus(id1));

      assertEquals("Total job in cache not correct", 1, jobtracker.getAllJobs().length);
    } finally {
      if (mr != null) {
        mr.shutdown();
      }
      FileUtil.fullyDelete(new File(testDir.toString()));
    }
  }
Example #16
  public void testAbort() throws IOException {
    JobConf job = new JobConf();
    setConfForFileOutputCommitter(job);
    JobContext jContext = new JobContextImpl(job, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
    FileOutputCommitter committer = new FileOutputCommitter();
    FileOutputFormat.setWorkOutputPath(job, committer.getTempTaskOutputPath(tContext));

    // do setup
    committer.setupJob(jContext);
    committer.setupTask(tContext);
    String file = "test.txt";

    // A reporter that does nothing
    Reporter reporter = Reporter.NULL;
    // write output
    FileSystem localFs = FileSystem.getLocal(job);
    TextOutputFormat theOutputFormat = new TextOutputFormat();
    RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs, job, file, reporter);
    writeOutput(theRecordWriter, reporter);

    // do abort
    committer.abortTask(tContext);
    File expectedFile =
        new File(new Path(committer.getTempTaskOutputPath(tContext), file).toString());
    assertFalse("task temp dir still exists", expectedFile.exists());

    committer.abortJob(jContext, JobStatus.State.FAILED);
    expectedFile = new File(new Path(outDir, FileOutputCommitter.TEMP_DIR_NAME).toString());
    assertFalse("job temp dir still exists", expectedFile.exists());
    assertEquals("Output directory not empty", 0, new File(outDir.toString()).listFiles().length);
    FileUtil.fullyDelete(new File(outDir.toString()));
  }
Example #17
  // Mostly for setting up the symlinks. Note that when we set up the distributed
  // cache, we didn't create the symlinks. That is done on a per-task basis
  // by the currently executing task.
  public static void setupWorkDir(JobConf conf) throws IOException {
    File workDir = new File(".").getAbsoluteFile();
    FileUtil.fullyDelete(workDir);
    if (DistributedCache.getSymlink(conf)) {
      URI[] archives = DistributedCache.getCacheArchives(conf);
      URI[] files = DistributedCache.getCacheFiles(conf);
      Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
      Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
      if (archives != null) {
        for (int i = 0; i < archives.length; i++) {
          String link = archives[i].getFragment();
          if (link != null) {
            link = workDir.toString() + Path.SEPARATOR + link;
            File flink = new File(link);
            if (!flink.exists()) {
              FileUtil.symLink(localArchives[i].toString(), link);
            }
          }
        }
      }
      if (files != null) {
        for (int i = 0; i < files.length; i++) {
          String link = files[i].getFragment();
          if (link != null) {
            link = workDir.toString() + Path.SEPARATOR + link;
            File flink = new File(link);
            if (!flink.exists()) {
              FileUtil.symLink(localFiles[i].toString(), link);
            }
          }
        }
      }
    }
    File jobCacheDir = null;
    if (conf.getJar() != null) {
      jobCacheDir = new File(new Path(conf.getJar()).getParent().toString());
    }

    // Create symlinks for all the files in the job cache dir in the current
    // working dir, for streaming.
    try {
      DistributedCache.createAllSymlink(conf, jobCacheDir, workDir);
    } catch (IOException ie) {
      // Do not exit even if symlinks have not been created.
      LOG.warn(StringUtils.stringifyException(ie));
    }
    // add java.io.tmpdir given by mapred.child.tmp
    String tmp = conf.get("mapred.child.tmp", "./tmp");
    Path tmpDir = new Path(tmp);

    // If the temp directory path is not absolute,
    // prepend it with workDir.
    if (!tmpDir.isAbsolute()) {
      tmpDir = new Path(workDir.toString(), tmp);
      FileSystem localFs = FileSystem.getLocal(conf);
      if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) {
        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
      }
    }
  }
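setupWorkDir above leans on FileUtil.symLink to materialize the cache links. A stripped-down sketch of that single call, outside the distributed-cache machinery, is shown below; both paths are placeholders.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class SymlinkSketch {
  public static void main(String[] args) throws IOException {
    File workDir = new File(".").getAbsoluteFile();
    String target = "/tmp/cached/part-00000";              // hypothetical cached file
    String link = workDir + File.separator + "part-00000"; // link created in the work dir

    File flink = new File(link);
    if (!flink.exists()) {
      // symLink returns 0 on success, a non-zero code otherwise.
      int rc = FileUtil.symLink(target, link);
      System.out.println("symLink returned " + rc);
    }
  }
}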
Example #18
 @Override
 protected void tearDown() throws IOException {
   new File(firstCacheFile.toString()).delete();
   new File(secondCacheFile.toString()).delete();
   new File(firstCacheFilePublic.toString()).delete();
   new File(secondCacheFilePublic.toString()).delete();
   FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
 }
Example #19
 @AfterClass
 public void tearDown() throws IOException {
   // Clean up the staging and/or output directories if necessary
   File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
   if (testRootDir.exists()) {
     FileUtil.fullyDelete(testRootDir);
   }
 }
Example #20
 @AfterClass
 public static void teardown() {
   if (yarnCluster != null) {
     yarnCluster.stop();
     yarnCluster = null;
   }
   FileUtil.fullyDelete(localFSDirBase);
 }
Example #21
  /**
   * Initialize the data structures used by this class. IMPORTANT NOTE: This method must be called
   * once before calling any other public method on this class.
   *
   * <p>Creates a singleton master populated storage directory for a Namenode (contains edits,
   * fsimage, version, and time files) and a Datanode (contains version and block files). This can
   * be a lengthy operation.
   */
  public static void initialize() throws Exception {
    createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
    Configuration config = new Configuration();
    config.set("dfs.name.dir", namenodeStorage.toString());
    config.set("dfs.data.dir", datanodeStorage.toString());
    MiniDFSCluster cluster = null;
    try {
      // format data-node
      createEmptyDirs(new String[] {datanodeStorage.toString()});

      // format and start NameNode and start DataNode
      NameNode.format(config);
      cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);

      NameNode namenode = cluster.getNameNode();
      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
      namenodeStorageFsscTime = namenode.versionRequest().getCTime();

      FileSystem fs = FileSystem.get(config);
      Path baseDir = new Path("/TestUpgrade");
      fs.mkdirs(baseDir);

      // write some files
      int bufferSize = 4096;
      byte[] buffer = new byte[bufferSize];
      for (int i = 0; i < bufferSize; i++) buffer[i] = (byte) ('0' + i % 50);
      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);

      // save image
      namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      namenode.saveNamespace();
      namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      // write more files
      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
    } finally {
      // shutdown
      if (cluster != null) cluster.shutdown();
      FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
      FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
    }
    namenodeStorageChecksum = checksumContents(NAME_NODE, new File(namenodeStorage, "current"));
    datanodeStorageChecksum = checksumContents(DATA_NODE, new File(datanodeStorage, "current"));
  }
Example #22
 /** Delete all files and directories in the trash directories. */
 public void clearTrash() {
   for (StorageDirectory sd : storageDirs) {
     File trashRoot = getTrashRootDir(sd);
     Preconditions.checkState(!(trashRoot.exists() && sd.getPreviousDir().exists()));
     FileUtil.fullyDelete(trashRoot);
     LOG.info("Cleared trash for storage directory " + sd);
   }
 }
Example #23
 @Test(timeout = 30000)
 public void testFullyDelete() throws IOException {
   setupDirs();
   boolean ret = FileUtil.fullyDelete(del);
   Assert.assertTrue(ret);
   Assert.assertFalse(del.exists());
   validateTmpDir();
 }
Example #24
 /** Create empty directories. If a specified directory already exists then it is first removed. */
 public static void createEmptyDirs(String[] dirs) throws IOException {
   for (String d : dirs) {
     File dir = new File(d);
     if (dir.exists()) {
       FileUtil.fullyDelete(dir);
     }
     dir.mkdirs();
   }
 }
Example #25
  /**
   * Create a directory to mimic the procfs file system's root.
   *
   * @param procfsRootDir root directory to create.
   * @throws IOException if the procfs root directory could not be deleted
   */
  public static void setupProcfsRootDir(File procfsRootDir) throws IOException {
    // Clean up any existing procfs root dir.
    if (procfsRootDir.exists()) {
      Assert.assertTrue(FileUtil.fullyDelete(procfsRootDir));
    }

    // create afresh
    Assert.assertTrue(procfsRootDir.mkdirs());
  }
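A test using setupProcfsRootDir would typically populate the fake root with per-process subdirectories afterwards. The sketch below is only illustrative: the pid, file name, and contents are invented, and the clean-then-create steps are inlined rather than calling the helper above.

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

import org.apache.hadoop.fs.FileUtil;

public class FakeProcfsSketch {
  public static void main(String[] args) throws IOException {
    File procfsRootDir = new File("target/fake-procfs"); // hypothetical location

    // Same cleanup-and-create steps as setupProcfsRootDir above, without the asserts.
    if (procfsRootDir.exists() && !FileUtil.fullyDelete(procfsRootDir)) {
      throw new IOException("Could not delete " + procfsRootDir);
    }
    if (!procfsRootDir.mkdirs()) {
      throw new IOException("Could not create " + procfsRootDir);
    }

    // One fake process directory with a placeholder stat file; real tests write
    // whatever the code under test expects to parse.
    File pidDir = new File(procfsRootDir, "100");
    if (!pidDir.mkdirs()) {
      throw new IOException("Could not create " + pidDir);
    }
    try (FileWriter w = new FileWriter(new File(pidDir, "stat"))) {
      w.write("100 (fake) S 1 100 100\n"); // placeholder contents
    }
  }
}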
Example #26
  private void testDirsFailures(boolean localORLogDirs) throws IOException {
    String dirType = localORLogDirs ? "local" : "log";
    String dirsProperty =
        localORLogDirs ? YarnConfiguration.NM_LOCAL_DIRS : YarnConfiguration.NM_LOG_DIRS;

    Configuration conf = new Configuration();
    // set disk health check interval to a small value (say 1 sec).
    conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, DISK_HEALTH_CHECK_INTERVAL);

    // If 2 out of the total 4 local-dirs fail OR if 2 out of the total 4
    // log-dirs fail, then the node's health status should become unhealthy.
    conf.setFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, 0.60F);

    if (yarnCluster != null) {
      yarnCluster.stop();
      FileUtil.fullyDelete(localFSDirBase);
      localFSDirBase.mkdirs();
    }
    LOG.info("Starting up YARN cluster");
    yarnCluster =
        new MiniYARNCluster(TestDiskFailures.class.getName(), 1, numLocalDirs, numLogDirs);
    yarnCluster.init(conf);
    yarnCluster.start();

    NodeManager nm = yarnCluster.getNodeManager(0);
    LOG.info("Configured nm-" + dirType + "-dirs=" + nm.getConfig().get(dirsProperty));
    dirsHandler = nm.getNodeHealthChecker().getDiskHandler();
    List<String> list = localORLogDirs ? dirsHandler.getLocalDirs() : dirsHandler.getLogDirs();
    String[] dirs = list.toArray(new String[list.size()]);
    Assert.assertEquals("Number of nm-" + dirType + "-dirs is wrong.", numLocalDirs, dirs.length);
    String expectedDirs = StringUtils.join(",", list);
    // validate the health of disks initially
    verifyDisksHealth(localORLogDirs, expectedDirs, true);

    // Make 1 nm-local-dir fail and verify that the nodemanager can identify
    // the disk failure(s) and can update the list of good nm-local-dirs.
    prepareDirToFail(dirs[2]);
    expectedDirs = dirs[0] + "," + dirs[1] + "," + dirs[3];
    verifyDisksHealth(localORLogDirs, expectedDirs, true);

    // Now, make 1 more nm-local-dir/nm-log-dir fail and verify that the
    // nodemanager can identify the disk failures and can update the list of
    // good nm-local-dirs/nm-log-dirs and can update the overall health status
    // of the node to unhealthy.
    prepareDirToFail(dirs[0]);
    expectedDirs = dirs[1] + "," + dirs[3];
    verifyDisksHealth(localORLogDirs, expectedDirs, false);

    // Fail the remaining 2 local-dirs/log-dirs and verify if NM remains with
    // empty list of local-dirs/log-dirs and the overall health status is
    // unhealthy.
    prepareDirToFail(dirs[1]);
    prepareDirToFail(dirs[3]);
    expectedDirs = "";
    verifyDisksHealth(localORLogDirs, expectedDirs, false);
  }
Example #27
 /**
  * Delete the given path, which may be a file or a directory.
  *
  * @param p the path to delete
  * @param recursive whether to delete sub-directories
  * @return true if the file or directory and all its contents were deleted
  * @throws IOException if p is a non-empty directory and recursive is false
  */
 @Override
 public boolean delete(Path p, boolean recursive) throws IOException {
   File f = pathToFile(p);
   if (f.isFile()) {
     return f.delete();
   } else if (!recursive && f.isDirectory() && (FileUtil.listFiles(f).length != 0)) {
     throw new IOException("Directory " + f.toString() + " is not empty");
   }
   return FileUtil.fullyDelete(f);
 }
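A hedged usage sketch for the delete contract documented above, run against the local FileSystem; the directory path and class name are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path("/tmp/delete-demo"); // hypothetical directory
    fs.mkdirs(new Path(dir, "child"));

    try {
      // Non-recursive delete of a non-empty directory is expected to fail.
      fs.delete(dir, false);
    } catch (IOException expected) {
      System.out.println("non-recursive delete refused: " + expected.getMessage());
    }

    // recursive=true removes the directory and everything beneath it.
    boolean deleted = fs.delete(dir, true);
    System.out.println("recursive delete: " + deleted);
  }
}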
Example #28
 private static MiniDFSCluster startMini(String testName) throws IOException {
   File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
   FileUtil.fullyDelete(baseDir);
   Configuration conf = new Configuration();
   conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
   MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
   MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
   hdfsCluster.waitActive();
   return hdfsCluster;
 }
Example #29
  public static void main(String[] args) throws Exception {
    // Set the arguments to fixed values.
    String in = "input/Employee";
    String out = Util.getJobOutputDirPath(SortByDeptAndAgeUsingComparator.class);
    args = new String[] {in, out};
    // Delete the output directory beforehand, because the job fails with a
    // FileAlreadyExistsException if it already exists.
    FileUtil.fullyDelete(new File(out));

    int res = ToolRunner.run(new SortByDeptAndAgeUsingComparator(), args);
    System.exit(res);
  }
Example #30
 private void recreateDir(File dir) throws IOException {
   // Lily change: take clearData flag into account
   if (clearData && dir.exists()) {
     FileUtil.fullyDelete(dir);
   }
   try {
     dir.mkdirs();
   } catch (SecurityException e) {
     throw new IOException("creating dir: " + dir, e);
   }
 }