private void makeDoneSubdir(Path path) throws IOException {
   try {
     doneDirFc.getFileStatus(path);
     existingDoneSubdirs.add(path);
   } catch (FileNotFoundException fnfE) {
     try {
       FsPermission fsp = new FsPermission(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION);
       doneDirFc.mkdir(path, fsp, true);
       FileStatus fsStatus = doneDirFc.getFileStatus(path);
       /* LOG.info("Perms after creating "+fsStatus.getPermission().toShort()+", Expected: "+fsp.toShort()) */
       LOG.perms_after_creating_expected(
               String.valueOf(fsStatus.getPermission().toShort()), String.valueOf(fsp.toShort()))
           .tag("methodCall")
           .info();
       if (fsStatus.getPermission().toShort() != fsp.toShort()) {
         /* LOG.info("Explicitly setting permissions to : "+fsp.toShort()+", "+fsp) */
         LOG.explicitly_setting_permissions(String.valueOf(fsp.toShort()), fsp.toString())
             .tag("methodCall")
             .info();
         doneDirFc.setPermission(path, fsp);
       }
       existingDoneSubdirs.add(path);
     } catch (FileAlreadyExistsException faeE) { // Nothing to do.
     }
   }
 }
  private String getJobSummary(FileContext fc, Path path) throws IOException {
    Path qPath = fc.makeQualified(path);
    FSDataInputStream in = fc.open(qPath);
    try {
      return in.readUTF();
    } finally {
      // Close the stream even if readUTF throws, so it is never leaked.
      in.close();
    }
  }
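readUTF only round-trips data written with writeUTF, so the summary file read above is assumed to have been produced by a matching write. A minimal sketch of that writing side; summaryPath and summaryText are illustrative names, not identifiers from this listing:

  // Illustrative sketch: summaryPath and summaryText are assumed names.
  FSDataOutputStream out =
      fc.create(summaryPath, EnumSet.of(CreateFlag.CREATE), Options.CreateOpts.createParent());
  try {
    out.writeUTF(summaryText);
  } finally {
    out.close();
  }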
 @Before
 public void setup() throws Exception {
   FileContext files = FileContext.getLocalFSFileContext();
   Path workSpacePath = new Path(workSpace.getAbsolutePath());
   files.mkdir(workSpacePath, null, true);
   FileUtil.chmod(workSpace.getAbsolutePath(), "777");
   File localDir = new File(workSpace.getAbsoluteFile(), "localDir");
   files.mkdir(new Path(localDir.getAbsolutePath()), new FsPermission("777"), false);
   File logDir = new File(workSpace.getAbsoluteFile(), "logDir");
   files.mkdir(new Path(logDir.getAbsolutePath()), new FsPermission("777"), false);
   String exec_path = System.getProperty("container-executor.path");
   if (exec_path != null && !exec_path.isEmpty()) {
     Configuration conf = new Configuration(false);
     LOG.info("Setting " + YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH + "=" + exec_path);
     conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, exec_path);
     exec = new LinuxContainerExecutor();
     exec.setConf(conf);
     conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath());
     conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.getAbsolutePath());
     dirsHandler = new LocalDirsHandlerService();
     dirsHandler.init(conf);
   }
   appSubmitter = System.getProperty("application.submitter");
   if (appSubmitter == null || appSubmitter.isEmpty()) {
     appSubmitter = "nobody";
   }
 }
 public LogReader(Configuration conf, Path remoteAppLogFile) throws IOException {
   FileContext fileContext = FileContext.getFileContext(conf);
   this.fsDataIStream = fileContext.open(remoteAppLogFile);
   reader =
       new TFile.Reader(
           this.fsDataIStream, fileContext.getFileStatus(remoteAppLogFile).getLen(), conf);
   this.scanner = reader.createScanner();
 }
Example 5
  /**
   * Read a script file of the form: lines of text with duration in seconds, read probability and
   * write probability, separated by white space.
   *
   * @param filename Script file
   * @return 0 if successful, -1 if not
   * @throws IOException if errors with file IO
   */
  protected static int loadScriptFile(String filename, boolean readLocally) throws IOException {

    FileContext fc;
    if (readLocally) { // read locally - program is run without MR
      fc = FileContext.getLocalFSFileContext();
    } else {
      fc = FileContext.getFileContext(); // use default file system
    }
    DataInputStream in = null;
    try {
      in = fc.open(new Path(filename));
    } catch (IOException e) {
      System.err.println("Unable to open scriptFile: " + filename);

      System.exit(-1);
    }
    InputStreamReader inr = new InputStreamReader(in);

    BufferedReader br = new BufferedReader(inr);
    ArrayList<Long> duration = new ArrayList<Long>();
    ArrayList<Double> readProb = new ArrayList<Double>();
    ArrayList<Double> writeProb = new ArrayList<Double>();
    int lineNum = 0;

    String line;
    // Read script, parse values, build array of duration, read and write probs

    try {
      while ((line = br.readLine()) != null) {
        lineNum++;
        if (line.startsWith("#") || line.isEmpty()) // skip comments and blanks
        continue;

        parseScriptLine(line, duration, readProb, writeProb);
      }
    } catch (IllegalArgumentException e) {
      System.err.println("Line: " + lineNum + ", " + e.getMessage());
      return -1;
    } finally {
      IOUtils.cleanup(LOG, br);
    }

    // Copy vectors to arrays of values, to avoid autoboxing overhead later
    durations = new long[duration.size()];
    readProbs = new double[readProb.size()];
    writeProbs = new double[writeProb.size()];

    for (int i = 0; i < durations.length; i++) {
      durations[i] = duration.get(i);
      readProbs[i] = readProb.get(i);
      writeProbs[i] = writeProb.get(i);
    }

    if (durations[0] == 0) {
      System.err.println("Initial duration set to 0. Will loop until stopped manually.");
    }

    return 0;
  }
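For reference, a script in the format described above might look like the following; the values are purely illustrative, and a first duration of 0 makes the generator loop until stopped manually, as the warning at the end of loadScriptFile notes:

  # duration-in-seconds  read-probability  write-probability
  300   0.3  0.1
  1200  0.5  0.2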
Example 6
 @BeforeClass
 public static void setup()
     throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException,
         IOException {
   localFS = FileContext.getLocalFSFileContext();
   localFS.delete(new Path(localFSDirBase.getAbsolutePath()), true);
   localFSDirBase.mkdirs();
   // Do not start cluster here
 }
Example 7
 public static Path getPreviousJobHistoryPath(
     Configuration conf, ApplicationAttemptId applicationAttemptId) throws IOException {
   String jobId = TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
   String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
   Path histDirPath = FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
   FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
   return fc.makeQualified(
       JobHistoryUtils.getStagingJobHistoryFile(
           histDirPath, jobId, (applicationAttemptId.getAttemptId() - 1)));
 }
Example 8
  /**
   * Looks for the dirs to clean. The folder structure is YYYY/MM/DD/Serial so we can use that to
   * more efficiently find the directories to clean by comparing the cutoff timestamp with the
   * timestamp from the folder structure.
   *
   * @param fc done dir FileContext
   * @param root folder for completed jobs
   * @param cutoff The cutoff for the max history age
   * @return The list of directories for cleaning
   * @throws IOException on any error listing the directory structure
   */
  public static List<FileStatus> getHistoryDirsForCleaning(FileContext fc, Path root, long cutoff)
      throws IOException {
    List<FileStatus> fsList = new ArrayList<FileStatus>();
    Calendar cCal = Calendar.getInstance();
    cCal.setTimeInMillis(cutoff);
    int cYear = cCal.get(Calendar.YEAR);
    int cMonth = cCal.get(Calendar.MONTH) + 1;
    int cDate = cCal.get(Calendar.DATE);

    RemoteIterator<FileStatus> yearDirIt = fc.listStatus(root);
    while (yearDirIt.hasNext()) {
      FileStatus yearDir = yearDirIt.next();
      try {
        int year = Integer.parseInt(yearDir.getPath().getName());
        if (year <= cYear) {
          RemoteIterator<FileStatus> monthDirIt = fc.listStatus(yearDir.getPath());
          while (monthDirIt.hasNext()) {
            FileStatus monthDir = monthDirIt.next();
            try {
              int month = Integer.parseInt(monthDir.getPath().getName());
              // If we only checked the month here, then something like 07/2013
              // would incorrectly not pass when the cutoff is 06/2014
              if (year < cYear || month <= cMonth) {
                RemoteIterator<FileStatus> dateDirIt = fc.listStatus(monthDir.getPath());
                while (dateDirIt.hasNext()) {
                  FileStatus dateDir = dateDirIt.next();
                  try {
                    int date = Integer.parseInt(dateDir.getPath().getName());
                    // If we only checked the date here, then something like
                    // 07/21/2013 would incorrectly not pass when the cutoff is
                    // 08/20/2013 or 07/20/2012
                    if (year < cYear || month < cMonth || date <= cDate) {
                      fsList.addAll(remoteIterToList(fc.listStatus(dateDir.getPath())));
                    }
                  } catch (NumberFormatException nfe) {
                    // the directory didn't fit the format we're looking for so
                    // skip the dir
                  }
                }
              }
            } catch (NumberFormatException nfe) {
              // the directory didn't fit the format we're looking for so skip
              // the dir
            }
          }
        }
      } catch (NumberFormatException nfe) {
        // the directory didn't fit the format we're looking for so skip the dir
      }
    }
    return fsList;
  }
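A hedged sketch of how a cleaner might drive this helper; doneDirFc, doneDirPrefixPath and maxHistoryAgeMs are illustrative names, not fields defined in this listing:

  long cutoff = System.currentTimeMillis() - maxHistoryAgeMs;
  // Everything returned sits under a YYYY/MM/DD branch at or before the cutoff date,
  // so each entry is a candidate for deletion or archival.
  List<FileStatus> oldDirs = getHistoryDirsForCleaning(doneDirFc, doneDirPrefixPath, cutoff);
  for (FileStatus dir : oldDirs) {
    LOG.info("History dir eligible for cleaning: " + dir.getPath());
  }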
 private static List<FileStatus> scanDirectory(Path path, FileContext fc, PathFilter pathFilter)
     throws IOException {
   path = fc.makeQualified(path);
   List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
   RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
   while (fileStatusIter.hasNext()) {
     FileStatus fileStatus = fileStatusIter.next();
     Path filePath = fileStatus.getPath();
     if (fileStatus.isFile() && pathFilter.accept(filePath)) {
       jhStatusList.add(fileStatus);
     }
   }
   return jhStatusList;
 }
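scanDirectory relies on the supplied PathFilter to decide which files count. A minimal filter that keeps only files ending in an assumed ".jhist" suffix could look like this; fc and intermediateDonePath are illustrative names:

  PathFilter jhistFilter = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      // Keep only files whose name ends with the (assumed) history-file suffix.
      return p.getName().endsWith(".jhist");
    }
  };
  List<FileStatus> historyFiles = scanDirectory(intermediateDonePath, fc, jhistFilter);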
 @Override
 public void run() {
   if (LOG.isDebugEnabled()) {
     LOG.debug(this);
   }
   boolean error = false;
   if (null == user) {
     if (baseDirs == null || baseDirs.size() == 0) {
       LOG.debug("NM deleting absolute path : " + subDir);
       try {
         lfs.delete(subDir, true);
       } catch (IOException e) {
         error = true;
         LOG.warn("Failed to delete " + subDir);
       }
     } else {
       for (Path baseDir : baseDirs) {
         Path del = subDir == null ? baseDir : new Path(baseDir, subDir);
         LOG.debug("NM deleting path : " + del);
         try {
           lfs.delete(del, true);
         } catch (IOException e) {
           error = true;
           LOG.warn("Failed to delete " + subDir);
         }
       }
     }
   } else {
     try {
       LOG.debug("Deleting path: [" + subDir + "] as user: [" + user + "]");
       if (baseDirs == null || baseDirs.size() == 0) {
         delService.exec.deleteAsUser(user, subDir, (Path[]) null);
       } else {
         delService.exec.deleteAsUser(user, subDir, baseDirs.toArray(new Path[0]));
       }
     } catch (IOException e) {
       error = true;
       LOG.warn("Failed to delete as user " + user, e);
     } catch (InterruptedException e) {
       error = true;
       LOG.warn("Failed to delete as user " + user, e);
     }
   }
   if (error) {
     setSuccess(!error);
   }
   fileDeletionTaskFinished();
 }
 private void moveToDoneNow(final Path src, final Path target) throws IOException {
   /* LOG.info("Moving "+src.toString()+" to "+target.toString()) */
   LOG.moving(String.valueOf(src.toString()), String.valueOf(target.toString()))
       .tag("methodCall")
       .info();
   intermediateDoneDirFc.rename(src, target, Options.Rename.NONE);
 }
 static final FileContext getLfs() {
   try {
     return FileContext.getLocalFSFileContext();
   } catch (UnsupportedFileSystemException e) {
     throw new RuntimeException(e);
   }
 }
Example 13
 /**
  * Obtain the tokens needed by the job and put them in the UGI
  *
  * @param conf
  */
 protected void downloadTokensAndSetupUGI(Configuration conf) {
   try {
     this.currentUser = UserGroupInformation.getCurrentUser();
     if (UserGroupInformation.isSecurityEnabled()) {
       // Read the file-system tokens from the localized tokens-file.
       Path jobSubmitDir =
           FileContext.getLocalFSFileContext()
               .makeQualified(
                   new Path(new File(DragonJobConfig.JOB_SUBMIT_DIR).getAbsolutePath()));
       Path jobTokenFile = new Path(jobSubmitDir, DragonJobConfig.APPLICATION_TOKENS_FILE);
       fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
       LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile=" + jobTokenFile);
       for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               "Token of kind "
                   + tk.getKind()
                   + "in current ugi in the AppMaster for service "
                   + tk.getService());
         }
         currentUser.addToken(tk); // For use by AppMaster itself.
       }
     }
   } catch (IOException e) {
     throw new YarnException(e);
   }
 }
Example 14
  public HdfsDirectory(Path hdfsDirPath, LockFactory lockFactory, Configuration configuration)
      throws IOException {
    super(lockFactory);
    this.hdfsDirPath = hdfsDirPath;
    this.configuration = configuration;
    fileSystem = FileSystem.get(hdfsDirPath.toUri(), configuration);
    fileContext = FileContext.getFileContext(hdfsDirPath.toUri(), configuration);

    if (fileSystem instanceof DistributedFileSystem) {
      // Make sure dfs is not in safe mode
      while (((DistributedFileSystem) fileSystem).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
        LOG.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e) {
          Thread.interrupted();
          // continue
        }
      }
    }

    try {
      if (!fileSystem.exists(hdfsDirPath)) {
        boolean success = fileSystem.mkdirs(hdfsDirPath);
        if (!success) {
          throw new RuntimeException("Could not create directory: " + hdfsDirPath);
        }
      }
    } catch (Exception e) {
      org.apache.solr.common.util.IOUtils.closeQuietly(fileSystem);
      throw new RuntimeException("Problem creating directory: " + hdfsDirPath, e);
    }
  }
Example 15
 protected static void printResults(PrintStream out) throws UnsupportedFileSystemException {
   out.println(
       "Result of running LoadGenerator against fileSystem: "
           + FileContext.getFileContext().getDefaultFileSystem().getUri());
   if (numOfOps[OPEN] != 0) {
     out.println(
         "Average open execution time: " + (double) executionTime[OPEN] / numOfOps[OPEN] + "ms");
   }
   if (numOfOps[LIST] != 0) {
     out.println(
         "Average list execution time: " + (double) executionTime[LIST] / numOfOps[LIST] + "ms");
   }
   if (numOfOps[DELETE] != 0) {
     out.println(
         "Average deletion execution time: "
             + (double) executionTime[DELETE] / numOfOps[DELETE]
             + "ms");
     out.println(
         "Average create execution time: "
             + (double) executionTime[CREATE] / numOfOps[CREATE]
             + "ms");
     out.println(
         "Average write_close execution time: "
             + (double) executionTime[WRITE_CLOSE] / numOfOps[WRITE_CLOSE]
             + "ms");
   }
   if (totalTime != 0) {
     out.println("Average operations per second: " + (double) totalOps / totalTime + "ops/s");
   }
   out.println();
 }
Example 16
  @Before
  public void setUp() throws Exception {
    resourceMgrDelegate = mock(ResourceMgrDelegate.class);
    conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
    clientCache = new ClientCache(conf, resourceMgrDelegate);
    clientCache = spy(clientCache);
    yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache);
    yarnRunner = spy(yarnRunner);
    submissionContext = mock(ApplicationSubmissionContext.class);
    doAnswer(
            new Answer<ApplicationSubmissionContext>() {
              @Override
              public ApplicationSubmissionContext answer(InvocationOnMock invocation)
                  throws Throwable {
                return submissionContext;
              }
            })
        .when(yarnRunner)
        .createApplicationSubmissionContext(
            any(Configuration.class), any(String.class), any(Credentials.class));

    appId = recordFactory.newRecordInstance(ApplicationId.class);
    appId.setClusterTimestamp(System.currentTimeMillis());
    appId.setId(1);
    jobId = TypeConverter.fromYarn(appId);
    if (testWorkDir.exists()) {
      FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
    }
    testWorkDir.mkdirs();
  }
  /**
   * Clean up older history files.
   *
   * @throws IOException on any error trying to remove the entries.
   */
  @SuppressWarnings("unchecked")
  void clean() throws IOException {
    // TODO this should be replaced by something that knows about the directory
    // structure and will put less of a load on HDFS.
    long cutoff = System.currentTimeMillis() - maxHistoryAge;
    boolean halted = false;
    // TODO Delete YYYY/MM/DD directories.
    List<FileStatus> serialDirList = findTimestampedDirectories();
    // Sort in ascending order. Relies on YYYY/MM/DD/Serial
    Collections.sort(serialDirList);
    for (FileStatus serialDir : serialDirList) {
      List<FileStatus> historyFileList =
          scanDirectoryForHistoryFiles(serialDir.getPath(), doneDirFc);
      for (FileStatus historyFile : historyFileList) {
        JobIndexInfo jobIndexInfo =
            FileNameIndexUtils.getIndexInfo(historyFile.getPath().getName());
        long effectiveTimestamp = getEffectiveTimestamp(jobIndexInfo.getFinishTime(), historyFile);
        if (effectiveTimestamp <= cutoff) {
          HistoryFileInfo fileInfo = this.jobListCache.get(jobIndexInfo.getJobId());
          if (fileInfo == null) {
            String confFileName =
                JobHistoryUtils.getIntermediateConfFileName(jobIndexInfo.getJobId());

            fileInfo =
                new HistoryFileInfo(
                    historyFile.getPath(),
                    new Path(historyFile.getPath().getParent(), confFileName),
                    null,
                    jobIndexInfo,
                    true);
          }
          deleteJobFromDone(fileInfo);
        } else {
          halted = true;
          break;
        }
      }
      if (!halted) {
        doneDirFc.delete(doneDirFc.makeQualified(serialDir.getPath()), true);
        removeDirectoryFromSerialNumberIndex(serialDir.getPath());
        existingDoneSubdirs.remove(serialDir.getPath());
      } else {
        break; // Don't scan any more directories.
      }
    }
  }
  // Equivalent of @Before for cluster-mode testing.
  private void initClusterModeTest() throws IOException {

    LOG = LogFactory.getLog(TestWriteRead.class);
    LOG.info("initClusterModeTest");

    conf = new Configuration();
    mfc = FileContext.getFileContext();
    mfs = FileSystem.get(conf);
  }
Example 19
 boolean stopFileCreated() {
   try {
     fc.getFileStatus(flagFile);
   } catch (FileNotFoundException e) {
     return false;
   } catch (IOException e) {
     LOG.error("Got error when checking if file exists:" + flagFile, e);
   }
   LOG.info("Flag file was created. Stopping the test.");
   return true;
 }
  private void mkdir(FileContext fc, Path path, FsPermission fsp) throws IOException {
    if (!fc.util().exists(path)) {
      try {
        fc.mkdir(path, fsp, true);

        FileStatus fsStatus = fc.getFileStatus(path);
        LOG.info(
            "Perms after creating "
                + fsStatus.getPermission().toShort()
                + ", Expected: "
                + fsp.toShort());
        if (fsStatus.getPermission().toShort() != fsp.toShort()) {
          LOG.info("Explicitly setting permissions to : " + fsp.toShort() + ", " + fsp);
          fc.setPermission(path, fsp);
        }
      } catch (FileAlreadyExistsException e) {
        LOG.info("Directory: [" + path + "] already exists.");
      }
    }
  }
Example 21
  // hasFlatFiles is just used to return a second value if you want
  // one. I would have used MutableBoxedBoolean if such had been provided.
  public static List<FileStatus> localGlobber(
      FileContext fc, Path root, String tail, PathFilter filter, AtomicBoolean hasFlatFiles)
      throws IOException {
    if (tail.equals("")) {
      return (listFilteredStatus(fc, root, filter));
    }

    if (tail.startsWith("/*")) {
      Path[] subdirs =
          filteredStat2Paths(remoteIterToList(fc.listStatus(root)), true, hasFlatFiles);

      List<List<FileStatus>> subsubdirs = new LinkedList<List<FileStatus>>();

      int subsubdirCount = 0;

      if (subdirs.length == 0) {
        return new LinkedList<FileStatus>();
      }

      String newTail = tail.substring(2);

      for (int i = 0; i < subdirs.length; ++i) {
        subsubdirs.add(localGlobber(fc, subdirs[i], newTail, filter, null));
        // subsubdirs.set(i, localGlobber(fc, subdirs[i], newTail, filter,
        // null));
        subsubdirCount += subsubdirs.get(i).size();
      }

      List<FileStatus> result = new LinkedList<FileStatus>();

      for (int i = 0; i < subsubdirs.size(); ++i) {
        result.addAll(subsubdirs.get(i));
      }

      return result;
    }

    if (tail.startsWith("/")) {
      int split = tail.indexOf('/', 1);

      if (split < 0) {
        return listFilteredStatus(fc, new Path(root, tail.substring(1)), filter);
      } else {
        String thisSegment = tail.substring(1, split);
        String newTail = tail.substring(split);
        return localGlobber(fc, new Path(root, thisSegment), newTail, filter, hasFlatFiles);
      }
    }

    throw new IOException("localGlobber: bad tail");
  }
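A hedged usage sketch: listing every file two wildcard levels below a root directory; doneDirFc and doneDirPrefixPath are illustrative names:

  // "/*/*" descends two wildcard levels before listing; a null filter keeps every
  // entry and a null hasFlatFiles skips the mismatch reporting.
  List<FileStatus> twoLevelsDown =
      localGlobber(doneDirFc, doneDirPrefixPath, "/*/*", null, null);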
Example 22
 /**
  * Main function called by tool runner. It first initializes data by parsing the command line
  * arguments. It then calls the loadGenerator
  */
 @Override
 public int run(String[] args) throws Exception {
   int exitCode = parseArgs(false, args);
   if (exitCode != 0) {
     return exitCode;
   }
   System.out.println(
       "Running LoadGenerator against fileSystem: "
           + FileContext.getFileContext().getDefaultFileSystem().getUri());
   exitCode = generateLoadOnNN();
   printResults(System.out);
   return exitCode;
 }
  @Test
  public void testContainerLaunch() throws IOException {
    if (!shouldRun()) {
      return;
    }

    File touchFile = new File(workSpace, "touch-file");
    int ret = runAndBlock("touch", touchFile.getAbsolutePath());

    assertEquals(0, ret);
    FileStatus fileStatus =
        FileContext.getLocalFSFileContext().getFileStatus(new Path(touchFile.getAbsolutePath()));
    assertEquals(appSubmitter, fileStatus.getOwner());
  }
Example 24
 private static List<FileStatus> listFilteredStatus(FileContext fc, Path root, PathFilter filter)
     throws IOException {
   List<FileStatus> fsList = remoteIterToList(fc.listStatus(root));
   if (filter == null) {
     return fsList;
   } else {
     List<FileStatus> filteredList = new LinkedList<FileStatus>();
     for (FileStatus fs : fsList) {
       if (filter.accept(fs.getPath())) {
         filteredList.add(fs);
       }
     }
     return filteredList;
   }
 }
  private void mkdir(FileContext fc, Path path, FsPermission fsp) throws IOException {
    if (!fc.util().exists(path)) {
      try {
        fc.mkdir(path, fsp, true);

        FileStatus fsStatus = fc.getFileStatus(path);
        /* LOG.info("Perms after creating "+fsStatus.getPermission().toShort()+", Expected: "+fsp.toShort()) */
        LOG.perms_after_creating_expected(
                String.valueOf(fsStatus.getPermission().toShort()), String.valueOf(fsp.toShort()))
            .tag("methodCall")
            .info();
        if (fsStatus.getPermission().toShort() != fsp.toShort()) {
          /* LOG.info("Explicitly setting permissions to : "+fsp.toShort()+", "+fsp) */
          LOG.explicitly_setting_permissions(String.valueOf(fsp.toShort()), fsp.toString())
              .tag("methodCall")
              .info();
          fc.setPermission(path, fsp);
        }
      } catch (FileAlreadyExistsException e) {
        /* LOG.info("Directory: ["+path+"] already exists.") */
        LOG.directory_already_exists(path.toString()).info();
      }
    }
  }
Example 26
  /** Creates a LocalResource instance for the JAR file referenced by the given Path. */
  public LocalResource getLocalResourceForPath(final Path jarPath, final LocalResourceType type)
      throws IOException {

    final FileStatus status =
        FileContext.getFileContext(fileSystem.getUri()).getFileStatus(jarPath);

    final LocalResource localResource = Records.newRecord(LocalResource.class);
    localResource.setType(type);
    localResource.setVisibility(LocalResourceVisibility.APPLICATION);
    localResource.setResource(ConverterUtils.getYarnUrlFromPath(status.getPath()));
    localResource.setTimestamp(status.getModificationTime());
    localResource.setSize(status.getLen());

    return localResource;
  }
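A hedged sketch of feeding the result into a container launch context; amJarPath, launchContext and the "job.jar" key are illustrative names, not identifiers from this listing:

  LocalResource jarResource = getLocalResourceForPath(amJarPath, LocalResourceType.FILE);
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put("job.jar", jarResource);
  // The launch context distributes these resources into the container's working directory.
  launchContext.setLocalResources(localResources);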
Example 27
  /**
   * Create a table that contains all directories under the specified path and another table that
   * contains all files under the specified path and whose name starts with "_file_".
   */
  private void initFileDirTables(Path path) throws IOException {
    FileStatus[] stats = fc.util().listStatus(path);

    for (FileStatus stat : stats) {
      if (stat.isDirectory()) {
        dirs.add(stat.getPath().toString());
        initFileDirTables(stat.getPath());
      } else {
        Path filePath = stat.getPath();
        if (filePath.getName().startsWith(StructureGenerator.FILE_NAME_PREFIX)) {
          files.add(filePath.toString());
        }
      }
    }
  }
  @Before
  public void initJunitModeTest() throws Exception {
    LOG.info("initJunitModeTest");

    conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); // 100K blocksize

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    mfs = cluster.getFileSystem();
    mfc = FileContext.getFileContext();

    Path rootdir = new Path(ROOT_DIR);
    mfs.mkdirs(rootdir);
  }
Example 29
 @Override
 public void renameFile(String source, String dest) throws IOException {
   Path sourcePath = new Path(hdfsDirPath, source);
   Path destPath = new Path(hdfsDirPath, dest);
   fileContext.rename(sourcePath, destPath);
 }
 private void moveToDoneNow(final Path src, final Path target) throws IOException {
   LOG.info("Moving " + src.toString() + " to " + target.toString());
   intermediateDoneDirFc.rename(src, target, Options.Rename.NONE);
 }