Example #1
  @Test
  public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
    Path testDir = qualifiedPath("test/hadoop", fc2);
    Assert.assertFalse(exists(fc2, testDir));
    fc2.mkdir(testDir, FsPermission.getDefault(), true);
    Assert.assertTrue(exists(fc2, testDir));

    // Create file on fc1 using fc2 context
    createFile(fc1, qualifiedPath("test/hadoop/file", fc2));

    Path testSubDir = qualifiedPath("test/hadoop/file/subdir", fc2);
    try {
      fc1.mkdir(testSubDir, FsPermission.getDefault(), true);
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    Assert.assertFalse(exists(fc1, testSubDir));

    Path testDeepSubDir = qualifiedPath("test/hadoop/file/deep/sub/dir", fc1);
    try {
      fc2.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    Assert.assertFalse(exists(fc1, testDeepSubDir));
  }
Example #2
  @Test
  public void testDeleteMissing() {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext =
        new JobContextImpl(
            taskAttemptContext.getConfiguration(),
            taskAttemptContext.getTaskAttemptID().getJobID());
    Configuration conf = jobContext.getConfiguration();

    String sourceBase;
    String targetBase;
    FileSystem fs = null;
    try {
      OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
      fs = FileSystem.get(conf);
      sourceBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
      targetBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
      String targetBaseAdd = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
      fs.rename(new Path(targetBaseAdd), new Path(targetBase));

      DistCpOptions options =
          new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
      options.setSyncFolder(true);
      options.setDeleteMissing(true);
      options.appendToConf(conf);

      CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
      Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
      listing.buildListing(listingFile, options);

      conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
      conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);

      committer.commitJob(jobContext);
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
        Assert.fail("Source and target folders are not in sync");
      }
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
        Assert.fail("Source and target folders are not in sync");
      }

      // Test for idempotent commit
      committer.commitJob(jobContext);
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
        Assert.fail("Source and target folders are not in sync");
      }
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
        Assert.fail("Source and target folders are not in sync");
      }
    } catch (Throwable e) {
      LOG.error("Exception encountered while testing for delete missing", e);
      Assert.fail("Delete missing failure");
    } finally {
      TestDistCpUtils.delete(fs, "/tmp1");
      conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
    }
  }
Example #3
  /**
   * Convert IGFS file attributes into Hadoop permission.
   *
   * @param file File info.
   * @return Hadoop permission.
   */
  private FsPermission permission(IgfsFile file) {
    String perm = file.property(IgfsUtils.PROP_PERMISSION, null);

    if (perm == null) return FsPermission.getDefault();

    try {
      return new FsPermission((short) Integer.parseInt(perm, 8));
    } catch (NumberFormatException ignore) {
      return FsPermission.getDefault();
    }
  }
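For context, a minimal round-trip sketch of the octal encoding the method above parses; the class name and the stored string value here are illustrative only.
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionOctalDemo {
  public static void main(String[] args) {
    // Parse a stored octal string (e.g. "644") the same way permission(IgfsFile) does,
    // falling back to the default permission on malformed input.
    String stored = "644";
    FsPermission perm;
    try {
      perm = new FsPermission((short) Integer.parseInt(stored, 8));
    } catch (NumberFormatException e) {
      perm = FsPermission.getDefault();
    }
    // Serialize back to the octal form used for storage.
    System.out.println(perm + " <-> " + String.format("%03o", perm.toShort()));
  }
}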
Example #4
  @Test
  public void testDeleteDirectory() throws IOException {
    String dirName = "dirTest";
    Path testDirPath = qualifiedPath(dirName, fc2);
    // Ensure directory does not exist
    Assert.assertFalse(exists(fc2, testDirPath));

    // Create a directory on fc2's file system using fc1
    fc1.mkdir(testDirPath, FsPermission.getDefault(), true);

    // Ensure dir is created
    Assert.assertTrue(exists(fc2, testDirPath));
    Assert.assertTrue(isDir(fc2, testDirPath));

    fc2.delete(testDirPath, true);

    // Ensure that directory is deleted
    Assert.assertFalse(isDir(fc2, testDirPath));

    // TestCase - Create and delete multiple directories
    String dirNames[] = {
      "deleteTest/testDir",
      "deleteTest/test Dir",
      "deleteTest/test*Dir",
      "deleteTest/test#Dir",
      "deleteTest/test1234",
      "deleteTest/1234Test",
      "deleteTest/test)Dir",
      "deleteTest/test_DIr",
      "deleteTest/()&^%$#@!~_+}{><?",
      "  ",
      "^ "
    };

    for (String f : dirNames) {
      // Create a directory on fc2's file system using fc1
      Path testPath = qualifiedPath(f, fc2);
      // Ensure file does not exist
      Assert.assertFalse(exists(fc2, testPath));

      // Now create directory
      fc1.mkdir(testPath, FsPermission.getDefault(), true);
      // Ensure fc2 has the created directory
      Assert.assertTrue(exists(fc2, testPath));
      Assert.assertTrue(isDir(fc2, testPath));
      // Delete dir
      Assert.assertTrue(fc2.delete(testPath, true));
      // verify if directory is deleted
      Assert.assertFalse(exists(fc2, testPath));
      Assert.assertFalse(isDir(fc2, testPath));
    }
  }
Example #5
    void generateInputs(int[] ignore) throws IOException {
      int nrDatanodes = getNumDatanodes();
      int nrBlocks = (int) Math.ceil((double) blocksPerReport * nrDatanodes / replication);
      int nrFiles = (int) Math.ceil((double) nrBlocks / blocksPerFile);
      datanodes = new TinyDatanode[nrDatanodes];
      // create data-nodes
      String prevDNName = "";
      for (int idx = 0; idx < nrDatanodes; idx++) {
        datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
        datanodes[idx].register();
        assert datanodes[idx].getName().compareTo(prevDNName) > 0
            : "Data-nodes must be sorted lexicographically.";
        datanodes[idx].sendHeartbeat();
        prevDNName = datanodes[idx].getName();
      }

      // create files
      LOG.info("Creating " + nrFiles + " with " + blocksPerFile + " blocks each.");
      FileNameGenerator nameGenerator;
      nameGenerator = new FileNameGenerator(getBaseDir(), 100);
      String clientName = getClientName(007);
      nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
      for (int idx = 0; idx < nrFiles; idx++) {
        String fileName = nameGenerator.getNextFileName("ThroughputBench");
        nameNode.create(
            fileName, FsPermission.getDefault(), clientName, true, replication, BLOCK_SIZE);
        addBlocks(fileName, clientName);
        nameNode.complete(fileName, clientName);
      }
      // prepare block reports
      for (int idx = 0; idx < nrDatanodes; idx++) {
        datanodes[idx].formBlockReport();
      }
    }
Example #6
 /**
  * Creates the specified path.
  *
  * @param fs FileSystem
  * @param path the path to create
  * @return <tt>true</tt> if the path was created successfully
  */
 public static boolean mkdir(FileSystem fs, String path) {
   try {
     return FileSystem.mkdirs(fs, new Path(path), FsPermission.getDefault());
   } catch (Exception ex) {
     throw new FileSystemException(ExceptionUtils.getMessage("Cannot create '{}'", path), ex);
   }
 }
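A hedged usage sketch for the helper above; FsUtils is a placeholder for the (unnamed) enclosing utility class, and the path is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class MkdirDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // FileSystem.mkdirs(fs, path, perm) creates the directory and then
    // explicitly sets FsPermission.getDefault() on it.
    boolean created = FsUtils.mkdir(fs, "/tmp/demo/nested/dir");
    System.out.println("created: " + created);
  }
}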
Example #7
  @Test
  public void testDeleteNonExistingDirectory() throws IOException {
    String testDirName = "testFile";
    Path testPath = qualifiedPath(testDirName, fc2);

    // TestCase1 : Test delete on directory never existed
    // Ensure directory does not exist
    Assert.assertFalse(exists(fc2, testPath));

    // Delete on non existing directory should return false
    Assert.assertFalse(fc2.delete(testPath, false));

    // TestCase2 : Create dir, Delete dir, Delete dir
    // Create a file on fc2's file system using fc1

    fc1.mkdir(testPath, FsPermission.getDefault(), true);
    // Ensure dir exist
    Assert.assertTrue(exists(fc2, testPath));

    // Delete test file, deleting existing file should return true
    Assert.assertTrue(fc2.delete(testPath, false));
    // Ensure file does not exist
    Assert.assertFalse(exists(fc2, testPath));
    // Delete on non existing file should return false
    Assert.assertFalse(fc2.delete(testPath, false));
  }
Example #8
  /**
   * A stream obtained via this call must be closed before using other APIs of this class or else
   * the invocation will block.
   */
  public FSDataOutputStream create(
      Path file,
      FsPermission permission,
      boolean overwrite,
      int bufferSize,
      short replication,
      long blockSize,
      Progressable progress)
      throws IOException {
    final FTPClient client = connect();
    Path workDir = new Path(client.printWorkingDirectory());
    Path absolute = makeAbsolute(workDir, file);
    if (exists(client, file)) {
      if (overwrite) {
        delete(client, file);
      } else {
        disconnect(client);
        throw new IOException("File already exists: " + file);
      }
    }

    Path parent = absolute.getParent();
    if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) {
      parent = (parent == null) ? new Path("/") : parent;
      disconnect(client);
      throw new IOException("create(): Mkdirs failed to create: " + parent);
    }
    client.allocate(bufferSize);
    // Change to parent directory on the server. Only then can we write to the
    // file on the server by opening up an OutputStream. As a side effect the
    // working directory on the server is changed to the parent directory of the
    // file. The FTP client connection is closed when close() is called on the
    // FSDataOutputStream.
    client.changeWorkingDirectory(parent.toUri().getPath());
    FSDataOutputStream fos =
        new FSDataOutputStream(client.storeFileStream(file.getName()), statistics) {

          public void close() throws IOException {
            super.close();
            if (!client.isConnected()) {
              throw new FTPException("Client not connected");
            }
            boolean cmdCompleted = client.completePendingCommand();
            disconnect(client);
            if (!cmdCompleted) {
              throw new FTPException(
                  "Could not complete transfer, Reply Code - " + client.getReplyCode());
            }
          }
        };
    if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
      // The ftpClient is in an inconsistent state. Must close the stream
      // which in turn will logout and disconnect from FTP server
      fos.close();
      throw new IOException("Unable to create file: " + file + ", Aborting");
    }
    return fos;
  }
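A usage sketch against the generic FileSystem.create(...) overload with the same parameters; the path, buffer size, replication, and block size are illustrative. It highlights the contract from the Javadoc above: close the stream before making any further calls.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateCloseDemo {
  public static void main(String[] args) throws Exception {
    // In practice this would be an ftp:// FileSystem; connection details omitted.
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/uploads/report.txt");
    FSDataOutputStream out = fs.create(
        file, FsPermission.getDefault(), true /* overwrite */,
        4096, (short) 1, 64L * 1024 * 1024, null /* no Progressable */);
    try {
      out.writeUTF("hello");
    } finally {
      // Close before any other API call: the FTP-backed stream keeps the data
      // connection open until completePendingCommand() runs inside close().
      out.close();
    }
    System.out.println(fs.exists(file));
  }
}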
Example #9
  public BaseDataPublisher(State state) throws IOException {
    super(state);
    this.closer = Closer.create();
    Configuration conf = new Configuration();

    // Add all job configuration properties so they are picked up by Hadoop
    for (String key : this.getState().getPropertyNames()) {
      conf.set(key, this.getState().getProp(key));
    }

    this.numBranches = this.getState().getPropAsInt(ConfigurationKeys.FORK_BRANCHES_KEY, 1);

    this.fileSystemByBranches = Lists.newArrayListWithCapacity(this.numBranches);
    this.publisherFinalDirOwnerGroupsByBranches = Lists.newArrayListWithCapacity(this.numBranches);
    this.permissions = Lists.newArrayListWithCapacity(this.numBranches);

    // Get a FileSystem instance for each branch
    for (int i = 0; i < this.numBranches; i++) {
      URI uri =
          URI.create(
              this.getState()
                  .getProp(
                      ForkOperatorUtils.getPropertyNameForBranch(
                          ConfigurationKeys.WRITER_FILE_SYSTEM_URI, this.numBranches, i),
                      ConfigurationKeys.LOCAL_FS_URI));
      this.fileSystemByBranches.add(FileSystem.get(uri, conf));

      // The group(s) will be applied to the final publisher output directory(ies)
      this.publisherFinalDirOwnerGroupsByBranches.add(
          Optional.fromNullable(
              this.getState()
                  .getProp(
                      ForkOperatorUtils.getPropertyNameForBranch(
                          ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR_GROUP, this.numBranches, i))));

      // The permission(s) will be applied to all directories created by the publisher,
      // which do NOT include directories created by the writer and moved by the publisher.
      // The permissions of those directories are controlled by writer.file.permissions and
      // writer.dir.permissions.
      this.permissions.add(
          new FsPermission(
              state.getPropAsShortWithRadix(
                  ForkOperatorUtils.getPropertyNameForBranch(
                      ConfigurationKeys.DATA_PUBLISHER_PERMISSIONS, numBranches, i),
                  FsPermission.getDefault().toShort(),
                  ConfigurationKeys.PERMISSION_PARSING_RADIX)));
    }

    this.parallelRunnerThreads =
        state.getPropAsInt(
            ParallelRunner.PARALLEL_RUNNER_THREADS_KEY,
            ParallelRunner.DEFAULT_PARALLEL_RUNNER_THREADS);
  }
Example #10
 /** Do file create. */
 long executeOp(int daemonId, int inputIdx, String clientName) throws IOException {
   long start = System.currentTimeMillis();
   // dummyActionNoSynch(fileIdx);
   nameNode.create(
       fileNames[daemonId][inputIdx],
       FsPermission.getDefault(),
       clientName,
       true,
       replication,
       BLOCK_SIZE);
   long end = System.currentTimeMillis();
   for (boolean written = !closeUponCreate;
       !written;
       written = nameNode.complete(fileNames[daemonId][inputIdx], clientName)) ;
   return end - start;
 }
Example #11
  // creates a file using DistributedFileSystem.createNonRecursive()
  static FSDataOutputStream createNonRecursive(
      FileSystem fs, Path name, int repl, boolean overwrite) throws IOException {
    System.out.println("createNonRecursive: Created " + name + " with " + repl + " replica.");
    FSDataOutputStream stm =
        ((DistributedFileSystem) fs)
            .createNonRecursive(
                name,
                FsPermission.getDefault(),
                overwrite,
                fs.getConf().getInt("io.file.buffer.size", 4096),
                (short) repl,
                (long) blockSize,
                null);

    return stm;
  }
Example #12
 /**
  * Convenience method, so that we don't open a new connection when using this method from within
  * another method. Otherwise every API invocation incurs the overhead of opening/closing a TCP
  * connection.
  */
 private boolean mkdirs(FTPClient client, Path file, FsPermission permission) throws IOException {
   boolean created = true;
   Path workDir = new Path(client.printWorkingDirectory());
   Path absolute = makeAbsolute(workDir, file);
   String pathName = absolute.getName();
   if (!exists(client, absolute)) {
     Path parent = absolute.getParent();
     created = (parent == null || mkdirs(client, parent, FsPermission.getDefault()));
     if (created) {
       String parentDir = parent.toUri().getPath();
       client.changeWorkingDirectory(parentDir);
       created = created && client.makeDirectory(pathName);
     }
   } else if (isFile(client, absolute)) {
     throw new IOException(
         String.format("Can't make directory for path %s since it is a file.", absolute));
   }
   return created;
 }
Example #13
  @Test
  public void testIsDirectory() throws IOException {
    String dirName = "dirTest";
    String invalidDir = "nonExistantDir";
    String rootDir = "/";

    Path existingPath = qualifiedPath(dirName, fc2);
    Path nonExistingPath = qualifiedPath(invalidDir, fc2);
    Path pathToRootDir = qualifiedPath(rootDir, fc2);

    // Create a directory on fc2's file system using fc1
    fc1.mkdir(existingPath, FsPermission.getDefault(), true);

    // Ensure fc2 has directory
    Assert.assertTrue(isDir(fc2, existingPath));
    Assert.assertTrue(isDir(fc2, pathToRootDir));

    // Negative test case
    Assert.assertFalse(isDir(fc2, nonExistingPath));
  }
Example #14
 /**
  * Constructor
  *
  * @param length the number of bytes the file has
  * @param isdir if the path is a directory
  * @param block_replication the replication factor
  * @param blocksize the block size
  * @param modification_time modification time
  * @param access_time access time
  * @param permission permission
  * @param owner the owner of the path
  * @param group the group of the path
  * @param path the local name in java UTF8 encoding the same as that in-memory
  */
 public HdfsFileStatus(
     long length,
     boolean isdir,
     int block_replication,
     long blocksize,
     long modification_time,
     long access_time,
     FsPermission permission,
     String owner,
     String group,
     byte[] path) {
   this.length = length;
   this.isdir = isdir;
   this.block_replication = (short) block_replication;
   this.blocksize = blocksize;
   this.modification_time = modification_time;
   this.access_time = access_time;
   this.permission = (permission == null) ? FsPermission.getDefault() : permission;
   this.owner = (owner == null) ? "" : owner;
   this.group = (group == null) ? "" : group;
   this.path = path;
 }
Example #15
  /**
   * Deprecated. Remains for legacy support. Should be removed when {@link Stat} gains support for
   * Windows and other operating systems.
   */
  @Deprecated
  private FileStatus deprecatedGetFileLinkStatusInternal(final Path f) throws IOException {
    String target = FileUtil.readLink(new File(f.toString()));

    try {
      FileStatus fs = getFileStatus(f);
      // If f refers to a regular file or directory
      if (target.isEmpty()) {
        return fs;
      }
      // Otherwise f refers to a symlink
      return new FileStatus(
          fs.getLen(),
          false,
          fs.getReplication(),
          fs.getBlockSize(),
          fs.getModificationTime(),
          fs.getAccessTime(),
          fs.getPermission(),
          fs.getOwner(),
          fs.getGroup(),
          new Path(target),
          f);
    } catch (FileNotFoundException e) {
      /* The exists method in the File class returns false for dangling
       * links so we can get a FileNotFoundException for links that exist.
       * It's also possible that we raced with a delete of the link. Use
       * the readBasicFileAttributes method in java.nio.file.attributes
       * when available.
       */
      if (!target.isEmpty()) {
        return new FileStatus(
            0, false, 0, 0, 0, 0, FsPermission.getDefault(), "", "", new Path(target), f);
      }
      // f refers to a file or directory that does not exist
      throw e;
    }
  }
Example #16
 public FileStatus(
     long length,
     boolean isdir,
     int block_replication,
     long blocksize,
     long modification_time,
     long access_time,
     FsPermission permission,
     String owner,
     String group,
     Path symlink,
     Path path) {
   this.length = length;
   this.isdir = isdir;
   this.block_replication = (short) block_replication;
   this.blocksize = blocksize;
   this.modification_time = modification_time;
   this.access_time = access_time;
   if (permission != null) {
     this.permission = permission;
   } else if (isdir) {
     this.permission = FsPermission.getDirDefault();
   } else if (symlink != null) {
     this.permission = FsPermission.getDefault();
   } else {
     this.permission = FsPermission.getFileDefault();
   }
   this.owner = (owner == null) ? "" : owner;
   this.group = (group == null) ? "" : group;
   this.symlink = symlink;
   this.path = path;
   // The variables isdir and symlink indicate the type:
   // 1. isdir implies directory, in which case symlink must be null.
   // 2. !isdir implies a file or symlink, symlink != null implies a
   //    symlink, otherwise it's a file.
   assert (isdir && symlink == null) || !isdir;
 }
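A tiny sketch printing the three fallbacks the constructor above chooses between for a directory, a symlink, and a plain file; the exact values depend on the Hadoop version in use.
import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultPermissionDemo {
  public static void main(String[] args) {
    System.out.println("dir default:     " + FsPermission.getDirDefault());
    System.out.println("symlink default: " + FsPermission.getDefault());
    System.out.println("file default:    " + FsPermission.getFileDefault());
  }
}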
Example #17
  @Test
  public void testCreateDirectory() throws IOException {

    Path path = qualifiedPath("test/hadoop", fc2);
    Path falsePath = qualifiedPath("path/doesnot.exist", fc2);
    Path subDirPath = qualifiedPath("dir0", fc2);

    // Ensure that testPath does not exist in fc1
    Assert.assertFalse(exists(fc1, path));
    Assert.assertFalse(isFile(fc1, path));
    Assert.assertFalse(isDir(fc1, path));

    // Create a directory on fc2's file system using fc1
    fc1.mkdir(path, FsPermission.getDefault(), true);

    // Ensure fc2 has directory
    Assert.assertTrue(isDir(fc2, path));
    Assert.assertTrue(exists(fc2, path));
    Assert.assertFalse(isFile(fc2, path));

    // Test to create same dir twice, (HDFS mkdir is similar to mkdir -p )
    fc1.mkdir(subDirPath, FsPermission.getDefault(), true);
    // This should not throw exception
    fc1.mkdir(subDirPath, FsPermission.getDefault(), true);

    // Create Sub Dirs
    fc1.mkdir(subDirPath, FsPermission.getDefault(), true);

    // Check parent dir
    Path parentDir = path.getParent();
    Assert.assertTrue(exists(fc2, parentDir));
    Assert.assertFalse(isFile(fc2, parentDir));

    // Check parent parent dir
    Path grandparentDir = parentDir.getParent();
    Assert.assertTrue(exists(fc2, grandparentDir));
    Assert.assertFalse(isFile(fc2, grandparentDir));

    // Negative test cases
    Assert.assertFalse(exists(fc2, falsePath));
    Assert.assertFalse(isDir(fc2, falsePath));

    // TestCase - Create multiple directories
    String dirNames[] = {
      "createTest/testDir",
      "createTest/test Dir",
      "deleteTest/test*Dir",
      "deleteTest/test#Dir",
      "deleteTest/test1234",
      "deleteTest/test_DIr",
      "deleteTest/1234Test",
      "deleteTest/test)Dir",
      "deleteTest/()&^%$#@!~_+}{><?",
      "  ",
      "^ "
    };

    for (String f : dirNames) {
      // Create a directory on fc2's file system using fc1
      Path testPath = qualifiedPath(f, fc2);
      // Ensure file does not exist
      Assert.assertFalse(exists(fc2, testPath));

      // Now create directory
      fc1.mkdir(testPath, FsPermission.getDefault(), true);
      // Ensure fc2 has the created directory
      Assert.assertTrue(exists(fc2, testPath));
      Assert.assertTrue(isDir(fc2, testPath));
    }
  }
Example #18
  /**
   * Convert Hadoop permission into IGFS file attribute.
   *
   * @param perm Hadoop permission.
   * @return IGFS attributes.
   */
  private Map<String, String> permission(FsPermission perm) {
    if (perm == null) perm = FsPermission.getDefault();

    return F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm));
  }
Example #19
  @Test
  public void testListStatus() throws Exception {
    final String hPrefix = "test/hadoop";
    final String[] dirs = {
      hPrefix + "/a",
      hPrefix + "/b",
      hPrefix + "/c",
      hPrefix + "/1",
      hPrefix + "/#@#@",
      hPrefix + "/&*#$#$@234"
    };
    ArrayList<Path> testDirs = new ArrayList<Path>();

    for (String d : dirs) {
      testDirs.add(qualifiedPath(d, fc2));
    }
    Assert.assertFalse(exists(fc1, testDirs.get(0)));

    for (Path path : testDirs) {
      fc1.mkdir(path, FsPermission.getDefault(), true);
    }

    // test listStatus that returns an array of FileStatus
    FileStatus[] paths = fc1.util().listStatus(qualifiedPath("test", fc1));
    Assert.assertEquals(1, paths.length);
    Assert.assertEquals(qualifiedPath(hPrefix, fc1), paths[0].getPath());

    paths = fc1.util().listStatus(qualifiedPath(hPrefix, fc1));
    Assert.assertEquals(6, paths.length);
    for (int i = 0; i < dirs.length; i++) {
      boolean found = false;
      for (int j = 0; j < paths.length; j++) {
        if (qualifiedPath(dirs[i], fc1).equals(paths[j].getPath())) {
          found = true;
        }
      }
      Assert.assertTrue(dirs[i] + " not found", found);
    }

    paths = fc1.util().listStatus(qualifiedPath(dirs[0], fc1));
    Assert.assertEquals(0, paths.length);

    // test listStatus that returns an iterator of FileStatus
    RemoteIterator<FileStatus> pathsItor = fc1.listStatus(qualifiedPath("test", fc1));
    Assert.assertEquals(qualifiedPath(hPrefix, fc1), pathsItor.next().getPath());
    Assert.assertFalse(pathsItor.hasNext());

    pathsItor = fc1.listStatus(qualifiedPath(hPrefix, fc1));
    int dirLen = 0;
    for (; pathsItor.hasNext(); dirLen++) {
      boolean found = false;
      FileStatus stat = pathsItor.next();
      for (int j = 0; j < dirs.length; j++) {
        if (qualifiedPath(dirs[j], fc1).equals(stat.getPath())) {
          found = true;
          break;
        }
      }
      Assert.assertTrue(stat.getPath() + " not found", found);
    }
    Assert.assertEquals(6, dirLen);

    pathsItor = fc1.listStatus(qualifiedPath(dirs[0], fc1));
    Assert.assertFalse(pathsItor.hasNext());
  }
Example #20
/**
 * {@link HadoopFile} implementation for the HDFS protocol.
 *
 * @author Maxence Bernard
 */
public class HDFSFile extends HadoopFile {

  // TODO: allow a custom group to be set (see TODO below)
  //    /** Name of the property holding the file's group */
  //    public static final String GROUP_PROPERTY_NAME = "group";

  /** Default username */
  private static String DEFAULT_USERNAME;

  /** Default group */
  private static String DEFAULT_GROUP;

  /** Default file permissions */
  private static final FilePermissions DEFAULT_PERMISSIONS =
      new SimpleFilePermissions(
          FsPermission.getDefault()
                  .applyUMask(FsPermission.getUMask(DEFAULT_CONFIGURATION))
                  .toShort()
              & PermissionBits.FULL_PERMISSION_INT);

  static {
    try {
      UnixUserGroupInformation ugi = UnixUserGroupInformation.login(DEFAULT_CONFIGURATION);
      DEFAULT_USERNAME = ugi.getUserName();
      // Do not use default groups, as these are pretty much useless
    } catch (Exception e) {
      // Should never happen but default to a reasonable value if it does
      DEFAULT_USERNAME = System.getProperty("user.name");
    }

    DEFAULT_GROUP = DEFAULT_CONFIGURATION.get("dfs.permissions.supergroup", "supergroup");
  }

  protected HDFSFile(FileURL url) throws IOException {
    super(url);
  }

  protected HDFSFile(FileURL url, FileSystem fs, FileStatus fileStatus) throws IOException {
    super(url, fs, fileStatus);
  }

  public static String getDefaultUsername() {
    return DEFAULT_USERNAME;
  }

  public static String getDefaultGroup() {
    return DEFAULT_GROUP;
  }

  private static String getUsername(FileURL url) {
    Credentials credentials = url.getCredentials();
    String username;
    if (credentials == null || (username = credentials.getLogin()).equals(""))
      username = getDefaultUsername();

    return username;
  }

  private static String getGroup(FileURL url) {
    //        // Import the group from the URL's 'group' property, if set
    //        String group = url.getProperty(GROUP_PROPERTY_NAME);
    //        if(group==null || group.equals(""))
    //            group = getDefaultGroup();
    //
    //        return group;

    return getDefaultGroup();
  }

  ///////////////////////////////
  // HadoopFile implementation //
  ///////////////////////////////

  @Override
  protected FileSystem getHadoopFileSystem(FileURL url) throws IOException {
    // Note: getRealm returns a fresh instance every time
    FileURL realm = url.getRealm();

    Configuration conf = new Configuration();

    // Import the user from the URL's authority, if set
    // TODO: for some reason, setting the group has no effect: files are still created with the
    // default supergroup
    conf.setStrings(UnixUserGroupInformation.UGI_PROPERTY_NAME, getUsername(url), getGroup(url));

    return FileSystem.get(URI.create(realm.toString(false)), conf);
  }

  @Override
  protected void setDefaultFileAttributes(FileURL url, HadoopFileAttributes atts) {
    atts.setOwner(getUsername(url));
    atts.setGroup(getGroup(url));
    atts.setPermissions(DEFAULT_PERMISSIONS);
  }
}
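A hedged sketch of how a value like DEFAULT_PERMISSIONS above is derived: the site-wide default permission is reduced by the configured umask and limited to the nine rwx bits (0777 stands in for PermissionBits.FULL_PERMISSION_INT).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default permission masked by the cluster umask, as in DEFAULT_PERMISSIONS above.
    FsPermission masked = FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
    int permissionBits = masked.toShort() & 0777;
    System.out.println(masked + " -> 0" + Integer.toOctalString(permissionBits));
  }
}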