Code example #1
 protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission) throws IOException {
   if (permission == null) {
     return p2f.mkdir();
   } else {
     if (Shell.WINDOWS && NativeIO.isAvailable()) {
       try {
         NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
         return true;
       } catch (IOException e) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               String.format(
                   "NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
                   p2f, permission.toShort()),
               e);
         }
         return false;
       }
     } else {
       boolean b = p2f.mkdir();
       if (b) {
         setPermission(p, permission);
       }
       return b;
     }
   }
 }
Code example #2
  @Test
  public void testRemoveAclExceedsQuota() throws Exception {
    Path filePath = new Path(path, "file1");
    Path fileSnapshotPath = new Path(snapshotPath, "file1");
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
    hdfs.allowSnapshot(path);
    hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
    FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short) 0600)).close();
    hdfs.setPermission(filePath, FsPermission.createImmutable((short) 0600));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ_WRITE));
    hdfs.modifyAclEntries(filePath, aclSpec);

    hdfs.createSnapshot(path, snapshotName);

    AclStatus s = hdfs.getAclStatus(filePath);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(
        new AclEntry[] {aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE)},
        returned);
    assertPermission((short) 010660, filePath);

    s = hdfs.getAclStatus(fileSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(
        new AclEntry[] {aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE)},
        returned);
    assertPermission((short) 010660, filePath);

    aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ));
    hdfs.removeAcl(filePath);
  }
Code example #3
  /**
   * Tests backward compatibility. The configuration can be set either with the old param dfs.umask,
   * which takes a decimal umask, or with dfs.umaskmode, which takes a symbolic or octal umask.
   */
  public void testBackwardCompatibility() {
    // Test 1 - old configuration key with decimal
    // umask value should be handled when set using
    // FSPermission.setUMask() API
    FsPermission perm = new FsPermission((short) 18);
    Configuration conf = new Configuration();
    FsPermission.setUMask(conf, perm);
    assertEquals(18, FsPermission.getUMask(conf).toShort());

    // Test 2 - old configuration key set with decimal
    // umask value should be handled
    perm = new FsPermission((short) 18);
    conf = new Configuration();
    conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
    assertEquals(18, FsPermission.getUMask(conf).toShort());

    // Test 3 - old configuration key overrides the new one
    conf = new Configuration();
    conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
    conf.set(FsPermission.UMASK_LABEL, "000");
    assertEquals(18, FsPermission.getUMask(conf).toShort());

    // Test 4 - new configuration key is handled
    conf = new Configuration();
    conf.set(FsPermission.UMASK_LABEL, "022");
    assertEquals(18, FsPermission.getUMask(conf).toShort());
  }
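A side note on the constant used above: 18 is simply the decimal spelling of octal 022, the conventional default umask; the deprecated dfs.umask key is read as decimal, while dfs.umaskmode accepts octal or symbolic values. A quick hedged sanity check of that correspondence (not part of the original test):

    // 18 (decimal) == 022 (octal); hypothetical check for illustration only.
    assertEquals(18, Integer.parseInt("022", 8));
    assertEquals("22", Integer.toOctalString(18));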
Code example #4
File: FileUtil.java Project: luochen01/tongji_thesis
 private static void execSetPermission(File f, FsPermission permission) throws IOException {
   if (NativeIO.isAvailable()) {
     NativeIO.chmod(f.getCanonicalPath(), permission.toShort());
   } else {
     execCommand(f, Shell.SET_PERMISSION_COMMAND, String.format("%04o", permission.toShort()));
   }
 }
Code example #5
 private void makeDoneSubdir(Path path) throws IOException {
   try {
     doneDirFc.getFileStatus(path);
     existingDoneSubdirs.add(path);
   } catch (FileNotFoundException fnfE) {
     try {
       FsPermission fsp = new FsPermission(JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION);
       doneDirFc.mkdir(path, fsp, true);
       FileStatus fsStatus = doneDirFc.getFileStatus(path);
       /* LOG.info("Perms after creating "+fsStatus.getPermission().toShort()+", Expected: "+fsp.toShort()) */
       LOG.perms_after_creating_expected(
               String.valueOf(fsStatus.getPermission().toShort()), String.valueOf(fsp.toShort()))
           .tag("methodCall")
           .info();
       if (fsStatus.getPermission().toShort() != fsp.toShort()) {
         /* LOG.info("Explicitly setting permissions to : "+fsp.toShort()+", "+fsp) */
         LOG.explicitly_setting_permissions(String.valueOf(fsp.toShort()), fsp.toString())
             .tag("methodCall")
             .info();
         doneDirFc.setPermission(path, fsp);
       }
       existingDoneSubdirs.add(path);
     } catch (FileAlreadyExistsException faeE) { // Nothing to do.
     }
   }
 }
Code example #6
  @Test
  public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
    Path testDir = qualifiedPath("test/hadoop", fc2);
    Assert.assertFalse(exists(fc2, testDir));
    fc2.mkdir(testDir, FsPermission.getDefault(), true);
    Assert.assertTrue(exists(fc2, testDir));

    // Create file on fc1 using fc2 context
    createFile(fc1, qualifiedPath("test/hadoop/file", fc2));

    Path testSubDir = qualifiedPath("test/hadoop/file/subdir", fc2);
    try {
      fc1.mkdir(testSubDir, FsPermission.getDefault(), true);
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    Assert.assertFalse(exists(fc1, testSubDir));

    Path testDeepSubDir = qualifiedPath("test/hadoop/file/deep/sub/dir", fc1);
    try {
      fc2.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
      Assert.fail("Should throw IOException.");
    } catch (IOException e) {
      // expected
    }
    Assert.assertFalse(exists(fc1, testDeepSubDir));
  }
Code example #7
File: JsonUtil.java Project: aixuebo/had2.6.0
  /** Convert a HdfsFileStatus object to a Json string. */
  public static String toJsonString(final HdfsFileStatus status, boolean includeType) {
    if (status == null) {
      return null;
    }
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("pathSuffix", status.getLocalName());
    m.put("type", PathType.valueOf(status));
    if (status.isSymlink()) {
      m.put("symlink", status.getSymlink());
    }

    m.put("length", status.getLen());
    m.put("owner", status.getOwner());
    m.put("group", status.getGroup());
    FsPermission perm = status.getPermission();
    m.put("permission", toString(perm));
    if (perm.getAclBit()) {
      m.put("aclBit", true);
    }
    if (perm.getEncryptedBit()) {
      m.put("encBit", true);
    }
    m.put("accessTime", status.getAccessTime());
    m.put("modificationTime", status.getModificationTime());
    m.put("blockSize", status.getBlockSize());
    m.put("replication", status.getReplication());
    m.put("fileId", status.getFileId());
    m.put("childrenNum", status.getChildrenNum());
    m.put("storagePolicy", status.getStoragePolicy());
    return includeType ? toJsonString(FileStatus.class, m) : JSON.toString(m);
  }
Code example #8
  @Test
  public void testDeleteMissing() {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext =
        new JobContextImpl(
            taskAttemptContext.getConfiguration(),
            taskAttemptContext.getTaskAttemptID().getJobID());
    Configuration conf = jobContext.getConfiguration();

    String sourceBase;
    String targetBase;
    FileSystem fs = null;
    try {
      OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
      fs = FileSystem.get(conf);
      sourceBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
      targetBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
      String targetBaseAdd = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
      fs.rename(new Path(targetBaseAdd), new Path(targetBase));

      DistCpOptions options =
          new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
      options.setSyncFolder(true);
      options.setDeleteMissing(true);
      options.appendToConf(conf);

      CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
      Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
      listing.buildListing(listingFile, options);

      conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
      conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);

      committer.commitJob(jobContext);
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
        Assert.fail("Source and target folders are not in sync");
      }
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
        Assert.fail("Source and target folders are not in sync");
      }

      // Test for idempotent commit
      committer.commitJob(jobContext);
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
        Assert.fail("Source and target folders are not in sync");
      }
      if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
        Assert.fail("Source and target folders are not in sync");
      }
    } catch (Throwable e) {
      LOG.error("Exception encountered while testing for delete missing", e);
      Assert.fail("Delete missing failure");
    } finally {
      TestDistCpUtils.delete(fs, "/tmp1");
      conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
    }
  }
Code example #9
File: Util.java Project: roshanp/storm-yarn
 /**
  * Checks, for a given path, whether the "other" permission bits on it imply the permission in the
  * passed FsAction.
  *
  * @param fs the FileSystem that holds the path
  * @param path the path whose permissions are checked
  * @param action the access type being tested
  * @return true if the path in the uri is visible to all, false otherwise
  * @throws IOException if the file status of the path cannot be read
  */
 private static boolean checkPermissionOfOther(FileSystem fs, Path path, FsAction action)
     throws IOException {
   FileStatus status = fs.getFileStatus(path);
   FsPermission perms = status.getPermission();
   FsAction otherAction = perms.getOtherAction();
   return otherAction.implies(action);
 }
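A hedged usage sketch of the helper above, from inside the same class (it is private); the jar path is made up for illustration:

   // Hypothetical call site: is the staged jar readable by everyone?
   boolean publiclyReadable =
       checkPermissionOfOther(fs, new Path("/apps/storm/lib/storm-core.jar"), FsAction.READ);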
Code example #10
 /** Use the command chmod to set permission. */
 @Override
 public void setPermission(Path p, FsPermission permission) throws IOException {
   if (NativeIO.isAvailable()) {
     NativeIO.POSIX.chmod(pathToFile(p).getCanonicalPath(), permission.toShort());
   } else {
     String perm = String.format("%04o", permission.toShort());
     Shell.execCommand(
         Shell.getSetPermissionCommand(perm, false, FileUtil.makeShellPath(pathToFile(p), true)));
   }
 }
Code example #11
  /**
   * Convert IGFS file attributes into Hadoop permission.
   *
   * @param file File info.
   * @return Hadoop permission.
   */
  private FsPermission permission(IgfsFile file) {
    String perm = file.property(IgfsUtils.PROP_PERMISSION, null);

    if (perm == null) return FsPermission.getDefault();

    try {
      return new FsPermission((short) Integer.parseInt(perm, 8));
    } catch (NumberFormatException ignore) {
      return FsPermission.getDefault();
    }
  }
Code example #12
 /**
  * Changes the permission of a path.
  *
  * @param path the path to set the permission on
  * @param permission the permission to set
  * @throws IOException if changing the permission of the path fails
  */
 @Override
 public void setPermission(Path path, FsPermission permission) throws IOException {
   LOG.info("setMode({},{})", path, permission.toString());
   AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
   SetAttributeOptions options =
       SetAttributeOptions.defaults().setMode(new Mode(permission.toShort())).setRecursive(false);
   try {
     mFileSystem.setAttribute(uri, options);
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
Code example #13
  @Test
  public void testDeleteDirectory() throws IOException {
    String dirName = "dirTest";
    Path testDirPath = qualifiedPath(dirName, fc2);
    // Ensure directory does not exist
    Assert.assertFalse(exists(fc2, testDirPath));

    // Create a directory on fc2's file system using fc1
    fc1.mkdir(testDirPath, FsPermission.getDefault(), true);

    // Ensure dir is created
    Assert.assertTrue(exists(fc2, testDirPath));
    Assert.assertTrue(isDir(fc2, testDirPath));

    fc2.delete(testDirPath, true);

    // Ensure that directory is deleted
    Assert.assertFalse(isDir(fc2, testDirPath));

    // TestCase - Create and delete multiple directories
    String[] dirNames = {
      "deleteTest/testDir",
      "deleteTest/test Dir",
      "deleteTest/test*Dir",
      "deleteTest/test#Dir",
      "deleteTest/test1234",
      "deleteTest/1234Test",
      "deleteTest/test)Dir",
      "deleteTest/test_DIr",
      "deleteTest/()&^%$#@!~_+}{><?",
      "  ",
      "^ "
    };

    for (String f : dirNames) {
      // Create a file on fc2's file system using fc1
      Path testPath = qualifiedPath(f, fc2);
      // Ensure file does not exist
      Assert.assertFalse(exists(fc2, testPath));

      // Now create directory
      fc1.mkdir(testPath, FsPermission.getDefault(), true);
      // Ensure fc2 has the created directory
      Assert.assertTrue(exists(fc2, testPath));
      Assert.assertTrue(isDir(fc2, testPath));
      // Delete dir
      Assert.assertTrue(fc2.delete(testPath, true));
      // verify if directory is deleted
      Assert.assertFalse(exists(fc2, testPath));
      Assert.assertFalse(isDir(fc2, testPath));
    }
  }
Code example #14
    void generateInputs(int[] ignore) throws IOException {
      int nrDatanodes = getNumDatanodes();
      int nrBlocks = (int) Math.ceil((double) blocksPerReport * nrDatanodes / replication);
      int nrFiles = (int) Math.ceil((double) nrBlocks / blocksPerFile);
      datanodes = new TinyDatanode[nrDatanodes];
      // create data-nodes
      String prevDNName = "";
      for (int idx = 0; idx < nrDatanodes; idx++) {
        datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
        datanodes[idx].register();
        assert datanodes[idx].getName().compareTo(prevDNName) > 0
            : "Data-nodes must be sorted lexicographically.";
        datanodes[idx].sendHeartbeat();
        prevDNName = datanodes[idx].getName();
      }

      // create files
      LOG.info("Creating " + nrFiles + " with " + blocksPerFile + " blocks each.");
      FileNameGenerator nameGenerator;
      nameGenerator = new FileNameGenerator(getBaseDir(), 100);
      String clientName = getClientName(007);
      nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
      for (int idx = 0; idx < nrFiles; idx++) {
        String fileName = nameGenerator.getNextFileName("ThroughputBench");
        nameNode.create(
            fileName, FsPermission.getDefault(), clientName, true, replication, BLOCK_SIZE);
        addBlocks(fileName, clientName);
        nameNode.complete(fileName, clientName);
      }
      // prepare block reports
      for (int idx = 0; idx < nrDatanodes; idx++) {
        datanodes[idx].formBlockReport();
      }
    }
Code example #15
 @Test
 public void testRemoveAclSnapshotPath() throws Exception {
   FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
   SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
   exception.expect(SnapshotAccessControlException.class);
   hdfs.removeAcl(snapshotPath);
 }
Code example #16
  @Test
  public void testModifyReadsCurrentState() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));

    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", ALL));
    hdfs.modifyAclEntries(path, aclSpec);

    aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
    hdfs.modifyAclEntries(path, aclSpec);

    AclEntry[] expected =
        new AclEntry[] {
          aclEntry(ACCESS, USER, "bruce", ALL),
          aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
          aclEntry(ACCESS, GROUP, NONE)
        };
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short) 010770, path);
    assertDirPermissionGranted(fsAsBruce, BRUCE, path);
    assertDirPermissionGranted(fsAsDiana, DIANA, path);
  }
Code example #17
 @Before
 public void setUp() {
   PermissionStatus permStatus =
       PermissionStatus.createImmutable(
           SUPERUSER, SUPERGROUP, FsPermission.createImmutable((short) 0755));
   inodeRoot = new INodeDirectory(INodeId.ROOT_INODE_ID, INodeDirectory.ROOT_NAME, permStatus, 0L);
 }
Code example #18
  @Test
  public void testDeleteNonExistingDirectory() throws IOException {
    String testDirName = "testFile";
    Path testPath = qualifiedPath(testDirName, fc2);

    // TestCase1 : Test delete on directory never existed
    // Ensure directory does not exist
    Assert.assertFalse(exists(fc2, testPath));

    // Delete on non existing directory should return false
    Assert.assertFalse(fc2.delete(testPath, false));

    // TestCase2 : Create dir, Delete dir, Delete dir
    // Create a file on fc2's file system using fc1

    fc1.mkdir(testPath, FsPermission.getDefault(), true);
    // Ensure dir exists
    Assert.assertTrue(exists(fc2, testPath));

    // Delete test dir; deleting an existing directory should return true
    Assert.assertTrue(fc2.delete(testPath, false));
    // Ensure dir does not exist
    Assert.assertFalse(exists(fc2, testPath));
    // Delete on non-existing dir should return false
    Assert.assertFalse(fc2.delete(testPath, false));
  }
Code example #19
 /** Serialize a {@link PermissionStatus} from its base components. */
 public static void write(
     DataOutput out, String username, String groupname, FsPermission permission)
     throws IOException {
   Text.writeString(out, username);
   Text.writeString(out, groupname);
   permission.write(out);
 }
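For symmetry, a minimal sketch of the matching deserialization, assuming the standard Text.readString and FsPermission.read helpers; the fields are read back in the same order they were written:

 /** Deserialize the components written by write() above (sketch). */
 public static PermissionStatus read(DataInput in) throws IOException {
   String username = Text.readString(in);
   String groupname = Text.readString(in);
   FsPermission permission = FsPermission.read(in);
   return new PermissionStatus(username, groupname, permission);
 }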
Code example #20
  protected final void makeTestFile(Path path, long length, boolean isLazyPersist)
      throws IOException {

    EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE);

    if (isLazyPersist) {
      createFlags.add(LAZY_PERSIST);
    }

    FSDataOutputStream fos = null;
    try {
      fos =
          fs.create(
              path,
              FsPermission.getFileDefault(),
              createFlags,
              BUFFER_LENGTH,
              REPL_FACTOR,
              BLOCK_SIZE,
              null);

      // Allocate a block.
      byte[] buffer = new byte[BUFFER_LENGTH];
      for (int bytesWritten = 0; bytesWritten < length; ) {
        fos.write(buffer, 0, buffer.length);
        bytesWritten += buffer.length;
      }
      if (length > 0) {
        fos.hsync();
      }
    } finally {
      IOUtils.closeQuietly(fos);
    }
  }
Code example #21
File: HdfsUtils.java Project: YzPaul3/ankus
 /**
  * Creates the specified path.
  *
  * @param fs FileSystem
  * @param path the path to create
  * @return <tt>true</tt> if the path was created successfully
  */
 public static boolean mkdir(FileSystem fs, String path) {
   try {
     return FileSystem.mkdirs(fs, new Path(path), FsPermission.getDefault());
   } catch (Exception ex) {
     throw new FileSystemException(ExceptionUtils.getMessage("Cannot create '{}'", path), ex);
   }
 }
Code example #22
  /*
   * Since the NameNode does not persist any block locations, an addBlock()
   * retry call after a NameNode restart should re-select the locations and
   * return them to the client. See HDFS-5257.
   */
  @Test
  public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
    // create file
    nameNodeRpc.create(
        src,
        FsPermission.getFileDefault(),
        "clientName",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true,
        (short) 3,
        1024,
        null);
    // start first addBlock()
    LOG.info("Starting first addBlock for " + src);
    LocatedBlock lb1 =
        nameNodeRpc.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);
    assertTrue("Block locations should be present", lb1.getLocations().length > 0);

    cluster.restartNameNode();
    nameNodeRpc = cluster.getNameNodeRpc();
    LocatedBlock lb2 =
        nameNodeRpc.addBlock(src, "clientName", null, null, INodeId.GRANDFATHER_INODE_ID, null);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
  }
Code example #23
 @Test
 public void testSetAclSnapshotPath() throws Exception {
   FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
   SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
   List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "bruce"));
   exception.expect(SnapshotAccessControlException.class);
   hdfs.setAcl(snapshotPath, aclSpec);
 }
Code example #24
File: FTPFileSystem.java Project: shahidminhas/abc
  /**
   * A stream obtained via this call must be closed before using other APIs of this class or else
   * the invocation will block.
   */
  public FSDataOutputStream create(
      Path file,
      FsPermission permission,
      boolean overwrite,
      int bufferSize,
      short replication,
      long blockSize,
      Progressable progress)
      throws IOException {
    final FTPClient client = connect();
    Path workDir = new Path(client.printWorkingDirectory());
    Path absolute = makeAbsolute(workDir, file);
    if (exists(client, file)) {
      if (overwrite) {
        delete(client, file);
      } else {
        disconnect(client);
        throw new IOException("File already exists: " + file);
      }
    }

    Path parent = absolute.getParent();
    if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) {
      parent = (parent == null) ? new Path("/") : parent;
      disconnect(client);
      throw new IOException("create(): Mkdirs failed to create: " + parent);
    }
    client.allocate(bufferSize);
    // Change to parent directory on the server. Only then can we write to the
    // file on the server by opening up an OutputStream. As a side effect the
    // working directory on the server is changed to the parent directory of the
    // file. The FTP client connection is closed when close() is called on the
    // FSDataOutputStream.
    client.changeWorkingDirectory(parent.toUri().getPath());
    FSDataOutputStream fos =
        new FSDataOutputStream(client.storeFileStream(file.getName()), statistics) {

          public void close() throws IOException {
            super.close();
            if (!client.isConnected()) {
              throw new FTPException("Client not connected");
            }
            boolean cmdCompleted = client.completePendingCommand();
            disconnect(client);
            if (!cmdCompleted) {
              throw new FTPException(
                  "Could not complete transfer, Reply Code - " + client.getReplyCode());
            }
          }
        };
    if (!FTPReply.isPositivePreliminary(client.getReplyCode())) {
      // The ftpClient is in an inconsistent state. Must close the stream,
      // which in turn will log out and disconnect from the FTP server.
      fos.close();
      throw new IOException("Unable to create file: " + file + ", Aborting");
    }
    return fos;
  }
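Following the javadoc note that the returned stream must be closed before any other call on this FileSystem, here is a hedged usage sketch; ftpFs (an already-initialized FTPFileSystem), the remote path, and the payload are assumptions for illustration.

   // Hypothetical usage: write the file, then close before issuing any further FTP calls.
   Path remote = new Path("/upload/report.txt");
   FSDataOutputStream out = ftpFs.create(remote, FsPermission.getDefault(),
       true, 4096, (short) 1, 64L * 1024 * 1024, null);
   try {
     out.write("hello".getBytes(StandardCharsets.UTF_8));
   } finally {
     out.close(); // completes the pending FTP command and disconnects
   }
   FileStatus st = ftpFs.getFileStatus(remote); // safe only after close()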
Code example #25
 private void mkdir(FileSystem fs, Path path, FsPermission fsp) throws IOException {
   if (!fs.exists(path)) {
     try {
       fs.mkdirs(path, fsp);
       FileStatus fsStatus = fs.getFileStatus(path);
       LOG.info(
           "Perms after creating "
               + fsStatus.getPermission().toShort()
               + ", Expected: "
               + fsp.toShort());
       if (fsStatus.getPermission().toShort() != fsp.toShort()) {
         LOG.info("Explicitly setting permissions to : " + fsp.toShort() + ", " + fsp);
         fs.setPermission(path, fsp);
       }
     } catch (FileAlreadyExistsException e) {
       LOG.info("Directory: [" + path + "] already exists.");
     }
   }
 }
Code example #26
 private static INodeDirectory createINodeDirectory(
     INodeDirectory parent, String name, String owner, String group, short perm)
     throws IOException {
   PermissionStatus permStatus =
       PermissionStatus.createImmutable(owner, group, FsPermission.createImmutable(perm));
   INodeDirectory inodeDirectory =
       new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
   parent.addChild(inodeDirectory);
   return inodeDirectory;
 }
Code example #27
File: FileUtil.java Project: luochen01/tongji_thesis
 private static void checkReturnValue(boolean rv, File p, FsPermission permission)
     throws IOException {
   if (!rv) {
     throw new IOException(
         "Failed to set permissions of path: "
             + p
             + " to "
             + String.format("%04o", permission.toShort()));
   }
 }
Code example #28
 public static boolean chmod(Path path, String chmodStr, boolean recursive) {
   boolean result = false;
   try {
     fs.setPermission(path, FsPermission.valueOf(chmodStr));
     if (recursive) {
       FileStatus[] stats = fs.listStatus(path);
       for (FileStatus stat : stats) {
         Path subPath = stat.getPath();
         fs.setPermission(subPath, FsPermission.valueOf(chmodStr));
         if (fs.isDirectory(subPath)) {
           chmod(subPath, chmodStr, recursive);
         }
       }
     }
     result = true;
   } catch (Exception e) {
     LOG.error("Failed to change permissions on " + path + ": " + e.getMessage(), e);
   }
   return result;
 }
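One caveat when calling this helper: FsPermission.valueOf expects a full unix-style listing string (ten characters, including the leading file-type column), not a bare octal mode. A hedged usage sketch with a made-up path:

  // Hypothetical call: recursively apply rwxr-x--- under /data/shared.
  // Note the leading '-' (file-type column) required by FsPermission.valueOf.
  boolean ok = chmod(new Path("/data/shared"), "-rwxr-x---", true);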
Code example #29
  /**
   * Test for {@link TFS#setPermission(Path, org.apache.hadoop.fs.permission.FsPermission)}. It
   * tests changing the permission of a file through TFS.
   */
  @Test
  public void chmodTest() throws Exception {
    Path fileA = new Path("/chmodfileA");

    create(sTFS, fileA);
    FileStatus fs = sTFS.getFileStatus(fileA);
    // Default permission should be 0644
    Assert.assertEquals((short) 0644, fs.getPermission().toShort());

    sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755));
    Assert.assertEquals((short) 0755, sTFS.getFileStatus(fileA).getPermission().toShort());
  }
Code example #30
 /**
  * Create the root scratch dir on HDFS (if it doesn't already exist) and make sure it is writable.
  *
  * @param conf the Hive configuration
  * @return the path of the root scratch dir on HDFS
  * @throws IOException if the directory cannot be created or inspected
  */
 private Path createRootHDFSDir(HiveConf conf) throws IOException {
   Path rootHDFSDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR));
   FsPermission writableHDFSDirPermission = new FsPermission((short) 00733);
   FileSystem fs = rootHDFSDirPath.getFileSystem(conf);
   if (!fs.exists(rootHDFSDirPath)) {
     Utilities.createDirsWithPermission(conf, rootHDFSDirPath, writableHDFSDirPermission, true);
   }
   FsPermission currentHDFSDirPermission = fs.getFileStatus(rootHDFSDirPath).getPermission();
   LOG.debug(
       "HDFS root scratch dir: " + rootHDFSDirPath + ", permission: " + currentHDFSDirPermission);
   // If the root HDFS scratch dir already exists, make sure it is writeable.
   if (!((currentHDFSDirPermission.toShort() & writableHDFSDirPermission.toShort())
       == writableHDFSDirPermission.toShort())) {
     throw new RuntimeException(
         "The root scratch dir: "
             + rootHDFSDirPath
             + " on HDFS should be writable. Current permissions are: "
             + currentHDFSDirPermission);
   }
   return rootHDFSDirPath;
 }
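The writability check above is a bitwise containment test: the scratch dir passes only if every bit of 00733 is already present in its current mode. A small worked example with made-up modes:

   short required = 00733;  // owner rwx, group wx, other wx
   short current = 01777;   // e.g. a sticky, world-writable directory
   boolean ok = (current & required) == required;        // true: all 0733 bits are set
   short tooTight = 0750;
   boolean bad = (tooTight & required) == required;      // false: group/other write bits missing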