private void testMovingFiles(boolean useAcl) throws Exception {
    // Create two tmp directories with wide-open permissions and the sticky bit set
    Path tmpPath = new Path("/tmp");
    Path tmpPath2 = new Path("/tmp2");
    hdfs.mkdirs(tmpPath);
    hdfs.mkdirs(tmpPath2);
    hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
    if (useAcl) {
      applyAcl(tmpPath);
    }
    hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
    if (useAcl) {
      applyAcl(tmpPath2);
    }

    // Write a file to the new tmp directory as a regular user
    Path file = new Path(tmpPath, "foo");

    writeFile(hdfsAsUser1, file);

    // Log onto cluster as another user and attempt to move the file
    try {
      hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
      fail("Shouldn't be able to rename someone else's file with SB on");
    } catch (IOException ioe) {
      assertTrue(ioe instanceof AccessControlException);
      assertTrue(ioe.getMessage().contains("sticky bit"));
    }
  }
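  // Editorial aside (not part of the original test): the 01777 mode used above works
  // because FsPermission stores the sticky bit in the leading octal digit. A minimal
  // sanity check of that encoding, assuming only the standard Hadoop API:
  @Test
  public void stickyBitEncodingSketch() {
    FsPermission sticky = new FsPermission((short) 01777);
    assertTrue(sticky.getStickyBit()); // the leading 1 is the sticky bit
    assertEquals((short) 01777, sticky.toShort()); // the mode round-trips through toShort()
    assertFalse(new FsPermission((short) 0777).getStickyBit());
  }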
  @org.junit.Test
  public void testPermissions() throws Exception {

    Path myFile = new Path("filePerm.txt");
    fs.create(myFile).close();
    short perm = 0777;
    fs.setPermission(myFile, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(myFile).getPermission().toShort());

    perm = 0700;
    fs.setPermission(myFile, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(myFile).getPermission().toShort());

    fs.delete(myFile, false);
    assertFalse(fs.exists(myFile));

    /* directory permissions */
    Path directory = new Path("aa/bb/cc");
    perm = 0700;
    fs.mkdirs(directory, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(directory).getPermission().toShort());
    fs.delete(new Path("aa"), true);
    assertFalse(fs.exists(directory));

    perm = 0777;
    fs.mkdirs(directory, new FsPermission(perm));
    assertEquals(perm, fs.getFileStatus(directory).getPermission().toShort());
    fs.delete(new Path("aa"), true);
    assertFalse(fs.exists(directory));
  }
  @Test
  public void testGeneralSBBehavior() throws Exception {
    Path baseDir = new Path("/mcgann");
    hdfs.mkdirs(baseDir);

    // Create a tmp directory with wide-open permissions and sticky bit
    Path p = new Path(baseDir, "tmp");

    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));

    confirmCanAppend(conf, p);

    baseDir = new Path("/eccleston");
    hdfs.mkdirs(baseDir);
    p = new Path(baseDir, "roguetraders");

    hdfs.mkdirs(p);
    confirmSettingAndGetting(hdfs, p, baseDir);

    baseDir = new Path("/tennant");
    hdfs.mkdirs(baseDir);
    p = new Path(baseDir, "contemporary");
    hdfs.mkdirs(p);
    hdfs.setPermission(p, new FsPermission((short) 01777));
    confirmDeletingFiles(conf, p);

    baseDir = new Path("/smith");
    hdfs.mkdirs(baseDir);
    p = new Path(baseDir, "scissorsisters");

    // Turn on its sticky bit
    hdfs.mkdirs(p, new FsPermission((short) 01666));
    confirmStickyBitDoesntPropagate(hdfs, baseDir);
  }
  @Test
  public void testAclStickyBitPersistence() throws Exception {
    // A tale of three directories...
    Path sbSet = new Path("/Housemartins");
    Path sbNotSpecified = new Path("/INXS");
    Path sbSetOff = new Path("/Easyworld");

    for (Path p : new Path[] {sbSet, sbNotSpecified, sbSetOff}) hdfs.mkdirs(p);

    // Two of the directories have their sticky bits set explicitly...
    hdfs.setPermission(sbSet, new FsPermission((short) 01777));
    applyAcl(sbSet);
    hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
    applyAcl(sbSetOff);

    shutdown();

    // Start file system up again
    initCluster(false);

    assertTrue(hdfs.exists(sbSet));
    assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());

    assertTrue(hdfs.exists(sbNotSpecified));
    assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission().getStickyBit());

    assertTrue(hdfs.exists(sbSetOff));
    assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
  }
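The tests above rely on an applyAcl(...) helper defined elsewhere in the original test class. A minimal sketch of what such a helper might look like, assuming the Hadoop 2.4+ ACL API and a hypothetical "user1" principal:

  private void applyAcl(Path p) throws IOException {
    // Any named-user entry gives the directory an extended ACL.
    hdfs.modifyAclEntries(p, java.util.Arrays.asList(
        new org.apache.hadoop.fs.permission.AclEntry.Builder()
            .setType(org.apache.hadoop.fs.permission.AclEntryType.USER)
            .setName("user1")
            .setPermission(org.apache.hadoop.fs.permission.FsAction.ALL)
            .build()));
  }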
Example #5
  /**
   * Copied from FileUtil to transfer ownership
   *
   * @param srcFS source FileSystem
   * @param srcStatus FileStatus of the source file or directory
   * @param dstFS destination FileSystem
   * @param dst destination path
   * @param deleteSource whether to delete the source after a successful copy
   * @param overwrite whether to overwrite an existing destination file
   * @param conf configuration used for the byte copy
   * @return true if the copy (and the optional source delete) succeeded
   * @throws IOException
   */
  public static boolean copy(
      FileSystem srcFS,
      FileStatus srcStatus,
      FileSystem dstFS,
      Path dst,
      boolean deleteSource,
      boolean overwrite,
      Configuration conf)
      throws IOException {
    Path src = srcStatus.getPath();
    // dst = checkDest(src.getName(), dstFS, dst, overwrite);
    if (srcStatus.isDirectory()) {
      // checkDependencies(srcFS, src, dstFS, dst);
      if (!mkdirs(dstFS, dst)) {
        return false;
      }

      FileStatus[] contents = srcFS.listStatus(src);
      for (int i = 0; i < contents.length; i++) {
        copy(
            srcFS,
            contents[i],
            dstFS,
            new Path(dst, contents[i].getPath().getName()),
            deleteSource,
            overwrite,
            conf);
      }
    } else {
      try (InputStream in = srcFS.open(src);
          OutputStream out = dstFS.create(dst, overwrite)) {
        // Streams are closed by try-with-resources, so don't ask copyBytes to close them too.
        org.apache.hadoop.io.IOUtils.copyBytes(in, out, conf, false);
      }
    }

    // TODO: change group and limit write to group
    if (srcStatus.isDirectory()) {
      dstFS.setPermission(dst, new FsPermission((short) 0777));
    } else {
      dstFS.setPermission(dst, new FsPermission((short) 0777) /*"ugo+w"*/);
    }
    // dstFS.setOwner(dst, null, srcStatus.getGroup());

    /*
        try {
          // transfer owner
          // DOES NOT WORK only super user can change file owner
          dstFS.setOwner(dst, srcStatus.getOwner(), srcStatus.getGroup());
        } catch (IOException e) {
          LOG.warn("Failed to change owner on {} to {}", dst, srcStatus.getOwner(), e);
          throw e;
        }
    */
    if (deleteSource) {
      return srcFS.delete(src, true);
    } else {
      return true;
    }
  }
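A hedged usage sketch for the copy(...) helper above; the filesystem URIs and paths are illustrative, not from the original source:

  Configuration conf = new Configuration();
  FileSystem srcFS = FileSystem.get(java.net.URI.create("hdfs://src-cluster:8020"), conf);
  FileSystem dstFS = FileSystem.get(java.net.URI.create("hdfs://dst-cluster:8020"), conf);
  Path src = new Path("/data/in");
  boolean ok = copy(srcFS, srcFS.getFileStatus(src), dstFS, new Path("/data/out"),
      false /* keep the source */, true /* overwrite */, conf);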
Example #6
 /** Sets {@code permission} on {@code dst} and on its immediate children (one level only, not fully recursive). */
 public static void setPermission(FileSystem fs, Path dst, FsPermission permission)
     throws IOException {
   FileStatus[] contents = fs.listStatus(dst);
   for (int i = 0; i < contents.length; i++) {
     fs.setPermission(contents[i].getPath(), permission);
   }
   fs.setPermission(dst, permission);
 }
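An illustrative call, assuming fs is an already-initialized FileSystem; this opens a directory and its immediate children up to group-writable:

  setPermission(fs, new Path("/shared/scratch"), new FsPermission((short) 0775));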
Example #7
    @Override
    public String prepareBulkLoad(final byte[] family, final String srcPath) throws IOException {
      Path p = new Path(srcPath);
      Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));
      if (srcFs == null) {
        srcFs = FileSystem.get(p.toUri(), conf);
      }

      if (!isFile(p)) {
        throw new IOException("Path does not reference a file: " + p);
      }

      // Check to see if the source and target filesystems are the same
      if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
        LOG.debug(
            "Bulk-load file "
                + srcPath
                + " is on different filesystem than "
                + "the destination filesystem. Copying file over to destination staging dir.");
        FileUtil.copy(srcFs, p, fs, stageP, false, conf);
      } else {
        LOG.debug("Moving " + p + " to " + stageP);
        FileStatus origFileStatus = fs.getFileStatus(p);
        origPermissions.put(srcPath, origFileStatus.getPermission());
        if (!fs.rename(p, stageP)) {
          throw new IOException("Failed to move HFile: " + p + " to " + stageP);
        }
      }
      fs.setPermission(stageP, PERM_ALL_ACCESS);
      return stageP.toString();
    }
Example #8
  @Override
  public void start(CoprocessorEnvironment env) {
    this.env = (RegionCoprocessorEnvironment) env;
    random = new SecureRandom();
    conf = env.getConfiguration();
    baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
    this.userProvider = UserProvider.instantiate(conf);

    try {
      fs = FileSystem.get(conf);
      fs.mkdirs(baseStagingDir, PERM_HIDDEN);
      fs.setPermission(baseStagingDir, PERM_HIDDEN);
      // no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
      fs.mkdirs(new Path(baseStagingDir, "DONOTERASE"), PERM_HIDDEN);
      FileStatus status = fs.getFileStatus(baseStagingDir);
      if (status == null) {
        throw new IllegalStateException("Failed to create staging directory");
      }
      if (!status.getPermission().equals(PERM_HIDDEN)) {
        throw new IllegalStateException(
            "Directory already exists but permissions aren't set to '-rwx--x--x' ");
      }
    } catch (IOException e) {
      throw new IllegalStateException("Failed to get FileSystem instance", e);
    }
  }
Example #9
    @Override
    public void failedBulkLoad(final byte[] family, final String srcPath) throws IOException {
      if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
        // files are copied so no need to move them back
        return;
      }
      Path p = new Path(srcPath);
      Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));

      // In case of Replication for bulk load files, hfiles are not renamed by end point during
      // prepare stage, so no need of rename here again
      if (p.equals(stageP)) {
        LOG.debug(p.getName() + " is already available in source directory. Skipping rename.");
        return;
      }

      LOG.debug("Moving " + stageP + " back to " + p);
      if (!fs.rename(stageP, p)) {
        throw new IOException("Failed to move HFile: " + stageP + " to " + p);
      }

      // restore original permission
      if (origPermissions.containsKey(srcPath)) {
        fs.setPermission(p, origPermissions.get(srcPath));
      } else {
        LOG.warn("Can't find previous permission for path=" + srcPath);
      }
    }
Example #10
 static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi) throws IOException {
   final Path home = new Path("/user/" + ugi.getUserName());
   fs.mkdirs(home);
   fs.setOwner(home, ugi.getUserName(), ugi.getGroupNames()[0]);
   fs.setPermission(home, new FsPermission((short) 0700));
   return home;
 }
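A usage sketch with illustrative names: note that setOwner(...) generally requires superuser privileges, so this helper is assumed to run in a privileged context.

  FileSystem fs = FileSystem.get(new Configuration());
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  Path home = createHomeDirectory(fs, ugi); // e.g. /user/<name>, owned by the user, mode 0700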
Example #11
  /**
   * Create log directory for the given attempt. This involves creating the following and setting
   * proper permissions for the new directories <br>
   * {hadoop.log.dir}/userlogs/<jobid> <br>
   * {hadoop.log.dir}/userlogs/<jobid>/<attempt-id-as-symlink> <br>
   * {one of the mapred-local-dirs}/userlogs/<jobid> <br>
   * {one of the mapred-local-dirs}/userlogs/<jobid>/<attempt-id>
   *
   * @param taskID attempt-id for which log dir is to be created
   * @param isCleanup Is this attempt a cleanup attempt?
   * @param localDirs mapred local directories
   * @throws IOException
   */
  public static void createTaskAttemptLogDir(
      TaskAttemptID taskID, boolean isCleanup, String[] localDirs) throws IOException {
    String cleanupSuffix = isCleanup ? ".cleanup" : "";
    String strAttemptLogDir = getTaskAttemptLogDir(taskID, cleanupSuffix, localDirs);
    File attemptLogDir = new File(strAttemptLogDir);
    if (!attemptLogDir.mkdirs()) {
      throw new IOException("Creation of " + attemptLogDir + " failed.");
    }
    String strLinkAttemptLogDir =
        getJobDir(taskID.getJobID()).getAbsolutePath()
            + File.separatorChar
            + taskID.toString()
            + cleanupSuffix;
    if (FileUtil.symLink(strAttemptLogDir, strLinkAttemptLogDir) != 0) {
      throw new IOException(
          "Creation of symlink from "
              + strLinkAttemptLogDir
              + " to "
              + strAttemptLogDir
              + " failed.");
    }

    FileSystem localFs = FileSystem.getLocal(new Configuration());
    localFs.setPermission(new Path(attemptLogDir.getPath()), new FsPermission((short) 0700));
  }
Example #12
 public void recoverFileLease(
     final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter)
     throws IOException {
   LOG.info("Recovering file " + p.toString() + " by changing permission to readonly");
   FsPermission roPerm = new FsPermission((short) 0444);
   fs.setPermission(p, roPerm);
 }
Example #13
 /** Create a directory. */
 static boolean createNonexistingDirectory(FileSystem fs, Path dir) throws IOException {
   if (fs.exists(dir)) {
     Util.err.println("dir (= " + dir + ") already exists.");
     return false;
   } else if (!fs.mkdirs(dir)) {
     throw new IOException("Cannot create working directory " + dir);
   }
   fs.setPermission(dir, new FsPermission((short) 0777));
   return true;
 }
Example #14
 public static boolean chmod(Path path, String chmodStr, boolean recursive) {
   boolean result = false;
   try {
     fs.setPermission(path, FsPermission.valueOf(chmodStr));
     if (recursive) {
       FileStatus[] stats = fs.listStatus(path);
       for (FileStatus stat : stats) {
         Path subPath = stat.getPath();
         fs.setPermission(subPath, FsPermission.valueOf(chmodStr));
         if (fs.isDirectory(subPath)) {
           chmod(subPath, chmodStr, recursive);
         }
       }
     }
     result = true;
   } catch (Exception e) {
     LOG.error("Failed to change file permissions: " + e.getMessage());
     e.printStackTrace();
   }
   return result;
 }
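Note that FsPermission.valueOf(...) parses a unix symbolic permission string as printed by ls -l (e.g. "-rwxr-xr-x"), not an octal mode. An illustrative call:

  chmod(new Path("/data/out"), "-rwxr-xr-x", true);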
Example #15
  /**
   * Test for {@link TFS#setPermission(Path, org.apache.hadoop.fs.permission.FsPermission)}. It
   * tests changing the permission of a file through TFS.
   */
  @Test
  public void chmodTest() throws Exception {
    Path fileA = new Path("/chmodfileA");

    create(sTFS, fileA);
    FileStatus status = sTFS.getFileStatus(fileA);
    // Default permission should be 0644
    Assert.assertEquals((short) 0644, status.getPermission().toShort());

    sTFS.setPermission(fileA, FsPermission.createImmutable((short) 0755));
    Assert.assertEquals((short) 0755, sTFS.getFileStatus(fileA).getPermission().toShort());
  }
Example #16
  int start(
      Configuration conf, String traceIn, Path ioPath, long genbytes, UserResolver userResolver)
      throws IOException, InterruptedException {
    InputStream trace = null;
    try {
      Path scratchDir = new Path(ioPath, conf.get(GRIDMIX_OUT_DIR, "gridmix"));
      final FileSystem scratchFs = scratchDir.getFileSystem(conf);
      scratchFs.mkdirs(scratchDir, new FsPermission((short) 0777));
      scratchFs.setPermission(scratchDir, new FsPermission((short) 0777));
      // add shutdown hook for SIGINT, etc.
      Runtime.getRuntime().addShutdownHook(sdh);
      CountDownLatch startFlag = new CountDownLatch(1);
      try {
        // Create, start job submission threads
        startThreads(conf, traceIn, ioPath, scratchDir, startFlag, userResolver);
        // Write input data if specified
        if (genbytes > 0) {
          writeInputData(genbytes, ioPath);
        }
        // scan input dir contents
        submitter.refreshFilePool();
        factory.start();
        statistics.start();
      } catch (Throwable e) {
        LOG.error("Startup failed", e);
        if (factory != null) factory.abort(); // abort pipeline
      } finally {
        // signal for factory to start; sets start time
        startFlag.countDown();
      }
      if (factory != null) {
        // wait for input exhaustion
        factory.join(Long.MAX_VALUE);
        final Throwable badTraceException = factory.error();
        if (null != badTraceException) {
          LOG.error("Error in trace", badTraceException);
          throw new IOException("Error in trace", badTraceException);
        }
        // wait for pending tasks to be submitted
        submitter.shutdown();
        submitter.join(Long.MAX_VALUE);
        // wait for running tasks to complete
        monitor.shutdown();
        monitor.join(Long.MAX_VALUE);

        statistics.shutdown();
        statistics.join(Long.MAX_VALUE);
      }
    } finally {
      IOUtils.cleanup(LOG, trace);
    }
    return 0;
  }
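Example #17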
  /** Test basic ability to get and set sticky bits on files and directories. */
  private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir) throws IOException {
    // Initially sticky bit should not be set
    assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());

    // Same permission, but with sticky bit on
    short withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);

    assertTrue((new FsPermission(withSB)).getStickyBit());

    hdfs.setPermission(p, new FsPermission(withSB));
    assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());

    // Write a file to the fs, try to set its sticky bit
    Path f = new Path(baseDir, "somefile");
    writeFile(hdfs, f);
    assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());

    withSB = (short) (hdfs.getFileStatus(f).getPermission().toShort() | 01000);

    hdfs.setPermission(f, new FsPermission(withSB));

    assertTrue(hdfs.getFileStatus(f).getPermission().getStickyBit());
  }
Example #18
 public static boolean chmod(String filePath, String chmodStr) {
   boolean result = false;
   Path path = new Path(filePath);
   try {
     // Apply the permission to each child, then to the path itself.
     FileStatus[] stats = fs.listStatus(path);
     for (FileStatus stat : stats) {
       fs.setPermission(stat.getPath(), FsPermission.valueOf(chmodStr));
     }
     fs.setPermission(path, FsPermission.valueOf(chmodStr));
     result = true;
   } catch (Exception e) {
     LOG.error("Failed to change file permissions: " + e.getMessage());
     e.printStackTrace();
   }
   return result;
 }
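Example #19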
  // TODO If the FS objects are the same, this should be a rename instead of a
  // copy.
  private void moveToDoneNow(Path fromPath, Path toPath) throws IOException {
    // check if path exists, in case of retries it may not exist
    if (stagingDirFS.exists(fromPath)) {
      LOG.info("Moving " + fromPath.toString() + " to " + toPath.toString());
      // TODO temporarily removing the existing dst
      if (doneDirFS.exists(toPath)) {
        doneDirFS.delete(toPath, true);
      }
      boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath, false, getConfig());

      if (copied) {
        LOG.info("Copied to done location: " + toPath);
      } else {
        LOG.info("copy failed");
      }
      doneDirFS.setPermission(
          toPath, new FsPermission(JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS));

      stagingDirFS.delete(fromPath, false);
    }
  }
Example #20
 @Override
 public void setPermission(String path, String posixPerm) throws IOException {
   try {
     FileStatus fileStatus = mFs.getFileStatus(new Path(path));
     LOG.info(
         "Changing file '"
             + fileStatus.getPath()
             + "' permissions from: "
             + fileStatus.getPermission()
             + " to "
             + posixPerm);
     FsPermission perm = new FsPermission(Short.parseShort(posixPerm));
     mFs.setPermission(fileStatus.getPath(), perm);
   } catch (IOException e) {
     LOG.error("Fail to set permission for " + path + " with perm " + posixPerm, e);
     throw e;
   }
 }
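 // Editorial note: Short.parseShort above reads posixPerm as a *decimal* string, so
 // callers are assumed to pass the decimal value of the permission bits; e.g. octal
 // 0644 (rw-r--r--) would be passed as "420":
 //
 //   setPermission("/data/file", Short.toString((short) 0644)); // passes "420"

Example #21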
  /**
   * Ensure that even if a file is in a directory with the sticky bit on, another user can write to
   * that file (assuming correct permissions).
   */
  private void confirmCanAppend(Configuration conf, Path p) throws Exception {
    // Write a file to the new tmp directory as a regular user
    Path file = new Path(p, "foo");
    writeFile(hdfsAsUser1, file);
    hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));

    // Log onto cluster as another user and attempt to append to file
    Path file2 = new Path(p, "foo");
    FSDataOutputStream h = null;
    try {
      h = hdfsAsUser2.append(file2);
      h.write("Some more data".getBytes());
      h.close();
      h = null;
    } finally {
      IOUtils.cleanup(null, h);
    }
  }
Example #22
    @Override
    public void failedBulkLoad(final byte[] family, final String srcPath) throws IOException {
      if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
        // files are copied so no need to move them back
        return;
      }
      Path p = new Path(srcPath);
      Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));
      LOG.debug("Moving " + stageP + " back to " + p);
      if (!fs.rename(stageP, p)) {
        throw new IOException("Failed to move HFile: " + stageP + " to " + p);
      }

      // restore original permission
      if (origPermissions.containsKey(srcPath)) {
        fs.setPermission(p, origPermissions.get(srcPath));
      } else {
        LOG.warn("Can't find previous permission for path=" + srcPath);
      }
    }
Example #23
 private static void updatePermissions(
     FileStatus src, FileStatus dst, EnumSet<FileAttribute> preserved, FileSystem destFileSys)
     throws IOException {
   String owner = null;
   String group = null;
   if (preserved.contains(FileAttribute.USER) && !src.getOwner().equals(dst.getOwner())) {
     owner = src.getOwner();
   }
   if (preserved.contains(FileAttribute.GROUP) && !src.getGroup().equals(dst.getGroup())) {
     group = src.getGroup();
   }
   if (owner != null || group != null) {
     destFileSys.setOwner(dst.getPath(), owner, group);
   }
   if (preserved.contains(FileAttribute.PERMISSION)
       && !src.getPermission().equals(dst.getPermission())) {
     destFileSys.setPermission(dst.getPath(), src.getPermission());
   }
 }
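An illustrative call, assuming FileAttribute is the preserve-flags enum used by the surrounding copy tool and that source/destination statuses are already in hand:

  EnumSet<FileAttribute> preserved =
      EnumSet.of(FileAttribute.USER, FileAttribute.GROUP, FileAttribute.PERMISSION);
  updatePermissions(srcStatus, dstStatus, preserved, dstFileSys);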
Example #24
  @Override
  public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
      options.addOption(OPTION_INPUT_PATH);
      options.addOption(OPTION_HTABLE_NAME);
      options.addOption(OPTION_CUBE_NAME);
      parseOptions(options, args);

      String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
      // e.g
      // /tmp/kylin-3f150b00-3332-41ca-9d3d-652f67f044d7/test_kylin_cube_with_slr_ready_2_segments/hfile/
      // end with "/"
      String input = getOptionValue(OPTION_INPUT_PATH);

      Configuration conf = HBaseConfiguration.create(getConf());
      FileSystem fs = FileSystem.get(conf);

      String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
      KylinConfig config = KylinConfig.getInstanceFromEnv();
      CubeManager cubeMgr = CubeManager.getInstance(config);
      CubeInstance cube = cubeMgr.getCube(cubeName);
      CubeDesc cubeDesc = cube.getDescriptor();
      FsPermission permission = new FsPermission((short) 0777);
      for (HBaseColumnFamilyDesc cf : cubeDesc.getHBaseMapping().getColumnFamily()) {
        String cfName = cf.getName();
        fs.setPermission(new Path(input + cfName), permission);
      }

      String[] newArgs = new String[2];
      newArgs[0] = input;
      newArgs[1] = tableName;

      log.debug("Start to run LoadIncrementalHFiles");
      int ret = ToolRunner.run(new LoadIncrementalHFiles(conf), newArgs);
      log.debug("End to run LoadIncrementalHFiles");
      return ret;
    } catch (Exception e) {
      printUsage(options);
      throw e;
    }
  }
Example #25
 /**
  * Initializes the staging directory and returns the path. It also verifies that the directory
  * has the expected ownership and permissions.
  *
  * @param client the JobClient whose staging area directory is resolved
  * @param conf the configuration used to access the staging FileSystem
  */
 public static Path getStagingDir(JobClient client, Configuration conf)
     throws IOException, InterruptedException {
   Path stagingArea = client.getStagingAreaDir();
   FileSystem fs = stagingArea.getFileSystem(conf);
   String realUser;
   String currentUser;
   UserGroupInformation ugi = UserGroupInformation.getLoginUser();
   realUser = ugi.getShortUserName();
   currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
   if (fs.exists(stagingArea)) {
     FileStatus fsStatus = fs.getFileStatus(stagingArea);
     String owner = fsStatus.getOwner();
     if (!(owner.equals(currentUser) || owner.equals(realUser))) {
       throw new IOException(
           "The ownership on the staging directory "
               + stagingArea
               + " is not as expected. "
               + "It is owned by "
               + owner
               + ". The directory must "
               + "be owned by the submitter "
               + currentUser
               + " or "
               + "by "
               + realUser);
     }
     if (!fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
       LOG.info(
           "Permissions on staging directory "
               + stagingArea
               + " are "
               + "incorrect: "
               + fsStatus.getPermission()
               + ". Fixing permissions "
               + "to correct value "
               + JOB_DIR_PERMISSION);
       fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
     }
   } else {
     fs.mkdirs(stagingArea, new FsPermission(JOB_DIR_PERMISSION));
   }
   return stagingArea;
 }
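Example #26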
 private void mkdir(FileSystem fs, Path path, FsPermission fsp) throws IOException {
   if (!fs.exists(path)) {
     try {
       fs.mkdirs(path, fsp);
       FileStatus fsStatus = fs.getFileStatus(path);
       LOG.info(
           "Perms after creating "
               + fsStatus.getPermission().toShort()
               + ", Expected: "
               + fsp.toShort());
       if (fsStatus.getPermission().toShort() != fsp.toShort()) {
         LOG.info("Explicitly setting permissions to : " + fsp.toShort() + ", " + fsp);
         fs.setPermission(path, fsp);
       }
     } catch (FileAlreadyExistsException e) {
       LOG.info("Directory: [" + path + "] already exists.");
     }
   }
 }
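The explicit setPermission(...) fallback above exists because the filesystem applies the client-side umask to the mode passed to mkdirs, so the created directory can end up with fewer permission bits than requested. A sketch of that interaction, with an illustrative 022 umask:

  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "022"); // fs.permissions.umask-mode
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(new Path("/tmp/x"), new FsPermission((short) 0777));
  // The directory may come out as 0755 (0777 & ~022), hence the follow-up setPermission.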
Example #27
 private static FileOutputStream insecureCreateForWrite(File f, int permissions)
     throws IOException {
   // If we can't do real security, do a racy exists check followed by an
   // open and chmod
   if (f.exists()) {
     throw new AlreadyExistsException("File " + f + " already exists");
   }
   FileOutputStream fos = new FileOutputStream(f);
   boolean success = false;
   try {
     rawFilesystem.setPermission(
         new Path(f.getAbsolutePath()), new FsPermission((short) permissions));
     success = true;
     return fos;
   } finally {
     if (!success) {
       fos.close();
     }
   }
 }
Example #28
  private static void createTable(String tableName, String tablePerm) throws Exception {
    Table tbl = new Table();
    tbl.setDbName(DATABASE);
    tbl.setTableName(tableName);
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(ColumnHolder.colMapping.get(tableName));
    tbl.setSd(sd);
    sd.setParameters(new HashMap<String, String>());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
    sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo()
        .setSerializationLib(org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
    tbl.setPartitionKeys(ColumnHolder.partitionCols);

    hmsc.createTable(tbl);
    FileSystem fs = FileSystem.get(mrConf);
    fs.setPermission(new Path(warehousedir, tableName), new FsPermission(tablePerm));
  }
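An illustrative call: tablePerm is handed to the FsPermission(String) constructor, which accepts an octal mode string (as with new FsPermission("777") elsewhere on this page). The table name here is hypothetical.

  createTable("my_table", "750");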
Example #29
  /**
   * Creates one file, writes a few bytes to it and then closes it. Reopens the same file for
   * appending, writes the remaining blocks and then closes it. Verifies that all data exists in the file.
   *
   * @throws IOException an exception might be thrown
   */
  public void testSimpleAppend() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    conf.setInt("dfs.datanode.handler.count", 50);
    conf.setBoolean("dfs.support.append", true);
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    try {
      { // test appending to a file.

        // create a new file.
        Path file1 = new Path("/simpleAppend.dat");
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
        System.out.println("Created file simpleAppend.dat");

        // write to file
        int mid = 186; // io.bytes.per.checksum bytes
        System.out.println("Writing " + mid + " bytes to file " + file1);
        stm.write(fileContents, 0, mid);
        stm.close();
        System.out.println("Wrote and Closed first part of file.");

        // write to file
        int mid2 = 607; // io.bytes.per.checksum bytes
        System.out.println("Writing " + mid + " bytes to file " + file1);
        stm = fs.append(file1);
        stm.write(fileContents, mid, mid2 - mid);
        stm.close();
        System.out.println("Wrote and Closed second part of file.");

        // write the remainder of the file
        stm = fs.append(file1);

        // ensure getPos is set to reflect existing size of the file
        assertTrue(stm.getPos() > 0);

        System.out.println(
            "Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
        stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
        System.out.println("Written second part of file");
        stm.close();
        System.out.println("Wrote and Closed second part of file.");

        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
      }

      { // test appending to a non-existent file.
        FSDataOutputStream out = null;
        try {
          out = fs.append(new Path("/non-existing.dat"));
          fail("Expected to have FileNotFoundException");
        } catch (java.io.FileNotFoundException fnfe) {
          System.out.println("Good: got " + fnfe);
          fnfe.printStackTrace(System.out);
        } finally {
          IOUtils.closeStream(out);
        }
      }

      { // test append permission.

        // set root to all writable
        Path root = new Path("/");
        fs.setPermission(root, new FsPermission((short) 0777));
        fs.close();

        // login as a different user
        final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
        String username = "******";
        String group = "testappendgroup";
        assertFalse(superuser.getShortUserName().equals(username));
        assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
        UserGroupInformation appenduser =
            UserGroupInformation.createUserForTesting(username, new String[] {group});

        fs = DFSTestUtil.getFileSystemAs(appenduser, conf);

        // create a file
        Path dir = new Path(root, getClass().getSimpleName());
        Path foo = new Path(dir, "foo.dat");
        FSDataOutputStream out = null;
        int offset = 0;
        try {
          out = fs.create(foo);
          int len = 10 + AppendTestUtil.nextInt(100);
          out.write(fileContents, offset, len);
          offset += len;
        } finally {
          IOUtils.closeStream(out);
        }

        // change dir and foo to minimal permissions.
        fs.setPermission(dir, new FsPermission((short) 0100));
        fs.setPermission(foo, new FsPermission((short) 0200));

        // try append, should succeed
        out = null;
        try {
          out = fs.append(foo);
          int len = 10 + AppendTestUtil.nextInt(100);
          out.write(fileContents, offset, len);
          offset += len;
        } finally {
          IOUtils.closeStream(out);
        }

        // change dir and foo to all but no write on foo.
        fs.setPermission(foo, new FsPermission((short) 0577));
        fs.setPermission(dir, new FsPermission((short) 0777));

        // try append, should fail
        out = null;
        try {
          out = fs.append(foo);
          fail("Expected to have AccessControlException");
        } catch (AccessControlException ace) {
          System.out.println("Good: got " + ace);
          ace.printStackTrace(System.out);
        } finally {
          IOUtils.closeStream(out);
        }
      }
    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
Example #30
  @Override
  public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
    // blacklisting disabled to prevent scheduling issues
    conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
      conf.set(
          MRJobConfig.MR_AM_STAGING_DIR,
          new File(getTestWorkDir(), "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
    }

    if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
      // nothing defined. set quick delete value
      conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0L);
    }

    File appJarLocalFile = new File(MiniTezCluster.APPJAR);

    if (!appJarLocalFile.exists()) {
      String message = "TezAppJar " + MiniTezCluster.APPJAR + " not found. Exiting.";
      LOG.info(message);
      throw new TezUncheckedException(message);
    }

    FileSystem fs = FileSystem.get(conf);
    Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
    Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
    // Copy AppJar and make it public.
    Path appMasterJar = new Path(MiniTezCluster.APPJAR);
    fs.copyFromLocalFile(appMasterJar, appRemoteJar);
    fs.setPermission(appRemoteJar, new FsPermission("777"));

    conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
    LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));

    // VMEM monitoring disabled, PMEM monitoring enabled.
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);

    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
      Path stagingPath =
          FileContext.getFileContext(conf)
              .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
      /*
       * Re-configure the staging path on Windows if the file system is localFs.
       * We need to use an absolute path that contains the drive letter. The unit
       * test could run on a different drive than the AM. We can run into the
       * issue that job files are localized to the drive where the test runs on,
       * while the AM starts on a different drive and fails to find the job
       * metafiles. Using absolute path can avoid this ambiguity.
       */
      if (Path.WINDOWS) {
        if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
          conf.set(
              MRJobConfig.MR_AM_STAGING_DIR,
              new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
        }
      }
      FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
      if (fc.util().exists(stagingPath)) {
        LOG.info(stagingPath + " exists! deleting...");
        fc.delete(stagingPath, true);
      }
      LOG.info("mkdir: " + stagingPath);
      fc.mkdir(stagingPath, null, true);

      // mkdir done directory as well
      String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
      Path doneDirPath = fc.makeQualified(new Path(doneDir));
      fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
      throw new TezUncheckedException("Could not create staging directory. ", e);
    }
    conf.set(MRConfig.MASTER_ADDRESS, "test");

    // configure the shuffle service in NM
    conf.setStrings(
        YarnConfiguration.NM_AUX_SERVICES,
        new String[] {ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID});
    conf.setClass(
        String.format(
            YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID),
        ShuffleHandler.class,
        Service.class);

    // Non-standard shuffle port
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

    conf.setClass(
        YarnConfiguration.NM_CONTAINER_EXECUTOR,
        DefaultContainerExecutor.class,
        ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    super.serviceInit(conf);
  }