Example #1
 /**
  * Creates a new group in HDFS with the name <code>projectName</code> if it does not exist, then
  * creates the owner in HDFS with the name <code>projectName</code>__<code>username</code>, also
  * if it does not exist, and adds the owner to the group <code>projectName</code>.
  *
  * @param project the project whose root folder ownership and permissions are set
  * @throws java.io.IOException
  */
 public void addProjectFolderOwner(Project project) throws IOException {
   String owner = getHdfsUserName(project, project.getOwner());
   String projectPath = File.separator + settings.DIR_ROOT + File.separator + project.getName();
   Path location = new Path(projectPath);
    // FsPermission(FsAction u, FsAction g, FsAction o) -> 775
    // Gives the owner and the group full access, and read/execute access to others.
    // Here the group holds the data_owners and "others" covers the data_scientists,
    // so everybody can see the contents of a project.
   FsPermission fsPermission =
       new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ_EXECUTE); // 775
   fsOps.setOwner(location, owner, project.getName());
   fsOps.setPermission(location, fsPermission);
 }
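
The 775 used for the project root can be checked in isolation with plain hadoop-common classes. Below is a minimal, self-contained sketch (the class name ProjectPermissionCheck and the standalone main method are illustrative, not part of the service above) confirming that the FsAction triple passed to fsOps.setPermission corresponds to the octal 775 mentioned in the comment:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ProjectPermissionCheck {
  public static void main(String[] args) {
    // Owner: rwx, group: rwx, others: r-x -- the permission given to the project folder above
    FsPermission projectPermission =
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ_EXECUTE);
    // toShort() packs the three FsAction triples into the familiar octal form
    System.out.println(Integer.toOctalString(projectPermission.toShort())); // prints 775
    // The same permission can also be built directly from the octal literal
    System.out.println(projectPermission.equals(new FsPermission((short) 0775))); // prints true
  }
}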
Example #2
  /**
   * Creates a new group in HDFS with the name project.name__datasetName if it does not exist,
   * then adds all members of the project to this group. This is done when a new dataset is
   * created in a project. If stickyBit is true, all members of the project are given r, w, x
   * privileges on the dataset. If stickyBit is false, the owner gets all privileges and all
   * other members get r and x privileges.
   *
   * @param owner the user who created the dataset
   * @param project the project the dataset belongs to
   * @param dataset the newly created dataset
   * @param stickyBit whether the sticky bit should be set on the dataset folder
   * @throws java.io.IOException
   */
  public void addDatasetUsersGroups(
      Users owner, Project project, Dataset dataset, boolean stickyBit) throws IOException {
    if (owner == null
        || project == null
        || project.getProjectTeamCollection() == null
        || dataset == null) {
      throw new IllegalArgumentException("One or more arguments are null.");
    }
    String datasetGroup = getHdfsGroupName(project, dataset);
    String dsOwner = getHdfsUserName(project, owner);
    String dsPath =
        File.separator
            + settings.DIR_ROOT
            + File.separator
            + project.getName()
            + File.separator
            + dataset.getInode().getInodePK().getName();
    Path location = new Path(dsPath);
     // FsPermission(FsAction u, FsAction g, FsAction o[, boolean stickyBit])
     FsPermission fsPermission =
         new FsPermission(
             FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE); // equivalent to hdfs dfs -chmod 750
    if (stickyBit) {
      fsPermission =
          new FsPermission(
              FsAction.ALL,
              FsAction.ALL,
              FsAction.NONE,
               stickyBit); // equivalent to hdfs dfs -chmod 1770
    }
    fsOps.setOwner(location, dsOwner, datasetGroup);
    fsOps.setPermission(location, fsPermission);

    String hdfsUsername;
    HdfsUsers hdfsUser;
    byte[] userId;
    byte[] groupId = UsersGroups.getGroupID(datasetGroup);
    HdfsGroups hdfsGroup = hdfsGroupsFacade.findHdfsGroup(groupId);
    if (hdfsGroup == null) {
      throw new IllegalArgumentException("Could not create dataset group in HDFS.");
    }
    if (hdfsGroup.getHdfsUsersCollection() == null) {
      hdfsGroup.setHdfsUsersCollection(new ArrayList<HdfsUsers>());
    }
    // add every member to the new ds group
    for (ProjectTeam member : project.getProjectTeamCollection()) {
      hdfsUsername = getHdfsUserName(project, member.getUser());
      userId = UsersGroups.getUserID(hdfsUsername);
      hdfsUser = hdfsUsersFacade.findHdfsUser(userId);
      // the owner does not need to be added to the group.
      if (hdfsUsername.equals(dsOwner)) {
        continue;
      }
      if (hdfsUser == null) {
        hdfsUser = new HdfsUsers(userId, hdfsUsername);
        hdfsUsersFacade.persist(hdfsUser);
      }
      if (!hdfsGroup.getHdfsUsersCollection().contains(hdfsUser)) {
        hdfsGroup.getHdfsUsersCollection().add(hdfsUser);
      }
    }
    hdfsGroupsFacade.merge(hdfsGroup);
  }
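
Likewise, a small sketch (again with an illustrative class name and main method, assuming only hadoop-common on the classpath) showing that the two FsPermission objects built in addDatasetUsersGroups match the 750 and 1770 modes referenced in the comments:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class DatasetPermissionCheck {
  public static void main(String[] args) {
    // Default dataset permission: owner rwx, group r-x, others none (chmod 750)
    FsPermission defaultPermission =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
    System.out.println(Integer.toOctalString(defaultPermission.toShort())); // prints 750

    // Sticky-bit variant: owner rwx, group rwx, others none (chmod 1770);
    // the sticky bit keeps project members from deleting or renaming each other's files
    FsPermission stickyPermission =
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE, true);
    System.out.println(Integer.toOctalString(stickyPermission.toShort())); // prints 1770
  }
}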