/**
   * write text to a remote file system
   *
   * @param hdfsPath !null remote path
   * @param content !null text content
   */
  @Override
  public void writeToFileSystem(final String hdfsPath, final String content) {
    if (isRunningAsUser()) {
      super.writeToFileSystem(hdfsPath, content);
      return;
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              FileSystem fs = getDFS();
              Path src = new Path(hdfsPath);
              Path parent = src.getParent();
              guaranteeDirectory(parent);
              OutputStream os = FileSystem.create(fs, src, FULL_FILE_ACCESS);
              FileUtilities.writeFile(os, content);
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to writeToFileSystem because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
  /**
   * read a remote file as text
   *
   * @param hdfsPath !null remote path to an existing file
   * @return content as text
   */
  @Override
  public String readFromFileSystem(final String hdfsPath) {
    if (isRunningAsUser()) {
      return super.readFromFileSystem(hdfsPath);
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      return uig.doAs(
          new PrivilegedExceptionAction<String>() {

            public String run() throws Exception {
              FileSystem fs = getDFS();
              Path src = new Path(hdfsPath);
              InputStream is = fs.open(src);
              String ret = FileUtilities.readInFile(is);
              return ret;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to readFromFileSystem because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
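 /**
  * get the permissions of a remote path
  *
  * @param src !null remote path
  * @return permissions if the path exists, otherwise null
  */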
 public FsPermission getPermissions(final Path src) {
   UserGroupInformation uig = getCurrentUserGroup();
   try {
     return uig.doAs(
         new PrivilegedExceptionAction<FsPermission>() {
           public FsPermission run() throws Exception {
             FileSystem fs = getDFS();
             if (fs.exists(src)) {
               FileStatus fileStatus = fs.getFileStatus(src);
               return fileStatus.getPermission();
             }
             return null;
           }
         });
   } catch (Exception e) {
     throw new RuntimeException(
         "Failed to getPermissions because "
             + e.getMessage()
             + " exception of class "
             + e.getClass(),
         e);
   }
 }
  /**
   * return the length of a remote file
   *
   * @param hdfsPath !null path - probably of an existing file
   * @return length in bytes, 0 if the file does not exist
   */
  @Override
  public long fileLength(final String hdfsPath) {
    if (isRunningAsUser()) {
      return super.fileLength(hdfsPath);
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      return uig.doAs(
          new PrivilegedExceptionAction<Long>() {

            public Long run() throws Exception {
              FileSystem fs = getDFS();

              Path src = new Path(hdfsPath);
              if (!fs.exists(src)) return 0L; // missing files have length 0
              ContentSummary contentSummary = fs.getContentSummary(src);
              if (contentSummary == null) return 0L;
              return contentSummary.getLength();
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to get fileLength because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
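  /**
   * make sure a remote directory exists - if a file is in the way it is deleted
   *
   * @param src !null remote path that should be a directory
   */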
  private void guaranteeDirectory(final Path src) {
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              FileSystem fs = getDFS();

              if (fs.exists(src)) {
                if (!fs.isFile(src)) {
                  return null; // already a directory
                }
                fs.delete(src, false); // drop a file - we want a directory
              }
              fs.mkdirs(src, FULL_ACCESS);
              fs.setPermission(src, FULL_ACCESS);
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to guaranteeDirectory because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
  /**
   * true if the file exists
   *
   * @param hdfsPath !null path - probably of an existing file
   * @return true if the path exists
   */
  @Override
  public boolean exists(final String hdfsPath) {
    if (isRunningAsUser()) {
      return super.exists(hdfsPath);
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      return uig.doAs(
          new PrivilegedExceptionAction<Boolean>() {

            public Boolean run() throws Exception {
              FileSystem fileSystem = getDFS();

              Path dst = new Path(hdfsPath);

              return fileSystem.exists(dst);
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to test exists because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
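  /**
   * copy a local file to the remote file system
   *
   * @param hdfsPath !null remote path
   * @param localPath !null existing local file
   */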
  @Override
  public void writeToFileSystem(final String hdfsPath, final File localPath) {
    if (isRunningAsUser()) {
      super.writeToFileSystem(hdfsPath, localPath);
      return;
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              FileSystem fileSystem = getDFS();

              Path dst = new Path(hdfsPath);

              Path src = new Path(localPath.getAbsolutePath());

              fileSystem.copyFromLocalFile(src, dst);
              fileSystem.setPermission(dst, FULL_FILE_ACCESS);
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to writeToFileSystem because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
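  /**
   * build an accessor that performs operations as a named remote user
   *
   * @param host !null name node host
   * @param port name node port - must be positive
   * @param user !null remote user name
   */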
  protected HDFWithNameAccessor(final String host, final int port, final String user) {
    super(host, port, user);
    if (port <= 0) throw new IllegalArgumentException("bad port " + port);
    String connectString = "hdfs://" + host + ":" + port + "/";

    UserGroupInformation uig = getCurrentUserGroup();
    try {
      uig.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {
              getDFS();
              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to connect on "
              + connectString
              + " because "
              + e.getMessage()
              + " exception of class "
              + e.getClass(),
          e);
    }
  }
 /**
  * there are issues with HDFS when running as a remote user; InputFormats should be running in
  * the cluster and should already be the right user
  *
  * @return true if you are running on the cluster
  */
 @Override
 public boolean isRunningAsUser() {
   try {
     UserGroupInformation uig = UserGroupInformation.getCurrentUser();
     String userName = uig.getShortUserName();
     String user = RemoteUtilities.getUser();
     return user.equals(userName);
   } catch (Exception e) {
     throw new RuntimeException(e);
   }
 }
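 /**
  * return the UserGroupInformation for the configured remote user, creating and caching it on
  * first use
  *
  * @return !null user group information
  */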
 public static UserGroupInformation getCurrentUserGroup() {
   if (g_CurrentUserGroup == null) {
     String user = RemoteUtilities.getUser();
     g_CurrentUserGroup = UserGroupInformation.createRemoteUser(user);
   }
   return g_CurrentUserGroup;
 }
  /**
   * delete a directory and all enclosed files and directories
   *
   * @param hdfsPath !null path
   * @return true on success
   */
  @Override
  public boolean expunge(final String hdfsPath) {
    if (isRunningAsUser()) {
      return super.expunge(hdfsPath);
    }
    UserGroupInformation uig = getCurrentUserGroup();
    try {
      return uig.doAs(
          new PrivilegedExceptionAction<Boolean>() {

            public Boolean run() throws Exception {
              FileSystem fs = getDFS();

              Path src = new Path(hdfsPath);

              boolean ret = true;
              if (!fs.exists(src)) {
                return ret;
              }
              if (fs.getFileStatus(src).isDir()) {
                fs.delete(src, true);
                ret = !fs.exists(src); // verify the directory is really gone
                return ret;
              }
              if (fs.isFile(src)) {
                ret = fs.delete(src, false);
                return ret;
              }
              throw new IllegalStateException("should be file or directory if it exists");
            }
          });

    } catch (Exception e) {
      throw new RuntimeException(
          "Failed to expunge because " + e.getMessage() + " exception of class " + e.getClass(), e);
    }
  }
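  /**
   * build a new FileSystem connected to the configured cluster as the remote user
   *
   * @return !null file system
   */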
  protected FileSystem getNewDFS() {
    final FileSystem[] returned = new FileSystem[1];
    final String host = RemoteUtilities.getHost();
    final int port = RemoteUtilities.getPort();
    final String user = RemoteUtilities.getUser();

    final String userDir = "/user/" + user;

    try {
      UserGroupInformation ugi = getCurrentUserGroup();
      ugi.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {

              Configuration conf = new Configuration();
              conf.set("fs.default.name", "hdfs://" + host + ":" + port);
              conf.set("fs.defaultFS", "hdfs://" + host + ":" + port + userDir);
              conf.set("hadoop.job.ugi", user);

              returned[0] = FileSystem.get(conf);

              return null;
            }
          });
    } catch (Exception e) {
      throw new RuntimeException(e);
    }

    if (returned[0] == null) throw new IllegalStateException("cannot get file system");
    return returned[0];
  }
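 /**
  * set the permissions of a remote path if it exists
  *
  * @param src !null remote path
  * @param p !null permissions to apply
  */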
 public void setPermissions(final Path src, final FsPermission p) {
   UserGroupInformation uig = getCurrentUserGroup();
   try {
     uig.doAs(
         new PrivilegedExceptionAction<Void>() {
           public Void run() throws Exception {
             FileSystem fs = getDFS();
             if (fs.exists(src)) fs.setPermission(src, p);
             return null;
           }
         });
   } catch (Exception e) {
     throw new RuntimeException(
         "Failed to setPermissions because "
             + e.getMessage()
             + " exception of class "
             + e.getClass(),
         e);
   }
 }
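  /**
   * test whether the configured HDFS cluster can be reached as the remote user
   *
   * @return true if the user directory can be read
   */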
  public static boolean isHDFSAccessible() {

    final String host = RemoteUtilities.getHost();
    final int port = RemoteUtilities.getPort();
    final String user = RemoteUtilities.getUser();

    final String userDir = "/user/" + user;

    try {
      UserGroupInformation ugi = getCurrentUserGroup();
      ugi.doAs(
          new PrivilegedExceptionAction<Void>() {

            public Void run() throws Exception {

              Configuration conf = new Configuration();
              conf.set("fs.default.name", "hdfs://" + host + ":" + port);
              conf.set("fs.defaultFS", "hdfs://" + host + ":" + port + userDir);
              conf.set("hadoop.job.ugi", user);

              FileSystem fs = FileSystem.get(conf);

              Path udir = new Path(userDir);

              // getFileStatus will throw if the user directory is not readable
              FileStatus fileStatus = fs.getFileStatus(udir);
              FsPermission permission = fileStatus.getPermission();

              FileStatus[] fileStatuses = fs.listStatus(udir);
              for (FileStatus status : fileStatuses) {
                System.err.println(status.getPath());
              }

              return null;
            }
          });
    } catch (Exception e) {
      return false;
    }

    return true;
  }
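  /**
   * exercise permission handling against a MiniDFSCluster - file creation, reads, and access
   * checks for a non-owner user
   *
   * @throws Exception on failure
   */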
  public void testFilePermision() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    try {
      FileSystem nnfs = FileSystem.get(conf);
      // test permissions on files that do not exist
      assertFalse(nnfs.exists(CHILD_FILE1));
      try {
        nnfs.setOwner(CHILD_FILE1, "foo", "bar");
        assertTrue(false);
      } catch (java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      try {
        nnfs.setPermission(CHILD_FILE1, new FsPermission((short) 0777));
        assertTrue(false);
      } catch (java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      // following dir/file creations are legal
      nnfs.mkdirs(CHILD_DIR1);
      FSDataOutputStream out = nnfs.create(CHILD_FILE1);
      byte data[] = new byte[FILE_LEN];
      RAN.nextBytes(data);
      out.write(data);
      out.close();
      nnfs.setPermission(CHILD_FILE1, new FsPermission("700"));

      // following read is legal
      byte dataIn[] = new byte[FILE_LEN];
      FSDataInputStream fin = nnfs.open(CHILD_FILE1);
      int bytesRead = fin.read(dataIn);
      assertTrue(bytesRead == FILE_LEN);
      for (int i = 0; i < FILE_LEN; i++) {
        assertEquals(data[i], dataIn[i]);
      }

      ////////////////////////////////////////////////////////////////
      // test illegal file/dir creation
      UserGroupInformation userGroupInfo =
          UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);

      FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

      // make sure mkdir of an existing directory that is not owned by
      // this user does not throw an exception.
      userfs.mkdirs(CHILD_DIR1);

      // illegal mkdir
      assertFalse(canMkdirs(userfs, CHILD_DIR2));

      // illegal file creation
      assertFalse(canCreate(userfs, CHILD_FILE2));

      // illegal file open
      assertFalse(canOpen(userfs, CHILD_FILE1));

      nnfs.setPermission(ROOT_PATH, new FsPermission((short) 0755));
      nnfs.setPermission(CHILD_DIR1, new FsPermission("777"));
      nnfs.setPermission(new Path("/"), new FsPermission((short) 0777));
      final Path RENAME_PATH = new Path("/foo/bar");
      userfs.mkdirs(RENAME_PATH);
      assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1));
    } finally {
      cluster.shutdown();
    }
  }