/**
 * Initializes the Hadoop {@code Configuration}, login UGI and {@code FileSystem} instance for
 * this stage. Any configuration problem encountered is reported through {@code issues} rather
 * than thrown; when at least one issue is reported, initialization stops before the
 * {@code FileSystem} is created.
 *
 * @param context stage context used to create config issues and resolve the resources directory
 * @param prefix  config-bean prefix used when naming the offending configuration property
 * @param issues  accumulator for validation problems discovered during initialization
 */
public void init(Stage.Context context, String prefix, List<Stage.ConfigIssue> issues) {
    conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);

    if (hdfsKerberos) {
      conf.set(
          CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
          UserGroupInformation.AuthenticationMethod.KERBEROS.name());
      try {
        conf.set(
            DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
            "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
      } catch (Exception ex) {
        // Failing to resolve the default realm is only a problem when the user has not
        // explicitly provided the NameNode principal themselves.
        if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
          issues.add(
              context.createConfigIssue(
                  Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_001, ex.toString()));
        }
      }
    }

    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
      File hadoopConfigDir = new File(hdfsConfDir);
      if (!hadoopConfigDir.isAbsolute()) {
        // Relative paths are resolved against the stage's resources directory.
        hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
      }
      if (!hadoopConfigDir.exists()) {
        issues.add(
            context.createConfigIssue(
                Groups.HDFS.name(),
                JOIN.join(prefix, "hdfsConfDir"),
                HdfsMetadataErrors.HDFS_METADATA_002,
                hadoopConfigDir.getPath()));
      } else if (!hadoopConfigDir.isDirectory()) {
        issues.add(
            context.createConfigIssue(
                Groups.HDFS.name(),
                JOIN.join(prefix, "hdfsConfDir"),
                HdfsMetadataErrors.HDFS_METADATA_003,
                hadoopConfigDir.getPath()));
      } else {
        addConfigFileResource(context, prefix, issues, hadoopConfigDir, "core-site.xml");
        addConfigFileResource(context, prefix, issues, hadoopConfigDir, "hdfs-site.xml");
      }
    }

    // Apply explicit per-stage overrides before reading derived values, so that a
    // user-supplied "fs.defaultFS" override is honored when resolving the HDFS URI below.
    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
      conf.set(config.getKey(), config.getValue());
    }

    // Unless the user specified a non-empty, non-null HDFS URI, we need to retrieve its value
    // from the effective configuration.
    if (StringUtils.isEmpty(hdfsUri)) {
      hdfsUri = conf.get("fs.defaultFS");
    }

    try {
      loginUgi = HadoopSecurityUtil.getLoginUser(conf);
    } catch (IOException e) {
      LOG.error("Can't create login UGI", e);
      issues.add(
          context.createConfigIssue(
              Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005, e.getMessage(), e));
    }

    // Don't attempt to create the FileSystem if any validation above already failed.
    if (!issues.isEmpty()) {
      return;
    }

    try {
      // Create the FileSystem as the (possibly proxied) HDFS user.
      fs =
          getUGI()
              .doAs(
                  new PrivilegedExceptionAction<FileSystem>() {
                    @Override
                    public FileSystem run() throws Exception {
                      return FileSystem.newInstance(new URI(hdfsUri), conf);
                    }
                  });
    } catch (Exception ex) {
      LOG.error("Can't retrieve FileSystem instance", ex);
      issues.add(
          context.createConfigIssue(
              Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005, ex.getMessage(), ex));
    }
  }

  /**
   * Adds the named Hadoop config file (e.g. {@code core-site.xml}) from {@code hadoopConfigDir}
   * to {@link #conf}, if it exists. If the path exists but is not a regular file, a config issue
   * is reported and the path is NOT added as a resource (the original code fell through and
   * added it anyway).
   */
  private void addConfigFileResource(
      Stage.Context context,
      String prefix,
      List<Stage.ConfigIssue> issues,
      File hadoopConfigDir,
      String fileName) {
    File configFile = new File(hadoopConfigDir, fileName);
    if (!configFile.exists()) {
      // Missing optional config file is not an error; Hadoop defaults apply.
      return;
    }
    if (!configFile.isFile()) {
      issues.add(
          context.createConfigIssue(
              Groups.HDFS.name(),
              JOIN.join(prefix, "hdfsConfDir"),
              HdfsMetadataErrors.HDFS_METADATA_004,
              configFile.getPath()));
      return;
    }
    conf.addResource(new Path(configFile.getAbsolutePath()));
  }
 /**
  * Returns the UGI to execute HDFS operations under: the plain login identity when no explicit
  * HDFS user is configured, otherwise a proxy-user UGI impersonating {@code hdfsUser}.
  */
 public UserGroupInformation getUGI() {
   if (hdfsUser.isEmpty()) {
     return loginUgi;
   }
   return HadoopSecurityUtil.getProxyUser(hdfsUser, loginUgi);
 }