/** Exercise 4 - Read a list of people from Cassandra */
  private static void readPeopleFromCassandra(JavaSparkContext javaSparkContext) {

    // Wrap the Spark context with the connector's Java-friendly helper API.
    SparkContextJavaFunctions ctxFunctions = CassandraJavaUtil.javaFunctions(javaSparkContext);

    // Read test.people mapped onto Person beans and render each row as a string.
    CassandraJavaRDD<Person> peopleRdd =
        ctxFunctions.cassandraTable("test", "people", CassandraJavaUtil.mapRowTo(Person.class));
    JavaRDD<String> peopleAsStrings = peopleRdd.map(Person::toString);
    System.out.println(
        "Data as Person beans: \n" + StringUtils.join("\n", peopleAsStrings.collect()));

    //   select id from test.people where id=45;

    // Fetch only the 'id' column, with the row restriction pushed down to Cassandra.
    JavaRDD<String> idOnlyRdd =
        ctxFunctions
            .cassandraTable("test", "people")
            .select("id")
            .where("id = 45")
            .map(CassandraRow::toString);
    System.out.println(
        "Data with only 'id' column fetched: \n" + StringUtils.join("\n", idOnlyRdd.collect()));
  }
예제 #2
0
  /**
   * Verify if the NodeManager could identify disk failures.
   *
   * @param localORLogDirs <em>true</em> represent nm-local-dirs and <em>false </em> means
   *     nm-log-dirs
   * @param expectedDirs expected nm-local-dirs/nm-log-dirs as a string
   * @param isHealthy <em>true</em> if the overall node should be healthy
   */
  private void verifyDisksHealth(boolean localORLogDirs, String expectedDirs, boolean isHealthy) {
    // Wait for the NodeManager to identify disk failures.
    waitForDiskHealthCheck();

    List<String> list = localORLogDirs ? dirsHandler.getLocalDirs() : dirsHandler.getLogDirs();
    String seenDirs = StringUtils.join(",", list);
    LOG.info("ExpectedDirs=" + expectedDirs);
    LOG.info("SeenDirs=" + seenDirs);
    // assertEquals reports both expected and seen dir lists on failure,
    // unlike the original assertTrue(expectedDirs.equals(seenDirs)).
    Assert.assertEquals("NodeManager could not identify disk failure.", expectedDirs, seenDirs);

    Assert.assertEquals(
        "Node's health in terms of disks is wrong", isHealthy, dirsHandler.areDisksHealthy());
    // Poll (up to ~10s) until the RM has received the node's health status via heartbeat.
    for (int i = 0; i < 10; i++) {
      Iterator<RMNode> iter =
          yarnCluster.getResourceManager().getRMContext().getRMNodes().values().iterator();
      if (iter.next().getNodeHealthStatus().getIsNodeHealthy() == isHealthy) {
        break;
      }
      // wait for the node health info to go to RM
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        LOG.error("Interrupted while waiting for NM->RM heartbeat.");
        // Restore the interrupt flag so callers can still observe the interruption;
        // the original swallowed it.
        Thread.currentThread().interrupt();
      }
    }
    Iterator<RMNode> iter =
        yarnCluster.getResourceManager().getRMContext().getRMNodes().values().iterator();
    Assert.assertEquals(
        "RM is not updated with the health status of a node",
        isHealthy,
        iter.next().getNodeHealthStatus().getIsNodeHealthy());
  }
  /**
   * Demonstrates the Java API of the Spark-Cassandra connector: saves three {@code Person}
   * rows into {@code test.people}, reads them back as raw {@code CassandraRow}s and as
   * {@code Person} beans, prints both result sets, then stops the Spark context.
   */
  public JavaApiDemo() {
    DemoApp demoApp = DemoApp$.MODULE$.apply();
    JavaSparkContext sc = new JavaSparkContext(demoApp.sc());

    // Here we are going to save some data to Cassandra...
    List<Person> people =
        Arrays.asList(
            Person.newInstance(1, "John", new Date()),
            Person.newInstance(2, "Anna", new Date()),
            Person.newInstance(3, "Andrew", new Date()));
    JavaRDD<Person> rdd = sc.parallelize(people);
    javaFunctions(rdd, Person.class).saveToCassandra("test", "people");

    // then, we want to read that data as an RDD of CassandraRows and convert them to strings...
    // Method references replace the verbose anonymous Function classes (this file already
    // uses Java 8 method references in other snippets).
    JavaRDD<String> cassandraRowsRDD =
        javaFunctions(sc).cassandraTable("test", "people").toJavaRDD().map(CassandraRow::toString);
    // collect() replaces the deprecated JavaRDD.toArray(); both return the RDD as a List.
    System.out.println(
        "Data as CassandraRows: \n" + StringUtils.join("\n", cassandraRowsRDD.collect()));

    // finally, we want to read that data as an RDD of Person beans and also convert them to
    // strings...
    JavaRDD<String> rdd2 =
        javaFunctions(sc)
            .cassandraTable("test", "people", Person.class)
            .toJavaRDD()
            .map(Person::toString);
    System.out.println("Data as Person beans: \n" + StringUtils.join("\n", rdd2.collect()));

    sc.stop();
  }
예제 #4
0
  /**
   * Starts a fresh mini YARN cluster and progressively fails nm-local-dirs or nm-log-dirs,
   * verifying after each failure that the NodeManager updates its list of good dirs and
   * its overall health status (healthy while >= 60% of disks are good, unhealthy below).
   *
   * @param localORLogDirs <em>true</em> tests nm-local-dirs, <em>false</em> tests nm-log-dirs
   * @throws IOException if a directory cannot be prepared to fail
   */
  private void testDirsFailures(boolean localORLogDirs) throws IOException {
    String dirType = localORLogDirs ? "local" : "log";
    String dirsProperty =
        localORLogDirs ? YarnConfiguration.NM_LOCAL_DIRS : YarnConfiguration.NM_LOG_DIRS;

    Configuration conf = new Configuration();
    // set disk health check interval to a small value (say 1 sec).
    conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, DISK_HEALTH_CHECK_INTERVAL);

    // If 2 out of the total 4 local-dirs fail OR if 2 Out of the total 4
    // log-dirs fail, then the node's health status should become unhealthy.
    conf.setFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, 0.60F);

    if (yarnCluster != null) {
      yarnCluster.stop();
      FileUtil.fullyDelete(localFSDirBase);
      localFSDirBase.mkdirs();
    }
    LOG.info("Starting up YARN cluster");
    yarnCluster =
        new MiniYARNCluster(TestDiskFailures.class.getName(), 1, numLocalDirs, numLogDirs);
    yarnCluster.init(conf);
    yarnCluster.start();

    NodeManager nm = yarnCluster.getNodeManager(0);
    LOG.info("Configured nm-" + dirType + "-dirs=" + nm.getConfig().get(dirsProperty));
    dirsHandler = nm.getNodeHealthChecker().getDiskHandler();
    List<String> list = localORLogDirs ? dirsHandler.getLocalDirs() : dirsHandler.getLogDirs();
    String[] dirs = list.toArray(new String[list.size()]);
    // Compare against the dir count matching the dir type under test; the original always
    // used numLocalDirs, which is only correct when numLocalDirs == numLogDirs.
    int expectedDirCount = localORLogDirs ? numLocalDirs : numLogDirs;
    Assert.assertEquals(
        "Number of nm-" + dirType + "-dirs is wrong.", expectedDirCount, dirs.length);
    String expectedDirs = StringUtils.join(",", list);
    // validate the health of disks initially
    verifyDisksHealth(localORLogDirs, expectedDirs, true);

    // Make 1 nm-local-dir fail and verify if "the nodemanager can identify
    // the disk failure(s) and can update the list of good nm-local-dirs".
    prepareDirToFail(dirs[2]);
    expectedDirs = dirs[0] + "," + dirs[1] + "," + dirs[3];
    verifyDisksHealth(localORLogDirs, expectedDirs, true);

    // Now, make 1 more nm-local-dir/nm-log-dir fail and verify if "the
    // nodemanager can identify the disk failures and can update the list of
    // good nm-local-dirs/nm-log-dirs and can update the overall health status
    // of the node to unhealthy".
    prepareDirToFail(dirs[0]);
    expectedDirs = dirs[1] + "," + dirs[3];
    verifyDisksHealth(localORLogDirs, expectedDirs, false);

    // Fail the remaining 2 local-dirs/log-dirs and verify if NM remains with
    // empty list of local-dirs/log-dirs and the overall health status is
    // unhealthy.
    prepareDirToFail(dirs[1]);
    prepareDirToFail(dirs[3]);
    expectedDirs = "";
    verifyDisksHealth(localORLogDirs, expectedDirs, false);
  }
예제 #5
0
 @Test
 @TestException(exception = ServerException.class, msgRegExp = "S10.*")
 @TestDir
 public void serviceWithMissingDependency() throws Exception {
   // MyService6 depends on a service that is not in the configured list, so init()
   // must fail with server error S10 (asserted by the @TestException annotation).
   String dir = TestDirHelper.getTestDir().getAbsolutePath();
   Configuration conf = new Configuration(false);
   conf.set(
       "server.services",
       StringUtils.join(
           ",", Arrays.asList(MyService3.class.getName(), MyService6.class.getName())));
   new Server("server", dir, dir, dir, dir, conf).init();
 }
예제 #6
0
  /**
   * Uploads all log files of the given container into the aggregated log file, lazily
   * creating the aggregate writer (and writing the application ACLs and owner once) on
   * first use. If the writer cannot be created, log aggregation is disabled for this app
   * and subsequent calls return immediately.
   *
   * @param containerId the container whose logs should be uploaded
   */
  private void uploadLogsForContainer(ContainerId containerId) {

    if (this.logAggregationDisabled) {
      return;
    }

    // Lazy creation of the writer
    if (this.writer == null) {
      LOG.info(
          "Starting aggregate log-file for app "
              + this.applicationId
              + " at "
              + this.remoteNodeTmpLogFileForApp);
      try {
        this.writer = new LogWriter(this.conf, this.remoteNodeTmpLogFileForApp, this.userUgi);
        // Write ACLs once when and if the writer is created.
        this.writer.writeApplicationACLs(appAcls);
        this.writer.writeApplicationOwner(this.userUgi.getShortUserName());
      } catch (IOException e) {
        LOG.error(
            "Cannot create writer for app "
                + this.applicationId
                + ". Disabling log-aggregation for this app.",
            e);
        this.logAggregationDisabled = true;
        return;
      }
    }

    LOG.info(
        "Uploading logs for container "
            + containerId
            + ". Current good log dirs are "
            + StringUtils.join(",", dirsHandler.getLogDirs()));
    LogKey logKey = new LogKey(containerId);
    LogValue logValue =
        new LogValue(dirsHandler.getLogDirs(), containerId, userUgi.getShortUserName());
    try {
      this.writer.append(logKey, logValue);
    } catch (IOException e) {
      // Pass the exception to the logger; the original dropped the cause entirely.
      LOG.error("Couldn't upload logs for " + containerId + ". Skipping this container.", e);
    }
  }
예제 #7
0
  /**
   * End-to-end lifecycle test for {@link Server} service management. Covers: an empty
   * service list; init/postInit/destroy ordering for multiple services; cleanup when a
   * service fails during init; destroy continuing past a failing destroy; overriding a
   * service via the "server.services.ext" config and via setService(); adding a service
   * at runtime (including the failure path, error S09); and a service with a satisfied
   * dependency. The static ORDER list records the lifecycle callbacks emitted by the
   * MyService* test services, and each section asserts its exact contents.
   */
  @Test
  @TestDir
  public void services() throws Exception {
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    Configuration conf;
    Server server;

    // no services
    ORDER.clear();
    conf = new Configuration(false);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(ORDER.size(), 0);

    // 2 services init/destroy
    ORDER.clear();
    String services =
        StringUtils.join(
            ",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
    assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
    // init runs in declaration order, then postInit in the same order.
    assertEquals(ORDER.size(), 4);
    assertEquals(ORDER.get(0), "s1.init");
    assertEquals(ORDER.get(1), "s3.init");
    assertEquals(ORDER.get(2), "s1.postInit");
    assertEquals(ORDER.get(3), "s3.postInit");
    server.destroy();
    // destroy runs in reverse declaration order.
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s3.destroy");
    assertEquals(ORDER.get(5), "s1.destroy");

    // 3 services, 2nd one fails on init
    ORDER.clear();
    services =
        StringUtils.join(
            ",",
            Arrays.asList(
                MyService1.class.getName(),
                MyService2.class.getName(),
                MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);

    server = new Server("server", dir, dir, dir, dir, conf);
    try {
      server.init();
      fail();
    } catch (ServerException ex) {
      assertEquals(MyService2.class, ex.getError().getClass());
    } catch (Exception ex) {
      fail();
    }
    // s3 never inits; the already-inited s1 is destroyed during cleanup.
    assertEquals(ORDER.size(), 3);
    assertEquals(ORDER.get(0), "s1.init");
    assertEquals(ORDER.get(1), "s2.init");
    assertEquals(ORDER.get(2), "s1.destroy");

    // 2 services one fails on destroy
    ORDER.clear();
    services =
        StringUtils.join(
            ",", Arrays.asList(MyService1.class.getName(), MyService5.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(ORDER.size(), 4);
    assertEquals(ORDER.get(0), "s1.init");
    assertEquals(ORDER.get(1), "s5.init");
    assertEquals(ORDER.get(2), "s1.postInit");
    assertEquals(ORDER.get(3), "s5.postInit");
    server.destroy();
    // s5.destroy fails but destruction must still proceed to s1.
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s5.destroy");
    assertEquals(ORDER.get(5), "s1.destroy");

    // service override via ext
    ORDER.clear();
    services =
        StringUtils.join(
            ",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    String servicesExt = StringUtils.join(",", Arrays.asList(MyService1a.class.getName()));

    conf = new Configuration(false);
    conf.set("server.services", services);
    conf.set("server.services.ext", servicesExt);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();

    // MyService1a replaces MyService1 under the MyService1 interface.
    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
    assertEquals(ORDER.size(), 4);
    assertEquals(ORDER.get(0), "s1a.init");
    assertEquals(ORDER.get(1), "s3.init");
    assertEquals(ORDER.get(2), "s1a.postInit");
    assertEquals(ORDER.get(3), "s3.postInit");
    server.destroy();
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s3.destroy");
    assertEquals(ORDER.get(5), "s1a.destroy");

    // service override via setService
    ORDER.clear();
    services =
        StringUtils.join(
            ",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();

    server.setService(MyService1a.class);
    // The replaced service is destroyed before the replacement is inited.
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s1.destroy");
    assertEquals(ORDER.get(5), "s1a.init");

    assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);

    server.destroy();
    assertEquals(ORDER.size(), 8);
    assertEquals(ORDER.get(6), "s3.destroy");
    assertEquals(ORDER.get(7), "s1a.destroy");

    // service add via setService
    ORDER.clear();
    services =
        StringUtils.join(
            ",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();

    server.setService(MyService5.class);
    assertEquals(ORDER.size(), 5);
    assertEquals(ORDER.get(4), "s5.init");

    assertEquals(server.get(MyService5.class).getClass(), MyService5.class);

    server.destroy();
    assertEquals(ORDER.size(), 8);
    assertEquals(ORDER.get(5), "s5.destroy");
    assertEquals(ORDER.get(6), "s3.destroy");
    assertEquals(ORDER.get(7), "s1.destroy");

    // service add via setService exception
    ORDER.clear();
    services =
        StringUtils.join(
            ",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    try {
      server.setService(MyService7.class);
      fail();
    } catch (ServerException ex) {
      assertEquals(ServerException.ERROR.S09, ex.getError());
    } catch (Exception ex) {
      fail();
    }
    // A failed setService destroys the whole server.
    assertEquals(ORDER.size(), 6);
    assertEquals(ORDER.get(4), "s3.destroy");
    assertEquals(ORDER.get(5), "s1.destroy");

    // service with dependency
    ORDER.clear();
    services =
        StringUtils.join(
            ",", Arrays.asList(MyService1.class.getName(), MyService6.class.getName()));
    conf = new Configuration(false);
    conf.set("server.services", services);
    server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
    assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
    server.destroy();
  }
예제 #8
0
  /**
   * Builds the command-line argument vector used to launch the child task JVM.
   *
   * @param taskid the task attempt being launched; used for {@code @taskid@} substitution
   *     in the child java opts, passed to the child for the umbilical handshake, and used
   *     to locate the task log directory
   * @param workDir the task's working directory; appended to {@code java.library.path}
   *     and used as the parent of the child tmp dir
   * @param classPaths classpath entries joined into the single {@code -classpath} argument
   * @param logSize maximum size of the task's log, forwarded to the log4j setup
   * @return the argument vector for the child JVM, starting with the java executable
   * @throws IOException if the child tmp dir cannot be created
   */
  private Vector<String> getVMArgs(
      TaskAttemptID taskid, File workDir, List<String> classPaths, long logSize)
      throws IOException {
    Vector<String> vargs = new Vector<String>(8);
    File jvm = // use same jvm as parent
        new File(new File(System.getProperty("java.home"), "bin"), "java");

    vargs.add(jvm.toString());

    // Add child (task) java-vm options.
    //
    // The following symbols if present in mapred.{map|reduce}.child.java.opts
    // value are replaced:
    // + @taskid@ is interpolated with value of TaskID.
    // Other occurrences of @ will not be altered.
    //
    // Example with multiple arguments and substitutions, showing
    // jvm GC logging, and start of a passwordless JVM JMX agent so can
    // connect with jconsole and the likes to watch child memory, threads
    // and get thread dumps.
    //
    //  <property>
    //    <name>mapred.map.child.java.opts</name>
    //    <value>-Xmx 512M -verbose:gc -Xloggc:/tmp/@[email protected] \
    //           -Dcom.sun.management.jmxremote.authenticate=false \
    //           -Dcom.sun.management.jmxremote.ssl=false \
    //    </value>
    //  </property>
    //
    //  <property>
    //    <name>mapred.reduce.child.java.opts</name>
    //    <value>-Xmx 1024M -verbose:gc -Xloggc:/tmp/@[email protected] \
    //           -Dcom.sun.management.jmxremote.authenticate=false \
    //           -Dcom.sun.management.jmxremote.ssl=false \
    //    </value>
    //  </property>
    //
    String[] javaOptsSplit =
        parseChildJavaOpts(getChildJavaOpts(conf, JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS), taskid);

    // Add java.library.path; necessary for loading native libraries.
    //
    // 1. To support native-hadoop library i.e. libhadoop.so, we add the
    //    parent processes' java.library.path to the child.
    // 2. We also add the 'cwd' of the task to it's java.library.path to help
    //    users distribute native libraries via the DistributedCache.
    // 3. The user can also specify extra paths to be added to the
    //    java.library.path via mapred.{map|reduce}.child.java.opts.
    //
    String libraryPath = System.getProperty("java.library.path");
    if (libraryPath == null) {
      libraryPath = workDir.getAbsolutePath();
    } else {
      libraryPath += SYSTEM_PATH_SEPARATOR + workDir;
    }
    // If the user supplied their own -Djava.library.path, append ours to it
    // (index loop kept: it mutates the array element in place).
    boolean hasUserLDPath = false;
    for (int i = 0; i < javaOptsSplit.length; i++) {
      if (javaOptsSplit[i].startsWith("-Djava.library.path=")) {
        javaOptsSplit[i] += SYSTEM_PATH_SEPARATOR + libraryPath;
        hasUserLDPath = true;
        break;
      }
    }
    if (!hasUserLDPath) {
      vargs.add("-Djava.library.path=" + libraryPath);
    }
    // Enhanced-for replaces the manual index loop; order is preserved.
    for (String opt : javaOptsSplit) {
      vargs.add(opt);
    }

    Path childTmpDir = createChildTmpDir(workDir, conf, false);
    vargs.add("-Djava.io.tmpdir=" + childTmpDir);

    // Add classpath.
    vargs.add("-classpath");
    String classPath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
    vargs.add(classPath);

    // Setup the log4j prop
    setupLog4jProperties(vargs, taskid, logSize);

    // Optionally enable the profiler for tasks in the configured partition range.
    if (conf.getProfileEnabled()) {
      if (conf.getProfileTaskRange(t.isMapTask()).isIncluded(t.getPartition())) {
        File prof = TaskLog.getTaskLogFile(taskid, t.isTaskCleanupTask(), TaskLog.LogName.PROFILE);
        vargs.add(String.format(conf.getProfileParams(), prof.toString()));
      }
    }

    // Add main class and its arguments
    vargs.add(Child.class.getName()); // main of Child
    // pass umbilical address
    InetSocketAddress address = tracker.getTaskTrackerReportAddress();
    vargs.add(address.getAddress().getHostAddress());
    vargs.add(Integer.toString(address.getPort()));
    vargs.add(taskid.toString()); // pass task identifier
    // pass task log location
    vargs.add(TaskLog.getAttemptDir(taskid, t.isTaskCleanupTask()).toString());
    return vargs;
  }
예제 #9
0
  /**
   * Tests FileUtil.createJarWithClassPath: "*" wildcard entries must expand to the
   * matching jar files (case-insensitive .jar/.JAR, non-jar files excluded), plain
   * entries pass through, a trailing path separator on a non-existent directory entry
   * is preserved, and an unexpanded wildcard ends up in the returned extra-classpath
   * element. The generated jar's manifest Class-Path attribute is compared against the
   * expected URL list.
   */
  @Test(timeout = 30000)
  public void testCreateJarWithClassPath() throws Exception {
    // setup test directory for files
    Assert.assertFalse(tmp.exists());
    Assert.assertTrue(tmp.mkdirs());

    // create files expected to match a wildcard
    List<File> wildcardMatches =
        Arrays.asList(
            new File(tmp, "wildcard1.jar"),
            new File(tmp, "wildcard2.jar"),
            new File(tmp, "wildcard3.JAR"),
            new File(tmp, "wildcard4.JAR"));
    for (File wildcardMatch : wildcardMatches) {
      Assert.assertTrue("failure creating file: " + wildcardMatch, wildcardMatch.createNewFile());
    }

    // create non-jar files, which we expect to not be included in the classpath
    Assert.assertTrue(new File(tmp, "text.txt").createNewFile());
    Assert.assertTrue(new File(tmp, "executable.exe").createNewFile());
    Assert.assertTrue(new File(tmp, "README").createNewFile());

    // create classpath jar
    String wildcardPath = tmp.getCanonicalPath() + File.separator + "*";
    String nonExistentSubdir = tmp.getCanonicalPath() + Path.SEPARATOR + "subdir" + Path.SEPARATOR;
    List<String> classPaths =
        Arrays.asList("", "cp1.jar", "cp2.jar", wildcardPath, "cp3.jar", nonExistentSubdir);
    String inputClassPath = StringUtils.join(File.pathSeparator, classPaths);
    String[] jarCp =
        FileUtil.createJarWithClassPath(
            inputClassPath + File.pathSeparator + "unexpandedwildcard/*",
            new Path(tmp.getCanonicalPath()),
            System.getenv());
    // jarCp[0] is the generated classpath jar, jarCp[1] the extra classpath element.
    String classPathJar = jarCp[0];
    assertNotEquals(
        "Unexpanded wildcard was not placed in extra classpath",
        jarCp[1].indexOf("unexpanded"),
        -1);

    // verify classpath by reading manifest from jar file
    JarFile jarFile = null;
    try {
      jarFile = new JarFile(classPathJar);
      Manifest jarManifest = jarFile.getManifest();
      Assert.assertNotNull(jarManifest);
      Attributes mainAttributes = jarManifest.getMainAttributes();
      Assert.assertNotNull(mainAttributes);
      Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
      String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
      Assert.assertNotNull(classPathAttr);
      // Build the expected classpath: wildcard expands to its matches, the empty
      // entry is dropped, everything else resolves to a file URL.
      List<String> expectedClassPaths = new ArrayList<String>();
      for (String classPath : classPaths) {
        if (classPath.length() == 0) {
          continue;
        }
        if (wildcardPath.equals(classPath)) {
          // add wildcard matches
          for (File wildcardMatch : wildcardMatches) {
            expectedClassPaths.add(wildcardMatch.toURI().toURL().toExternalForm());
          }
        } else {
          File fileCp = null;
          if (!new Path(classPath).isAbsolute()) {
            fileCp = new File(tmp, classPath);
          } else {
            fileCp = new File(classPath);
          }
          if (nonExistentSubdir.equals(classPath)) {
            // expect to maintain trailing path separator if present in input, even
            // if directory doesn't exist yet
            expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm() + Path.SEPARATOR);
          } else {
            expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm());
          }
        }
      }
      // Manifest Class-Path entries are space-separated; compare order-insensitively.
      List<String> actualClassPaths = Arrays.asList(classPathAttr.split(" "));
      Collections.sort(expectedClassPaths);
      Collections.sort(actualClassPaths);
      Assert.assertEquals(expectedClassPaths, actualClassPaths);
    } finally {
      if (jarFile != null) {
        try {
          jarFile.close();
        } catch (IOException e) {
          LOG.warn("exception closing jarFile: " + classPathJar, e);
        }
      }
    }
  }
예제 #10
0
@Public
@Evolving
public class YarnConfiguration extends Configuration {

  private static final Joiner JOINER = Joiner.on("");

  private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml";
  private static final String YARN_SITE_XML_FILE = "yarn-site.xml";

  static {
    Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE);
    Configuration.addDefaultResource(YARN_SITE_XML_FILE);
  }

  // Configurations

  public static final String YARN_PREFIX = "yarn.";

  /** Delay before deleting resource to ease debugging of NM issues */
  public static final String DEBUG_NM_DELETE_DELAY_SEC =
      YarnConfiguration.NM_PREFIX + "delete.debug-delay-sec";

  ////////////////////////////////
  // IPC Configs
  ////////////////////////////////
  public static final String IPC_PREFIX = YARN_PREFIX + "ipc.";

  /** Factory to create client IPC classes. */
  public static final String IPC_CLIENT_FACTORY_CLASS = IPC_PREFIX + "client.factory.class";

  public static final String DEFAULT_IPC_CLIENT_FACTORY_CLASS =
      "org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl";

  /** Factory to create server IPC classes. */
  public static final String IPC_SERVER_FACTORY_CLASS = IPC_PREFIX + "server.factory.class";

  public static final String DEFAULT_IPC_SERVER_FACTORY_CLASS =
      "org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl";

  /** Factory to create serializeable records. */
  public static final String IPC_RECORD_FACTORY_CLASS = IPC_PREFIX + "record.factory.class";

  public static final String DEFAULT_IPC_RECORD_FACTORY_CLASS =
      "org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl";

  /** RPC class implementation */
  public static final String IPC_RPC_IMPL = IPC_PREFIX + "rpc.class";

  public static final String DEFAULT_IPC_RPC_IMPL = "org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC";

  ////////////////////////////////
  // Resource Manager Configs
  ////////////////////////////////
  public static final String RM_PREFIX = "yarn.resourcemanager.";

  /** The address of the applications manager interface in the RM. */
  public static final String RM_ADDRESS = RM_PREFIX + "address";

  public static final int DEFAULT_RM_PORT = 8032;
  public static final String DEFAULT_RM_ADDRESS = "0.0.0.0:" + DEFAULT_RM_PORT;

  /** The number of threads used to handle applications manager requests. */
  public static final String RM_CLIENT_THREAD_COUNT = RM_PREFIX + "client.thread-count";

  public static final int DEFAULT_RM_CLIENT_THREAD_COUNT = 50;

  /** The Kerberos principal for the resource manager. */
  public static final String RM_PRINCIPAL = RM_PREFIX + "principal";

  /** The address of the scheduler interface. */
  public static final String RM_SCHEDULER_ADDRESS = RM_PREFIX + "scheduler.address";

  public static final int DEFAULT_RM_SCHEDULER_PORT = 8030;
  public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" + DEFAULT_RM_SCHEDULER_PORT;

  /** Miniumum request grant-able by the RM scheduler. */
  public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB =
      YARN_PREFIX + "scheduler.minimum-allocation-mb";

  public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024;
  public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES =
      YARN_PREFIX + "scheduler.minimum-allocation-vcores";
  public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = 1;

  // add by lism
  public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_GPU_CORES =
      YARN_PREFIX + "scheduler.minimum-allocation-gpu-cores";
  public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_GPU_CORES = 1;
  // end add

  /** Maximum request grant-able by the RM scheduler. */
  public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_MB =
      YARN_PREFIX + "scheduler.maximum-allocation-mb";

  public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = 8192;
  public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES =
      YARN_PREFIX + "scheduler.maximum-allocation-vcores";
  public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = 4;

  // add by lism
  public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_GPU_CORES =
      YARN_PREFIX + "scheduler.maximum-allocation-gpu-cores";
  public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_GPU_CORES = 4;
  // end add

  /** Number of threads to handle scheduler interface. */
  public static final String RM_SCHEDULER_CLIENT_THREAD_COUNT =
      RM_PREFIX + "scheduler.client.thread-count";

  public static final int DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT = 50;

  /**
   * Enable periodic monitor threads.
   *
   * @see #RM_SCHEDULER_MONITOR_POLICIES
   */
  public static final String RM_SCHEDULER_ENABLE_MONITORS = RM_PREFIX + "scheduler.monitor.enable";

  public static final boolean DEFAULT_RM_SCHEDULER_ENABLE_MONITORS = false;

  /** List of SchedulingEditPolicy classes affecting the scheduler. */
  public static final String RM_SCHEDULER_MONITOR_POLICIES =
      RM_PREFIX + "scheduler.monitor.policies";

  /** The address of the RM web application. */
  public static final String RM_WEBAPP_ADDRESS = RM_PREFIX + "webapp.address";

  public static final int DEFAULT_RM_WEBAPP_PORT = 8088;
  public static final String DEFAULT_RM_WEBAPP_ADDRESS = "0.0.0.0:" + DEFAULT_RM_WEBAPP_PORT;

  public static final String RM_RESOURCE_TRACKER_ADDRESS = RM_PREFIX + "resource-tracker.address";
  public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031;
  public static final String DEFAULT_RM_RESOURCE_TRACKER_ADDRESS =
      "0.0.0.0:" + DEFAULT_RM_RESOURCE_TRACKER_PORT;

  /** The expiry interval for application master reporting. */
  public static final String RM_AM_EXPIRY_INTERVAL_MS =
      YARN_PREFIX + "am.liveness-monitor.expiry-interval-ms";

  public static final int DEFAULT_RM_AM_EXPIRY_INTERVAL_MS = 600000;

  /** How long to wait until a node manager is considered dead. */
  public static final String RM_NM_EXPIRY_INTERVAL_MS =
      YARN_PREFIX + "nm.liveness-monitor.expiry-interval-ms";

  public static final int DEFAULT_RM_NM_EXPIRY_INTERVAL_MS = 600000;

  /** Are acls enabled. */
  public static final String YARN_ACL_ENABLE = YARN_PREFIX + "acl.enable";

  public static final boolean DEFAULT_YARN_ACL_ENABLE = false;

  /** ACL of who can be admin of YARN cluster. */
  public static final String YARN_ADMIN_ACL = YARN_PREFIX + "admin.acl";

  public static final String DEFAULT_YARN_ADMIN_ACL = "*";

  /** ACL used in case none is found. Allows nothing. */
  // NOTE(review): a single space appears to act as the "no extra users/groups"
  // ACL value (empty user and group lists) — confirm against the ACL parser.
  public static final String DEFAULT_YARN_APP_ACL = " ";

  /** The address of the RM admin interface. */
  public static final String RM_ADMIN_ADDRESS = RM_PREFIX + "admin.address";

  public static final int DEFAULT_RM_ADMIN_PORT = 8033;
  public static final String DEFAULT_RM_ADMIN_ADDRESS = "0.0.0.0:" + DEFAULT_RM_ADMIN_PORT;

  /** Number of threads used to handle RM admin interface. */
  public static final String RM_ADMIN_CLIENT_THREAD_COUNT = RM_PREFIX + "admin.client.thread-count";

  public static final int DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT = 1;

  /**
   * The maximum number of application attempts. It's a global setting for all application masters.
   */
  public static final String RM_AM_MAX_ATTEMPTS = RM_PREFIX + "am.max-attempts";

  public static final int DEFAULT_RM_AM_MAX_ATTEMPTS = 2;

  /** The keytab for the resource manager. */
  public static final String RM_KEYTAB = RM_PREFIX + "keytab";

  /**
   * How long to wait until a container is considered dead.
   *
   * <p>NOTE(review): the effective key is "yarn.resourcemanager.rm.container-allocation..." with a
   * doubled "rm." segment; keep it as-is for configuration compatibility.
   */
  public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS =
      RM_PREFIX + "rm.container-allocation.expiry-interval-ms";

  // 10 minutes in milliseconds.
  public static final int DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = 600000;

  /** Path to file with nodes to include. */
  public static final String RM_NODES_INCLUDE_FILE_PATH = RM_PREFIX + "nodes.include-path";

  public static final String DEFAULT_RM_NODES_INCLUDE_FILE_PATH = "";

  /** Path to file with nodes to exclude. */
  public static final String RM_NODES_EXCLUDE_FILE_PATH = RM_PREFIX + "nodes.exclude-path";

  public static final String DEFAULT_RM_NODES_EXCLUDE_FILE_PATH = "";

  /** Number of threads to handle resource tracker calls. */
  public static final String RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT =
      RM_PREFIX + "resource-tracker.client.thread-count";

  public static final int DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = 50;

  /** The class to use as the resource scheduler. */
  public static final String RM_SCHEDULER = RM_PREFIX + "scheduler.class";

  public static final String DEFAULT_RM_SCHEDULER =
      "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler";

  /** Heartbeat interval (ms) the RM sets for NodeManagers. */
  public static final String RM_NM_HEARTBEAT_INTERVAL_MS =
      RM_PREFIX + "nodemanagers.heartbeat-interval-ms";

  public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS = 1000;

  // Delegation token related keys. Defaults are int arithmetic widened to long;
  // both products (86_400_000 and 604_800_000) fit in int, so no overflow.
  public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY =
      RM_PREFIX + "delegation.key.update-interval";
  public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60 * 1000; // 1 day
  public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
      RM_PREFIX + "delegation.token.renew-interval";
  public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = 24 * 60 * 60 * 1000; // 1 day
  public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY =
      RM_PREFIX + "delegation.token.max-lifetime";
  public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
      7 * 24 * 60 * 60 * 1000; // 7 days

  /** Whether RM state recovery is enabled. */
  public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled";
  public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false;

  /** The class to use as the persistent store. */
  public static final String RM_STORE = RM_PREFIX + "store.class";

  /** URI for FileSystemRMStateStore. */
  public static final String FS_RM_STATE_STORE_URI = RM_PREFIX + "fs.state-store.uri";

  /** The maximum number of completed applications RM keeps. */
  public static final String RM_MAX_COMPLETED_APPLICATIONS =
      RM_PREFIX + "max-completed-applications";

  public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000;

  /** Default application name. */
  public static final String DEFAULT_APPLICATION_NAME = "N/A";

  /** Default application type. */
  public static final String DEFAULT_APPLICATION_TYPE = "YARN";

  // Length limit for application type strings — presumably used to truncate or
  // validate; confirm at the use sites.
  public static final int APPLICATION_TYPE_LENGTH = 20;

  /** Default queue name. */
  public static final String DEFAULT_QUEUE_NAME = "default";

  /** Buckets (in minutes) for the number of apps running in each queue. */
  public static final String RM_METRICS_RUNTIME_BUCKETS = RM_PREFIX + "metrics.runtime.buckets";

  /** Default sizes of the runtime metric buckets in minutes. */
  public static final String DEFAULT_RM_METRICS_RUNTIME_BUCKETS = "60,300,1440";

  /** Rolling interval (seconds) for the AM-RM token master key. */
  public static final String RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS =
      RM_PREFIX + "am-rm-tokens.master-key-rolling-interval-secs";

  public static final long DEFAULT_RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = 24 * 60 * 60;

  /** Rolling interval (seconds) for the container token master key. */
  public static final String RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS =
      RM_PREFIX + "container-tokens.master-key-rolling-interval-secs";

  public static final long DEFAULT_RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS =
      24 * 60 * 60;

  /** Rolling interval (seconds) for the NM token master key. */
  public static final String RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS =
      RM_PREFIX + "nm-tokens.master-key-rolling-interval-secs";

  public static final long DEFAULT_RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = 24 * 60 * 60;
  ////////////////////////////////
  // Node Manager Configs
  ////////////////////////////////

  /** Prefix for all node manager configs. */
  public static final String NM_PREFIX = "yarn.nodemanager.";

  /** Environment variables that will be sent to containers. */
  public static final String NM_ADMIN_USER_ENV = NM_PREFIX + "admin-env";

  public static final String DEFAULT_NM_ADMIN_USER_ENV = "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX";

  /** Environment variables that containers may override rather than use NodeManager's default. */
  public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist";

  // Comma-joined list of environment variable names containers may inherit.
  public static final String DEFAULT_NM_ENV_WHITELIST =
      StringUtils.join(
          ",",
          Arrays.asList(
              ApplicationConstants.Environment.JAVA_HOME.key(),
              ApplicationConstants.Environment.HADOOP_COMMON_HOME.key(),
              ApplicationConstants.Environment.HADOOP_HDFS_HOME.key(),
              ApplicationConstants.Environment.HADOOP_CONF_DIR.key(),
              ApplicationConstants.Environment.HADOOP_YARN_HOME.key()));

  /** Address of node manager IPC. */
  public static final String NM_ADDRESS = NM_PREFIX + "address";

  // Port 0 means the OS assigns an ephemeral port at bind time.
  public static final int DEFAULT_NM_PORT = 0;
  public static final String DEFAULT_NM_ADDRESS = "0.0.0.0:" + DEFAULT_NM_PORT;

  /** Who will execute (launch) the containers. */
  public static final String NM_CONTAINER_EXECUTOR = NM_PREFIX + "container-executor.class";

  /**
   * Adjustment to make to the container os scheduling priority. The valid values for this could
   * vary depending on the platform. On Linux, higher values mean run the containers at a less
   * favorable priority than the NM. The value specified is an int.
   */
  public static final String NM_CONTAINER_EXECUTOR_SCHED_PRIORITY =
      NM_PREFIX + "container-executor.os.sched.priority.adjustment";

  public static final int DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY = 0;

  /** Number of threads container manager uses. */
  public static final String NM_CONTAINER_MGR_THREAD_COUNT =
      NM_PREFIX + "container-manager.thread-count";

  public static final int DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT = 20;

  /** Number of threads used in cleanup. */
  public static final String NM_DELETE_THREAD_COUNT = NM_PREFIX + "delete.thread-count";

  public static final int DEFAULT_NM_DELETE_THREAD_COUNT = 4;

  /** Keytab for NM. */
  public static final String NM_KEYTAB = NM_PREFIX + "keytab";

  /** List of directories to store localized files in. */
  public static final String NM_LOCAL_DIRS = NM_PREFIX + "local-dirs";

  public static final String DEFAULT_NM_LOCAL_DIRS = "/tmp/nm-local-dir";

  /** Maximum number of files per localized directory. Avoid tuning this too low. */
  public static final String NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY =
      NM_PREFIX + "local-cache.max-files-per-directory";

  public static final int DEFAULT_NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY = 8192;

  /** Address where the localizer IPC is. */
  public static final String NM_LOCALIZER_ADDRESS = NM_PREFIX + "localizer.address";

  public static final int DEFAULT_NM_LOCALIZER_PORT = 8040;
  public static final String DEFAULT_NM_LOCALIZER_ADDRESS = "0.0.0.0:" + DEFAULT_NM_LOCALIZER_PORT;

  /** Interval in between cache cleanups. */
  public static final String NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS =
      NM_PREFIX + "localizer.cache.cleanup.interval-ms";

  // 10 minutes in milliseconds.
  public static final long DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = 10 * 60 * 1000;

  /** Target size of localizer cache in MB, per local directory. */
  public static final String NM_LOCALIZER_CACHE_TARGET_SIZE_MB =
      NM_PREFIX + "localizer.cache.target-size-mb";

  // 10 GiB expressed in MB.
  public static final long DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB = 10 * 1024;

  /** Number of threads to handle localization requests. */
  public static final String NM_LOCALIZER_CLIENT_THREAD_COUNT =
      NM_PREFIX + "localizer.client.thread-count";

  public static final int DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT = 5;

  /** Number of threads to use for localization fetching. */
  public static final String NM_LOCALIZER_FETCH_THREAD_COUNT =
      NM_PREFIX + "localizer.fetch.thread-count";

  public static final int DEFAULT_NM_LOCALIZER_FETCH_THREAD_COUNT = 4;

  /** Where to store container logs. */
  public static final String NM_LOG_DIRS = NM_PREFIX + "log-dirs";

  public static final String DEFAULT_NM_LOG_DIRS = "/tmp/logs";

  /** Interval (ms) at which the delayed delegation-token removal thread runs. */
  public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS =
      RM_PREFIX + "delayed.delegation-token.removal-interval-ms";

  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
  // Value unchanged (30 seconds).
  public static final long DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = 30000L;

  /** Whether to enable log aggregation. */
  public static final String LOG_AGGREGATION_ENABLED = YARN_PREFIX + "log-aggregation-enable";

  public static final boolean DEFAULT_LOG_AGGREGATION_ENABLED = false;

  /**
   * How long to wait before deleting aggregated logs; -1 disables deletion. Be careful: setting
   * this too small will spam the name node.
   */
  public static final String LOG_AGGREGATION_RETAIN_SECONDS =
      YARN_PREFIX + "log-aggregation.retain-seconds";

  public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1;

  /**
   * How long to wait between aggregated log retention checks. If set to a value <= 0 then the value
   * is computed as one-tenth of the log retention setting. Be careful: setting this too small will
   * spam the name node.
   */
  public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS =
      YARN_PREFIX + "log-aggregation.retain-check-interval-seconds";

  public static final long DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = -1;

  /**
   * Number of seconds to retain logs on the NodeManager. Only applicable if log aggregation is
   * disabled.
   */
  public static final String NM_LOG_RETAIN_SECONDS = NM_PREFIX + "log.retain-seconds";

  /** Number of threads used in log cleanup. Only applicable if log aggregation is disabled. */
  public static final String NM_LOG_DELETION_THREADS_COUNT =
      NM_PREFIX + "log.deletion-threads-count";

  // NOTE(review): name says "DELETE" while the key constant says "DELETION";
  // kept as-is because renaming a public constant would break callers.
  public static final int DEFAULT_NM_LOG_DELETE_THREAD_COUNT = 4;

  /** Where to aggregate logs to. */
  public static final String NM_REMOTE_APP_LOG_DIR = NM_PREFIX + "remote-app-log-dir";

  public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR = "/tmp/logs";

  /**
   * The remote log dir will be created at
   * NM_REMOTE_APP_LOG_DIR/${user}/NM_REMOTE_APP_LOG_DIR_SUFFIX/${appId}
   */
  public static final String NM_REMOTE_APP_LOG_DIR_SUFFIX = NM_PREFIX + "remote-app-log-dir-suffix";

  public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX = "logs";

  /** URL of the log server. */
  public static final String YARN_LOG_SERVER_URL = YARN_PREFIX + "log.server.url";

  // Key name suggests a tracking-URL generator class; confirm at the use site.
  public static final String YARN_TRACKING_URL_GENERATOR = YARN_PREFIX + "tracking.url.generator";

  /** Amount of memory in MB that can be allocated for containers. */
  public static final String NM_PMEM_MB = NM_PREFIX + "resource.memory-mb";

  // 8 GiB expressed in MB.
  public static final int DEFAULT_NM_PMEM_MB = 8 * 1024;

  /** Specifies whether physical memory check is enabled. */
  public static final String NM_PMEM_CHECK_ENABLED = NM_PREFIX + "pmem-check-enabled";

  public static final boolean DEFAULT_NM_PMEM_CHECK_ENABLED = true;

  /** Specifies whether virtual memory check is enabled. */
  public static final String NM_VMEM_CHECK_ENABLED = NM_PREFIX + "vmem-check-enabled";

  public static final boolean DEFAULT_NM_VMEM_CHECK_ENABLED = true;

  /** Conversion ratio for physical memory to virtual memory. */
  public static final String NM_VMEM_PMEM_RATIO = NM_PREFIX + "vmem-pmem-ratio";

  public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f;

  /** Number of Virtual CPU Cores which can be allocated for containers. */
  public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores";

  public static final int DEFAULT_NM_VCORES = 8;

  // add by lism
  /** Number of GPU cores that can be allocated for containers. */
  public static final String NM_GPU_CORES = NM_PREFIX + "resource.gpu-cores";
  public static final int DEFAULT_NM_GPU_CORES = 0;
  // end lism

  /** NM Webapp address. */
  public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address";

  public static final int DEFAULT_NM_WEBAPP_PORT = 8042;
  public static final String DEFAULT_NM_WEBAPP_ADDRESS = "0.0.0.0:" + DEFAULT_NM_WEBAPP_PORT;

  /** How often to monitor containers. */
  public static final String NM_CONTAINER_MON_INTERVAL_MS =
      NM_PREFIX + "container-monitor.interval-ms";

  public static final int DEFAULT_NM_CONTAINER_MON_INTERVAL_MS = 3000;

  /** Class that calculates containers current resource utilization. */
  public static final String NM_CONTAINER_MON_RESOURCE_CALCULATOR =
      NM_PREFIX + "container-monitor.resource-calculator.class";
  /** Class that calculates process tree resource utilization. */
  public static final String NM_CONTAINER_MON_PROCESS_TREE =
      NM_PREFIX + "container-monitor.process-tree.class";

  /**
   * Enable/Disable disks' health checker. Default is true. An expert level configuration property.
   */
  public static final String NM_DISK_HEALTH_CHECK_ENABLE = NM_PREFIX + "disk-health-checker.enable";
  /** Frequency of running disks' health checker. */
  public static final String NM_DISK_HEALTH_CHECK_INTERVAL_MS =
      NM_PREFIX + "disk-health-checker.interval-ms";
  /** By default, disks' health is checked every 2 minutes. */
  public static final long DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS = 2 * 60 * 1000;

  /**
   * The minimum fraction of number of disks to be healthy for the nodemanager to launch new
   * containers. This applies to nm-local-dirs and nm-log-dirs.
   */
  public static final String NM_MIN_HEALTHY_DISKS_FRACTION =
      NM_PREFIX + "disk-health-checker.min-healthy-disks";
  /**
   * By default, at least 25% of disks are to be healthy to say that the node is healthy in terms of
   * disks.
   */
  public static final float DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION = 0.25F;

  /** Frequency of running node health script. */
  public static final String NM_HEALTH_CHECK_INTERVAL_MS = NM_PREFIX + "health-checker.interval-ms";

  // 10 minutes in milliseconds.
  public static final long DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS = 10 * 60 * 1000;

  /** Health check script time out period. */
  public static final String NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS =
      NM_PREFIX + "health-checker.script.timeout-ms";

  // Twice the health-check interval.
  public static final long DEFAULT_NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS =
      2 * DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS;

  /** The health check script to run. */
  public static final String NM_HEALTH_CHECK_SCRIPT_PATH = NM_PREFIX + "health-checker.script.path";

  /** The arguments to pass to the health check script. */
  public static final String NM_HEALTH_CHECK_SCRIPT_OPTS = NM_PREFIX + "health-checker.script.opts";

  /** The path to the Linux container executor. */
  public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH =
      NM_PREFIX + "linux-container-executor.path";

  /**
   * The UNIX group that the linux-container-executor should run as. This is intended to be set as
   * part of container-executor.cfg.
   */
  public static final String NM_LINUX_CONTAINER_GROUP =
      NM_PREFIX + "linux-container-executor.group";

  /** The type of resource enforcement to use with the linux container executor. */
  public static final String NM_LINUX_CONTAINER_RESOURCES_HANDLER =
      NM_PREFIX + "linux-container-executor.resources-handler.class";

  /** The path the linux container executor should use for cgroups. */
  public static final String NM_LINUX_CONTAINER_CGROUPS_HIERARCHY =
      NM_PREFIX + "linux-container-executor.cgroups.hierarchy";

  /** Whether the linux container executor should mount cgroups if not found. */
  public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT =
      NM_PREFIX + "linux-container-executor.cgroups.mount";

  /** Where the linux container executor should mount cgroups if not found. */
  public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH =
      NM_PREFIX + "linux-container-executor.cgroups.mount-path";

  /** T-file compression types used to compress aggregated logs. */
  public static final String NM_LOG_AGG_COMPRESSION_TYPE =
      NM_PREFIX + "log-aggregation.compression-type";

  public static final String DEFAULT_NM_LOG_AGG_COMPRESSION_TYPE = "none";

  /** The kerberos principal for the node manager. */
  public static final String NM_PRINCIPAL = NM_PREFIX + "principal";

  /** Auxiliary services configured on the NM. */
  public static final String NM_AUX_SERVICES = NM_PREFIX + "aux-services";

  /** Format string for the per-service class key: aux-services.&lt;service&gt;.class. */
  public static final String NM_AUX_SERVICE_FMT = NM_PREFIX + "aux-services.%s.class";

  // Presumably the base for container users' home directories — confirm at use site.
  public static final String NM_USER_HOME_DIR = NM_PREFIX + "user-home-dir";

  public static final String DEFAULT_NM_USER_HOME_DIR = "/home/";

  ////////////////////////////////
  // Web Proxy Configs
  ////////////////////////////////
  public static final String PROXY_PREFIX = "yarn.web-proxy.";

  /** The kerberos principal for the proxy. */
  public static final String PROXY_PRINCIPAL = PROXY_PREFIX + "principal";

  /** Keytab for Proxy. */
  public static final String PROXY_KEYTAB = PROXY_PREFIX + "keytab";

  /** The address for the web proxy. */
  public static final String PROXY_ADDRESS = PROXY_PREFIX + "address";

  // YARN Service Level Authorization ACL keys. Note these are deliberately NOT
  // prefixed with "yarn." — they follow the service-authorization key scheme.
  public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL =
      "security.resourcetracker.protocol.acl";

  public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONCLIENT_PROTOCOL =
      "security.applicationclient.protocol.acl";
  public static final String
      YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCEMANAGER_ADMINISTRATION_PROTOCOL =
          "security.resourcemanager-administration.protocol.acl";
  public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL =
      "security.applicationmaster.protocol.acl";

  public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL =
      "security.containermanagement.protocol.acl";
  public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER =
      "security.resourcelocalizer.protocol.acl";

  /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL to a running container. */
  public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS =
      NM_PREFIX + "sleep-delay-before-sigkill.ms";

  public static final long DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS = 250;

  /** Max time to wait for a process to come up when trying to cleanup container resources. */
  public static final String NM_PROCESS_KILL_WAIT_MS = NM_PREFIX + "process-kill-wait.ms";

  public static final long DEFAULT_NM_PROCESS_KILL_WAIT_MS = 2000;

  /** Max time to wait to establish a connection to RM. */
  public static final String RESOURCEMANAGER_CONNECT_MAX_WAIT_MS =
      RM_PREFIX + "connect.max-wait.ms";

  // 15 minutes in milliseconds.
  public static final int DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = 15 * 60 * 1000;

  /** Time interval between each attempt to connect to RM. */
  public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS =
      RM_PREFIX + "connect.retry-interval.ms";

  // 30 seconds in milliseconds.
  public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS = 30 * 1000;

  /** CLASSPATH for YARN applications. A comma-separated list of CLASSPATH entries. */
  public static final String YARN_APPLICATION_CLASSPATH = YARN_PREFIX + "application.classpath";

  /**
   * Default CLASSPATH for YARN applications. A comma-separated list of CLASSPATH entries.
   *
   * <p>NOTE(review): public arrays are mutable — callers can overwrite elements. Consider
   * exposing a copy or an unmodifiable list instead (interface change, so flagged only).
   */
  public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
    ApplicationConstants.Environment.HADOOP_CONF_DIR.$(),
    ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() + "/share/hadoop/common/*",
    ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() + "/share/hadoop/common/lib/*",
    ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() + "/share/hadoop/hdfs/*",
    ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() + "/share/hadoop/hdfs/lib/*",
    ApplicationConstants.Environment.HADOOP_YARN_HOME.$() + "/share/hadoop/yarn/*",
    ApplicationConstants.Environment.HADOOP_YARN_HOME.$() + "/share/hadoop/yarn/lib/*"
  };

  /** Container temp directory. */
  public static final String DEFAULT_CONTAINER_TEMP_DIR = "./tmp";

  /** Whether this configuration is for a MiniYARNCluster. */
  public static final String IS_MINI_YARN_CLUSTER = YARN_PREFIX + "is.minicluster";

  /** Whether to use fixed ports with the minicluster. */
  public static final String YARN_MINICLUSTER_FIXED_PORTS = YARN_PREFIX + "minicluster.fixed.ports";

  /**
   * Default is false to be able to run tests concurrently without port conflicts.
   *
   * <p>NOTE(review): this field is not {@code final}, so it can be reassigned at runtime. Making
   * it final would be safer but could break code that writes it — verify before changing.
   */
  public static boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false;

  /**
   * Whether users are explicitly trying to control resource monitoring configuration for the
   * MiniYARNCluster. Disabled by default.
   */
  public static final String YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING =
      YARN_PREFIX + "minicluster.control-resource-monitoring";

  public static final boolean DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING = false;

  /** The log directory for the containers. */
  public static final String YARN_APP_CONTAINER_LOG_DIR = YARN_PREFIX + "app.container.log.dir";

  public static final String YARN_APP_CONTAINER_LOG_SIZE =
      YARN_PREFIX + "app.container.log.filesize";

  ////////////////////////////////
  // Other Configs
  ////////////////////////////////

  /**
   * The interval of the yarn client's querying application state after application submission. The
   * unit is millisecond.
   */
  public static final String YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS =
      YARN_PREFIX + "client.app-submission.poll-interval";

  public static final long DEFAULT_YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS = 1000;

  /** Max number of threads in NMClientAsync to process container management events. */
  public static final String NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE =
      YARN_PREFIX + "client.nodemanager-client-async.thread-pool-max-size";

  public static final int DEFAULT_NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE = 500;

  /**
   * Maximum number of proxy connections for node manager. It should always be more than 1. NMClient
   * and MRAppMaster will use this to cache connection with node manager. There will be at max one
   * connection per node manager. Ex. configuring it to a value of 5 will make sure that client will
   * at max have 5 connections cached with 5 different node managers. These connections will be
   * timed out if idle for more than system wide idle timeout period. The token if used for
   * authentication then it will be used only at connection creation time. If new token is received
   * then earlier connection should be closed in order to use newer token. Note: {@link
   * YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE} are related to each other.
   */
  public static final String NM_CLIENT_MAX_NM_PROXIES =
      YARN_PREFIX + "client.max-nodemanagers-proxies";

  public static final int DEFAULT_NM_CLIENT_MAX_NM_PROXIES = 500;

  /** Creates a YarnConfiguration with the superclass's default resource loading. */
  public YarnConfiguration() {
    super();
  }

  /**
   * Creates a YarnConfiguration from an existing {@code conf}. When the source is not already a
   * YarnConfiguration, configuration resources are reloaded — presumably so YARN-specific default
   * resources get picked up; confirm against the base class's resource registration.
   *
   * @param conf the configuration to copy properties from
   */
  public YarnConfiguration(Configuration conf) {
    super(conf);
    if (!(conf instanceof YarnConfiguration)) {
      this.reloadConfiguration();
    }
  }

  /**
   * Returns the configured web-proxy host:port, falling back to the RM web application address
   * when no proxy address is configured.
   *
   * @param conf configuration to read {@link #PROXY_ADDRESS} from
   * @return the proxy address, or the RM webapp host:port if the proxy address is unset/empty
   */
  public static String getProxyHostAndPort(Configuration conf) {
    String proxyAddr = conf.get(PROXY_ADDRESS);
    return (proxyAddr == null || proxyAddr.isEmpty()) ? getRMWebAppHostAndPort(conf) : proxyAddr;
  }

  /**
   * Resolves the host:port at which the RM web application is reachable. If the configured bind
   * address resolves to a wildcard or loopback address (useless to remote clients), the local
   * host's canonical name is substituted when it can be determined.
   *
   * @param conf configuration to read the RM webapp address from
   * @return a "host:port" string for the RM web application
   */
  public static String getRMWebAppHostAndPort(Configuration conf) {
    InetSocketAddress address =
        conf.getSocketAddr(
            YarnConfiguration.RM_WEBAPP_ADDRESS,
            YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS,
            YarnConfiguration.DEFAULT_RM_WEBAPP_PORT);
    address = NetUtils.getConnectAddress(address);
    // StringBuilder instead of the legacy synchronized StringBuffer: the buffer
    // is purely local, so synchronization buys nothing.
    StringBuilder sb = new StringBuilder();
    InetAddress resolved = address.getAddress();
    if (resolved == null || resolved.isAnyLocalAddress() || resolved.isLoopbackAddress()) {
      String host = address.getHostName();
      try {
        host = InetAddress.getLocalHost().getCanonicalHostName();
      } catch (UnknownHostException ignored) {
        // Fall back to whatever name the socket address reports.
      }
      sb.append(host);
    } else {
      sb.append(address.getHostName());
    }
    sb.append(":").append(address.getPort());
    return sb.toString();
  }

  /**
   * Builds the RM web application URL by joining "http://" with the RM webapp host:port.
   * NOTE(review): assumes JOINER (declared elsewhere in this file) concatenates its arguments
   * with no separator — verify against its declaration.
   */
  public static String getRMWebAppURL(Configuration conf) {
    return JOINER.join("http://", getRMWebAppHostAndPort(conf));
  }
}