void launchWorkerNode(Container container) throws IOException, YarnException {
  Map<String, String> env = System.getenv();
  ContainerLaunchContext workerContext = Records.newRecord(ContainerLaunchContext.class);

  // Register the worker jar as a local resource so the NodeManager localizes it.
  LocalResource workerJar = Records.newRecord(LocalResource.class);
  setupWorkerJar(workerJar);
  workerContext.setLocalResources(Collections.singletonMap("socialite.jar", workerJar));
  System.out.println("[Master] workerJar:" + workerJar);

  // Propagate the master's environment, plus worker-specific overrides.
  Map<String, String> workerEnv = new HashMap<String, String>(env);
  setupWorkerEnv(workerEnv);
  workerContext.setEnvironment(workerEnv);

  // NetUtils.getHostname() returns "hostname/ip-address"; take the address part.
  String opts = "-Dsocialite.master=" + NetUtils.getHostname().split("/")[1] + " ";
  opts += "-Dsocialite.worker.num_threads=" + ClusterConf.get().getNumWorkerThreads() + " ";
  workerContext.setCommands(
      Collections.singletonList(
          "$JAVA_HOME/bin/java -ea "
              + env.get("JVM_OPTS") + " "
              + env.get("SOCIALITE_OPTS") + " "
              + opts + " "
              + "socialite.dist.worker.WorkerNode"
              + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
              + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

  nmClient.startContainer(container, workerContext);
  L.info("Launched worker node (container:" + container.getId() + ")");
}
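The snippet above relies on two helpers that are not shown. A minimal sketch of what setupWorkerJar typically does, namely registering an already-uploaded jar as a YARN LocalResource, follows; the HDFS path and the use of ConverterUtils are assumptions based on the standard Hadoop 2.x pattern, not the project's actual code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.util.ConverterUtils;

void setupWorkerJar(LocalResource workerJar) throws IOException {
  // Hypothetical HDFS location where the client previously uploaded the jar.
  Path jarPath = new Path("/apps/socialite/socialite.jar");
  FileStatus stat = FileSystem.get(new Configuration()).getFileStatus(jarPath);
  // A LocalResource carries URL, size, and timestamp so the NodeManager can
  // verify the copy it localizes against the source file.
  workerJar.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
  workerJar.setSize(stat.getLen());
  workerJar.setTimestamp(stat.getModificationTime());
  workerJar.setType(LocalResourceType.FILE);
  workerJar.setVisibility(LocalResourceVisibility.APPLICATION);
}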
private ContainerLaunchContext newContainerLaunchContext(
    Container container, String helixInstanceName) throws IOException {
  Path appWorkDir =
      GobblinClusterUtils.getAppWorkDirPath(this.fs, this.applicationName, this.applicationId);
  Path containerWorkDir =
      new Path(appWorkDir, GobblinYarnConfigurationKeys.CONTAINER_WORK_DIR_NAME);

  Map<String, LocalResource> resourceMap = Maps.newHashMap();
  addContainerLocalResources(
      new Path(appWorkDir, GobblinYarnConfigurationKeys.LIB_JARS_DIR_NAME), resourceMap);
  addContainerLocalResources(
      new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_JARS_DIR_NAME), resourceMap);
  addContainerLocalResources(
      new Path(containerWorkDir, GobblinYarnConfigurationKeys.APP_FILES_DIR_NAME), resourceMap);

  if (this.config.hasPath(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY)) {
    addRemoteAppFiles(
        this.config.getString(GobblinYarnConfigurationKeys.CONTAINER_FILES_REMOTE_KEY),
        resourceMap);
  }

  ContainerLaunchContext containerLaunchContext =
      Records.newRecord(ContainerLaunchContext.class);
  containerLaunchContext.setLocalResources(resourceMap);
  containerLaunchContext.setEnvironment(
      YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
  containerLaunchContext.setCommands(
      Lists.newArrayList(buildContainerCommand(container, helixInstanceName)));

  if (UserGroupInformation.isSecurityEnabled()) {
    containerLaunchContext.setTokens(this.tokens.duplicate());
  }

  return containerLaunchContext;
}
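addContainerLocalResources is not shown above. A plausible sketch, assuming it simply walks a directory on the application's FileSystem and registers every file found there (the original implementation may differ in details):

private void addContainerLocalResources(Path destDir, Map<String, LocalResource> resourceMap)
    throws IOException {
  // Nothing to register if the directory was never created (e.g. no app files were uploaded).
  if (!this.fs.exists(destDir)) {
    return;
  }
  for (FileStatus status : this.fs.listStatus(destDir)) {
    LocalResource resource = Records.newRecord(LocalResource.class);
    resource.setResource(ConverterUtils.getYarnUrlFromPath(status.getPath()));
    resource.setSize(status.getLen());
    resource.setTimestamp(status.getModificationTime());
    resource.setType(LocalResourceType.FILE);
    resource.setVisibility(LocalResourceVisibility.APPLICATION);
    // The key is the file name the NodeManager creates in the container's working directory.
    resourceMap.put(status.getPath().getName(), resource);
  }
}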
public void submitApplication(
    ApplicationId appId,
    String appName,
    Map<String, String> env,
    Map<String, LocalResource> localResources,
    List<String> commands,
    int memory)
    throws URISyntaxException, IOException {
  if (clientResourceManager == null)
    throw new IllegalStateException(
        "Cannot submit an application without connecting to resource manager!");

  ApplicationSubmissionContext appCtx = Records.newRecord(ApplicationSubmissionContext.class);
  appCtx.setApplicationId(appId);
  appCtx.setApplicationName(appName);
  appCtx.setQueue("default");
  appCtx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
  // System.out.println("Based on my current user I am: "
  //     + UserGroupInformation.getCurrentUser().getShortUserName());

  Priority prio = Records.newRecord(Priority.class);
  prio.setPriority(0);
  appCtx.setPriority(prio);

  /*
  for (Map.Entry<String, LocalResource> entry : localResources.entrySet()) {
    System.out.println("IR:RM: " + entry.getKey() + "/" + entry.getValue().getResource());
  }
  */
  /*
  for (Map.Entry<String, String> entry : env.entrySet()) {
    System.out.println(
        "IR:ResourceManager -> Env vars: " + entry.getKey() + "/" + entry.getValue());
  }
  */

  // Launch context
  ContainerLaunchContext containerCtx = Records.newRecord(ContainerLaunchContext.class);
  containerCtx.setLocalResources(localResources);
  containerCtx.setCommands(commands);
  containerCtx.setEnvironment(env);
  containerCtx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());

  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(memory);
  containerCtx.setResource(capability);

  appCtx.setAMContainerSpec(containerCtx);

  SubmitApplicationRequest submitReq = Records.newRecord(SubmitApplicationRequest.class);
  submitReq.setApplicationSubmissionContext(appCtx);

  LOG.info("Submitting application to ASM");
  clientResourceManager.submitApplication(submitReq);
  // Don't return anything; ASM#submit returns an empty response
}
public void launchSupervisorOnContainer(Container container) throws IOException {
  LOG.info("Connecting to ContainerManager for containerid=" + container.getId());
  String cmIpPortStr = container.getNodeId().getHost() + ":" + container.getNodeId().getPort();
  InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr);
  LOG.info("Connecting to ContainerManager at " + cmIpPortStr);
  ContainerManager proxy =
      ((ContainerManager) rpc.getProxy(ContainerManager.class, cmAddress, hadoopConf));

  LOG.info("launchSupervisorOnContainer( id:" + container.getId() + " )");
  ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
  launchContext.setContainerId(container.getId());
  launchContext.setResource(container.getResource());

  try {
    launchContext.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
  } catch (IOException e) {
    LOG.info(
        "Getting current user info failed when trying to launch the container: "
            + e.getMessage());
  }

  Map<String, String> env = new HashMap<String, String>();
  launchContext.setEnvironment(env);

  // Localize the Storm distribution (a public archive on HDFS) and the generated configuration.
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  String stormVersion = Util.getStormVersion(this.storm_conf);
  Path zip = new Path("/lib/storm/" + stormVersion + "/storm.zip");
  FileSystem fs = FileSystem.get(this.hadoopConf);
  localResources.put(
      "storm",
      Util.newYarnAppResource(
          fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC));

  String appHome = Util.getApplicationHomeForId(this.appAttemptId.toString());
  Path dirDst = Util.createConfigurationFileInFs(fs, appHome, this.storm_conf, this.hadoopConf);
  localResources.put("conf", Util.newYarnAppResource(fs, dirDst));

  launchContext.setLocalResources(localResources);

  List<String> supervisorArgs = Util.buildSupervisorCommands(this.storm_conf);
  launchContext.setCommands(supervisorArgs);

  StartContainerRequest startRequest = Records.newRecord(StartContainerRequest.class);
  startRequest.setContainerLaunchContext(launchContext);

  LOG.info(
      "launchSupervisorOnContainer: startRequest prepared, calling startContainer. "
          + startRequest);
  try {
    StartContainerResponse response = proxy.startContainer(startRequest);
    LOG.info("Got a start container response " + response);
  } catch (Exception e) {
    LOG.error("Caught an exception while trying to start a container", e);
    System.exit(-1);
  }
}
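Util.newYarnAppResource wraps a path that already exists on HDFS in a LocalResource record. A sketch under the assumption that the two-argument overload used for "conf" above defaults to a FILE resource with APPLICATION visibility (both the defaults and the bodies are assumptions about this project-specific helper):

static LocalResource newYarnAppResource(
    FileSystem fs, Path path, LocalResourceType type, LocalResourceVisibility vis)
    throws IOException {
  // Qualify the path so the generated URL includes the filesystem scheme and authority.
  Path qualified = fs.makeQualified(path);
  FileStatus status = fs.getFileStatus(qualified);
  LocalResource resource = Records.newRecord(LocalResource.class);
  resource.setType(type);
  resource.setVisibility(vis);
  resource.setResource(ConverterUtils.getYarnUrlFromPath(qualified));
  resource.setTimestamp(status.getModificationTime());
  resource.setSize(status.getLen());
  return resource;
}

static LocalResource newYarnAppResource(FileSystem fs, Path path) throws IOException {
  // Assumed defaults for the short overload.
  return newYarnAppResource(
      fs, path, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION);
}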
public static ContainerLaunchContext newContainerLaunchContext(
    Map<String, LocalResource> localResources,
    Map<String, String> environment,
    List<String> commands,
    Map<String, ByteBuffer> serviceData,
    ByteBuffer tokens,
    Map<ApplicationAccessType, String> acls) {
  ContainerLaunchContext container =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  container.setLocalResources(localResources);
  container.setEnvironment(environment);
  container.setCommands(commands);
  container.setServiceData(serviceData);
  container.setTokens(tokens);
  container.setApplicationACLs(acls);
  return container;
}
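A hypothetical call site for the factory above, building a context for one shell command on a non-secure cluster. The variable names and the null arguments are illustrative assumptions; the record-backed setters accept null for the optional fields:

ContainerLaunchContext clc =
    newContainerLaunchContext(
        localResources,                               // jars/scripts to localize
        env,                                          // e.g. a CLASSPATH entry
        Collections.singletonList(
            "date 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"),
        null,                                         // no auxiliary-service data
        null,                                         // no security tokens
        null);                                        // default ACLs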
/**
 * Connects to CM, sets up container launch context for shell command and eventually dispatches
 * the container start request to the CM.
 */
@Override
public void run() {
  System.out.println(
      "Setting up container launch context for containerid=" + container.getId());
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

  // Set the environment
  StringBuilder classPathEnv =
      new StringBuilder(Environment.CLASSPATH.$())
          .append(File.pathSeparatorChar)
          .append("./*");
  for (String c :
      am.getConf()
          .getStrings(
              YarnConfiguration.YARN_APPLICATION_CLASSPATH,
              YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
    classPathEnv.append(File.pathSeparatorChar);
    classPathEnv.append(c.trim());
  }
  if (am.getConf().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
    classPathEnv.append(File.pathSeparatorChar);
    classPathEnv.append(System.getProperty("java.class.path"));
  }
  am.getShellEnv().put("CLASSPATH", classPathEnv.toString());
  ctx.setEnvironment(am.getShellEnv());

  ctx.setLocalResources(task.buildScriptsAndSetResources(container));

  // Set the necessary command to execute on the allocated container
  Vector<CharSequence> vargs = new Vector<>(5);
  vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
  // Set Xmx based on AM memory size
  vargs.add("-Xmx" + am.getContainerMemory() + "m");
  // Set class name
  vargs.add(HiWayConfiguration.HIWAY_WORKER_CLASS);
  vargs.add("--appId " + am.getAppId().toString());
  vargs.add("--containerId " + container.getId().toString());
  vargs.add("--workflowId " + task.getWorkflowId());
  vargs.add("--taskId " + task.getTaskId());
  vargs.add("--taskName " + task.getTaskName());
  vargs.add("--langLabel " + task.getLanguageLabel());
  vargs.add("--id " + task.getId());
  for (Data inputData : task.getInputData()) {
    vargs.add(
        "--input "
            + inputData.getLocalPath()
            + ","
            + inputData.isInput()
            + ","
            + inputData.getContainerId());
  }
  for (Data outputData : task.getOutputData()) {
    vargs.add("--output " + outputData.getLocalPath());
  }
  if (am.isDetermineFileSizes()) {
    vargs.add("--size");
  }
  String invocScript = task.getInvocScript();
  if (invocScript.length() > 0) {
    vargs.add("--invocScript " + invocScript);
  }
  vargs.add(">> " + Invocation.STDOUT_FILENAME);
  vargs.add("2>> " + Invocation.STDERR_FILENAME);

  // Get final command
  StringBuilder command = new StringBuilder();
  for (CharSequence str : vargs) {
    command.append(str).append(" ");
  }
  List<String> commands = new ArrayList<>();
  commands.add(command.toString());
  ctx.setCommands(commands);

  /*
   * Set up tokens for the container. For normal shell commands, the container in
   * distributed-shell doesn't need any tokens. We are populating them mainly for NodeManagers
   * to be able to download any files in the distributed file-system. The tokens are otherwise
   * also useful in cases, for e.g., when one is running a "hadoop dfs" command inside the
   * distributed shell.
   */
  ctx.setTokens(am.getAllTokens().duplicate());

  containerListener.addContainer(container.getId(), container);
  am.getNmClientAsync().startContainerAsync(container, ctx);
}
/**
 * Launch application for the dag represented by this client.
 *
 * @throws YarnException
 * @throws IOException
 */
public void startApplication() throws YarnException, IOException {
  Class<?>[] defaultClasses;

  if (applicationType.equals(YARN_APPLICATION_TYPE)) {
    // TODO restrict the security check to only check if security is enabled for webservices.
    if (UserGroupInformation.isSecurityEnabled()) {
      defaultClasses = DATATORRENT_SECURITY_CLASSES;
    } else {
      defaultClasses = DATATORRENT_CLASSES;
    }
  } else {
    throw new IllegalStateException(applicationType + " is not a valid application type.");
  }

  LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);

  if (resources != null) {
    localJarFiles.addAll(resources);
  }

  YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
  LOG.info(
      "Got cluster metric info from ASM"
          + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

  // GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
  // GetClusterNodesResponse clusterNodesResp =
  //     rmClient.clientRM.getClusterNodes(clusterNodesReq);
  // LOG.info("Got Cluster node info from ASM");
  // for (NodeReport node : clusterNodesResp.getNodeReports()) {
  //   LOG.info("Got node report from ASM for"
  //       + ", nodeId=" + node.getNodeId()
  //       + ", nodeAddress" + node.getHttpAddress()
  //       + ", nodeRackName" + node.getRackName()
  //       + ", nodeNumContainers" + node.getNumContainers()
  //       + ", nodeHealthStatus" + node.getHealthReport());
  // }

  List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
  for (QueueUserACLInfo aclInfo : listAclInfo) {
    for (QueueACL userAcl : aclInfo.getUserAcls()) {
      LOG.info(
          "User ACL Info for Queue"
              + ", queueName=" + aclInfo.getQueueName()
              + ", userAcl=" + userAcl.name());
    }
  }

  // Get a new application id
  YarnClientApplication newApp = yarnClient.createApplication();
  appId = newApp.getNewApplicationResponse().getApplicationId();

  // Dump out information about cluster capability as seen by the resource manager
  int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
  LOG.info("Max mem capability of resources in this cluster " + maxMem);
  int amMemory = dag.getMasterMemoryMB();
  if (amMemory > maxMem) {
    LOG.info(
        "AM memory specified above max threshold of cluster. Using max value."
            + ", specified=" + amMemory
            + ", max=" + maxMem);
    amMemory = maxMem;
  }

  if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
    dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
  }

  // Create launch context for app master
  LOG.info("Setting up application submission context for ASM");
  ApplicationSubmissionContext appContext =
      Records.newRecord(ApplicationSubmissionContext.class);

  // set the application id
  appContext.setApplicationId(appId);
  // set the application name
  appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
  appContext.setApplicationType(this.applicationType);
  if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
    // appContext.setMaxAppAttempts(1); // no retries until Stram is HA
  }

  // Set up the container launch context for the application master
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

  // Setup security tokens.
  // If security is enabled, get ResourceManager and NameNode delegation tokens and set them on
  // the container so that they are sent as part of application submission. This also sets them
  // up for renewal by the ResourceManager. The NameNode delegation token is also used by the
  // ResourceManager to fetch the jars from HDFS and set them up for the application master
  // launch.
  if (UserGroupInformation.isSecurityEnabled()) {
    Credentials credentials = new Credentials();
    String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
      throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
    }

    // For now, only getting tokens for the default file-system.
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
      final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
      if (tokens != null) {
        for (Token<?> token : tokens) {
          LOG.info("Got dt for " + fs.getUri() + "; " + token);
        }
      }
    } finally {
      fs.close();
    }

    addRMDelegationToken(tokenRenewer, credentials);

    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    amContainer.setTokens(fsTokens);
  }

  // Set local resources for the application master: local files or archives as needed.
  // In this scenario, the jar file for the application master is part of the local resources.
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

  // copy required jar files to dfs, to be localized for containers
  FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
  try {
    Path appsBasePath =
        new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
    Path appPath = new Path(appsBasePath, appId.toString());

    String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));
    LOG.info("libjars: {}", libJarsCsv);
    dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
    LaunchContainerRunnable.addFilesToLocalResources(
        LocalResourceType.FILE, libJarsCsv, localResources, fs);

    if (archives != null) {
      String[] localFiles = archives.split(",");
      String archivesCsv = copyFromLocal(fs, appPath, localFiles);
      LOG.info("archives: {}", archivesCsv);
      dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
      LaunchContainerRunnable.addFilesToLocalResources(
          LocalResourceType.ARCHIVE, archivesCsv, localResources, fs);
    }

    if (files != null) {
      String[] localFiles = files.split(",");
      String filesCsv = copyFromLocal(fs, appPath, localFiles);
      LOG.info("files: {}", filesCsv);
      dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
      LaunchContainerRunnable.addFilesToLocalResources(
          LocalResourceType.FILE, filesCsv, localResources, fs);
    }

    dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());

    if (dag.getAttributes().get(OperatorContext.STORAGE_AGENT) == null) {
      /* which would be the most likely case */
      Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
      // use conf client side to pickup any proxy settings from dt-site.xml
      dag.setAttribute(
          OperatorContext.STORAGE_AGENT, new FSStorageAgent(checkpointPath.toString(), conf));
    }

    if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
      dag.setAttribute(
          LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
    }

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
      Path log4jSrc = new Path(log4jPropFile);
      Path log4jDst = new Path(appPath, "log4j.props");
      fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
      FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
      LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
      log4jRsrc.setType(LocalResourceType.FILE);
      log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
      log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
      log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
      log4jRsrc.setSize(log4jFileStatus.getLen());
      localResources.put("log4j.properties", log4jRsrc);
    }

    if (originalAppId != null) {
      Path origAppPath = new Path(appsBasePath, this.originalAppId);
      LOG.info("Restart from {}", origAppPath);
      copyInitialState(origAppPath);
    }

    // push logical plan to DFS location
    Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
    FSDataOutputStream outStream = fs.create(cfgDst, true);
    LogicalPlan.write(this.dag, outStream);
    outStream.close();

    Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
    outStream = fs.create(launchConfigDst, true);
    conf.writeXml(outStream);
    outStream.close();

    FileStatus topologyFileStatus = fs.getFileStatus(cfgDst);
    LocalResource topologyRsrc = Records.newRecord(LocalResource.class);
    topologyRsrc.setType(LocalResourceType.FILE);
    topologyRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    topologyRsrc.setResource(ConverterUtils.getYarnUrlFromURI(cfgDst.toUri()));
    topologyRsrc.setTimestamp(topologyFileStatus.getModificationTime());
    topologyRsrc.setSize(topologyFileStatus.getLen());
    localResources.put(LogicalPlan.SER_FILE_NAME, topologyRsrc);

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be set up in the env where the application master will run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Add application jar(s) location to classpath.
    // At some point we should not be required to add the hadoop-specific classpaths to the
    // env; they should be provided out of the box. For now, set all required classpaths
    // including "." for the application jar(s).
    // Including ${CLASSPATH} would duplicate the classpath in the app master, so it is omitted:
    // StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
    StringBuilder classPathEnv = new StringBuilder("./*");
    String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
    for (String c :
        StringUtils.isBlank(classpath)
            ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
            : classpath.split(",")) {
      if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
        // SPOI-2501
        continue;
      }
      classPathEnv.append(':');
      classPathEnv.append(c.trim());
    }
    env.put("CLASSPATH", classPathEnv.toString());
    // propagate to replace node managers user name (effective in non-secure mode)
    env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(javaCmd);
    if (dag.isDebug()) {
      vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
    }
    // Set Xmx based on AM memory size; default heap size is 75% of total memory
    vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
    vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
    vargs.add("-XX:HeapDumpPath=/tmp/dt-heap-" + appId.getId() + ".bin");
    vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
    vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
    if (dag.isDebug()) {
      vargs.add("-Dlog4j.debug=true");
    }
    String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
    if (loggersLevel != null) {
      vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
    }
    vargs.add(StreamingAppMaster.class.getName());
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder(9 * vargs.size());
    for (CharSequence str : vargs) {
      command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements.
    // For now, only memory is supported, so we set memory requirements.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application.
    // Not needed in this scenario.
    // amContainer.setServiceData(serviceData);

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(amPriority);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(queueName);

    // Submit the application to the applications manager.
    // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
    // Ignore the response: either a valid response object is returned on success,
    // or an exception is thrown to denote a failure.
    String specStr =
        Objects.toStringHelper("Submitting application: ")
            .add("name", appContext.getApplicationName())
            .add("queue", appContext.getQueue())
            .add("user", UserGroupInformation.getLoginUser())
            .add("resource", appContext.getResource())
            .toString();
    LOG.info(specStr);
    if (dag.isDebug()) {
      // LOG.info("Full submission context: " + appContext);
    }
    yarnClient.submitApplication(appContext);
  } finally {
    fs.close();
  }
}
public void run(String[] args) throws Exception {
  final String command = args[0];
  final int n = Integer.valueOf(args[1]);
  final Path jarPath = new Path(args[2]);

  // Create yarnClient
  YarnConfiguration conf = new YarnConfiguration();
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  // Create application via yarnClient
  YarnClientApplication app = yarnClient.createApplication();

  // Set up the container launch context for the application master
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  amContainer.setCommands(
      Collections.singletonList(
          "$JAVA_HOME/bin/java"
              + " -Xmx256M"
              + " com.hortonworks.simpleyarnapp.ApplicationMaster"
              + " " + command
              + " " + String.valueOf(n)
              + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
              + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

  // Setup jar for ApplicationMaster
  LocalResource appMasterJar = Records.newRecord(LocalResource.class);
  setupAppMasterJar(jarPath, appMasterJar);
  System.out.println("Jar name is " + jarPath.getName());
  amContainer.setLocalResources(Collections.singletonMap(jarPath.getName(), appMasterJar));

  // Setup CLASSPATH for ApplicationMaster
  Map<String, String> appMasterEnv = new HashMap<String, String>();
  setupAppMasterEnv(appMasterEnv);
  amContainer.setEnvironment(appMasterEnv);

  // Set up resource type requirements for ApplicationMaster
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(256);
  capability.setVirtualCores(1);

  // Finally, set up the ApplicationSubmissionContext for the application
  ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
  appContext.setApplicationName("apache-yarn-example"); // application name
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setQueue("default"); // queue

  // Submit application
  ApplicationId appId = appContext.getApplicationId();
  System.out.println("Submitting application " + appId);
  yarnClient.submitApplication(appContext);

  ApplicationReport appReport = yarnClient.getApplicationReport(appId);
  YarnApplicationState appState = appReport.getYarnApplicationState();
  while (appState != YarnApplicationState.FINISHED
      && appState != YarnApplicationState.KILLED
      && appState != YarnApplicationState.FAILED) {
    System.out.println("App Status = " + appState);
    Thread.sleep(100);
    appReport = yarnClient.getApplicationReport(appId);
    appState = appReport.getYarnApplicationState();
  }

  System.out.println(
      "Application " + appId + " finished with"
          + " state " + appState
          + " at " + appReport.getFinishTime());
}
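The setupAppMasterJar and setupAppMasterEnv helpers referenced above typically look like the following in this style of example client; treat the exact bodies as a sketch (the conf field is the YarnConfiguration created above, assumed to be accessible):

private void setupAppMasterJar(Path jarPath, LocalResource appMasterJar) throws IOException {
  FileStatus jarStat = FileSystem.get(conf).getFileStatus(jarPath);
  appMasterJar.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
  appMasterJar.setSize(jarStat.getLen());
  appMasterJar.setTimestamp(jarStat.getModificationTime());
  appMasterJar.setType(LocalResourceType.FILE);
  appMasterJar.setVisibility(LocalResourceVisibility.PUBLIC);
}

private void setupAppMasterEnv(Map<String, String> appMasterEnv) {
  // Build the AM's CLASSPATH from the cluster's configured YARN classpath
  // plus the container's own working directory.
  for (String c :
      conf.getStrings(
          YarnConfiguration.YARN_APPLICATION_CLASSPATH,
          YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
    Apps.addToEnvironment(appMasterEnv, Environment.CLASSPATH.name(), c.trim());
  }
  Apps.addToEnvironment(
      appMasterEnv, Environment.CLASSPATH.name(), Environment.PWD.$() + File.separator + "*");
}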
/**
 * Connects to CM, sets up container launch context for shell command and eventually dispatches
 * the container start request to the CM.
 */
@Override
public void run() {
  LOG.info("Setting up container launch context for containerid=" + container.getId());
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

  // Set the environment
  ctx.setEnvironment(shellEnv);

  // Set the local resources
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

  // The container for the eventual shell commands needs its own local resources too.
  // In this scenario, if a shell script is specified, we need to have it copied and made
  // available to the container.
  if (!scriptPath.isEmpty()) {
    Path renamedScriptPath = null;
    if (Shell.WINDOWS) {
      renamedScriptPath = new Path(scriptPath + ".bat");
    } else {
      renamedScriptPath = new Path(scriptPath + ".sh");
    }

    try {
      // Rename the script file based on the underlying OS syntax.
      renameScriptFile(renamedScriptPath);
    } catch (Exception e) {
      LOG.error("Not able to add suffix (.bat/.sh) to the shell script filename", e);
      // We know we cannot continue launching the container, so we should release it.
      numCompletedContainers.incrementAndGet();
      numFailedContainers.incrementAndGet();
      return;
    }

    LocalResource shellRsrc = Records.newRecord(LocalResource.class);
    shellRsrc.setType(LocalResourceType.FILE);
    shellRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    try {
      shellRsrc.setResource(
          ConverterUtils.getYarnUrlFromURI(new URI(renamedScriptPath.toString())));
    } catch (URISyntaxException e) {
      LOG.error(
          "Error when trying to use shell script path specified"
              + " in env, path=" + renamedScriptPath,
          e);
      // A failure scenario on bad input such as an invalid shell script path.
      // We know we cannot continue launching the container, so we should release it.
      // TODO
      numCompletedContainers.incrementAndGet();
      numFailedContainers.incrementAndGet();
      return;
    }
    shellRsrc.setTimestamp(shellScriptPathTimestamp);
    shellRsrc.setSize(shellScriptPathLen);
    localResources.put(
        Shell.WINDOWS ? ExecBatScripStringtPath : ExecShellStringPath, shellRsrc);
    shellCommand = Shell.WINDOWS ? windows_command : linux_bash_command;
  }
  ctx.setLocalResources(localResources);

  // Set the necessary command to execute on the allocated container
  Vector<CharSequence> vargs = new Vector<CharSequence>(5);
  // Set executable command
  vargs.add(shellCommand);
  // Set shell script path
  if (!scriptPath.isEmpty()) {
    vargs.add(Shell.WINDOWS ? ExecBatScripStringtPath : ExecShellStringPath);
  }
  // Set args for the shell command if any
  vargs.add(shellArgs);
  // Add log redirect params
  vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
  vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");

  // Get final command
  StringBuilder command = new StringBuilder();
  for (CharSequence str : vargs) {
    command.append(str).append(" ");
  }
  List<String> commands = new ArrayList<String>();
  commands.add(command.toString());
  ctx.setCommands(commands);

  // Set up tokens for the container too. Today, for normal shell commands, the container in
  // distributed-shell doesn't need any tokens. We are populating them mainly for NodeManagers
  // to be able to download any files in the distributed file-system. The tokens are otherwise
  // also useful in cases, for e.g., when one is running a "hadoop dfs" command inside the
  // distributed shell.
  ctx.setTokens(allTokens.duplicate());

  containerListener.addContainer(container.getId(), container);
  nmClientAsync.startContainerAsync(container, ctx);
}