Code example #1
  /**
   * Start the child process to handle the task for us.
   *
   * @param conf the task's configuration
   * @param recordReader the fake record reader to update progress with
   * @param output the collector to send output to
   * @param reporter the reporter for the task
   * @param outputKeyClass the class of the output keys
   * @param outputValueClass the class of the output values
   * @throws IOException
   * @throws InterruptedException
   */
  Application(
      JobConf conf,
      RecordReader<FloatWritable, NullWritable> recordReader,
      OutputCollector<K2, V2> output,
      Reporter reporter,
      Class<? extends K2> outputKeyClass,
      Class<? extends V2> outputValueClass)
      throws IOException, InterruptedException {
    serverSocket = new ServerSocket(0);
    Map<String, String> env = new HashMap<String, String>();
    // add TMPDIR environment variable with the value of java.io.tmpdir
    env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
    env.put("hadoop.pipes.command.port", Integer.toString(serverSocket.getLocalPort()));
    List<String> cmd = new ArrayList<String>();
    String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
    FileUtil.chmod(executable, "a+x");
    cmd.add(executable);
    // wrap the command in a stdout/stderr capture
    TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
    File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
    File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
    long logLength = TaskLog.getTaskLogLength(conf);
    cmd = TaskLog.captureOutAndError(cmd, stdout, stderr, logLength);

    process = runClient(cmd, env);
    clientSocket = serverSocket.accept();
    handler = new OutputHandler<K2, V2>(output, reporter, recordReader);
    K2 outputKey = (K2) ReflectionUtils.newInstance(outputKeyClass, conf);
    V2 outputValue = (V2) ReflectionUtils.newInstance(outputValueClass, conf);
    downlink =
        new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, outputKey, outputValue, conf);
    downlink.start();
    downlink.setJobConf(conf);
  }
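The constructor above publishes the server socket's port to the child through the hadoop.pipes.command.port environment variable and then blocks in serverSocket.accept() until the child connects back. For context, here is a minimal, hypothetical sketch of the child side of that handshake; the real Hadoop Pipes client is the user's native executable, so this Java version only illustrates the connect-back step.

import java.io.IOException;
import java.net.Socket;

public class PipesChildHandshake {
  public static void main(String[] args) throws IOException {
    // read the port the parent published into the child's environment
    int port = Integer.parseInt(System.getenv("hadoop.pipes.command.port"));
    // connect back to the server socket opened in Application's constructor
    Socket commandSocket = new Socket("127.0.0.1", port);
    try {
      // ... speak the binary protocol over commandSocket ...
    } finally {
      commandSocket.close();
    }
  }
}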
Code example #2
  /**
   * Prepare the log files for the task
   *
   * @param taskid the task attempt whose log files are being prepared
   * @param isCleanup whether this is a cleanup attempt
   * @return an array of files. The first file is stdout, the second is stderr.
   * @throws IOException
   */
  File[] prepareLogFiles(TaskAttemptID taskid, boolean isCleanup) throws IOException {
    File[] logFiles = new File[2];
    logFiles[0] = TaskLog.getTaskLogFile(taskid, isCleanup, TaskLog.LogName.STDOUT);
    logFiles[1] = TaskLog.getTaskLogFile(taskid, isCleanup, TaskLog.LogName.STDERR);
    getTracker().getTaskController().createLogDir(taskid, isCleanup);

    return logFiles;
  }
Code example #3
File: TaskRunner.java  Project: amitkumaar/cdh-mesos
  /**
   * Prepare the log files for the task
   *
   * @param taskid the task attempt whose log files are being prepared
   * @param isCleanup whether this is a cleanup attempt
   * @return an array of files. The first file is stdout, the second is stderr.
   * @throws IOException
   */
  File[] prepareLogFiles(TaskAttemptID taskid, boolean isCleanup) throws IOException {
    File[] logFiles = new File[2];
    logFiles[0] = TaskLog.getTaskLogFile(taskid, isCleanup, TaskLog.LogName.STDOUT);
    logFiles[1] = TaskLog.getTaskLogFile(taskid, isCleanup, TaskLog.LogName.STDERR);
    File logDir = logFiles[0].getParentFile();
    boolean b = logDir.mkdirs();
    if (!b) {
      LOG.warn("mkdirs failed. Ignoring");
    } else {
      Localizer.PermissionsHandler.setPermissions(
          logDir, Localizer.PermissionsHandler.sevenZeroZero);
    }

    return logFiles;
  }
Code example #4
File: UserLogCleaner.java  Project: ukulililixl/core
 /**
  * Clears all the logs in userlog directory.
  *
  * <p>Adds the job directories for deletion with default retain hours. Deletes all other
  * directories, if any. This is usually called on reinit/restart of the TaskTracker.
  *
  * @param conf the configuration that supplies the default log retain hours
  * @throws IOException
  */
 void clearOldUserLogs(Configuration conf) throws IOException {
   File userLogDir = TaskLog.getUserLogDir();
   if (userLogDir.exists()) {
     String[] logDirs = userLogDir.list();
     if (logDirs.length > 0) {
        // add all the job log dirs to the log cleanup monitor.
       long now = clock.getTime();
       for (String logDir : logDirs) {
         if (logDir.equals(logAsyncDisk.TOBEDELETED)) {
           // skip this
           continue;
         }
         JobID jobid = null;
         try {
           jobid = JobID.forName(logDir);
         } catch (IllegalArgumentException ie) {
           // if the directory is not a jobid, delete it immediately
           deleteLogPath(new File(userLogDir, logDir).getAbsolutePath());
           continue;
         }
         // add the job log directory with default retain hours, if it is not
         // already added
         if (!completedJobs.containsKey(jobid)) {
           markJobLogsForDeletion(now, conf, jobid);
         }
       }
     }
   }
 }
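markJobLogsForDeletion is called above but not included in these snippets. Judging from processCompletedJobs in code example #8 below, which treats the stored value as an absolute deletion time, it presumably records "now plus the configured retain hours" for the job. A minimal sketch under that assumption (the 24-hour default is an assumption, not taken from the source):

  // Hypothetical sketch of the bookkeeping implied by processCompletedJobs():
  // remember the absolute time at which the job's logs become deletable.
  private void markJobLogsForDeletion(long now, Configuration conf, JobID jobid) {
    int retainHours = conf.getInt(JobContext.USER_LOG_RETAIN_HOURS, 24);
    long deleteTimestamp = now + retainHours * 60L * 60L * 1000L;
    synchronized (completedJobs) {
      completedJobs.put(jobid, deleteTimestamp);
    }
  }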
Code example #5
 private File localizeJob(JobID jobid) throws IOException {
   String user = UserGroupInformation.getCurrentUser().getShortUserName();
   new JobLocalizer(tt.getJobConf(), user, jobid.toString()).initializeJobLogDir();
   File jobUserlog = TaskLog.getJobDir(jobid);
   JobConf conf = new JobConf();
   // localize job log directory
   tt.saveLogDir(jobid, conf);
   assertTrue(jobUserlog + " directory is not created.", jobUserlog.exists());
   return jobUserlog;
 }
Code example #6
 @Override
 public void activateOptions() {
   synchronized (this) {
     if (maxEvents > 0) {
       tail = new LinkedList<LoggingEvent>();
     }
     setFile(
         TaskLog.getTaskLogFile(TaskAttemptID.forName(taskId), isCleanup, TaskLog.LogName.SYSLOG)
             .toString());
     setAppend(true);
     super.activateOptions();
   }
 }
Code example #7
 @Override
 public void initializeJob(
     String user,
     String jobid,
     Path credentials,
     Path jobConf,
     TaskUmbilicalProtocol taskTracker,
     InetSocketAddress ttAddr)
     throws IOException {
   List<String> command =
       new ArrayList<String>(
           Arrays.asList(
               taskControllerExe,
               user,
               localStorage.getDirsString(),
               Integer.toString(Commands.INITIALIZE_JOB.getValue()),
               jobid,
               credentials.toUri().getPath().toString(),
               jobConf.toUri().getPath().toString()));
   File jvm = // use same jvm as parent
       new File(new File(System.getProperty("java.home"), "bin"), "java");
   command.add(jvm.toString());
   command.add("-classpath");
   command.add(System.getProperty("java.class.path"));
   command.add("-Dhadoop.log.dir=" + TaskLog.getBaseLogDir());
   command.add("-Dhadoop.root.logger=INFO,console");
   command.add(JobLocalizer.class.getName()); // main of JobLocalizer
   command.add(user);
   command.add(jobid);
   // add the task tracker's reporting address
   command.add(ttAddr.getHostName());
   command.add(Integer.toString(ttAddr.getPort()));
   String[] commandArray = command.toArray(new String[0]);
   ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
   if (LOG.isDebugEnabled()) {
     LOG.debug("initializeJob: " + Arrays.toString(commandArray));
   }
   try {
     shExec.execute();
     if (LOG.isDebugEnabled()) {
       logOutput(shExec.getOutput());
     }
   } catch (ExitCodeException e) {
     int exitCode = shExec.getExitCode();
     logOutput(shExec.getOutput());
     throw new IOException(
         "Job initialization failed (" + exitCode + ") with output: " + shExec.getOutput(), e);
   }
 }
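logOutput is invoked several times above but is not among these snippets. A plausible minimal helper, assuming it simply echoes the task-controller's captured output to the TaskTracker log line by line (hypothetical, not the verbatim implementation):

  // Hypothetical helper backing the logOutput(...) calls above: write each line
  // of the captured task-controller output to the TaskTracker's log.
  private void logOutput(String output) {
    if (output != null && output.length() > 0) {
      for (String line : output.split("\n")) {
        LOG.info(line);
      }
    }
  }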
Code example #8
File: UserLogCleaner.java  Project: ukulililixl/core
 void processCompletedJobs() throws IOException {
   long now = clock.getTime();
   // iterate through completedJobs and remove old logs.
   synchronized (completedJobs) {
     Iterator<Entry<JobID, Long>> completedJobIter = completedJobs.entrySet().iterator();
     while (completedJobIter.hasNext()) {
       Entry<JobID, Long> entry = completedJobIter.next();
       // see if the job is old enough
       if (entry.getValue().longValue() <= now) {
          // add the job's log directory for deletion
         deleteLogPath(TaskLog.getJobDir(entry.getKey()).getAbsolutePath());
         completedJobIter.remove();
       }
     }
   }
 }
Code example #9
  @Override
  public void truncateLogsAsUser(String user, List<Task> allAttempts) throws IOException {

    Task firstTask = allAttempts.get(0);
    String taskid = firstTask.getTaskID().toString();

    LocalDirAllocator ldirAlloc = new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
    String taskRanFile = TaskTracker.TT_LOG_TMP_DIR + Path.SEPARATOR + taskid;
    Configuration conf = getConf();

    // write the serialized task information to a file to pass to the truncater
    Path taskRanFilePath = ldirAlloc.getLocalPathForWrite(taskRanFile, conf);
    LocalFileSystem lfs = FileSystem.getLocal(conf);
    FSDataOutputStream out = lfs.create(taskRanFilePath);
    out.writeInt(allAttempts.size());
    for (Task t : allAttempts) {
      out.writeBoolean(t.isMapTask());
      t.write(out);
    }
    out.close();
    lfs.setPermission(taskRanFilePath, FsPermission.createImmutable((short) 0755));

    List<String> command = new ArrayList<String>();
    File jvm = // use same jvm as parent
        new File(new File(System.getProperty("java.home"), "bin"), "java");
    command.add(jvm.toString());
    command.add("-Djava.library.path=" + System.getProperty("java.library.path"));
    command.add("-Dhadoop.log.dir=" + TaskLog.getBaseLogDir());
    command.add("-Dhadoop.root.logger=INFO,console");
    command.add("-classpath");
    command.add(System.getProperty("java.class.path"));
    // main of TaskLogsTruncater
    command.add(TaskLogsTruncater.class.getName());
    command.add(taskRanFilePath.toString());
    String[] taskControllerCmd = new String[4 + command.size()];
    taskControllerCmd[0] = taskControllerExe;
    taskControllerCmd[1] = user;
    taskControllerCmd[2] = localStorage.getDirsString();
    taskControllerCmd[3] = Integer.toString(Commands.RUN_COMMAND_AS_USER.getValue());

    int i = 4;
    for (String cmdArg : command) {
      taskControllerCmd[i++] = cmdArg;
    }
    if (LOG.isDebugEnabled()) {
      for (String cmd : taskControllerCmd) {
        LOG.debug("taskctrl command = " + cmd);
      }
    }
    ShellCommandExecutor shExec = new ShellCommandExecutor(taskControllerCmd);
    try {
      shExec.execute();
    } catch (Exception e) {
      LOG.warn(
          "Exit code from "
              + taskControllerExe.toString()
              + " is : "
              + shExec.getExitCode()
              + " for truncateLogs");
      LOG.warn(
          "Exception thrown by "
              + taskControllerExe.toString()
              + " : "
              + StringUtils.stringifyException(e));
      LOG.info("Output from LinuxTaskController's " + taskControllerExe.toString() + " follows:");
      logOutput(shExec.getOutput());
      lfs.delete(taskRanFilePath, false);
      throw new IOException(e);
    }
    lfs.delete(taskRanFilePath, false);
    if (LOG.isDebugEnabled()) {
      LOG.info("Output from LinuxTaskController's " + taskControllerExe.toString() + " follows:");
      logOutput(shExec.getOutput());
    }
  }
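The file handed to TaskLogsTruncater above is written as an int count followed by, for each attempt, a boolean map/reduce flag and the serialized Task. A reader on the truncater side would presumably mirror that layout; the sketch below is hypothetical and assumes Task's Writable readFields() plus no-arg MapTask/ReduceTask constructors are available for deserialization.

  // Hypothetical reader mirroring the write format used in truncateLogsAsUser():
  // an int count, then per attempt a boolean isMap flag followed by the Task fields.
  private List<Task> readTaskRanFile(LocalFileSystem lfs, Path taskRanFilePath) throws IOException {
    List<Task> attempts = new ArrayList<Task>();
    FSDataInputStream in = lfs.open(taskRanFilePath);
    try {
      int numAttempts = in.readInt();
      for (int i = 0; i < numAttempts; i++) {
        boolean isMap = in.readBoolean();
        Task t = isMap ? new MapTask() : new ReduceTask();
        t.readFields(in); // mirrors t.write(out) in the writer
        attempts.add(t);
      }
    } finally {
      in.close();
    }
    return attempts;
  }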
Code example #10
File: TaskRunner.java  Project: amitkumaar/cdh-mesos
  @Override
  public final void run() {
    String errorInfo = "Child Error";
    try {

      // before preparing the job localize
      // all the archives
      TaskAttemptID taskid = t.getTaskID();
      final LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
      final File workDir = formWorkDir(lDirAlloc, taskid, t.isTaskCleanupTask(), conf);

      // We don't create any symlinks yet, so presence/absence of workDir
      // actually on the file system doesn't matter.
      tip.getUGI()
          .doAs(
              new PrivilegedExceptionAction<Void>() {
                public Void run() throws IOException {
                  taskDistributedCacheManager =
                      tracker
                          .getTrackerDistributedCacheManager()
                          .newTaskDistributedCacheManager(conf);
                  taskDistributedCacheManager.setup(
                      lDirAlloc,
                      workDir,
                      TaskTracker.getPrivateDistributedCacheDir(conf.getUser()),
                      TaskTracker.getPublicDistributedCacheDir());
                  return null;
                }
              });

      // Set up the child task's configuration. After this call, no localization
      // of files should happen in the TaskTracker's process space. Any changes to
      // the conf object after this will NOT be reflected to the child.
      setupChildTaskConfiguration(lDirAlloc);

      if (!prepare()) {
        return;
      }

      // Accumulates class paths for child.
      List<String> classPaths = getClassPaths(conf, workDir, taskDistributedCacheManager);

      long logSize = TaskLog.getTaskLogLength(conf);

      //  Build exec child JVM args.
      Vector<String> vargs = getVMArgs(taskid, workDir, classPaths, logSize);

      tracker.addToMemoryManager(t.getTaskID(), t.isMapTask(), conf);

      // set memory limit using ulimit if feasible and necessary ...
      List<String> setup = getVMSetupCmd();
      // Set up the redirection of the task's stdout and stderr streams
      File[] logFiles = prepareLogFiles(taskid, t.isTaskCleanupTask());
      File stdout = logFiles[0];
      File stderr = logFiles[1];
      List<TaskTrackerInstrumentation> ttInstrumentations =
          tracker.getTaskTrackerInstrumentations();
      for (TaskTrackerInstrumentation inst : ttInstrumentations) {
        inst.reportTaskLaunch(taskid, stdout, stderr);
      }

      Map<String, String> env = new HashMap<String, String>();
      errorInfo = getVMEnvironment(errorInfo, workDir, conf, env, taskid, logSize);

      launchJvmAndWait(setup, vargs, stdout, stderr, logSize, workDir, env);
      for (TaskTrackerInstrumentation inst : ttInstrumentations) {
        inst.reportTaskEnd(t.getTaskID());
      }
      if (exitCodeSet) {
        if (!killed && exitCode != 0) {
          if (exitCode == 65) {
            for (TaskTrackerInstrumentation inst : ttInstrumentations) {
              inst.taskFailedPing(t.getTaskID());
            }
          }
          throw new IOException("Task process exit with nonzero status of " + exitCode + ".");
        }
      }
    } catch (FSError e) {
      LOG.fatal("FSError", e);
      try {
        tracker.internalFsError(t.getTaskID(), e.getMessage());
      } catch (IOException ie) {
        LOG.fatal(t.getTaskID() + " reporting FSError", ie);
      }
    } catch (Throwable throwable) {
      LOG.warn(t.getTaskID() + " : " + errorInfo, throwable);
      Throwable causeThrowable = new Throwable(errorInfo, throwable);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      causeThrowable.printStackTrace(new PrintStream(baos));
      try {
        tracker.internalReportDiagnosticInfo(t.getTaskID(), baos.toString());
      } catch (IOException e) {
        LOG.warn(t.getTaskID() + " Reporting Diagnostics", e);
      }
    } finally {
      try {
        if (taskDistributedCacheManager != null) {
          taskDistributedCacheManager.release();
        }
      } catch (IOException ie) {
        LOG.warn("Error releasing caches : Cache files might not have been cleaned up");
      }

      // It is safe to call TaskTracker.TaskInProgress.reportTaskFinished with
      // *false* since the task has either
      // a) SUCCEEDED - which means commit has been done
      // b) FAILED - which means we do not need to commit
      tip.reportTaskFinished(false);
    }
  }
Code example #11
File: UserLogCleaner.java  Project: ukulililixl/core
 UserLogCleaner(Configuration conf) throws IOException {
   threadSleepTime = conf.getLong(TTConfig.TT_USERLOGCLEANUP_SLEEPTIME, DEFAULT_THREAD_SLEEP_TIME);
   logAsyncDisk =
       new MRAsyncDiskService(FileSystem.getLocal(conf), TaskLog.getUserLogDir().toString());
   setClock(new Clock());
 }
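The constructor only wires up the sweep interval and the async disk service; the periodic sweep itself is not among these snippets. Assuming UserLogCleaner runs as a daemon thread, its loop would presumably just alternate sleeping for threadSleepTime and calling processCompletedJobs() (code example #8). A hedged sketch of that loop (the logging is illustrative):

  // Hypothetical run loop for the cleaner thread: sleep for the configured
  // interval, then sweep the completed-jobs map for logs that are old enough.
  @Override
  public void run() {
    while (true) {
      try {
        Thread.sleep(threadSleepTime);
        processCompletedJobs();
      } catch (InterruptedException e) {
        return; // shut down when the TaskTracker stops the cleaner
      } catch (IOException e) {
        LOG.warn("Error while cleaning up user logs", e);
      }
    }
  }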
Code example #12
  @Override
  public final void run() {
    String errorInfo = "Child Error";
    try {

      // before preparing the job localize
      // all the archives
      TaskAttemptID taskid = t.getTaskID();
      final LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
      // simply get the location of the workDir and pass it to the child. The
      // child will do the actual dir creation
      final File workDir =
          new File(
              new Path(
                      localdirs[rand.nextInt(localdirs.length)],
                      TaskTracker.getTaskWorkDir(
                          t.getUser(),
                          taskid.getJobID().toString(),
                          taskid.toString(),
                          t.isTaskCleanupTask()))
                  .toString());

      // Set up the child task's configuration. After this call, no localization
      // of files should happen in the TaskTracker's process space. Any changes to
      // the conf object after this will NOT be reflected to the child.
      // setupChildTaskConfiguration(lDirAlloc);

      if (!prepare()) {
        return;
      }

      // Accumulates class paths for child.
      List<String> classPaths = getClassPaths(conf, workDir, taskDistributedCacheManager);

      long logSize = TaskLog.getTaskLogLength(conf);

      //  Build exec child JVM args.
      Vector<String> vargs = getVMArgs(taskid, workDir, classPaths, logSize);

      tracker.addToMemoryManager(t.getTaskID(), t.isMapTask(), conf);

      // set memory limit using ulimit if feasible and necessary ...
      String setup = getVMSetupCmd();
      // Set up the redirection of the task's stdout and stderr streams
      File[] logFiles = prepareLogFiles(taskid, t.isTaskCleanupTask());
      File stdout = logFiles[0];
      File stderr = logFiles[1];
      tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout, stderr);

      Map<String, String> env = new HashMap<String, String>();
      errorInfo = getVMEnvironment(errorInfo, workDir, conf, env, taskid, logSize);

      // flatten the env as a set of export commands
      List<String> setupCmds = new ArrayList<String>();
      appendEnvExports(setupCmds, env);
      setupCmds.add(setup);

      launchJvmAndWait(setupCmds, vargs, stdout, stderr, logSize, workDir);
      tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID());
      if (exitCodeSet) {
        if (!killed && exitCode != 0) {
          if (exitCode == 65) {
            tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID());
          }
          throw new IOException("Task process exit with nonzero status of " + exitCode + ".");
        }
      }
    } catch (FSError e) {
      LOG.fatal("FSError", e);
      try {
        tracker.internalFsError(t.getTaskID(), e.getMessage());
      } catch (IOException ie) {
        LOG.fatal(t.getTaskID() + " reporting FSError", ie);
      }
    } catch (Throwable throwable) {
      LOG.warn(t.getTaskID() + " : " + errorInfo, throwable);
      Throwable causeThrowable = new Throwable(errorInfo, throwable);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      causeThrowable.printStackTrace(new PrintStream(baos));
      try {
        tracker.internalReportDiagnosticInfo(t.getTaskID(), baos.toString());
      } catch (IOException e) {
        LOG.warn(t.getTaskID() + " Reporting Diagnostics", e);
      }
    } finally {

      // It is safe to call TaskTracker.TaskInProgress.reportTaskFinished with
      // *false* since the task has either
      // a) SUCCEEDED - which means commit has been done
      // b) FAILED - which means we do not need to commit
      tip.reportTaskFinished(false);
    }
  }
Code example #13
  @SuppressWarnings("deprecation")
  @Override
  public void run() {
    Task task = getTask();
    TaskAttemptID taskid = task.getTaskID();
    try {
      if (!prepare()) {
        return;
      }

      String sep = System.getProperty("path.separator");
      StringBuffer classPath = new StringBuffer();
      // The alternate runtime can be used to debug tasks by putting a
      // custom version of the mapred libraries. This will get loaded before
      // the TT's jars.
      String debugRuntime = conf.get("mapred.task.debug.runtime.classpath");
      if (debugRuntime != null) {
        classPath.append(debugRuntime);
        classPath.append(sep);
      }
      // start with same classpath as parent process

      String systemClassPath = System.getProperty("java.class.path");
      if (releasePath != null
          && !releasePath.isEmpty()
          && originalPath != null
          && !originalPath.isEmpty()) {
        systemClassPath = systemClassPath.replaceAll(originalPath, releasePath);
      }
      classPath.append(systemClassPath);
      classPath.append(sep);
      //  Build exec child JVM args.
      Vector<String> vargs = new Vector<String>(8);
      File jvm = // use same jvm as parent
          new File(new File(System.getProperty("java.home"), "bin"), "java");

      vargs.add(jvm.toString());

      // Add child (task) java-vm options.
      String javaOpts = getCJTJavaOpts(conf);
      javaOpts = javaOpts.replace("@taskid@", taskid.toString());
      String[] javaOptsSplit = javaOpts.split(" ");
      for (int i = 0; i < javaOptsSplit.length; i++) {
        vargs.add(javaOptsSplit[i]);
      }

      // add java.io.tmpdir given by mapred.child.tmp
      String tmp = conf.get("mapred.child.tmp", "./tmp");
      Path tmpDir = new Path(tmp);
      // if temp directory path is not absolute
      // prepend it with workDir.
      if (!tmpDir.isAbsolute()) {
        tmpDir = new Path(workDir.toString(), tmp);
      }
      FileSystem localFs = FileSystem.getLocal(conf);
      if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) {
        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
      }
      vargs.add("-Djava.io.tmpdir=" + tmpDir.toString());

      // Add classpath.
      vargs.add("-classpath");
      vargs.add(classPath.toString());

      // Setup the log4j prop
      long logSize = TaskLog.getTaskLogLength(conf);
      vargs.add("-Dhadoop.log.dir=" + CoronaTaskTracker.jobTrackerLogDir());
      boolean logToScribe = conf.getBoolean("mapred.task.log.scribe", false);
      if (logToScribe) {
        vargs.addAll(conf.getStringCollection("mapred.task.log.scribe.conf"));
      }
      String logger = logToScribe ? "INFO,TLA,scribe" : "INFO,TLA";

      vargs.add("-Dhadoop.root.logger=" + logger);
      vargs.add("-Dhadoop.tasklog.taskid=" + taskid);
      vargs.add("-Dhadoop.tasklog.totalLogFileSize=" + logSize);

      Path systemDirectory = tracker.systemDirectory;
      if (!systemDirectory.isAbsolute()) {
        systemDirectory = new Path(tracker.systemFS.getWorkingDirectory(), systemDirectory);
      }
      systemDirectory = systemDirectory.makeQualified(tracker.systemFS);
      vargs.add("-Dmapred.system.dir=" + systemDirectory);

      // Add main class and its arguments
      vargs.add(CoronaJobTracker.class.getName()); // main of CJT
      vargs.add(task.getJobID().toString()); // Pass job id.
      vargs.add(task.getTaskID().toString()); // Pass attempt id.
      vargs.add(coronaSessionInfo.getJobTrackerAddr().getHostName());
      vargs.add(Integer.toString(coronaSessionInfo.getJobTrackerAddr().getPort()));

      tracker.addToMemoryManager(task.getTaskID(), task.isMapTask(), conf);

      // set memory limit using ulimit if feasible and necessary ...
      String[] ulimitCmd = Shell.getUlimitMemoryCommand(getChildUlimit(conf));
      List<String> setup = null;
      if (ulimitCmd != null) {
        setup = new ArrayList<String>();
        for (String arg : ulimitCmd) {
          setup.add(arg);
        }
      }

      // Set up the redirection of the task's stdout and stderr streams
      File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
      File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
      stdout.getParentFile().mkdirs();

      Map<String, String> env = new HashMap<String, String>();
      StringBuffer ldLibraryPath = new StringBuffer();
      ldLibraryPath.append(workDir.toString());
      String oldLdLibraryPath = null;
      oldLdLibraryPath = System.getenv("LD_LIBRARY_PATH");
      if (oldLdLibraryPath != null) {
        ldLibraryPath.append(sep);
        ldLibraryPath.append(oldLdLibraryPath);
      }
      env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());

      LOG.info(
          "Launching CJT "
              + taskid
              + " in working directory "
              + workDir
              + " Command line "
              + vargs);

      jvmManager.launchJvm(
          this,
          jvmManager.constructJvmEnv(setup, vargs, stdout, stderr, logSize, workDir, env, conf));
      synchronized (lock) {
        while (!done) {
          lock.wait();
        }
      }
    } catch (IOException e) {
      LOG.error("Error while launching CJT ", e);
    } catch (InterruptedException e) {
      LOG.warn("Error while launching CJT ", e);
    }
  }
Code example #14
  /**
   * Builds the command line for the child task JVM.
   *
   * @param taskid the task attempt being launched
   * @param workDir the task's working directory
   * @param classPaths the classpath entries for the child JVM
   * @param logSize the maximum size of the task logs
   * @return the argument vector for the child JVM
   * @throws IOException
   */
  private Vector<String> getVMArgs(
      TaskAttemptID taskid, File workDir, List<String> classPaths, long logSize)
      throws IOException {
    Vector<String> vargs = new Vector<String>(8);
    File jvm = // use same jvm as parent
        new File(new File(System.getProperty("java.home"), "bin"), "java");

    vargs.add(jvm.toString());

    // Add child (task) java-vm options.
    //
    // The following symbols if present in mapred.{map|reduce}.child.java.opts
    // value are replaced:
    // + @taskid@ is interpolated with value of TaskID.
    // Other occurrences of @ will not be altered.
    //
    // Example with multiple arguments and substitutions, showing
    // jvm GC logging, and start of a passwordless JVM JMX agent so can
    // connect with jconsole and the likes to watch child memory, threads
    // and get thread dumps.
    //
    //  <property>
    //    <name>mapred.map.child.java.opts</name>
    //    <value>-Xmx512M -verbose:gc -Xloggc:/tmp/@[email protected] \
    //           -Dcom.sun.management.jmxremote.authenticate=false \
    //           -Dcom.sun.management.jmxremote.ssl=false \
    //    </value>
    //  </property>
    //
    //  <property>
    //    <name>mapred.reduce.child.java.opts</name>
    //    <value>-Xmx1024M -verbose:gc -Xloggc:/tmp/@[email protected] \
    //           -Dcom.sun.management.jmxremote.authenticate=false \
    //           -Dcom.sun.management.jmxremote.ssl=false \
    //    </value>
    //  </property>
    //
    String[] javaOptsSplit =
        parseChildJavaOpts(getChildJavaOpts(conf, JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS), taskid);

    // Add java.library.path; necessary for loading native libraries.
    //
    // 1. To support native-hadoop library i.e. libhadoop.so, we add the
    //    parent process's java.library.path to the child.
    // 2. We also add the 'cwd' of the task to its java.library.path to help
    //    users distribute native libraries via the DistributedCache.
    // 3. The user can also specify extra paths to be added to the
    //    java.library.path via mapred.{map|reduce}.child.java.opts.
    //
    String libraryPath = System.getProperty("java.library.path");
    if (libraryPath == null) {
      libraryPath = workDir.getAbsolutePath();
    } else {
      libraryPath += SYSTEM_PATH_SEPARATOR + workDir;
    }
    boolean hasUserLDPath = false;
    for (int i = 0; i < javaOptsSplit.length; i++) {
      if (javaOptsSplit[i].startsWith("-Djava.library.path=")) {
        javaOptsSplit[i] += SYSTEM_PATH_SEPARATOR + libraryPath;
        hasUserLDPath = true;
        break;
      }
    }
    if (!hasUserLDPath) {
      vargs.add("-Djava.library.path=" + libraryPath);
    }
    for (int i = 0; i < javaOptsSplit.length; i++) {
      vargs.add(javaOptsSplit[i]);
    }

    Path childTmpDir = createChildTmpDir(workDir, conf, false);
    vargs.add("-Djava.io.tmpdir=" + childTmpDir);

    // Add classpath.
    vargs.add("-classpath");
    String classPath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
    vargs.add(classPath);

    // Setup the log4j prop
    setupLog4jProperties(vargs, taskid, logSize);

    if (conf.getProfileEnabled()) {
      if (conf.getProfileTaskRange(t.isMapTask()).isIncluded(t.getPartition())) {
        File prof = TaskLog.getTaskLogFile(taskid, t.isTaskCleanupTask(), TaskLog.LogName.PROFILE);
        vargs.add(String.format(conf.getProfileParams(), prof.toString()));
      }
    }

    // Add main class and its arguments
    vargs.add(Child.class.getName()); // main of Child
    // pass umbilical address
    InetSocketAddress address = tracker.getTaskTrackerReportAddress();
    vargs.add(address.getAddress().getHostAddress());
    vargs.add(Integer.toString(address.getPort()));
    vargs.add(taskid.toString()); // pass task identifier
    // pass task log location
    vargs.add(TaskLog.getAttemptDir(taskid, t.isTaskCleanupTask()).toString());
    return vargs;
  }
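parseChildJavaOpts is not included in these snippets. Judging from the inline variants in code examples #13 and #17, it presumably interpolates @taskid@ with the attempt id and then splits the option string on spaces. A minimal sketch under that assumption:

  // Hypothetical helper matching the @taskid@ substitution done inline elsewhere:
  // interpolate the attempt id into the opts string, then split it on spaces.
  private String[] parseChildJavaOpts(String javaOpts, TaskAttemptID taskid) {
    return javaOpts.replace("@taskid@", taskid.toString()).split(" ");
  }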
Code example #15
 @After
 public void tearDown() throws IOException {
   FileUtil.fullyDelete(TaskLog.getUserLogDir());
   FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
 }
Code example #16
public class TestUserLogCleanup {
  private static String jtid = "test";
  private static long ONE_HOUR = 1000 * 60 * 60;
  private Localizer localizer;
  private UserLogManager userLogManager;
  private UserLogCleaner userLogCleaner;
  private TaskTracker tt;
  private FakeClock myClock;
  private JobID jobid1 = new JobID(jtid, 1);
  private JobID jobid2 = new JobID(jtid, 2);
  private JobID jobid3 = new JobID(jtid, 3);
  private JobID jobid4 = new JobID(jtid, 4);
  private File foo = new File(TaskLog.getUserLogDir(), "foo");
  private File bar = new File(TaskLog.getUserLogDir(), "bar");
  private static String TEST_ROOT_DIR = System.getProperty("test.build.data", "/tmp");

  public TestUserLogCleanup() throws IOException, InterruptedException {
    JobConf conf = new JobConf();
    startTT(conf);
  }

  @After
  public void tearDown() throws IOException {
    FileUtil.fullyDelete(TaskLog.getUserLogDir());
    FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
  }

  private File localizeJob(JobID jobid) throws IOException {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    new JobLocalizer(tt.getJobConf(), user, jobid.toString()).initializeJobLogDir();
    File jobUserlog = TaskLog.getJobDir(jobid);
    JobConf conf = new JobConf();
    // localize job log directory
    tt.saveLogDir(jobid, conf);
    assertTrue(jobUserlog + " directory is not created.", jobUserlog.exists());
    return jobUserlog;
  }

  private void jobFinished(JobID jobid, int logRetainHours) {
    JobCompletedEvent jce = new JobCompletedEvent(jobid, myClock.getTime(), logRetainHours);
    userLogManager.addLogEvent(jce);
  }

  private void startTT(JobConf conf) throws IOException, InterruptedException {
    myClock = new FakeClock(); // clock is reset.
    String localdirs = TEST_ROOT_DIR + "/userlogs/local/0," + TEST_ROOT_DIR + "/userlogs/local/1";
    conf.set(JobConf.MAPRED_LOCAL_DIR_PROPERTY, localdirs);
    tt = new TaskTracker();
    tt.setConf(new JobConf(conf));
    LocalDirAllocator localDirAllocator = new LocalDirAllocator("mapred.local.dir");
    tt.setLocalDirAllocator(localDirAllocator);
    LocalStorage localStorage = new LocalStorage(conf.getLocalDirs());
    localStorage.checkLocalDirs();
    tt.setLocalStorage(localStorage);
    localizer =
        new Localizer(FileSystem.get(conf), conf.getStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
    tt.setLocalizer(localizer);
    userLogManager = new UtilsForTests.InLineUserLogManager(conf);
    TaskController taskController = userLogManager.getTaskController();
    taskController.setup(localDirAllocator, localStorage);
    tt.setTaskController(taskController);
    userLogCleaner = userLogManager.getUserLogCleaner();
    userLogCleaner.setClock(myClock);
    tt.setUserLogManager(userLogManager);
    userLogManager.clearOldUserLogs(conf);
  }

  private void ttReinited() throws IOException {
    JobConf conf = new JobConf();
    conf.setInt(JobContext.USER_LOG_RETAIN_HOURS, 3);
    userLogManager.clearOldUserLogs(conf);
  }

  private void ttRestarted() throws IOException, InterruptedException {
    JobConf conf = new JobConf();
    conf.setInt(JobContext.USER_LOG_RETAIN_HOURS, 3);
    startTT(conf);
  }

  /**
   * Tests job user-log directory deletion.
   *
   * <p>Adds two jobs for log deletion: one with a one-hour retention, the other with a two-hour
   * retention. After an hour, a call to UserLogCleaner.processCompletedJobs() verifies that the job
   * with the one-hour retention is removed and the other is retained. After one more hour, the job
   * with the two-hour retention is also removed.
   *
   * @throws IOException
   */
  @Test
  public void testJobLogCleanup() throws IOException {
    File jobUserlog1 = localizeJob(jobid1);
    File jobUserlog2 = localizeJob(jobid2);

    // add job user log directory for deletion, with 2 hours for deletion
    jobFinished(jobid1, 2);

    // add the job for deletion with one hour as retain hours
    jobFinished(jobid2, 1);

    // remove old logs and see jobid1 is not removed and jobid2 is removed
    myClock.advance(ONE_HOUR);
    userLogCleaner.processCompletedJobs();
    assertTrue(jobUserlog1 + " got deleted", jobUserlog1.exists());
    assertFalse(jobUserlog2 + " still exists.", jobUserlog2.exists());

    myClock.advance(ONE_HOUR);
    // remove old logs and see jobid1 is removed now
    userLogCleaner.processCompletedJobs();
    assertFalse(jobUserlog1 + " still exists.", jobUserlog1.exists());
  }

  /**
   * Tests user-log directory cleanup on a TT re-init, with 3 hours as the tracker's log retain
   * hours.
   *
   * <p>Adds job1 for deletion before the re-init with a two-hour retention. Adds job2, for which
   * there are no tasks/killJobAction after the re-init. Adds job3, for which there is a localizeJob
   * followed by a killJobAction with a three-hour retention. Adds job4, for which there are some
   * tasks after the re-init.
   *
   * @throws IOException
   */
  @Test
  public void testUserLogCleanup() throws IOException {
    File jobUserlog1 = localizeJob(jobid1);
    File jobUserlog2 = localizeJob(jobid2);
    File jobUserlog3 = localizeJob(jobid3);
    File jobUserlog4 = localizeJob(jobid4);
    // create some files/dirs in the userlog directory
    foo.mkdirs();
    bar.createNewFile();

    // add the jobid1 for deletion with retainhours = 2
    jobFinished(jobid1, 2);

    // time is now 1.
    myClock.advance(ONE_HOUR);

    // mimic TaskTracker reinit
    // re-init the tt with 3 hours as user log retain hours.
    // This re-init clears the user log directory
    // job directories will be added with 3 hours as retain hours.
    // i.e. They will be deleted at time 4.
    ttReinited();

    assertFalse(foo.exists());
    assertFalse(bar.exists());
    assertTrue(jobUserlog1.exists());
    assertTrue(jobUserlog2.exists());
    assertTrue(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());

    myClock.advance(ONE_HOUR);
    // time is now 2.
    userLogCleaner.processCompletedJobs();
    assertFalse(jobUserlog1.exists());
    assertTrue(jobUserlog2.exists());
    assertTrue(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());

    // mimic localizeJob followed by KillJobAction for jobid3
    // add the job for deletion with retainhours = 3.
    // jobid3 should be deleted at time 5.
    jobUserlog3 = localizeJob(jobid3);
    jobFinished(jobid3, 3);

    // mimic localizeJob for jobid4
    jobUserlog4 = localizeJob(jobid4);

    // do cleanup
    myClock.advance(2 * ONE_HOUR);
    // time is now 4.
    userLogCleaner.processCompletedJobs();

    // jobid2 will be deleted
    assertFalse(jobUserlog1.exists());
    assertFalse(jobUserlog2.exists());
    assertTrue(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());

    myClock.advance(ONE_HOUR);
    // time is now 5.
    // do cleanup again
    userLogCleaner.processCompletedJobs();

    // jobid3 will be deleted
    assertFalse(jobUserlog1.exists());
    assertFalse(jobUserlog2.exists());
    assertFalse(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());
  }

  /**
   * Tests user-log directory cleanup on a TT restart.
   *
   * <p>Adds job1 for deletion before the restart with a two-hour retention. Adds job2, for which
   * there are no tasks/killJobAction after the restart. Adds job3, for which there is a localizeJob
   * followed by a killJobAction after the restart with a three-hour retention. Adds job4, for which
   * there are some tasks after the restart.
   *
   * @throws IOException
   * @throws InterruptedException
   */
  @Test
  public void testUserLogCleanupAfterRestart() throws IOException, InterruptedException {
    File jobUserlog1 = localizeJob(jobid1);
    File jobUserlog2 = localizeJob(jobid2);
    File jobUserlog3 = localizeJob(jobid3);
    File jobUserlog4 = localizeJob(jobid4);
    // create some files/dirs in the userlog directory
    foo.mkdirs();
    bar.createNewFile();

    // add the jobid1 for deletion with retain hours = 2
    jobFinished(jobid1, 2);

    // time is now 1.
    myClock.advance(ONE_HOUR);

    // Mimic the TaskTracker restart
    // Restart the tt with 3 hours as user log retain hours.
    // This restart clears the user log directory
    // job directories will be added with 3 hours as retain hours.
    // i.e. they will be deleted at time 3, since the clock resets on the restart.
    ttRestarted();

    assertFalse(foo.exists());
    assertFalse(bar.exists());
    assertTrue(jobUserlog1.exists());
    assertTrue(jobUserlog2.exists());
    assertTrue(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());

    myClock.advance(ONE_HOUR);
    // time is now 1.
    userLogCleaner.processCompletedJobs();
    assertTrue(jobUserlog1.exists());
    assertTrue(jobUserlog2.exists());
    assertTrue(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());

    // mimic localizeJob followed by KillJobAction for jobid3
    // add the job for deletion with retainhours = 3.
    // jobid3 should be deleted at time 4.
    jobUserlog3 = localizeJob(jobid3);
    jobFinished(jobid3, 3);

    // mimic localizeJob for jobid4
    jobUserlog4 = localizeJob(jobid4);

    // do cleanup
    myClock.advance(2 * ONE_HOUR);
    // time is now 3.
    userLogCleaner.processCompletedJobs();

    // jobid1 and jobid2 will be deleted
    assertFalse(jobUserlog1.exists());
    assertFalse(jobUserlog2.exists());
    assertTrue(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());

    myClock.advance(ONE_HOUR);
    // time is now 4.
    // do cleanup again
    userLogCleaner.processCompletedJobs();

    // jobid3 will be deleted
    assertFalse(jobUserlog1.exists());
    assertFalse(jobUserlog2.exists());
    assertFalse(jobUserlog3.exists());
    assertTrue(jobUserlog4.exists());
  }
}
Code example #17
File: TaskRunner.java  Project: LefKok/upright
  @Override
  public final void run() {
    try {

      // before preparing the job localize
      // all the archives
      TaskAttemptID taskid = t.getTaskID();
      LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
      File jobCacheDir = null;
      if (conf.getJar() != null) {
        jobCacheDir = new File(new Path(conf.getJar()).getParent().toString());
      }
      File workDir =
          new File(
              lDirAlloc
                  .getLocalPathToRead(
                      TaskTracker.getJobCacheSubdir()
                          + Path.SEPARATOR
                          + t.getJobID()
                          + Path.SEPARATOR
                          + t.getTaskID()
                          + Path.SEPARATOR
                          + MRConstants.WORKDIR,
                      conf)
                  .toString());

      URI[] archives = DistributedCache.getCacheArchives(conf);
      URI[] files = DistributedCache.getCacheFiles(conf);
      FileStatus fileStatus;
      FileSystem fileSystem;
      Path localPath;
      String baseDir;

      if ((archives != null) || (files != null)) {
        if (archives != null) {
          String[] archivesTimestamps = DistributedCache.getArchiveTimestamps(conf);
          Path[] p = new Path[archives.length];
          for (int i = 0; i < archives.length; i++) {
            fileSystem = FileSystem.get(archives[i], conf);
            fileStatus = fileSystem.getFileStatus(new Path(archives[i].getPath()));
            String cacheId = DistributedCache.makeRelative(archives[i], conf);
            String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId;
            if (lDirAlloc.ifExists(cachePath, conf)) {
              localPath = lDirAlloc.getLocalPathToRead(cachePath, conf);
            } else {
              localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf);
            }
            baseDir = localPath.toString().replace(cacheId, "");
            p[i] =
                DistributedCache.getLocalCache(
                    archives[i],
                    conf,
                    new Path(baseDir),
                    fileStatus,
                    true,
                    Long.parseLong(archivesTimestamps[i]),
                    new Path(workDir.getAbsolutePath()),
                    false);
          }
          DistributedCache.setLocalArchives(conf, stringifyPathArray(p));
        }
        if ((files != null)) {
          String[] fileTimestamps = DistributedCache.getFileTimestamps(conf);
          Path[] p = new Path[files.length];
          for (int i = 0; i < files.length; i++) {
            fileSystem = FileSystem.get(files[i], conf);
            fileStatus = fileSystem.getFileStatus(new Path(files[i].getPath()));
            String cacheId = DistributedCache.makeRelative(files[i], conf);
            String cachePath = TaskTracker.getCacheSubdir() + Path.SEPARATOR + cacheId;
            if (lDirAlloc.ifExists(cachePath, conf)) {
              localPath = lDirAlloc.getLocalPathToRead(cachePath, conf);
            } else {
              localPath = lDirAlloc.getLocalPathForWrite(cachePath, fileStatus.getLen(), conf);
            }
            baseDir = localPath.toString().replace(cacheId, "");
            p[i] =
                DistributedCache.getLocalCache(
                    files[i],
                    conf,
                    new Path(baseDir),
                    fileStatus,
                    false,
                    Long.parseLong(fileTimestamps[i]),
                    new Path(workDir.getAbsolutePath()),
                    false);
          }
          DistributedCache.setLocalFiles(conf, stringifyPathArray(p));
        }
        Path localTaskFile = new Path(t.getJobFile());
        FileSystem localFs = FileSystem.getLocal(conf);
        localFs.delete(localTaskFile, true);
        OutputStream out = localFs.create(localTaskFile);
        try {
          conf.writeXml(out);
        } finally {
          out.close();
        }
      }

      if (!prepare()) {
        return;
      }

      String sep = System.getProperty("path.separator");
      StringBuffer classPath = new StringBuffer();
      // start with same classpath as parent process
      classPath.append(System.getProperty("java.class.path"));
      classPath.append(sep);
      if (!workDir.mkdirs()) {
        if (!workDir.isDirectory()) {
          LOG.fatal("Mkdirs failed to create " + workDir.toString());
        }
      }

      String jar = conf.getJar();
      if (jar != null) {
        // if the job jar exists, add its unpacked lib/, classes/ and root dirs to the classpath
        File[] libs = new File(jobCacheDir, "lib").listFiles();
        if (libs != null) {
          for (int i = 0; i < libs.length; i++) {
            classPath.append(sep); // add libs from jar to classpath
            classPath.append(libs[i]);
          }
        }
        classPath.append(sep);
        classPath.append(new File(jobCacheDir, "classes"));
        classPath.append(sep);
        classPath.append(jobCacheDir);
      }

      // include the user specified classpath

      // archive paths
      Path[] archiveClasspaths = DistributedCache.getArchiveClassPaths(conf);
      if (archiveClasspaths != null && archives != null) {
        Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
        if (localArchives != null) {
          for (int i = 0; i < archives.length; i++) {
            for (int j = 0; j < archiveClasspaths.length; j++) {
              if (archives[i].getPath().equals(archiveClasspaths[j].toString())) {
                classPath.append(sep);
                classPath.append(localArchives[i].toString());
              }
            }
          }
        }
      }
      // file paths
      Path[] fileClasspaths = DistributedCache.getFileClassPaths(conf);
      if (fileClasspaths != null && files != null) {
        Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
        if (localFiles != null) {
          for (int i = 0; i < files.length; i++) {
            for (int j = 0; j < fileClasspaths.length; j++) {
              if (files[i].getPath().equals(fileClasspaths[j].toString())) {
                classPath.append(sep);
                classPath.append(localFiles[i].toString());
              }
            }
          }
        }
      }

      classPath.append(sep);
      classPath.append(workDir);
      //  Build exec child JVM args.
      Vector<String> vargs = new Vector<String>(8);
      File jvm = // use same jvm as parent
          new File(new File(System.getProperty("java.home"), "bin"), "java");

      vargs.add(jvm.toString());

      // Add child (task) java-vm options.
      //
      // The following symbols if present in mapred.child.java.opts value are
      // replaced:
      // + @taskid@ is interpolated with value of TaskID.
      // Other occurrences of @ will not be altered.
      //
      // Example with multiple arguments and substitutions, showing
      // jvm GC logging, and start of a passwordless JVM JMX agent so can
      // connect with jconsole and the likes to watch child memory, threads
      // and get thread dumps.
      //
      //  <property>
      //    <name>mapred.child.java.opts</name>
      //    <value>-verbose:gc -Xloggc:/tmp/@[email protected] \
      //           -Dcom.sun.management.jmxremote.authenticate=false \
      //           -Dcom.sun.management.jmxremote.ssl=false \
      //    </value>
      //  </property>
      //
      String javaOpts = conf.get("mapred.child.java.opts", "-Xmx200m");
      javaOpts = javaOpts.replace("@taskid@", taskid.toString());
      String[] javaOptsSplit = javaOpts.split(" ");

      // Add java.library.path; necessary for loading native libraries.
      //
      // 1. To support native-hadoop library i.e. libhadoop.so, we add the
      //    parent process's java.library.path to the child.
      // 2. We also add the 'cwd' of the task to its java.library.path to help
      //    users distribute native libraries via the DistributedCache.
      // 3. The user can also specify extra paths to be added to the
      //    java.library.path via mapred.child.java.opts.
      //
      String libraryPath = System.getProperty("java.library.path");
      if (libraryPath == null) {
        libraryPath = workDir.getAbsolutePath();
      } else {
        libraryPath += sep + workDir;
      }
      boolean hasUserLDPath = false;
      for (int i = 0; i < javaOptsSplit.length; i++) {
        if (javaOptsSplit[i].startsWith("-Djava.library.path=")) {
          javaOptsSplit[i] += sep + libraryPath;
          hasUserLDPath = true;
          break;
        }
      }
      if (!hasUserLDPath) {
        vargs.add("-Djava.library.path=" + libraryPath);
      }
      for (int i = 0; i < javaOptsSplit.length; i++) {
        vargs.add(javaOptsSplit[i]);
      }

      // add java.io.tmpdir given by mapred.child.tmp
      String tmp = conf.get("mapred.child.tmp", "./tmp");
      Path tmpDir = new Path(tmp);

      // if temp directory path is not absolute
      // prepend it with workDir.
      if (!tmpDir.isAbsolute()) {
        tmpDir = new Path(workDir.toString(), tmp);
      }
      FileSystem localFs = FileSystem.getLocal(conf);
      if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) {
        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
      }
      vargs.add("-Djava.io.tmpdir=" + tmpDir.toString());

      // Add classpath.
      vargs.add("-classpath");
      vargs.add(classPath.toString());

      // Setup the log4j prop
      long logSize = TaskLog.getTaskLogLength(conf);
      vargs.add(
          "-Dhadoop.log.dir=" + new File(System.getProperty("hadoop.log.dir")).getAbsolutePath());
      vargs.add("-Dhadoop.root.logger=INFO,TLA");
      vargs.add("-Dhadoop.tasklog.taskid=" + taskid);
      vargs.add("-Dhadoop.tasklog.totalLogFileSize=" + logSize);

      if (conf.getProfileEnabled()) {
        if (conf.getProfileTaskRange(t.isMapTask()).isIncluded(t.getPartition())) {
          File prof = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.PROFILE);
          vargs.add(String.format(conf.getProfileParams(), prof.toString()));
        }
      }

      // Add main class and its arguments
      vargs.add(Child.class.getName()); // main of Child
      // pass umbilical address
      InetSocketAddress address = tracker.getTaskTrackerReportAddress();
      vargs.add(address.getAddress().getHostAddress());
      vargs.add(Integer.toString(address.getPort()));
      vargs.add(taskid.toString()); // pass task identifier

      String pidFile = null;
      if (tracker.isTaskMemoryManagerEnabled()) {
        pidFile =
            lDirAlloc
                .getLocalPathForWrite(
                    (TaskTracker.getPidFilesSubdir() + Path.SEPARATOR + taskid), this.conf)
                .toString();
      }

      // set memory limit using ulimit if feasible and necessary ...
      String[] ulimitCmd = Shell.getUlimitMemoryCommand(conf);
      List<String> setup = null;
      if (ulimitCmd != null) {
        setup = new ArrayList<String>();
        for (String arg : ulimitCmd) {
          setup.add(arg);
        }
      }

      // Set up the redirection of the task's stdout and stderr streams
      File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
      File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
      stdout.getParentFile().mkdirs();
      tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout, stderr);

      Map<String, String> env = new HashMap<String, String>();
      StringBuffer ldLibraryPath = new StringBuffer();
      ldLibraryPath.append(workDir.toString());
      String oldLdLibraryPath = null;
      oldLdLibraryPath = System.getenv("LD_LIBRARY_PATH");
      if (oldLdLibraryPath != null) {
        ldLibraryPath.append(sep);
        ldLibraryPath.append(oldLdLibraryPath);
      }
      env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
      jvmManager.launchJvm(
          this,
          jvmManager.constructJvmEnv(
              setup, vargs, stdout, stderr, logSize, workDir, env, pidFile, conf));
      synchronized (lock) {
        while (!done) {
          lock.wait();
        }
      }
      tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID());
      if (exitCodeSet) {
        if (!killed && exitCode != 0) {
          if (exitCode == 65) {
            tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID());
          }
          throw new IOException("Task process exit with nonzero status of " + exitCode + ".");
        }
      }
    } catch (FSError e) {
      LOG.fatal("FSError", e);
      try {
        tracker.fsError(t.getTaskID(), e.getMessage());
      } catch (IOException ie) {
        LOG.fatal(t.getTaskID() + " reporting FSError", ie);
      }
    } catch (Throwable throwable) {
      LOG.warn(t.getTaskID() + " Child Error", throwable);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      throwable.printStackTrace(new PrintStream(baos));
      try {
        tracker.reportDiagnosticInfo(t.getTaskID(), baos.toString());
      } catch (IOException e) {
        LOG.warn(t.getTaskID() + " Reporting Diagnostics", e);
      }
    } finally {
      try {
        URI[] archives = DistributedCache.getCacheArchives(conf);
        URI[] files = DistributedCache.getCacheFiles(conf);
        if (archives != null) {
          for (int i = 0; i < archives.length; i++) {
            DistributedCache.releaseCache(archives[i], conf);
          }
        }
        if (files != null) {
          for (int i = 0; i < files.length; i++) {
            DistributedCache.releaseCache(files[i], conf);
          }
        }
      } catch (IOException ie) {
        LOG.warn("Error releasing caches : Cache files might not have been cleaned up");
      }
      tracker.reportTaskFinished(t.getTaskID(), false);
      if (t.isMapTask()) {
        tracker.addFreeMapSlot();
      } else {
        tracker.addFreeReduceSlot();
      }
    }
  }
Code example #18
  @Override
  public int launchTask(
      String user,
      String jobId,
      String attemptId,
      List<String> setup,
      List<String> jvmArguments,
      File currentWorkDirectory,
      String stdout,
      String stderr)
      throws IOException {

    ShellCommandExecutor shExec = null;
    try {
      FileSystem rawFs = FileSystem.getLocal(getConf()).getRaw();
      long logSize = 0; // TODO, Ref BUG:2854624
      // get the JVM command line.
      String cmdLine =
          TaskLog.buildCommandLine(
              setup, jvmArguments, new File(stdout), new File(stderr), logSize, true);

      // write the command to a file in the
      // task specific cache directory
      Path p =
          new Path(
              allocator.getLocalPathForWrite(
                  TaskTracker.getPrivateDirTaskScriptLocation(user, jobId, attemptId), getConf()),
              COMMAND_FILE);
      String commandFile = writeCommand(cmdLine, rawFs, p);

      String[] command =
          new String[] {
            taskControllerExe,
            user,
            localStorage.getDirsString(),
            Integer.toString(Commands.LAUNCH_TASK_JVM.getValue()),
            jobId,
            attemptId,
            currentWorkDirectory.toString(),
            commandFile
          };
      shExec = new ShellCommandExecutor(command);

      if (LOG.isDebugEnabled()) {
        LOG.debug("launchTask: " + Arrays.toString(command));
      }
      shExec.execute();
    } catch (Exception e) {
      if (shExec == null) {
        return -1;
      }
      int exitCode = shExec.getExitCode();
      LOG.warn("Exit code from task is : " + exitCode);
      // Exit codes 143 (SIGTERM) and 137 (SIGKILL) mean the task was
      // terminated/killed forcefully. In all other cases, log the
      // task-controller output
      if (exitCode != 143 && exitCode != 137) {
        LOG.warn(
            "Exception thrown while launching task JVM : " + StringUtils.stringifyException(e));
        LOG.info("Output from LinuxTaskController's launchTaskJVM follows:");
        logOutput(shExec.getOutput());
      }
      return exitCode;
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Output from LinuxTaskController's launchTask follows:");
      logOutput(shExec.getOutput());
    }
    return 0;
  }
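writeCommand is referenced above but not shown here. Presumably it persists the generated JVM command line to the per-task script location and returns the path handed to the task-controller binary; the sketch below is a hypothetical version under that assumption (permissions handling, if any, is omitted).

  // Hypothetical writeCommand(...) helper: persist the command line into the
  // task-specific script file and return the local path passed to the binary.
  private String writeCommand(String cmdLine, FileSystem rawFs, Path scriptPath) throws IOException {
    FSDataOutputStream out = rawFs.create(scriptPath);
    try {
      out.writeBytes(cmdLine);
      out.writeBytes("\n");
    } finally {
      out.close();
    }
    return scriptPath.makeQualified(rawFs).toUri().getPath();
  }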