/**
   * Start the child process to handle the task for us.
   *
   * <p>Binds an ephemeral server socket, launches the pipes executable shipped
   * via the distributed cache, waits for the child to connect back, and starts
   * the binary downlink protocol. If startup fails part-way, the listening
   * socket is closed before the failure is rethrown so it does not leak.
   *
   * @param conf the task's configuration
   * @param recordReader the fake record reader to update progress with
   * @param output the collector to send output to
   * @param reporter the reporter for the task
   * @param outputKeyClass the class of the output keys
   * @param outputValueClass the class of the output values
   * @throws IOException if the child process or the connection to it cannot be established
   * @throws InterruptedException if interrupted while launching the child
   */
  Application(
      JobConf conf,
      RecordReader<FloatWritable, NullWritable> recordReader,
      OutputCollector<K2, V2> output,
      Reporter reporter,
      Class<? extends K2> outputKeyClass,
      Class<? extends V2> outputValueClass)
      throws IOException, InterruptedException {
    serverSocket = new ServerSocket(0);
    try {
      Map<String, String> env = new HashMap<String, String>();
      // add TMPDIR environment variable with the value of java.io.tmpdir
      env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
      // tell the child which local port to connect back to
      env.put("hadoop.pipes.command.port", Integer.toString(serverSocket.getLocalPort()));
      List<String> cmd = new ArrayList<String>();
      // the pipes executable is the first file localized from the distributed cache
      String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
      FileUtil.chmod(executable, "a+x");
      cmd.add(executable);
      // wrap the command in a stdout/stderr capture
      TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
      File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
      File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
      long logLength = TaskLog.getTaskLogLength(conf);
      cmd = TaskLog.captureOutAndError(cmd, stdout, stderr, logLength);

      process = runClient(cmd, env);
      // blocks until the child process connects back
      clientSocket = serverSocket.accept();
      handler = new OutputHandler<K2, V2>(output, reporter, recordReader);
      K2 outputKey = (K2) ReflectionUtils.newInstance(outputKeyClass, conf);
      V2 outputValue = (V2) ReflectionUtils.newInstance(outputValueClass, conf);
      downlink =
          new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, outputKey, outputValue, conf);
      downlink.start();
      downlink.setJobConf(conf);
    } catch (IOException e) {
      // don't leak the listening socket when startup fails part-way through
      closeServerSocketQuietly();
      throw e;
    } catch (InterruptedException e) {
      closeServerSocketQuietly();
      throw e;
    }
  }

  /** Best-effort close of the listening socket; keeps the original failure as the cause. */
  private void closeServerSocketQuietly() {
    try {
      serverSocket.close();
    } catch (IOException ignored) {
      // nothing useful to do; the original startup failure is being propagated
    }
  }
 /**
  * Abort the application and wait for it to finish.
  *
  * <p>Tells the child to abort, flushes that request, waits briefly for the
  * child to wind down (destroying the process if it does not), and finally
  * rethrows the triggering problem wrapped in an {@link IOException}.
  *
  * @param t the exception that signalled the problem
  * @throws IOException A wrapper around the exception that was passed in
  */
 void abort(Throwable t) throws IOException {
   LOG.info("Aborting because of " + StringUtils.stringifyException(t));
   try {
     downlink.abort();
     downlink.flush();
   } catch (IOException ignored) {
     // cleanup problems are irrelevant here: we are already tearing down
   }
   try {
     handler.waitForFinish();
   } catch (Throwable childFailure) {
     // the child did not shut down cleanly; force-kill it
     process.destroy();
   }
   IOException wrapped = new IOException("pipe child exception");
   wrapped.initCause(t);
   throw wrapped;
 }
 /**
  * Clean up the child process and socket.
  *
  * @throws IOException if closing the server socket fails
  */
 void cleanup() throws IOException {
   serverSocket.close();
   try {
     downlink.close();
   } catch (InterruptedException interrupted) {
     // restore the interrupt status so callers higher up can observe it
     Thread.currentThread().interrupt();
   }
 }
 /**
  * Wait for the application to finish.
  *
  * @return did the application finish correctly?
  * @throws Throwable if the child reports a failure
  */
 boolean waitForFinish() throws Throwable {
   // push any buffered commands to the child before blocking on its result
   downlink.flush();
   boolean finishedCleanly = handler.waitForFinish();
   return finishedCleanly;
 }