  @Test
  // All Task attempts are timed out, leading to Job failure
  public void testTimedOutTask() throws Exception {
    MRApp app = new TimeOutTaskMRApp(1, 0);
    Configuration conf = new Configuration();
    int maxAttempts = 2;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.FAILED);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals(
        "Task state not correct", TaskState.FAILED, task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    for (TaskAttempt attempt : attempts.values()) {
      Assert.assertEquals(
          "Attempt state not correct",
          TaskAttemptState.FAILED,
          attempt.getReport().getTaskAttemptState());
    }
  }
Example 2
  @Test
  public void testCommitPending() throws Exception {
    MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
    Job job = app.submit(new Configuration());
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task task = it.next();
    app.waitForState(task, TaskState.RUNNING);
    TaskAttempt attempt = task.getAttempts().values().iterator().next();
    app.waitForState(attempt, TaskAttemptState.RUNNING);

    // send the commit pending signal to the task
    app.getContext()
        .getEventHandler()
        .handle(new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_COMMIT_PENDING));

    // wait for first attempt to commit pending
    app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);

    // send the done signal to the task
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(
                task.getAttempts().values().iterator().next().getID(),
                TaskAttemptEventType.TA_DONE));

    app.waitForState(job, JobState.SUCCEEDED);
  }
Example 3
  @Override
  public void updateAttempt(TaskAttemptStatus status, long timestamp) {

    TaskAttemptId attemptID = status.id;
    TaskId taskID = attemptID.getTaskId();
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);

    if (job == null) {
      return;
    }

    Task task = job.getTask(taskID);

    if (task == null) {
      return;
    }

    Long boxedStart = startTimes.get(attemptID);
    long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;

    TaskAttempt taskAttempt = task.getAttempt(attemptID);

    // Guard against a missing attempt, mirroring the null checks above.
    if (taskAttempt == null) {
      return;
    }

    if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
      boolean isNew = false;
      // is this a new success?
      synchronized (doneTasks) {
        if (!doneTasks.contains(task)) {
          doneTasks.add(task);
          isNew = true;
        }
      }

      // It's a new completion
      // Note that if a task completes twice [because of a previous speculation
      //  and a race, or a success followed by loss of the machine with the
      //  local data] we only count the first one.
      if (isNew) {
        long finish = timestamp;
        if (start > 1L && finish > 1L && start <= finish) {
          long duration = finish - start;

          DataStatistics statistics = dataStatisticsForTask(taskID);

          if (statistics != null) {
            statistics.add(duration);
          }
        }
      }
    }
  }
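The success branch above records an attempt's duration only for the first completion of a task and feeds it into the per-task statistics returned by dataStatisticsForTask(taskID). Below is a minimal standalone sketch of that bookkeeping; TaskDurationStats and DurationStatsDemo are hypothetical stand-ins (a Welford-style accumulator), not Hadoop's DataStatistics API, and the task id string is made up.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical per-task duration accumulator, playing the role that
// dataStatisticsForTask(taskID) / statistics.add(duration) play above.
class TaskDurationStats {
  private long count;
  private double mean;
  private double m2; // sum of squared deviations (Welford's online algorithm)

  synchronized void add(long durationMs) {
    count++;
    double delta = durationMs - mean;
    mean += delta / count;
    m2 += delta * (durationMs - mean);
  }

  synchronized double mean() {
    return mean;
  }

  synchronized double variance() {
    return count > 1 ? m2 / (count - 1) : 0.0;
  }
}

public class DurationStatsDemo {
  private static final ConcurrentMap<String, TaskDurationStats> STATS_BY_TASK =
      new ConcurrentHashMap<>();

  public static void main(String[] args) {
    long start = 1_000L;   // illustrative attempt start time
    long finish = 61_000L; // illustrative attempt finish time
    // Same sanity guard as the snippet above before recording a duration.
    if (start > 1L && finish > 1L && start <= finish) {
      STATS_BY_TASK.computeIfAbsent("task_0001", k -> new TaskDurationStats())
          .add(finish - start);
    }
    System.out.println("mean duration = " + STATS_BY_TASK.get("task_0001").mean() + " ms");
  }
}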
Example 4
  @Test(timeout = 10000)
  public void testFailAbortDoesntHang() throws IOException {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");

    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = Mockito.mock(OutputCommitter.class);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    // Job has only 1 mapper task. No reducers
    conf.setInt(MRJobConfig.NUM_REDUCES, 0);
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 1, null);

    // Fail / finish all the tasks. This should land the JobImpl directly in the
    // FAIL_ABORT state
    for (Task t : job.tasks.values()) {
      TaskImpl task = (TaskImpl) t;
      task.handle(new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
      for (TaskAttempt ta : task.getAttempts().values()) {
        task.handle(new TaskTAttemptEvent(ta.getID(), TaskEventType.T_ATTEMPT_FAILED));
      }
    }
    assertJobState(job, JobStateInternal.FAIL_ABORT);

    dispatcher.await();
    // Verify abortJob is called once and the job failed
    Mockito.verify(committer, Mockito.timeout(2000).times(1))
        .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
    assertJobState(job, JobStateInternal.FAILED);

    dispatcher.stop();
  }
Example 5
  @Test
  public void testTaskFailWithUnusedContainer() throws Exception {
    MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
    Configuration conf = new Configuration();
    int maxAttempts = 1;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    TaskAttempt attempt = attempts.values().iterator().next();
    app.waitForInternalState((TaskAttemptImpl) attempt, TaskAttemptStateInternal.ASSIGNED);
    app.getDispatcher()
        .getEventHandler()
        .handle(new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    app.waitForState(job, JobState.FAILED);
  }
Example 6
  @Test
  public void testHistoryParsingForFailedAttempts() throws Exception {
    LOG.info("STARTING testHistoryParsingForFailedAttempts");
    try {
      Configuration conf = new Configuration();
      conf.setClass(
          CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          MyResolver.class,
          DNSToSwitchMapping.class);
      RackResolver.init(conf);
      MRApp app =
          new MRAppWithHistoryWithFailedAttempt(2, 1, true, this.getClass().getName(), true);
      app.submit(conf);
      Job job = app.getContext().getAllJobs().values().iterator().next();
      JobId jobId = job.getID();
      app.waitForState(job, JobState.SUCCEEDED);

      // make sure all events are flushed
      app.waitForState(Service.STATE.STOPPED);

      String jobhistoryDir = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
      JobHistory jobHistory = new JobHistory();
      jobHistory.init(conf);

      JobIndexInfo jobIndexInfo = jobHistory.getJobFileInfo(jobId).getJobIndexInfo();
      String jobhistoryFileName = FileNameIndexUtils.getDoneFileName(jobIndexInfo);

      Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
      FSDataInputStream in = null;
      FileContext fc = null;
      try {
        fc = FileContext.getFileContext(conf);
        in = fc.open(fc.makeQualified(historyFilePath));
      } catch (IOException ioe) {
        LOG.info("Can not open history file: " + historyFilePath, ioe);
        throw (new Exception("Can not open History File"));
      }

      JobHistoryParser parser = new JobHistoryParser(in);
      JobInfo jobInfo = parser.parse();
      Exception parseException = parser.getParseException();
      Assert.assertNull("Caught an unexpected exception " + parseException, parseException);
      int numFailedAttempts = 0;
      Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
      for (Task task : job.getTasks().values()) {
        TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
        for (TaskAttempt taskAttempt : task.getAttempts().values()) {
          TaskAttemptInfo taskAttemptInfo =
              taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID())));
          // Verify rack-name for all task attempts
          Assert.assertEquals("rack-name is incorrect", taskAttemptInfo.getRackname(), RACK_NAME);
          if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
            numFailedAttempts++;
          }
        }
      }
      Assert.assertEquals("No of Failed tasks doesn't match.", 2, noOffailedAttempts);
    } finally {
      LOG.info("FINISHED testHistoryParsingForFailedAttempts");
    }
  }
Example 7
  private void checkHistoryParsing(
      final int numMaps, final int numReduces, final int numSuccessfulMaps) throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
    long amStartTimeEst = System.currentTimeMillis();
    conf.setClass(
        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        MyResolver.class,
        DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistory(numMaps, numReduces, true, this.getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
    app.waitForState(job, JobState.SUCCEEDED);

    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);

    String jobhistoryDir = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);

    FileContext fc = null;
    try {
      fc = FileContext.getFileContext(conf);
    } catch (IOException ioe) {
      LOG.info("Can not get FileContext", ioe);
      throw (new Exception("Can not get File Context"));
    }

    if (numMaps == numSuccessfulMaps) {
      String summaryFileName = JobHistoryUtils.getIntermediateSummaryFileName(jobId);
      Path summaryFile = new Path(jobhistoryDir, summaryFileName);
      String jobSummaryString = getJobSummary(fc, summaryFile);
      Assert.assertNotNull(jobSummaryString);
      Assert.assertTrue(jobSummaryString.contains("resourcesPerMap=100"));
      Assert.assertTrue(jobSummaryString.contains("resourcesPerReduce=100"));

      Map<String, String> jobSummaryElements = new HashMap<String, String>();
      StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
      while (strToken.hasMoreTokens()) {
        String keypair = strToken.nextToken();
        jobSummaryElements.put(keypair.split("=")[0], keypair.split("=")[1]);
      }

      Assert.assertEquals(
          "JobId does not match", jobId.toString(), jobSummaryElements.get("jobId"));
      Assert.assertEquals("JobName does not match", "test", jobSummaryElements.get("jobName"));
      Assert.assertTrue(
          "submitTime should not be 0", Long.parseLong(jobSummaryElements.get("submitTime")) != 0);
      Assert.assertTrue(
          "launchTime should not be 0", Long.parseLong(jobSummaryElements.get("launchTime")) != 0);
      Assert.assertTrue(
          "firstMapTaskLaunchTime should not be 0",
          Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0);
      Assert.assertTrue(
          "firstReduceTaskLaunchTime should not be 0",
          Long.parseLong(jobSummaryElements.get("firstReduceTaskLaunchTime")) != 0);
      Assert.assertTrue(
          "finishTime should not be 0", Long.parseLong(jobSummaryElements.get("finishTime")) != 0);
      Assert.assertEquals(
          "Mismatch in num map slots",
          numSuccessfulMaps,
          Integer.parseInt(jobSummaryElements.get("numMaps")));
      Assert.assertEquals(
          "Mismatch in num reduce slots",
          numReduces,
          Integer.parseInt(jobSummaryElements.get("numReduces")));
      Assert.assertEquals(
          "User does not match", System.getProperty("user.name"), jobSummaryElements.get("user"));
      Assert.assertEquals("Queue does not match", "default", jobSummaryElements.get("queue"));
      Assert.assertEquals("Status does not match", "SUCCEEDED", jobSummaryElements.get("status"));
    }

    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
    JobInfo jobInfo;
    long numFinishedMaps;

    synchronized (fileInfo) {
      Path historyFilePath = fileInfo.getHistoryFile();
      FSDataInputStream in = null;
      LOG.info("JobHistoryFile is: " + historyFilePath);
      try {
        in = fc.open(fc.makeQualified(historyFilePath));
      } catch (IOException ioe) {
        LOG.info("Can not open history file: " + historyFilePath, ioe);
        throw (new Exception("Can not open History File"));
      }

      JobHistoryParser parser = new JobHistoryParser(in);
      final EventReader realReader = new EventReader(in);
      EventReader reader = Mockito.mock(EventReader.class);
      if (numMaps == numSuccessfulMaps) {
        reader = realReader;
      } else {
        final AtomicInteger numFinishedEvents = new AtomicInteger(0); // Hack!
        Mockito.when(reader.getNextEvent())
            .thenAnswer(
                new Answer<HistoryEvent>() {
                  public HistoryEvent answer(InvocationOnMock invocation) throws IOException {
                    HistoryEvent event = realReader.getNextEvent();
                    if (event instanceof TaskFinishedEvent) {
                      numFinishedEvents.incrementAndGet();
                    }

                    if (numFinishedEvents.get() <= numSuccessfulMaps) {
                      return event;
                    } else {
                      throw new IOException("test");
                    }
                  }
                });
      }

      jobInfo = parser.parse(reader);

      numFinishedMaps = computeFinishedMaps(jobInfo, numMaps, numSuccessfulMaps);

      if (numFinishedMaps != numMaps) {
        Exception parseException = parser.getParseException();
        Assert.assertNotNull("Didn't get expected parse exception", parseException);
      }
    }

    Assert.assertEquals(
        "Incorrect username ", System.getProperty("user.name"), jobInfo.getUsername());
    Assert.assertEquals("Incorrect jobName ", "test", jobInfo.getJobname());
    Assert.assertEquals("Incorrect queuename ", "default", jobInfo.getJobQueueName());
    Assert.assertEquals("incorrect conf path", "test", jobInfo.getJobConfPath());
    Assert.assertEquals("incorrect finishedMap ", numSuccessfulMaps, numFinishedMaps);
    Assert.assertEquals("incorrect finishedReduces ", numReduces, jobInfo.getFinishedReduces());
    Assert.assertEquals("incorrect uberized ", job.isUber(), jobInfo.getUberized());
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    int totalTasks = allTasks.size();
    Assert.assertEquals("total number of tasks is incorrect  ", (numMaps + numReduces), totalTasks);

    // Verify aminfo
    Assert.assertEquals(1, jobInfo.getAMInfos().size());
    Assert.assertEquals(MRApp.NM_HOST, jobInfo.getAMInfos().get(0).getNodeManagerHost());
    AMInfo amInfo = jobInfo.getAMInfos().get(0);
    Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
    Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
    Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
    Assert.assertEquals(
        amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId());
    Assert.assertTrue(
        amInfo.getStartTime() <= System.currentTimeMillis()
            && amInfo.getStartTime() >= amStartTimeEst);

    ContainerId fakeCid = BuilderUtils.newContainerId(-1, -1, -1, -1);
    // Assert at taskAttempt level
    for (TaskInfo taskInfo : allTasks.values()) {
      int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
      Assert.assertEquals("total number of task attempts ", 1, taskAttemptCount);
      TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values().iterator().next();
      Assert.assertNotNull(taInfo.getContainerId());
      // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
      Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
    }

    // Deep compare Job and JobInfo
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      Assert.assertNotNull("TaskInfo not found", taskInfo);
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo =
            taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID())));
        Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
        Assert.assertEquals(
            "Incorrect shuffle port for task attempt",
            taskAttempt.getShufflePort(),
            taskAttemptInfo.getShufflePort());
        if (numMaps == numSuccessfulMaps) {
          Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
          Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());

          // Verify rack-name
          Assert.assertEquals("rack-name is incorrect", taskAttemptInfo.getRackname(), RACK_NAME);
        }
      }
    }
  }
Example 8
  /**
   * Go through a job and update the member variables with counts for information to output in the
   * page.
   *
   * @param job the job to get counts for.
   */
  private void countTasksAndAttempts(Job job) {
    final Map<TaskId, Task> tasks = job.getTasks();
    if (tasks == null) {
      return;
    }
    for (Task task : tasks.values()) {
      switch (task.getType()) {
        case MAP:
          // Task counts
          switch (task.getState()) {
            case RUNNING:
              ++this.mapsRunning;
              break;
            case SCHEDULED:
              ++this.mapsPending;
              break;
            default:
              break;
          }
          break;
        case REDUCE:
          // Task counts
          switch (task.getState()) {
            case RUNNING:
              ++this.reducesRunning;
              break;
            case SCHEDULED:
              ++this.reducesPending;
              break;
            default:
              break;
          }
          break;
        default:
          throw new IllegalStateException("Task type is neither map nor reduce: " + task.getType());
      }
      // Attempt counts
      Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
      for (TaskAttempt attempt : attempts.values()) {
        int newAttempts = 0;
        int running = 0;
        int successful = 0;
        int failed = 0;
        int killed = 0;
        if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
          ++newAttempts;
        } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
          ++running;
        } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt.getState())) {
          ++successful;
        } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
          ++failed;
        } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
          ++killed;
        }

        switch (task.getType()) {
          case MAP:
            this.newMapAttempts += newAttempts;
            this.runningMapAttempts += running;
            this.successfulMapAttempts += successful;
            this.failedMapAttempts += failed;
            this.killedMapAttempts += killed;
            break;
          case REDUCE:
            this.newReduceAttempts += newAttempts;
            this.runningReduceAttempts += running;
            this.successfulReduceAttempts += successful;
            this.failedReduceAttempts += failed;
            this.killedReduceAttempts += killed;
            break;
          default:
            throw new IllegalStateException("Task type neither map nor reduce: " + task.getType());
        }
      }
    }
  }
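countTasksAndAttempts above keeps one int counter per attempt bucket, duplicated for map and reduce attempts. The sketch below shows the same tally with an EnumMap; AttemptBucket is a hypothetical simplification of TaskAttemptStateUI (which groups several internal attempt states into each bucket) and the demo data is made up.

import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class AttemptTallyDemo {
  // Hypothetical stand-in for TaskAttemptStateUI's buckets.
  enum AttemptBucket { NEW, RUNNING, SUCCESSFUL, FAILED, KILLED }

  // Count attempts per bucket, mirroring the per-state counters kept above.
  static Map<AttemptBucket, Integer> tally(List<AttemptBucket> attemptStates) {
    Map<AttemptBucket, Integer> counts = new EnumMap<>(AttemptBucket.class);
    for (AttemptBucket bucket : AttemptBucket.values()) {
      counts.put(bucket, 0); // report zero for buckets with no attempts
    }
    for (AttemptBucket state : attemptStates) {
      counts.merge(state, 1, Integer::sum);
    }
    return counts;
  }

  public static void main(String[] args) {
    System.out.println(tally(List.of(
        AttemptBucket.RUNNING, AttemptBucket.SUCCESSFUL,
        AttemptBucket.SUCCESSFUL, AttemptBucket.FAILED)));
  }
}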
Example 9
  @Override
  public void updateAttempt(TaskAttemptStatus status, long timestamp) {
    super.updateAttempt(status, timestamp);

    TaskAttemptId attemptID = status.id;
    TaskId taskID = attemptID.getTaskId();
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);

    if (job == null) {
      return;
    }

    Task task = job.getTask(taskID);

    if (task == null) {
      return;
    }

    TaskAttempt taskAttempt = task.getAttempt(attemptID);

    if (taskAttempt == null) {
      return;
    }

    Long boxedStart = startTimes.get(attemptID);
    long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;

    // We need to do two things.
    //  1: If this is a completion, we accumulate statistics in the superclass
    //  2: If this is not a completion, we learn more about it.

    // This is not a completion, but we're cooking.
    //
    if (taskAttempt.getState() == TaskAttemptState.RUNNING) {
      // See if this task is already in the registry
      AtomicLong estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
      AtomicLong estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);

      if (estimateContainer == null) {
        // Match the variance path below: putIfAbsent avoids the window where a
        // concurrent insert could leave estimateContainer null.
        attemptRuntimeEstimates.putIfAbsent(taskAttempt, new AtomicLong());
        estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
      }

      if (estimateVarianceContainer == null) {
        attemptRuntimeEstimateVariances.putIfAbsent(taskAttempt, new AtomicLong());
        estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);
      }

      long estimate = -1;
      long varianceEstimate = -1;

      // This code assumes that we'll never consider starting a third
      //  speculative task attempt if two are already running for this task
      if (start > 0 && timestamp > start) {
        estimate = (long) ((timestamp - start) / Math.max(0.0001, status.progress));
        varianceEstimate = (long) (estimate * status.progress / 10);
      }
      if (estimateContainer != null) {
        estimateContainer.set(estimate);
      }
      if (estimateVarianceContainer != null) {
        estimateVarianceContainer.set(varianceEstimate);
      }
    }
  }
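The RUNNING branch above extrapolates a total runtime of elapsed time divided by reported progress (with a floor on progress to avoid dividing by zero), and a rough variance of estimate * progress / 10. Below is a small self-contained sketch of that arithmetic with made-up timestamps and progress; RuntimeEstimateDemo and its helper method are illustrative only.

public class RuntimeEstimateDemo {
  // Projected total runtime = elapsed / progress, mirroring the guard and the
  // Math.max(0.0001, progress) floor used in the snippet above.
  static long estimateTotalRuntime(long startMs, long nowMs, float progress) {
    if (startMs <= 0 || nowMs <= startMs) {
      return -1; // no estimate without a valid start time and elapsed interval
    }
    return (long) ((nowMs - startMs) / Math.max(0.0001, progress));
  }

  public static void main(String[] args) {
    long start = 10_000L;   // attempt launch time (ms)
    long now = 40_000L;     // 30 seconds elapsed
    float progress = 0.25f; // attempt reports 25% done
    long estimate = estimateTotalRuntime(start, now, progress);
    long variance = (long) (estimate * progress / 10); // same rough heuristic as above
    System.out.println("estimated total runtime = " + estimate + " ms"); // 120000
    System.out.println("variance estimate       = " + variance + " ms"); // 3000
  }
}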