  /**
   * Test to verify that process info is available for the JobTracker
   * and for every TaskTracker in the cluster.
   *
   * @throws Exception
   */
  @Test
  public void testProcessInfo() throws Exception {
    LOG.info("Process info of JobTracker is : "
        + cluster.getJTClient().getProcessInfo());
    Assert.assertNotNull(cluster.getJTClient().getProcessInfo());
    Collection<TTClient> tts = cluster.getTTClients();
    for (TTClient tt : tts) {
      LOG.info("Process info of TaskTracker is : " + tt.getProcessInfo());
      Assert.assertNotNull(tt.getProcessInfo());
    }
  }
  /**
   * Test to verify the common properties of tasks.
   *
   * @throws Exception
   */
  @Test
  public void testTaskDetails() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    FinishTaskControlAction.configureControlActionForJob(conf);
    SleepJob job = new SleepJob();
    job.setConf(conf);
    conf = job.setupJobConf(1, 1, 100, 100, 100, 100);
    JobClient client = cluster.getJTClient().getClient();

    RunningJob rJob = client.submitJob(new JobConf(conf));
    JobID id = rJob.getID();

    JobInfo jInfo = wovenClient.getJobInfo(id);

    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
      Thread.sleep(1000);
      jInfo = wovenClient.getJobInfo(id);
    }
    LOG.info("Waiting till job starts running one map");

    TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
    boolean isOneTaskStored = false;
    String sometaskpid = null;
    org.apache.hadoop.mapreduce.TaskAttemptID sometaskId = null;
    TTClient myCli = null;
    for (TaskInfo info : myTaskInfos) {
      if (!info.isSetupOrCleanup()) {
        String[] taskTrackers = info.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
          TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
          TaskID taskId = info.getTaskID();
          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(taskId);
          Assert.assertNotNull(ttTaskInfo);
          Assert.assertNotNull(ttTaskInfo.getConf());
          Assert.assertNotNull(ttTaskInfo.getUser());
          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() >= 0.0);
          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() <= 1.0);

          // Get the pid of the task attempt. The task need not have
          // reported the pid of the task by the time we are checking
          // the pid. So perform null check.
          String pid = ttTaskInfo.getPid();
          int i = 1;
          while (pid == null || pid.isEmpty()) {
            Thread.sleep(1000);
            LOG.info("Waiting for task to report its pid back");
            ttTaskInfo = ttCli.getProxy().getTask(taskId);
            pid = ttTaskInfo.getPid();
            if (i == 40) {
              Assert.fail("The task pid was not reported for 40 seconds.");
            }
            i++;
          }
          if (!isOneTaskStored) {
            sometaskpid = pid;
            sometaskId = ttTaskInfo.getTaskStatus().getTaskID();
            myCli = ttCli;
            isOneTaskStored = true;
          }
          LOG.info("verified task progress to be between 0 and 1");
          State state = ttTaskInfo.getTaskStatus().getRunState();
          if (ttTaskInfo.getTaskStatus().getProgress() < 1.0
              && ttTaskInfo.getTaskStatus().getProgress() > 0.0) {
            Assert.assertEquals(TaskStatus.State.RUNNING, state);
            LOG.info("verified run state as " + state);
          }
          // Signal the task to finish, since the job was configured with
          // FinishTaskControlAction and its tasks wait for this action.
          FinishTaskControlAction action = new FinishTaskControlAction(
              org.apache.hadoop.mapred.TaskID.downgrade(info.getTaskID()));
          ttCli.getProxy().sendAction(action);
        }
      }
    }
    rJob.killJob();
    int i = 1;
    while (!rJob.isComplete()) {
      Thread.sleep(1000);
      if (i == 40) {
        Assert.fail("The job did not complete within 40 seconds of killing it.");
      }
      i++;
    }

    // After the job completes, the task should be retired from the
    // TaskTracker's memory and its process tree should no longer be alive.
    TTTaskInfo myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID());
    i = 0;
    while (myTaskInfo != null && !myTaskInfo.getPid().isEmpty()) {
      LOG.info("sleeping till task is retired from TT memory");
      Thread.sleep(1000);
      myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID());
      if (i == 40) {
        Assert.fail("Task not retired from TT memory within 40 seconds of job completing");
      }
      i++;
    }
    Assert.assertFalse(myCli.getProxy().isProcessTreeAlive(sometaskpid));
  }