Code example #1
File: TestJobRetire.java Project: AkihiroSuda/PCheck
  public void testJobRetire() throws Exception {
    MiniMRCluster mr = null;
    try {
      JobConf conf = new JobConf();
      mr = startCluster(conf, 1);

      JobConf jobConf = mr.createJobConf();
      JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker();

      Path inDir = new Path(testDir, "input1");
      Path outDir = new Path(testDir, "output1");

      JobID id1 = validateJobRetire(jobConf, inDir, outDir, jobtracker);

      outDir = new Path(testDir, "output2");
      JobID id2 = validateJobRetire(jobConf, inDir, outDir, jobtracker);

      assertNull("Job not removed from cache", jobtracker.getJobStatus(id1));

      assertEquals("Total job in cache not correct", 1, jobtracker.getAllJobs().length);
    } finally {
      if (mr != null) {
        mr.shutdown();
      }
      FileUtil.fullyDelete(new File(testDir.toString()));
    }
  }
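The validateJobRetire helper called above is not part of this snippet. Below is a minimal sketch of what such a helper could look like, assembled only from calls that appear elsewhere in these examples (UtilsForTests.runJob, JobID.downgrade, waitTillRetire, getJobStatus); the real implementation may differ:

  // Hypothetical sketch of the validateJobRetire helper (not shown above):
  // run a small job, wait until the JobTracker retires it, and return its id.
  private JobID validateJobRetire(JobConf jobConf, Path inDir, Path outDir,
      JobTracker jobtracker) throws Exception {
    RunningJob rj = UtilsForTests.runJob(jobConf, inDir, outDir);
    rj.waitForCompletion();
    assertTrue("Job did not succeed", rj.isSuccessful());

    JobID id = JobID.downgrade(rj.getID());
    waitTillRetire(id, jobtracker); // poll until the job leaves the JT's job map
    // a retired job should still answer status queries from the retired-jobs cache
    assertNotNull("Job status missing after retire", jobtracker.getJobStatus(id));
    return id;
  }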
Code example #2
  /**
   * run a distributed job and verify that TokenCache is available
   *
   * @throws IOException
   */
  @Test
  public void testTokenCache() throws IOException {

    System.out.println("running dist job");

    // make sure JT starts
    jConf = mrCluster.createJobConf();

    // provide namenode names for the job to get the delegation tokens for
    String nnUri = dfsCluster.getURI(0).toString();
    jConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
    // job tracker principal id
    jConf.set(JTConfig.JT_USER_NAME, "jt_id/foo@BAR");

    // pass the token cache file name as a command-line argument
    String[] args = {
      "-tokenCacheFile", tokenFileName.toString(), "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
    };

    int res = -1;
    try {
      res = ToolRunner.run(jConf, new MySleepJob(), args);
    } catch (Exception e) {
      System.out.println("Job failed with" + e.getLocalizedMessage());
      e.printStackTrace(System.out);
      fail("Job failed");
    }
    assertEquals("dist job res is not 0", res, 0);
  }
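ToolRunner.run(conf, tool, args) first feeds the arguments through GenericOptionsParser (which understands generic flags such as -tokenCacheFile) and then hands the remainder to Tool.run. A minimal sketch of that contract follows, assuming MySleepJob follows the standard Tool pattern; its real body is not shown here:

  // Minimal sketch of the Tool contract that ToolRunner.run() relies on.
  // The real MySleepJob presumably configures and submits a sleep job.
  public static class MySleepJob extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
      // generic options (-D, -tokenCacheFile, ...) have already been merged
      // into getConf(); job-specific args (-m, -r, -mt, -rt above) arrive here
      Configuration conf = getConf();
      // ... build and submit the job using conf ...
      return 0; // 0 is the success code the assertEquals above checks for
    }
  }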
Code example #3
File: Hadoop23Shims.java Project: prongs/apache-hive
  @Override
  public void setupConfiguration(Configuration conf) {
    // copy every key/value pair from the mini cluster's generated JobConf
    // into the caller-supplied Configuration
    JobConf jConf = mr.createJobConf();
    for (Map.Entry<String, String> pair : jConf) {
      conf.set(pair.getKey(), pair.getValue());
    }
  }
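A hypothetical usage sketch of the override above; `shims` and `clientConf` are illustrative names, with `shims` standing for an instance of the class wired to a running MiniMRCluster:

  // Copy the mini cluster's settings (job tracker address, staging dirs, ...)
  // into a fresh client-side Configuration before handing it to code under test.
  Configuration clientConf = new Configuration();
  shims.setupConfiguration(clientConf); // clientConf now points at the mini cluster
  JobClient client = new JobClient(new JobConf(clientConf)); // may throw IOException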
Code example #4
  @BeforeClass
  public static void setup() throws Exception {
    String testDir = System.getProperty("test.data.dir", "./");
    testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();

    warehousedir = new Path(workDir + "/warehouse");

    // Run hive metastore server
    t = new Thread(new RunMS());
    t.start();

    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    Configuration conf = new Configuration(true);
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    fs.mkdirs(warehousedir);

    initializeSetup();
  }
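The matching teardown is not included in this snippet; a minimal sketch, assuming the same static fields (mrCluster, workDir), would shut the cluster down and remove the per-run directory:

  // Hypothetical @AfterClass counterpart to the setup above.
  @AfterClass
  public static void tearDown() throws Exception {
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
    FileUtil.fullyDelete(workDir);
  }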
Code example #5
  /** Check refreshNodes for decommissioning blacklisted nodes. */
  public void testBlacklistedNodeDecommissioning() throws Exception {
    LOG.info("Testing blacklisted node decommissioning");
    MiniMRCluster mr = null;
    JobTracker jt = null;

    try {
      // start mini mr
      JobConf jtConf = new JobConf();
      jtConf.set("mapred.max.tracker.blacklists", "1");
      mr = new MiniMRCluster(0, 0, 2, "file:///", 1, null, null, null, jtConf);
      jt = mr.getJobTrackerRunner().getJobTracker();

      assertEquals("Trackers not up", 2, jt.taskTrackers().size());
      // validate the total tracker count
      assertEquals(
          "Active tracker count mismatch", 2, jt.getClusterStatus(false).getTaskTrackers());
      // validate blacklisted count
      assertEquals(
          "Blacklisted tracker count mismatch",
          0,
          jt.getClusterStatus(false).getBlacklistedTrackers());

      // run a failing job to blacklist the tracker
      JobConf jConf = mr.createJobConf();
      jConf.set("mapred.max.tracker.failures", "1");
      jConf.setJobName("test-job-fail-once");
      jConf.setMapperClass(FailOnceMapper.class);
      jConf.setReducerClass(IdentityReducer.class);
      jConf.setNumMapTasks(1);
      jConf.setNumReduceTasks(0);

      RunningJob job =
          UtilsForTests.runJob(jConf, new Path(TEST_DIR, "in"), new Path(TEST_DIR, "out"));
      job.waitForCompletion();

      // validate the total tracker count
      assertEquals(
          "Active tracker count mismatch", 1, jt.getClusterStatus(false).getTaskTrackers());
      // validate blacklisted count
      assertEquals(
          "Blacklisted tracker count mismatch",
          1,
          jt.getClusterStatus(false).getBlacklistedTrackers());

      // find the blacklisted tracker
      String trackerName = null;
      for (TaskTrackerStatus status : jt.taskTrackers()) {
        if (jt.isBlacklisted(status.getTrackerName())) {
          trackerName = status.getTrackerName();
          break;
        }
      }
      // get the hostname
      String hostToDecommission = JobInProgress.convertTrackerNameToHostName(trackerName);
      LOG.info("Decommissioning tracker " + hostToDecommission);

      // decommission the node
      HashSet<String> decom = new HashSet<String>(1);
      decom.add(hostToDecommission);
      jt.decommissionNodes(decom);

      // validate
      // check the cluster status and tracker size
      assertEquals(
          "Tracker is not lost upon host decommissioning",
          1,
          jt.getClusterStatus(false).getTaskTrackers());
      assertEquals(
          "Blacklisted tracker count incorrect in cluster status " + "after decommissioning",
          0,
          jt.getClusterStatus(false).getBlacklistedTrackers());
      assertEquals("Tracker is not lost upon host decommissioning", 1, jt.taskTrackers().size());
    } finally {
      if (mr != null) {
        mr.shutdown();
        mr = null;
        jt = null;
        FileUtil.fullyDelete(new File(TEST_DIR.toString()));
      }
    }
  }
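FailOnceMapper is referenced but not shown. A hypothetical sketch of such a mapper in the old mapred API: the first task attempt throws, the retry succeeds, so with mapred.max.tracker.failures set to 1 the first tracker is blacklisted for the job, and with mapred.max.tracker.blacklists set to 1 it becomes blacklisted cluster-wide:

  // Hypothetical sketch: attempt 0 fails on purpose, later attempts succeed.
  static class FailOnceMapper extends MapReduceBase
      implements Mapper<WritableComparable, Writable, WritableComparable, Writable> {
    private boolean firstAttempt = false;

    @Override
    public void configure(JobConf conf) {
      TaskAttemptID id = TaskAttemptID.forName(conf.get("mapred.task.id"));
      firstAttempt = (id.getId() == 0);
    }

    @Override
    public void map(WritableComparable key, Writable value,
        OutputCollector<WritableComparable, Writable> out, Reporter reporter)
        throws IOException {
      if (firstAttempt) {
        throw new RuntimeException("failing the first attempt on purpose");
      }
    }
  }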
Code example #6
File: TestJobRetire.java Project: AkihiroSuda/PCheck
  /**
   * Test job retire with tasks that report their *first* status only after the job retires.
   * Steps:
   *  - Start a mini-mr cluster with 1 task-tracker having only map slots. Note that this
   *    task-tracker will take care of setup/cleanup and the map tasks.
   *  - Submit a job with 1 map task and 1 reduce task.
   *  - Wait for the job to finish the map task.
   *  - Start a 2nd tracker that waits for a long time after contacting the JT.
   *  - Wait for the 2nd tracker to get stuck.
   *  - Kill the job.
   *  - Wait for the job to retire.
   *  - Check if the tip mappings are cleaned up.
   */
  public void testJobRetireWithUnreportedTasks() throws Exception {
    MiniMRCluster mr = null;
    try {
      JobConf conf = new JobConf();
      // set map slots to 1 and reduce slots to 0 so that only the map task
      // and setup/cleanup, but no reduce tasks, can run on this tracker
      conf.setInt("mapred.tasktracker.map.tasks.maximum", 1);
      conf.setInt("mapred.tasktracker.reduce.tasks.maximum", 0);

      mr = startCluster(conf, 1);
      JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker();

      RunningJob job =
          UtilsForTests.runJob(
              mr.createJobConf(), new Path(testDir, "in-1"), new Path(testDir, "out-1"), 1, 1);
      JobID id = JobID.downgrade(job.getID());
      JobInProgress jip = jobtracker.getJob(id);

      // wait 100 secs for the job to complete its map task
      for (int i = 0; i < 1000 && jip.finishedMaps() < 1; i++) {
        UtilsForTests.waitFor(100);
      }
      assertEquals(1, jip.finishedMaps());

      // start a tracker that will wait
      LOG.info("Adding a waiting tracker");
      TaskTrackerRunner testTrackerRunner =
          mr.new TaskTrackerRunner(1, 1, null, mr.createJobConf()) {
            @Override
            TaskTracker createTaskTracker(JobConf conf) throws InterruptedException, IOException {
              return new WaitingTaskTracker(conf);
            }
          };
      mr.addTaskTracker(testTrackerRunner);
      LOG.info("Waiting tracker added");

      WaitingTaskTracker testTT = (WaitingTaskTracker) testTrackerRunner.getTaskTracker();

      // wait 100 secs for the newly started task-tracker to join
      for (int i = 0; i < 1000 && jobtracker.taskTrackers().size() < 2; i++) {
        UtilsForTests.waitFor(100);
      }
      assertEquals(2, jobtracker.taskTrackers().size());
      LOG.info("Cluster is now up with 2 trackers");
      // stop the test-tt as it's no longer required
      mr.stopTaskTracker(mr.getTaskTrackerID(testTT.getName()));

      // 1 reduce task should be scheduled
      assertEquals("TestTT contacted but no reduce task scheduled on it", 1, jip.runningReduces());

      // kill the job
      LOG.info("Killing job " + id);
      job.killJob();

      // check if the reduce task attempt status is missing
      TaskInProgress tip = jip.getTasks(TaskType.REDUCE)[0];
      assertNull(tip.getTaskStatus(tip.getAllTaskAttemptIDs()[0]));

      // wait for the job to retire
      waitTillRetire(id, jobtracker);

      // check the taskidToTIPMap
      for (TaskAttemptID tid : jobtracker.taskidToTIPMap.keySet()) {
        LOG.info("TaskidToTIP : " + tid);
      }
      assertEquals("'taskid' to TIP mapping still exists", 0, jobtracker.taskidToTIPMap.size());
    } finally {
      if (mr != null) {
        mr.shutdown();
      }
      // cleanup
      FileUtil.fullyDelete(new File(testDir.toString()));
    }
  }
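waitTillRetire is another helper that this snippet relies on but does not show. Here is a minimal sketch using only calls visible above (jobtracker.getJob, UtilsForTests.waitFor); the real helper may assert more:

  // Hypothetical sketch of waitTillRetire: poll until the JobInProgress is
  // dropped from the JobTracker's memory, bounded so a regression fails
  // instead of hanging forever.
  static void waitTillRetire(JobID id, JobTracker jobtracker) {
    for (int i = 0; i < 1000 && jobtracker.getJob(id) != null; i++) {
      UtilsForTests.waitFor(100); // 100 ms per poll, ~100 s cap
    }
    assertNull("Job " + id + " did not retire in time", jobtracker.getJob(id));
  }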
Code example #7
  /** Test if {@link CurrentJHParser} can read events from current JH files. */
  @Test
  public void testCurrentJHParser() throws Exception {
    final Configuration conf = new Configuration();
    final FileSystem lfs = FileSystem.getLocal(conf);

    final Path rootTempDir =
        new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(lfs);

    final Path tempDir = new Path(rootTempDir, "TestCurrentJHParser");
    lfs.delete(tempDir, true);

    String queueName = "testQueue";
    // Run a MR job
    // create a MR cluster
    conf.setInt("mapred.tasktracker.map.tasks.maximum", 1);
    conf.setInt("mapred.tasktracker.reduce.tasks.maximum", 1);
    conf.set("mapred.queue.names", queueName);
    MiniMRCluster mrCluster = new MiniMRCluster(1, "file:///", 1, null, null, new JobConf(conf));

    // run a job
    Path inDir = new Path(tempDir, "input");
    Path outDir = new Path(tempDir, "output");
    JobHistoryParser parser = null;
    RewindableInputStream ris = null;
    ArrayList<String> seenEvents = new ArrayList<String>(10);
    RunningJob rJob = null;

    try {
      JobConf jobConf = mrCluster.createJobConf();
      jobConf.setQueueName(queueName);
      // construct a job with 1 map and 1 reduce task.
      rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 1);
      rJob.waitForCompletion();
      assertTrue("Job failed", rJob.isSuccessful());

      JobID id = rJob.getID();

      // get the jobhistory filepath
      Path inputPath =
          new Path(JobHistory.getHistoryFilePath(org.apache.hadoop.mapred.JobID.downgrade(id)));
      // wait for 10 secs for the jobhistory file to move into the done folder
      for (int i = 0; i < 100; ++i) {
        if (lfs.exists(inputPath)) {
          break;
        }
        TimeUnit.MILLISECONDS.sleep(100);
      }

      assertTrue("Missing job history file", lfs.exists(inputPath));

      InputDemuxer inputDemuxer = new DefaultInputDemuxer();
      inputDemuxer.bindTo(inputPath, conf);

      Pair<String, InputStream> filePair = inputDemuxer.getNext();

      assertNotNull(filePair);

      ris = new RewindableInputStream(filePair.second());

      // Test if the JobHistoryParserFactory can detect the parser correctly
      parser = JobHistoryParserFactory.getParser(ris);

      // Get ParsedJob
      String jobId = TraceBuilder.extractJobID(filePair.first());
      JobBuilder builder = new JobBuilder(jobId);

      HistoryEvent e;
      while ((e = parser.nextEvent()) != null) {
        String eventString = e.getEventType().toString();
        System.out.println(eventString);
        seenEvents.add(eventString);
        if (builder != null) {
          builder.process(e);
        }
      }

      ParsedJob parsedJob = builder.build();
      // validate the obtainXXX api of ParsedJob, ParsedTask and
      // ParsedTaskAttempt.
      validateParsedJob(parsedJob, 1, 1, queueName);
    } finally {
      // stop the MR cluster
      mrCluster.shutdown();

      if (ris != null) {
        ris.close();
      }
      if (parser != null) {
        parser.close();
      }

      // cleanup the filesystem
      lfs.delete(tempDir, true);
    }

    // Check against the gold standard
    System.out.println("testCurrentJHParser validating using gold std ");
    String[] goldLines =
        new String[] {
          "JOB_SUBMITTED",
          "JOB_PRIORITY_CHANGED",
          "JOB_STATUS_CHANGED",
          "JOB_INITED",
          "JOB_INFO_CHANGED",
          "TASK_STARTED",
          "MAP_ATTEMPT_STARTED",
          "MAP_ATTEMPT_FINISHED",
          "MAP_ATTEMPT_FINISHED",
          "TASK_UPDATED",
          "TASK_FINISHED",
          "JOB_STATUS_CHANGED",
          "TASK_STARTED",
          "MAP_ATTEMPT_STARTED",
          "MAP_ATTEMPT_FINISHED",
          "MAP_ATTEMPT_FINISHED",
          "TASK_UPDATED",
          "TASK_FINISHED",
          "TASK_STARTED",
          "MAP_ATTEMPT_STARTED",
          "MAP_ATTEMPT_FINISHED",
          "REDUCE_ATTEMPT_FINISHED",
          "TASK_UPDATED",
          "TASK_FINISHED",
          "TASK_STARTED",
          "MAP_ATTEMPT_STARTED",
          "MAP_ATTEMPT_FINISHED",
          "MAP_ATTEMPT_FINISHED",
          "TASK_UPDATED",
          "TASK_FINISHED",
          "JOB_STATUS_CHANGED",
          "JOB_FINISHED"
        };

    // Check the output with gold std
    assertEquals("Size mismatch", goldLines.length, seenEvents.size());

    int index = 0;
    for (String goldLine : goldLines) {
      assertEquals("Content mismatch", goldLine, seenEvents.get(index++));
    }
  }