Code example #1
File: TestJobRetire.java Project: AkihiroSuda/PCheck
 @Override
 HeartbeatResponse transmitHeartBeat(long now) throws IOException {
   // send one real heartbeat so the JobTracker registers this tracker,
   // then block forever below
   HeartbeatResponse response = super.transmitHeartBeat(now);
   LOG.info("WaitingTaskTracker waiting");
   // wait forever
   UtilsForTests.waitFor(Long.MAX_VALUE);
   throw new IOException("WaitingTaskTracker interrupted. Bailing out");
 }
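
For reference, UtilsForTests.waitFor(long) is a simple blocking helper. A plausible sketch, under the assumption that it wraps Object.wait and swallows interrupts (the real implementation may differ):

  // Plausible sketch of UtilsForTests.waitFor: block the calling thread for
  // roughly `duration` ms. An interrupt is swallowed, so callers like the
  // WaitingTaskTracker above treat an early wake-up as a normal return.
  static final Object waitLock = new Object();

  static void waitFor(long duration) {
    try {
      synchronized (waitLock) {
        waitLock.wait(duration);
      }
    } catch (InterruptedException ie) {
      // ignored by design
    }
  }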
Code example #2
File: TestJobRetire.java Project: AkihiroSuda/PCheck
 // wait till the job retires
 private void waitTillRetire(JobID id, JobTracker jobtracker) {
   // wait for job to get retired
   JobInProgress job = jobtracker.getJob(id);
   for (int i = 0; i < 10 && job != null; i++) {
     UtilsForTests.waitFor(1000);
     job = jobtracker.getJob(id);
   }
   assertNull("Job did not retire", job);
 }
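
The poll-sleep-recheck loop above is a recurring pattern in these tests. A hypothetical generalization (not part of UtilsForTests; the Condition interface and helper name are invented for illustration):

  // Hypothetical helper generalizing waitTillRetire's loop: poll a condition
  // up to maxAttempts times, sleeping intervalMs between checks.
  interface Condition {
    boolean holds();
  }

  static boolean waitForCondition(Condition condition, long intervalMs, int maxAttempts) {
    for (int i = 0; i < maxAttempts && !condition.holds(); i++) {
      UtilsForTests.waitFor(intervalMs);
    }
    return condition.holds();
  }

  // usage, equivalent to waitTillRetire(id, jobtracker):
  //   assertTrue("Job did not retire", waitForCondition(new Condition() {
  //     public boolean holds() { return jobtracker.getJob(id) == null; }
  //   }, 1000, 10));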
Code example #3
 /**
  * Set an invalid mapred local directory location, run the job, and verify the job status.
  *
  * @throws Exception - if an error occurs.
  */
 @Test
 public void testJobStatusForInvalidTaskControllerConf() throws Exception {
   conf = remoteJTClient.getDaemonConf();
   if (conf.get("mapred.task.tracker.task-controller")
       .equals("org.apache.hadoop.mapred.LinuxTaskController")) {
     StringBuffer mapredLocalDir = new StringBuffer();
     LOG.info("JobConf.MAPRED_LOCAL_DIR_PROPERTY:" + conf.get(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
     mapredLocalDir.append(conf.get(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
     mapredLocalDir.append(",");
     mapredLocalDir.append("/mapred/local");
      String[] jobArgs = {
       "-D",
       "mapred.local.dir=" + mapredLocalDir.toString(),
       "-m",
       "1",
       "-r",
       "1",
       "-mt",
       "1000",
       "-rt",
       "1000",
       "-recordt",
       "100"
     };
     SleepJob job = new SleepJob();
     JobConf jobConf = new JobConf(conf);
     int exitStatus = ToolRunner.run(jobConf, job, jobArgs);
     Assert.assertEquals("Exit Code:", 0, exitStatus);
     UtilsForTests.waitFor(100);
     JobClient jobClient = jtClient.getClient();
     JobID jobId = jobClient.getAllJobs()[0].getJobID();
     LOG.info("JobId:" + jobId);
     if (jobId != null) {
       JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
        Assert.assertEquals(
            "Job has not succeeded", JobStatus.SUCCEEDED, jInfo.getStatus().getRunState());
     }
   } else {
     Assert.assertTrue("Linux Task controller not found.", false);
   }
 }
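
The "-D mapred.local.dir=..." pair takes effect because ToolRunner.run passes generic options through GenericOptionsParser and folds them into the tool's Configuration before run() is invoked. A minimal sketch of a Tool that observes this (the class name is hypothetical):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.conf.Configured;
  import org.apache.hadoop.util.Tool;
  import org.apache.hadoop.util.ToolRunner;

  // Hypothetical minimal Tool: by the time run() executes, ToolRunner has
  // already applied the -D key=value pairs to getConf().
  public class PrintLocalDirTool extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
      System.out.println("mapred.local.dir = " + getConf().get("mapred.local.dir"));
      return 0;
    }

    public static void main(String[] args) throws Exception {
      System.exit(ToolRunner.run(new Configuration(), new PrintLocalDirTool(), args));
    }
  }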
Code example #4
  @SuppressWarnings("unchecked")
  public void testCommitter() throws Exception {
    JobConf job = new JobConf();
    setConfForFileOutputCommitter(job);
    JobContext jContext = new JobContextImpl(job, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(job, taskID);
    FileOutputCommitter committer = new FileOutputCommitter();
    FileOutputFormat.setWorkOutputPath(job, committer.getTempTaskOutputPath(tContext));

    committer.setupJob(jContext);
    committer.setupTask(tContext);
    String file = "test.txt";

    // A reporter that does nothing
    Reporter reporter = Reporter.NULL;
    // write output
    FileSystem localFs = FileSystem.getLocal(job);
    TextOutputFormat theOutputFormat = new TextOutputFormat();
    RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs, job, file, reporter);
    writeOutput(theRecordWriter, reporter);

    // do commit
    committer.commitTask(tContext);
    committer.commitJob(jContext);

    // validate output
    File expectedFile = new File(new Path(outDir, file).toString());
    StringBuffer expectedOutput = new StringBuffer();
    expectedOutput.append(key1).append('\t').append(val1).append("\n");
    expectedOutput.append(val1).append("\n");
    expectedOutput.append(val2).append("\n");
    expectedOutput.append(key2).append("\n");
    expectedOutput.append(key1).append("\n");
    expectedOutput.append(key2).append('\t').append(val2).append("\n");
    String output = UtilsForTests.slurp(expectedFile);
    assertEquals(expectedOutput.toString(), output);

    FileUtil.fullyDelete(new File(outDir.toString()));
  }
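
The writeOutput helper is referenced but not shown in this excerpt. A sketch consistent with the expectedOutput assembled above, relying on the fact that TextOutputFormat skips null and NullWritable keys/values (the helper body is assumed):

  // Sketch of the writeOutput helper (assumed, not shown in the excerpt).
  // Each write maps to one line of expectedOutput; null/NullWritable halves
  // are dropped by TextOutputFormat, leaving key-only or value-only lines.
  private void writeOutput(RecordWriter theRecordWriter, Reporter reporter) throws IOException {
    NullWritable nullWritable = NullWritable.get();
    try {
      theRecordWriter.write(key1, val1);         // "key1\tval1"
      theRecordWriter.write(null, val1);         // "val1"
      theRecordWriter.write(nullWritable, val2); // "val2"
      theRecordWriter.write(key2, nullWritable); // "key2"
      theRecordWriter.write(key1, null);         // "key1"
      theRecordWriter.write(key2, val2);         // "key2\tval2"
    } finally {
      theRecordWriter.close(reporter);
    }
  }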
Code example #5
File: TestJobRetire.java Project: AkihiroSuda/PCheck
  private JobID validateJobRetire(JobConf jobConf, Path inDir, Path outDir, JobTracker jobtracker)
      throws IOException {

    RunningJob rj = UtilsForTests.runJob(jobConf, inDir, outDir, 0, 0);
    rj.waitForCompletion();
    assertTrue(rj.isSuccessful());
    JobID id = rj.getID();

    // wait for job to get retired
    waitTillRetire(id, jobtracker);
    RetireJobInfo retired = jobtracker.retireJobs.get(id);
    assertTrue(
        "History url not set",
        retired.getHistoryFile() != null && retired.getHistoryFile().length() > 0);
    assertNotNull("Job is not in cache", jobtracker.getJobStatus(id));

    // get the job conf filename
    String name = jobtracker.getLocalJobFilePath(id);
    File file = new File(name);

    assertFalse("JobConf file not deleted", file.exists());
    // test redirection
    URL jobUrl = new URL(rj.getTrackingURL());
    HttpURLConnection conn = (HttpURLConnection) jobUrl.openConnection();
    conn.setInstanceFollowRedirects(false);
    conn.connect();
    assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, conn.getResponseCode());
    conn.disconnect();

    URL redirectedUrl = new URL(conn.getHeaderField("Location"));
    conn = (HttpURLConnection) redirectedUrl.openConnection();
    conn.connect();
    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
    conn.disconnect();

    return id;
  }
Code example #6
  void testNormalWriteAndRead(
      final int segments, final int length, final int byteMax, final CompressionCodec codec)
      throws Exception {

    Random random = new Random(37);

    // Prepare data for testing
    SimpleSeekableFormat.Buffer[] data = new SimpleSeekableFormat.Buffer[segments];
    for (int s = 0; s < segments; s++) {
      data[s] = createBuffer(random, length, byteMax);
    }

    // Write data to dataSegmentOutput
    ByteArrayOutputStream dataSegmentOutput = new ByteArrayOutputStream();
    for (int s = 0; s < segments; s++) {
      DataSegmentWriter writer = new DataSegmentWriter(data[s], codec, null);
      writer.writeTo(new DataOutputStream(dataSegmentOutput));
    }

    // Read dataSegments back to a Buffer
    byte[][] newData = new byte[segments][];
    ByteArrayInputStream dataSegmentInput =
        new ByteArrayInputStream(dataSegmentOutput.toByteArray());
    Configuration conf = new Configuration();
    HashMap<Text, Decompressor> decompressorCache = new HashMap<Text, Decompressor>();
    for (int s = 0; s < segments; s++) {
      DataSegmentReader reader =
          new DataSegmentReader(new DataInputStream(dataSegmentInput), conf, decompressorCache);
      newData[s] = IOUtils.toByteArray(reader.getInputStream());
    }

    // Compare data and newData
    for (int s = 0; s < segments; s++) {
      UtilsForTests.assertArrayEquals("Segment[" + s + "]", data[s].toByteArray(), newData[s]);
    }
  }
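
A plausible driver for this round-trip check, exercising it both uncompressed and gzip-compressed (the test method name is hypothetical; the codec is instantiated through ReflectionUtils so its Configuration is set):

  // Hypothetical driver: run the round-trip once with codec == null and once
  // with gzip. ReflectionUtils.newInstance wires a Configuration into the codec.
  public void testRoundTrips() throws Exception {
    Configuration conf = new Configuration();
    CompressionCodec gzip =
        org.apache.hadoop.util.ReflectionUtils.newInstance(GzipCodec.class, conf);
    testNormalWriteAndRead(4, 1024, 256, null); // uncompressed segments
    testNormalWriteAndRead(4, 1024, 256, gzip); // gzip-compressed segments
  }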
Code example #7
  /** Check refreshNodes for decommissioning blacklisted nodes. */
  public void testBlacklistedNodeDecommissioning() throws Exception {
    LOG.info("Testing blacklisted node decommissioning");
    MiniMRCluster mr = null;
    JobTracker jt = null;

    try {
      // start mini mr
      JobConf jtConf = new JobConf();
      jtConf.set("mapred.max.tracker.blacklists", "1");
      mr = new MiniMRCluster(0, 0, 2, "file:///", 1, null, null, null, jtConf);
      jt = mr.getJobTrackerRunner().getJobTracker();

      assertEquals("Trackers not up", 2, jt.taskTrackers().size());
      // validate the total tracker count
      assertEquals(
          "Active tracker count mismatch", 2, jt.getClusterStatus(false).getTaskTrackers());
      // validate blacklisted count
      assertEquals(
          "Blacklisted tracker count mismatch",
          0,
          jt.getClusterStatus(false).getBlacklistedTrackers());

      // run a failing job to blacklist the tracker
      JobConf jConf = mr.createJobConf();
      jConf.set("mapred.max.tracker.failures", "1");
      jConf.setJobName("test-job-fail-once");
      jConf.setMapperClass(FailOnceMapper.class);
      jConf.setReducerClass(IdentityReducer.class);
      jConf.setNumMapTasks(1);
      jConf.setNumReduceTasks(0);

      RunningJob job =
          UtilsForTests.runJob(jConf, new Path(TEST_DIR, "in"), new Path(TEST_DIR, "out"));
      job.waitForCompletion();

      // validate the total tracker count
      assertEquals(
          "Active tracker count mismatch", 1, jt.getClusterStatus(false).getTaskTrackers());
      // validate blacklisted count
      assertEquals(
          "Blacklisted tracker count mismatch",
          1,
          jt.getClusterStatus(false).getBlacklistedTrackers());

      // find the blacklisted tracker
      String trackerName = null;
      for (TaskTrackerStatus status : jt.taskTrackers()) {
        if (jt.isBlacklisted(status.getTrackerName())) {
          trackerName = status.getTrackerName();
          break;
        }
      }
      // get the hostname
      String hostToDecommission = JobInProgress.convertTrackerNameToHostName(trackerName);
      LOG.info("Decommissioning tracker " + hostToDecommission);

      // decommission the node
      HashSet<String> decom = new HashSet<String>(1);
      decom.add(hostToDecommission);
      jt.decommissionNodes(decom);

      // validate
      // check the cluster status and tracker size
      assertEquals(
          "Tracker is not lost upon host decommissioning",
          1,
          jt.getClusterStatus(false).getTaskTrackers());
      assertEquals(
          "Blacklisted tracker count incorrect in cluster status " + "after decommissioning",
          0,
          jt.getClusterStatus(false).getBlacklistedTrackers());
      assertEquals("Tracker is not lost upon host decommissioning", 1, jt.taskTrackers().size());
    } finally {
      if (mr != null) {
        mr.shutdown();
        mr = null;
        jt = null;
        FileUtil.fullyDelete(new File(TEST_DIR.toString()));
      }
    }
  }
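
FailOnceMapper is referenced but not shown. A hypothetical sketch that fails only the first attempt, so the rerun succeeds on the other tracker and the failing tracker crosses the mapred.max.tracker.failures / mapred.max.tracker.blacklists thresholds (the class body is assumed):

  // Hypothetical sketch of FailOnceMapper: throw on attempt 0 only, so the
  // retried task succeeds elsewhere and the original tracker is blacklisted.
  static class FailOnceMapper extends MapReduceBase
      implements Mapper<WritableComparable, Writable, WritableComparable, Writable> {
    private boolean shouldFail = false;

    @Override
    public void configure(JobConf conf) {
      TaskAttemptID id = TaskAttemptID.forName(conf.get("mapred.task.id"));
      shouldFail = (id.getId() == 0); // fail only the first attempt
    }

    public void map(WritableComparable key, Writable value,
        OutputCollector<WritableComparable, Writable> out, Reporter reporter)
        throws IOException {
      if (shouldFail) {
        throw new RuntimeException("injected failure on first attempt");
      }
    }
  }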
Code example #8
File: TestJobRetire.java Project: AkihiroSuda/PCheck
  /**
   * Test job retire with tasks that report their *first* status only after the job retires.
   * Steps:
   *  - Start a mini-mr cluster with 1 task-tracker having only map slots. Note that this
   *    task-tracker will take care of setup/cleanup and the map tasks.
   *  - Submit a job with 1 map task and 1 reduce task.
   *  - Wait for the job to finish the map task.
   *  - Start a 2nd tracker that waits for a long time after contacting the JT.
   *  - Wait for the 2nd tracker to get stuck.
   *  - Kill the job.
   *  - Wait for the job to retire.
   *  - Check if the tip mappings are cleaned up.
   */
  public void testJobRetireWithUnreportedTasks() throws Exception {
    MiniMRCluster mr = null;
    try {
      JobConf conf = new JobConf();
      // set the num-map-slots to 1 so that no reduce tasks but setup/cleanup
      // can run on it
      conf.setInt("mapred.tasktracker.map.tasks.maximum", 1);
      conf.setInt("mapred.tasktracker.reduce.tasks.maximum", 0);

      mr = startCluster(conf, 1);
      JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker();

      RunningJob job =
          UtilsForTests.runJob(
              mr.createJobConf(), new Path(testDir, "in-1"), new Path(testDir, "out-1"), 1, 1);
      JobID id = JobID.downgrade(job.getID());
      JobInProgress jip = jobtracker.getJob(id);

      // wait 100 secs for the job to complete its map task
      for (int i = 0; i < 1000 && jip.finishedMaps() < 1; i++) {
        UtilsForTests.waitFor(100);
      }
      assertEquals(1, jip.finishedMaps());

      // start a tracker that will wait
      LOG.info("Adding a waiting tracker");
      TaskTrackerRunner testTrackerRunner =
          mr.new TaskTrackerRunner(1, 1, null, mr.createJobConf()) {
            @Override
            TaskTracker createTaskTracker(JobConf conf) throws InterruptedException, IOException {
              return new WaitingTaskTracker(conf);
            }
          };
      mr.addTaskTracker(testTrackerRunner);
      LOG.info("Waiting tracker added");

      WaitingTaskTracker testTT = (WaitingTaskTracker) testTrackerRunner.getTaskTracker();

      // wait 100 secs for the newly started task-tracker to join
      for (int i = 0; i < 1000 && jobtracker.taskTrackers().size() < 2; i++) {
        UtilsForTests.waitFor(100);
      }
      assertEquals(2, jobtracker.taskTrackers().size());
      LOG.info("Cluster is now up with 2 trackers");
      // stop the test-tt as it's no longer required
      mr.stopTaskTracker(mr.getTaskTrackerID(testTT.getName()));

      // 1 reduce task should be scheduled
      assertEquals("TestTT contacted but no reduce task scheduled on it", 1, jip.runningReduces());

      // kill the job
      LOG.info("Killing job " + id);
      job.killJob();

      // check if the reduce task attempt status is missing
      TaskInProgress tip = jip.getTasks(TaskType.REDUCE)[0];
      assertNull(tip.getTaskStatus(tip.getAllTaskAttemptIDs()[0]));

      // wait for the job to retire
      waitTillRetire(id, jobtracker);

      // check the taskidToTIPMap
      for (TaskAttemptID tid : jobtracker.taskidToTIPMap.keySet()) {
        LOG.info("TaskidToTIP : " + tid);
      }
      assertEquals("'taskid' to TIP mapping still exists", 0, jobtracker.taskidToTIPMap.size());
    } finally {
      if (mr != null) {
        mr.shutdown();
      }
      // cleanup
      FileUtil.fullyDelete(new File(testDir.toString()));
    }
  }
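
For context, the WaitingTaskTracker instantiated above is the class whose heartbeat override appears in code example #1. A plausible outline of the full class (the constructor signature is assumed from the createTaskTracker override above):

  // Plausible outline of WaitingTaskTracker: a TaskTracker that sends one
  // real heartbeat and then blocks forever, simulating a stuck tracker.
  static class WaitingTaskTracker extends TaskTracker {
    WaitingTaskTracker(JobConf conf) throws InterruptedException, IOException {
      super(conf);
    }

    @Override
    HeartbeatResponse transmitHeartBeat(long now) throws IOException {
      HeartbeatResponse response = super.transmitHeartBeat(now);
      LOG.info("WaitingTaskTracker waiting");
      // wait forever (see code example #1)
      UtilsForTests.waitFor(Long.MAX_VALUE);
      throw new IOException("WaitingTaskTracker interrupted. Bailing out");
    }
  }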
Code example #9
  @Test
  public void mrRun() throws Exception {
    FileSystem fs = dfsCluster.getFileSystem();
    Path inDir = fs.makeQualified(new Path("/user/testing/testMapperReducer/input"));
    fs.delete(inDir, true);
    String DATADIR = "/user/testing/testMapperReducer/data";
    Path dataDir = fs.makeQualified(new Path(DATADIR));
    fs.delete(dataDir, true);
    Path outDir = fs.makeQualified(new Path("/user/testing/testMapperReducer/output"));
    fs.delete(outDir, true);

    assertTrue(fs.mkdirs(inDir));
    Path INPATH = new Path(inDir, "input.txt");
    OutputStream os = fs.create(INPATH);
    Writer wr = new OutputStreamWriter(os, StandardCharsets.UTF_8);
    wr.write(DATADIR + "/" + inputAvroFile);
    wr.close();

    assertTrue(fs.mkdirs(dataDir));
    fs.copyFromLocalFile(new Path(DOCUMENTS_DIR, inputAvroFile), dataDir);

    JobConf jobConf = getJobConf();
    jobConf.set("jobclient.output.filter", "ALL");
    if (ENABLE_LOCAL_JOB_RUNNER) {
      // enable Hadoop LocalJobRunner; this makes it possible to run the job
      // in a debugger and set breakpoints
      jobConf.set("mapred.job.tracker", "local");
    }
    jobConf.setMaxMapAttempts(1);
    jobConf.setMaxReduceAttempts(1);
    jobConf.setJar(SEARCH_ARCHIVES_JAR);

    int shards = 2;
    int maxReducers = Integer.MAX_VALUE;
    if (ENABLE_LOCAL_JOB_RUNNER) {
      // local job runner has a couple of limitations: only one reducer is supported and the
      // DistributedCache doesn't work.
      // see http://blog.cloudera.com/blog/2009/07/advice-on-qa-testing-your-mapreduce-jobs/
      maxReducers = 1;
      shards = 1;
    }

    String[] args =
        new String[] {
          "--morphline-file=" + tempDir + "/test-morphlines/solrCellDocumentTypes.conf",
          "--morphline-id=morphline1",
          "--solr-home-dir=" + MINIMR_CONF_DIR.getAbsolutePath(),
          "--output-dir=" + outDir.toString(),
          "--shards=" + shards,
          "--verbose",
          numRuns % 2 == 0 ? "--input-list=" + INPATH.toString() : dataDir.toString(),
          numRuns % 3 == 0
              ? "--reducers=" + shards
              : (numRuns % 3 == 1 ? "--reducers=-1" : "--reducers=" + Math.min(8, maxReducers))
        };
    if (numRuns % 3 == 2) {
      args = concat(args, new String[] {"--fanout=2"});
    }
    if (numRuns == 0) {
      // force (slow) MapReduce based randomization to get coverage for that as well
      args =
          concat(
              new String[] {"-D", MapReduceIndexerTool.MAIN_MEMORY_RANDOMIZATION_THRESHOLD + "=-1"},
              args);
    }
    MapReduceIndexerTool tool = createTool();
    int res = ToolRunner.run(jobConf, tool, args);
    assertEquals(0, res);
    Job job = tool.job;
    assertTrue(job.isComplete());
    assertTrue(job.isSuccessful());

    if (numRuns % 3 != 2) {
      // Only run this check if mtree merge is disabled.
      // With mtree merge enabled the BatchWriter counters aren't available anymore because
      // variable "job" now refers to the merge job rather than the indexing job
      assertEquals(
          "Invalid counter "
              + SolrRecordWriter.class.getName()
              + "."
              + SolrCounters.DOCUMENTS_WRITTEN,
          count,
          job.getCounters()
              .findCounter(SolrCounters.class.getName(), SolrCounters.DOCUMENTS_WRITTEN.toString())
              .getValue());
    }

    // Check the output is as expected
    outDir = new Path(outDir, MapReduceIndexerTool.RESULTS_DIR);
    Path[] outputFiles = FileUtil.stat2Paths(fs.listStatus(outDir));

    System.out.println("outputfiles:" + Arrays.toString(outputFiles));

    UtilsForTests.validateSolrServerDocumentCount(MINIMR_CONF_DIR, fs, outDir, count, shards);

    // run again with --dryrun mode:
    tool = createTool();
    args = concat(args, new String[] {"--dry-run"});
    res = ToolRunner.run(jobConf, tool, args);
    assertEquals(0, res);

    numRuns++;
  }
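
The concat helper used twice above is not shown in this excerpt. A trivial sketch (the helper body is assumed):

  // Sketch of the concat helper (assumed): join two String arrays into one,
  // preserving order of a's elements followed by b's.
  private static String[] concat(String[] a, String[] b) {
    String[] result = new String[a.length + b.length];
    System.arraycopy(a, 0, result, 0, a.length);
    System.arraycopy(b, 0, result, a.length, b.length);
    return result;
  }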