Example #1
 @Override
 public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
   Configuration conf = jobContext.getConfiguration();
   // Give every named output's committer a chance to clean up: each one
   // gets its own configured Job view before abortJob is delegated to it.
   for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
     Job job = getJob(jobContext.getJobID(), e.getKey(), conf);
     configureJob(e.getKey(), job, outputs.get(e.getKey()));
     e.getValue().abortJob(job, state);
   }
 }
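The getJob helper these snippets call is not shown. A minimal, hypothetical sketch of what such a helper might look like, assuming it hands each named output its own Job backed by a copy of the base configuration so per-output settings do not leak between outputs (the class name and the configuration key are illustrative, not from the source):

 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobID;

 final class NamedOutputJobs {
   // Hypothetical stand-in for the getJob(JobID, name, conf) helper used above.
   // The jobID parameter is kept for signature parity but unused in this sketch.
   static Job getJob(JobID jobID, String namedOutput, Configuration base) throws IOException {
     // Copy the configuration so configureJob can set per-output keys
     // (output format, key/value classes) without touching other outputs.
     Job job = Job.getInstance(new Configuration(base));
     job.getConfiguration().set("named.output", namedOutput); // illustrative key
     return job;
   }
 }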
Example #2
 public static void checkOutputSpecs(JobContext jc) throws IOException, InterruptedException {
   Map<String, OutputConfig> outputs = getNamedOutputs(jc.getConfiguration());
   // Validate every named output up front so the job fails fast before launch.
   for (Map.Entry<String, OutputConfig> e : outputs.entrySet()) {
     String namedOutput = e.getKey();
     Job job = getJob(jc.getJobID(), namedOutput, jc.getConfiguration());
     OutputFormat<?, ?> fmt = getOutputFormat(namedOutput, job, e.getValue());
     fmt.checkOutputSpecs(job);
   }
 }
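For context, checkOutputSpecs is the same hook FileOutputFormat uses to fail fast when an output directory already exists. A small standalone sketch of the single-output case (assumes Hadoop on the classpath; the class name and path are made up):

 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

 public class CheckSpecsDemo {
   public static void main(String[] args) throws Exception {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, new Path("/tmp/demo-out"));
     // Throws FileAlreadyExistsException if /tmp/demo-out exists, and
     // InvalidJobConfException if no output path had been set; the loop
     // above runs this same check once per named output.
     new TextOutputFormat<Object, Object>().checkOutputSpecs(job);
   }
 }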
Example #3
 @Override
 public void commitJob(JobContext jobContext) throws IOException {
   Configuration conf = jobContext.getConfiguration();
   Set<Path> handledPaths = Sets.newHashSet();
   for (Map.Entry<String, OutputCommitter> e : committers.entrySet()) {
     OutputCommitter oc = e.getValue();
     Job job = getJob(jobContext.getJobID(), e.getKey(), conf);
     configureJob(e.getKey(), job, outputs.get(e.getKey()));
     if (oc instanceof FileOutputCommitter) {
       // Several named outputs can share one physical output directory;
       // Set.add returns false for a path we have already seen, so each
       // directory is committed exactly once.
       Path outputPath = ((FileOutputCommitter) oc).getWorkPath().getParent();
       if (!handledPaths.add(outputPath)) {
         continue;
       }
     }
     oc.commitJob(job);
   }
 }
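The path de-duplication relies on nothing Hadoop-specific: java.util.Set.add returns false when the element is already present. A tiny self-contained illustration of the idiom (demo class and paths are made up):

 import java.util.HashSet;
 import java.util.Set;

 public class DedupeDemo {
   public static void main(String[] args) {
     Set<String> handled = new HashSet<>();
     for (String path : new String[] {"/out/a", "/out/b", "/out/a"}) {
       if (!handled.add(path)) {
         continue; // already handled this directory; skip it
       }
       System.out.println("committing " + path); // prints /out/a and /out/b once each
     }
   }
 }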
Example #4
 public static org.apache.hadoop.mapred.JobContext createJobContext(
     org.apache.hadoop.mapreduce.JobContext context) {
   // Bridges the new (mapreduce) API to the old (mapred) one. The cast
   // assumes the context was built from a JobConf; it throws
   // ClassCastException for a plain Configuration.
   return createJobContext(
       (JobConf) context.getConfiguration(), context.getJobID(), Reporter.NULL);
 }
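A note on that cast: JobConf extends Configuration, so the downcast only succeeds when the runtime type really is a JobConf. A minimal sketch of both outcomes (hypothetical demo class):

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobConf;

 public class JobConfCastDemo {
   public static void main(String[] args) {
     Configuration viaJobConf = new JobConf();
     JobConf ok = (JobConf) viaJobConf; // succeeds: runtime type is JobConf
     System.out.println(ok.getClass().getSimpleName());

     Configuration plain = new Configuration();
     // (JobConf) plain would throw ClassCastException at runtime, which is
     // why callers must have built the mapreduce JobContext from a JobConf.
   }
 }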