public void configure(JobConf job) {
  this.jobconf = job;
  String cassConfig;

  // Get the cached files
  try {
    localFiles = DistributedCache.getLocalCacheFiles(job);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  // Point Cassandra at the directory containing the cached storage-conf.xml
  cassConfig = localFiles[0].getParent().toString();
  System.setProperty("storage-config", cassConfig);

  // Start a fat client: it joins gossip but does not claim a token range
  try {
    StorageService.instance.initClient();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }

  // Give the client time to learn the ring topology via gossip
  try {
    Thread.sleep(10 * 1000);
  } catch (InterruptedException e) {
    throw new RuntimeException(e);
  }
}
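For reference, a minimal sketch of the driver-side setup this configure() assumes: the Cassandra config file is shipped to each task through the DistributedCache under the same URI that close() later releases. The helper name and the HDFS path are placeholders, not part of the original code.

import java.net.URI;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.mapred.JobConf;

// Hypothetical driver-side helper; the HDFS path must match where
// storage-conf.xml was actually uploaded.
public class CassandraCacheSetup {
  static JobConf withCassandraConfig() throws Exception {
    JobConf job = new JobConf();
    // The '#storage-conf.xml' fragment is the symlink name each task sees
    DistributedCache.addCacheFile(
        new URI("/cassandra/storage-conf.xml#storage-conf.xml"), job);
    return job;
  }
}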
public static void runSortJob(String... args) throws Exception {
  Path input = new Path(args[0]);
  Path output = new Path(args[1]);

  JobConf job = new JobConf();
  job.setNumReduceTasks(2);

  job.setInputFormat(KeyValueTextInputFormat.class);
  job.setOutputFormat(TextOutputFormat.class);

  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(job, input);
  FileOutputFormat.setOutputPath(job, output);

  job.setJarByClass(SampleJob.class);

  output.getFileSystem(job).delete(output, true);

  JobClient jc = new JobClient(job);
  JobClient.setTaskOutputFilter(job, JobClient.TaskStatusFilter.ALL);
  RunningJob rj = jc.submitJob(job);
  try {
    if (!jc.monitorAndPrintJob(job, rj)) {
      System.out.println("Job Failed: " + rj.getFailureInfo());
      throw new IOException("Job failed!");
    }
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
  }
}
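Since runSortJob takes the input and output paths as varargs, a minimal driver could look like the following; both HDFS paths are placeholders.

// Hypothetical entry point; the paths are placeholders.
public static void main(String[] args) throws Exception {
  runSortJob("/tmp/sort-input", "/tmp/sort-output");
}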
protected void waitForOpenSlot(int maxProcessesOnNode, Reporter reporter)
    throws IOException, InterruptedException {
  while (true) {
    // sleep for a random length of time between 0 and 60 seconds
    long sleepTime = (long) (Math.random() * 1000 * 60);
    logger.info("sleeping for " + sleepTime);
    Thread.sleep(sleepTime);

    int numRunningMappers = getNumRunningMappers();
    logger.info("num running mappers: " + numRunningMappers);
    if (numRunningMappers < maxProcessesOnNode)
      return;

    // tell the framework we are still alive so the task is not timed out
    reporter.progress();
  }
}
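getNumRunningMappers() is not shown here. One possible sketch, assuming that counting the node's org.apache.hadoop.mapred.Child task JVMs is an acceptable proxy (strictly, it counts reducers too), is:

// A sketch only, not the original implementation: counts task Child JVMs on
// this node via the process table. The ps/grep pipeline is an assumption; a
// real deployment might query the TaskTracker or JMX instead. Requires
// java.io.BufferedReader, java.io.InputStreamReader, java.io.IOException.
private int getNumRunningMappers() throws IOException {
  Process ps = Runtime.getRuntime().exec(new String[] { "bash", "-c",
      "ps -eo args | grep org.apache.hadoop.mapred.Child"
          + " | grep -v grep | wc -l" });
  BufferedReader in =
      new BufferedReader(new InputStreamReader(ps.getInputStream()));
  try {
    return Integer.parseInt(in.readLine().trim());
  } finally {
    in.close();
  }
}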
public void close() {
  try {
    // release the cache
    DistributedCache.releaseCache(
        new URI("/cassandra/storage-conf.xml#storage-conf.xml"),
        this.jobconf);
  } catch (IOException e) {
    throw new RuntimeException(e);
  } catch (URISyntaxException e) {
    throw new RuntimeException(e);
  }

  // Sleep just in case the number of keys we send over is small
  try {
    Thread.sleep(3 * 1000);
  } catch (InterruptedException e) {
    throw new RuntimeException(e);
  }
  StorageService.instance.stopClient();
}
/**
 * Map method. Copies one file from the source file system to the destination.
 *
 * @param key src len
 * @param value FilePair (FileStatus src, Path dst)
 * @param out Log of failed copies
 * @param reporter
 */
public void map(LongWritable key, FilePair value,
    OutputCollector<WritableComparable<?>, Text> out, Reporter reporter)
    throws IOException {
  final FileStatus srcstat = value.input;
  final Path relativedst = new Path(value.output);
  try {
    copy(srcstat, relativedst, out, reporter);
  } catch (IOException e) {
    ++failcount;
    reporter.incrCounter(Counter.FAIL, 1);
    updateStatus(reporter);
    final String sfailure = "FAIL " + relativedst + " : " +
        StringUtils.stringifyException(e);
    out.collect(null, new Text(sfailure));
    LOG.info(sfailure);
    try {
      // Best-effort removal of the partially copied temp file, with retries
      for (int i = 0; i < 3; ++i) {
        try {
          final Path tmp = new Path(job.get(TMP_DIR_LABEL), relativedst);
          if (destFileSys.delete(tmp, true))
            break;
        } catch (Throwable ex) {
          // ignore, we are just cleaning up
          LOG.debug("Ignoring cleanup exception", ex);
        }
        // update status, so we don't get timed out
        updateStatus(reporter);
        Thread.sleep(3 * 1000);
      }
    } catch (InterruptedException inte) {
      throw (IOException) new IOException().initCause(inte);
    }
  } finally {
    updateStatus(reporter);
  }
}