@Override public void updateDemand() { demand = 0; if (isRunnable()) { // For reduces, make sure enough maps are done that reduces can launch if (taskType == TaskType.REDUCE && !job.scheduleReduces()) return; // Add up demand from each TaskInProgress; each TIP can either // - have no attempts running, in which case it demands 1 slot // - have N attempts running, in which case it demands N slots, and may // potentially demand one more slot if it needs to be speculated TaskInProgress[] tips = (taskType == TaskType.MAP ? job.getTasks(TaskType.MAP) : job.getTasks(TaskType.REDUCE)); boolean speculationEnabled = (taskType == TaskType.MAP ? job.hasSpeculativeMaps() : job.hasSpeculativeReduces()); long time = scheduler.getClock().getTime(); for (TaskInProgress tip : tips) { if (!tip.isComplete()) { if (tip.isRunning()) { // Count active tasks and any speculative task we want to launch demand += tip.getActiveTasks().size(); if (speculationEnabled && tip.hasSpeculativeTask(time, job.getStatus().mapProgress())) demand += 1; } else { // Need to launch 1 task demand += 1; } } } } }
@Override public void start() { // Register servlet with JobTracker's Jetty server if (taskTrackerManager instanceof JobTracker) { JobTracker jobTracker = (JobTracker) taskTrackerManager; HttpServer infoServer = jobTracker.infoServer; infoServer.setAttribute("booking", this); infoServer.addServlet("booking", "/booking", BookSchedulerServlet.class); } super.start(); }
@Override public Task assignTask(TaskTrackerStatus tts, long currentTime, Collection<JobInProgress> visited) throws IOException { if (isRunnable()) { visited.add(job); TaskTrackerManager ttm = scheduler.taskTrackerManager; ClusterStatus clusterStatus = ttm.getClusterStatus(); int numTaskTrackers = clusterStatus.getTaskTrackers(); if (taskType == TaskType.MAP) { LocalityLevel localityLevel = scheduler.getAllowedLocalityLevel(job, currentTime); scheduler.getEventLog().log("ALLOWED_LOC_LEVEL", job.getJobID(), localityLevel); // obtainNewMapTask needs to be passed 1 + the desired locality level return job.obtainNewMapTask( tts, numTaskTrackers, ttm.getNumberOfUniqueHosts(), localityLevel.toCacheLevelCap()); } else { return job.obtainNewReduceTask(tts, numTaskTrackers, ttm.getNumberOfUniqueHosts()); } } else { return null; } }
public void initialize() throws IOException, SAXException, AllocationConfigurationException, ParserConfigurationException { Configuration conf = scheduler.getConf(); this.poolNameProperty = conf.get("mapred.fairscheduler.poolnameproperty", "user.name"); this.allocFile = conf.get("mapred.fairscheduler.allocation.file"); if (allocFile == null) { LOG.warn( "No mapred.fairscheduler.allocation.file given in jobconf - " + "the fair scheduler will not use any queues."); } reloadAllocs(); lastSuccessfulReload = System.currentTimeMillis(); lastReloadAttempt = System.currentTimeMillis(); // Create the default pool so that it shows up in the web UI getPool(Pool.DEFAULT_POOL_NAME); }
/**
 * Invoked when a job leaves the running set. Delegates to the superclass,
 * then records the job's final status, keyed by its configured job name,
 * for later lookup.
 */
@Override
protected void jobNoLongerRunning(JobInProgress job) {
  super.jobNoLongerRunning(job);
  String jobName = job.getJobConf().getJobName();
  finishJobStatus.put(jobName, job.getStatus());
}
/**
 * A job is schedulable only when the scheduler has info for it, has
 * flagged it runnable, and the job itself is in the RUNNING state.
 */
private boolean isRunnable() {
  JobInfo info = scheduler.getJobInfo(job);
  if (info == null || !info.runnable) {
    return false;
  }
  return job.getStatus().getRunState() == JobStatus.RUNNING;
}
/**
 * Returns this job's scheduling weight for our task type, as computed by
 * the fair scheduler's weight policy.
 */
@Override
public double getWeight() {
  double weight = scheduler.getJobWeight(job, taskType);
  return weight;
}