@Override
protected Job handleJobResult(JobResult received, IbisIdentifier from) {
    String cluster = from.location().getParent().toString();

    /* DEBUG */
    System.err.println(from.location().toString() + " "
            + received.getStats().getRuntime());

    /* assumes jobs don't need to be replicated on the same cluster,
     * except on failure */
    Job doneJob = schedJobs.remove(received.getJobID());

    /* record this runtime with the worker's statistics */
    workers.get(cluster)
           .get(from.location().getLevel(0))
           .addJobStats(received.getStats().getRuntime());

    /* create the runtime category if it doesn't exist yet; use the upper
     * (rounded-up) duration, since we pay in discrete increments of the
     * priced time unit */
    doneJobs.put(doneJob.getJobID(), doneJob);

    /* nothing left queued for this host: release the worker */
    if (hosts.get(from.location().toString()).schedJobs.size() == 0)
        return sayGB(from);

    /* otherwise hand out the next queued job and start timing it master-side */
    Job nextJob = hosts.get(from.location().toString()).schedJobs.remove(0);
    nextJob.startTime = System.nanoTime();
    return nextJob;
}
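/*
 * Hedged sketch, not part of the original scheduler: the comment above about
 * the "upper duration ... discrete increments of priced time unit" suggests
 * that runtimes are rounded *up* to the billing granularity before being
 * accounted for (e.g. a 61-minute job on an hourly-priced resource is charged
 * as two hours). The helper below is a hypothetical illustration of that
 * rounding; it is not called anywhere in the real code.
 */
private static long roundUpToPricedTimeUnit(long runtime, long pricedTimeUnit) {
    // ceiling division: any started fraction of a time unit is charged in full
    // (assumes pricedTimeUnit > 0 and both values use the same time unit)
    return ((runtime + pricedTimeUnit - 1) / pricedTimeUnit) * pricedTimeUnit;
}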
@Override
protected Job handleJobRequest(IbisIdentifier from) {
    String cluster = from.location().getParent().toString();
    String node = from.location().getLevel(0);

    /* DEBUG */
    System.err.println("served first job request from node "
            + from.location().toString() + " in cluster " + cluster);

    workers.get(cluster).put(node,
            new WorkerStats(node, System.currentTimeMillis(), from));

    /* release unnecessary workers: nothing is queued for this host */
    if (hosts.get(from.location().toString()).schedJobs.size() == 0)
        return sayGB(from);

    Job nextJob = hosts.get(from.location().toString()).schedJobs.remove(0);

    /* Note: pending jobs are timed from the master side (hence including the
     * latency to the worker). This should be documented, and it also affects
     * the convergence speed of the runtime histogram in cases where the job
     * size is comparable to this latency. */
    nextJob.startTime = System.nanoTime();
    // sJobs.put(nextJob.jobID, nextJob);

    /* it might be the case that even here we should return sayGB() */
    return nextJob;
}
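/*
 * Hedged sketch, not part of the original scheduler: the note above points out
 * that jobs are timed from the master side, so the measured interval includes
 * the dispatch/return latency on top of the worker-reported runtime. The
 * hypothetical helper below shows one way that overhead could be estimated by
 * comparing the master-side interval (startTime taken at dispatch, the end
 * timestamp taken in handleJobResult) against the worker-reported runtime from
 * JobResult.getStats().getRuntime(), assuming all values are in nanoseconds.
 */
private static long estimateDispatchOverhead(long masterStartNanos,
                                             long masterEndNanos,
                                             long workerRuntimeNanos) {
    // master-side elapsed time = send latency + worker runtime + return latency
    long masterElapsed = masterEndNanos - masterStartNanos;
    // clamp at zero in case the worker reports runtime at a coarser resolution
    return Math.max(0L, masterElapsed - workerRuntimeNanos);
}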