/* Returns the entry chosen by findMin() as the eviction victim; the victim is removed from the entry index only when it is being uncached. */ synchronized CacheEntry findVictim(boolean forUncache) { Item victim = findMin(); if (forUncache) { entries.remove(victim); } return victim.entry; }
/** * This method MUST be called each time a job is merged at a join node * * @param job the job that has been merged */ public void removeForkedJob(Job job) { updateJobNumber(job); // Updates job number data structure only jobs--; jobsPerClass[job.getJobClass().getId()]--; lastModifyNumberPerClass[job.getJobClass().getId()] = lastModifyNumber = NetSystem.getTime(); }
@Override public void onApplicationStop() { List<Class> jobs = Play.classloader.getAssignableClasses(Job.class); for (final Class clazz : jobs) { // @OnApplicationStop if (clazz.isAnnotationPresent(OnApplicationStop.class)) { try { Job<?> job = ((Job<?>) clazz.newInstance()); scheduledJobs.add(job); job.run(); if (job.wasError) { if (job.lastException != null) { throw job.lastException; } throw new RuntimeException("@OnApplicationStop Job has failed"); } } catch (InstantiationException e) { throw new UnexpectedException("Job could not be instantiated", e); } catch (IllegalAccessException e) { throw new UnexpectedException("Job could not be instantiated", e); } catch (Throwable ex) { if (ex instanceof PlayException) { throw (PlayException) ex; } throw new UnexpectedException(ex); } } } executor.shutdownNow(); executor.getQueue().clear(); }
private Job getJobFromUrl(String url) { try { HttpGet httpget = new HttpGet(url); HttpClient client = new DefaultHttpClient(); HttpResponse response = client.execute(httpget); InputStream in = response.getEntity().getContent(); BufferedReader reader = new BufferedReader(new InputStreamReader(in)); StringBuilder str = new StringBuilder(); String line; while ((line = reader.readLine()) != null) { str.append(line); } reader.close(); String xml = str.toString(); String color = fromXmlToColor(xml); Status status = fromColorToStatus(color); Job job = new Job(status); job.setColor(color); job.setUrl(url); return job; } catch (Exception ex) { System.err.println("Could not get URL " + url + ": " + ex); return null; } }
@Override protected void handleLostConnections() { String cluster; String node; for (SendPortIdentifier lost : masterRP.lostConnections()) { System.out.println("lost connection with " + lost.ibisIdentifier().location().toString()); cluster = lost.ibisIdentifier().location().getParent().toString(); node = lost.ibisIdentifier().location().getLevel(0); if (!workers.get(cluster).get(node).isFinished()) { for (Job j : schedJobs.values()) if (j.getNode().equals(node)) { schedJobs.remove(j.getJobID()); /*begin hpdc tests*/ if (j.getNode().contains("slow")) { j.args[0] = Long.toString(3 * Long.parseLong(j.args[0]) / 2); } /*end hpdc tests*/ bot.tasks.add(j); workers.get(cluster).get(j.getNode()).workerFinished(System.currentTimeMillis()); System.err.println( "Node " + node + " in cluster " + cluster + " failed during execution of job " + j.jobID); break; } } } }
@Test(timeout = 60000) public void testMapReduceWithCustomKeyValueSource() throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3); final HazelcastInstance h1 = nodeFactory.newHazelcastInstance(); final HazelcastInstance h2 = nodeFactory.newHazelcastInstance(); final HazelcastInstance h3 = nodeFactory.newHazelcastInstance(); assertClusterSizeEventually(3, h1); assertClusterSizeEventually(3, h2); assertClusterSizeEventually(3, h3); JobTracker jobTracker = h1.getJobTracker("default"); Job<String, Integer> job = jobTracker.newJob(new CustomKeyValueSource()); ICompletableFuture<Map<String, Integer>> completableFuture = job.chunkSize(10) .mapper(new CustomMapper()) .combiner(new CustomCombinerFactory()) .reducer(new CustomReducerFactory()) .submit(); Map<String, Integer> result = completableFuture.get(); assertEquals(1000, result.size()); List<Map.Entry<String, Integer>> entrySet = new ArrayList<Map.Entry<String, Integer>>(result.entrySet()); Collections.sort(entrySet, ENTRYSET_COMPARATOR); int count = 0; for (Map.Entry<String, Integer> entry : entrySet) { assertEquals(String.valueOf(count), entry.getKey()); assertEquals(count++ * 6, (int) entry.getValue()); } }
@Override protected Job handleJobResult(JobResult received, IbisIdentifier from) { String cluster = from.location().getParent().toString(); System.err.println(from.location().toString() + " " + received.getStats().getRuntime()); /* assumes jobs don't need to be replicated on the same cluster, except on failure */ Job doneJob = schedJobs.remove(received.getJobID()); workers .get(cluster) .get(from.location().getLevel(0)) .addJobStats(received.getStats().getRuntime()); /*create category if it doesn't exist yet * upper duration since we pay in discrete increments of priced time unit*/ doneJobs.put(doneJob.getJobID(), doneJob); if (hosts.get(from.location().toString()).schedJobs.size() == 0) return sayGB(from); Job nextJob = hosts.get(from.location().toString()).schedJobs.remove(0); nextJob.startTime = System.nanoTime(); return nextJob; }
@Override protected Job handleJobRequest(IbisIdentifier from) { String cluster = from.location().getParent().toString(); String node = from.location().getLevel(0); /*DEBUG*/ System.err.println( "served first job request from node " + from.location().toString() + " in cluster " + cluster); workers.get(cluster).put(node, new WorkerStats(node, System.currentTimeMillis(), from)); /*release unnecessary workers*/ if (hosts.get(from.location().toString()).schedJobs.size() == 0) return sayGB(from); Job nextJob = hosts.get(from.location().toString()).schedJobs.remove(0); /* pending jobs are timed from the master side, so the measurement includes the latency to the worker; this also slows the convergence of the histogram when job sizes are comparable to that latency. */ nextJob.startTime = System.nanoTime(); // sJobs.put(nextJob.jobID, nextJob); /* might be the case that even here I return sayGB() */ return nextJob; }
@Override public void onUpdate() { super.onUpdate(); if (!SimukraftReloaded.isDayTime()) { theStage = Stage.IDLE; } super.onUpdateGoingToWork(theFolk); if (System.currentTimeMillis() - timeSinceLastRun < runDelay) { return; } timeSinceLastRun = System.currentTimeMillis(); // ////////////////IDLE if (theStage == Stage.IDLE && SimukraftReloaded.isDayTime()) { theStage = Stage.SCANFORTREE; } else if (theStage == Stage.ARRIVEDATMILL) { theStage = Stage.SCANFORTREE; } else if (theStage == Stage.SCANFORTREE) { stageScanForTree(); } else if (theStage == Stage.GOTOTREE) { pickUpSaplings(); stageGotoTree(); } else if (theStage == Stage.CHOPPINGTREE) { stageChoppingTree(); pickUpSaplings(); } else if (theStage == Stage.RETURNWOOD) { stageReturnWood(); pickUpSaplings(); } }
public void submitJob(boolean testing) { _configuration.setWorkspace(_parameters.get(_jobfolder)); _jobpath = _configuration.getWorkspace() + _parameters.get(_jobname); _configuration.setWorkspace(_jobpath); String[] submission_parameters = parametersCommand(); new File(_jobpath).mkdir(); /* create the job folder */ if (_job.getMainFrame().isVisible()) { /* only switch to the waiting panel when the frame is visible, e.g. not while testing */ _job.switchToWaitingPanel(); // _job.getMainFrame().setEnabled(false); } /* execute shellfile */ Submission sub = new Submission(submission_parameters, _job, Submission.Jobtype.SGA, testing); _job.setSubmissionThread(sub); Thread t = new Thread(sub); t.start(); if (testing) { try { t.join(); } catch (InterruptedException e) { Util.printErrors(e); } } }
@Test(timeout = 30000, expected = CancellationException.class) public void testInProcessCancellation() throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3); final HazelcastInstance h1 = nodeFactory.newHazelcastInstance(); final HazelcastInstance h2 = nodeFactory.newHazelcastInstance(); final HazelcastInstance h3 = nodeFactory.newHazelcastInstance(); assertClusterSizeEventually(3, h1); IMap<Integer, Integer> m1 = h1.getMap(MAP_NAME); for (int i = 0; i < 100; i++) { m1.put(i, i); } JobTracker tracker = h1.getJobTracker("default"); Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1)); ICompletableFuture<Map<String, List<Integer>>> future = job.mapper(new TimeConsumingMapper()).submit(); future.cancel(true); try { Map<String, List<Integer>> result = future.get(); fail(); } catch (Exception e) { e.printStackTrace(); throw e; } }
protected boolean isGraphBuilt(final boolean allowBackgroundContinuation) throws InterruptedException { if (!isGraphBuilt()) { s_logger.info("Building dependency graph"); do { final Job job = createConstructionJob(); _activeJobCount.incrementAndGet(); synchronized (_activeJobs) { if (!_cancelled) { _activeJobs.add(job); } else { throw new CancellationException(); } } job.run(); synchronized (_activeJobs) { if (!_runQueue.isEmpty()) { // more jobs in the queue so keep going continue; } } if (allowBackgroundContinuation) { // Nothing in the queue for us so take a nap. There are background threads running and // maybe items on the deferred queue. s_logger.info("Waiting for background threads"); Thread.sleep(100); } else { return false; } } while (!isGraphBuilt()); } return true; }
@Override public Job getJob(String id) { synchronized (this.lock) { for (Job j : this.nonSubmitted) if (j.getId().equals(id)) return j; } return null; }
@RequestMapping( value = "/jobs/{jobId}/finalList", method = {RequestMethod.GET}) public String visitFinalListPage( @PathVariable String jobId, HttpServletRequest request, ModelMap model) { User user = (User) request.getSession().getAttribute("user"); if (user == null || !user.getRole().equals("manager")) { model.addAttribute("errorMsg", "User has no permission"); return "login"; } try { Job j = JobsDao.instance.getById(jobId); if (!j.getStatus().equals(RecruitmentStatus.SENT_INVITATIONS)) { return "redirect:/jobs"; } List<Application> validApplications = new ArrayList<Application>(); List<Application> applications = ApplicationsDao.instance.getByJob(ORSKEY, user.getShortKey(), jobId); for (Application a : applications) { if (a.getStatus().equals(ApplicationStatus.ACCEPTED_INVITATION)) { validApplications.add(a); } } model.addAttribute("applications", validApplications); model.addAttribute("job", j); return "finalList"; } catch (Exception e) { e.printStackTrace(); model.addAttribute("errorMsg", e.getMessage()); return "error"; } }
/** * Retrieves the value associated with the provided iteration of the given optimizing job. * * @param iteration The job iteration for which to retrieve the value. * @return The value associated with the provided iteration of the given optimizing job. * @throws SLAMDException If a problem occurs while trying to determine the value for the given * optimizing job iteration. */ @Override() public double getIterationOptimizationValue(Job iteration) throws SLAMDException { StatTracker[] trackers = iteration.getStatTrackers(optimizeStat); if ((trackers == null) || (trackers.length == 0)) { throw new SLAMDException( "The provided optimizing job iteration did " + "not include any values for the statistic to " + "optimize, \"" + optimizeStat + "\"."); } StatTracker tracker = trackers[0].newInstance(); tracker.aggregate(trackers); double summaryValue = tracker.getSummaryValue(); iteration.slamdServer.logMessage( Constants.LOG_LEVEL_JOB_DEBUG, "SingleStatisticWithReplicationLatency" + "OptimizationAlgorithm." + "getIterationOptimizationValue(" + iteration.getJobID() + ") returning " + summaryValue); return summaryValue; }
@Test(timeout = 30000) public void testPartitionPostpone() throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3); final HazelcastInstance h1 = nodeFactory.newHazelcastInstance(); final HazelcastInstance h2 = nodeFactory.newHazelcastInstance(); final HazelcastInstance h3 = nodeFactory.newHazelcastInstance(); assertClusterSizeEventually(3, h1); IMap<Integer, Integer> m1 = h1.getMap(MAP_NAME); for (int i = 0; i < 100; i++) { m1.put(i, i); } JobTracker tracker = h1.getJobTracker("default"); KeyValueSource<Integer, Integer> kvs = KeyValueSource.fromMap(m1); KeyValueSource<Integer, Integer> wrapper = new MapKeyValueSourceAdapter<Integer, Integer>(kvs); Job<Integer, Integer> job = tracker.newJob(wrapper); ICompletableFuture<Map<String, List<Integer>>> future = job.mapper(new TestMapper()).submit(); Map<String, List<Integer>> result = future.get(); assertEquals(100, result.size()); for (List<Integer> value : result.values()) { assertEquals(1, value.size()); } }
@RequestMapping( value = "/jobs/{jobId}/shortlist", method = {RequestMethod.GET}) public String visitShortlistPage( @PathVariable String jobId, HttpServletRequest request, ModelMap model) { User user = (User) request.getSession().getAttribute("user"); if (user == null || !user.getRole().equals("manager")) { model.addAttribute("errorMsg", "User has no permission"); return "login"; } try { Job j = JobsDao.instance.getById(jobId); if (j.getStatus().equals(RecruitmentStatus.CREATED)) { return "redirect:/jobs"; } List<DetailedApplication> list = new ArrayList<DetailedApplication>(); List<Application> applications = ApplicationsDao.instance.getByJob(ORSKEY, user.getShortKey(), jobId); for (Application a : applications) { DetailedApplication da = new DetailedApplication(a); da.setReviews( (ArrayList<Review>) ReviewsDao.instance.getByApplication(ORSKEY, user.getShortKey(), a.get_appId())); list.add(da); } model.addAttribute("applications", list); model.addAttribute("job", j); return "shortlist"; } catch (Exception e) { e.printStackTrace(); model.addAttribute("errorMsg", e.getMessage()); return "error"; } }
@Test(timeout = 30000) public void testNullFromObjectCombiner() throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3); HazelcastInstance h1 = nodeFactory.newHazelcastInstance(); HazelcastInstance h2 = nodeFactory.newHazelcastInstance(); HazelcastInstance h3 = nodeFactory.newHazelcastInstance(); IMap<Integer, Integer> m1 = h1.getMap(MAP_NAME); for (int i = 0; i < 100; i++) { m1.put(i, i); } JobTracker jobTracker = h1.getJobTracker("default"); Job<Integer, Integer> job = jobTracker.newJob(KeyValueSource.fromMap(m1)); JobCompletableFuture<Map<String, BigInteger>> future = job.chunkSize(1) .mapper(new GroupingTestMapper()) .combiner(new ObjectCombinerFactory()) .reducer(new ObjectReducerFactory()) .submit(); int[] expectedResults = new int[4]; for (int i = 0; i < 100; i++) { int index = i % 4; expectedResults[index] += i; } Map<String, BigInteger> map = future.get(); for (int i = 0; i < 4; i++) { assertEquals(BigInteger.valueOf(expectedResults[i]), map.get(String.valueOf(i))); } }
@Test(timeout = 30000) public void testMapperReducer() throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3); HazelcastInstance h1 = nodeFactory.newHazelcastInstance(); HazelcastInstance h2 = nodeFactory.newHazelcastInstance(); HazelcastInstance h3 = nodeFactory.newHazelcastInstance(); IMap<Integer, Integer> m1 = h1.getMap(MAP_NAME); for (int i = 0; i < 100; i++) { m1.put(i, i); } JobTracker tracker = h1.getJobTracker("default"); Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1)); ICompletableFuture<Map<String, Integer>> future = job.mapper(new GroupingTestMapper()).reducer(new TestReducerFactory()).submit(); Map<String, Integer> result = future.get(); // Precalculate results int[] expectedResults = new int[4]; for (int i = 0; i < 100; i++) { int index = i % 4; expectedResults[index] += i; } for (int i = 0; i < 4; i++) { assertEquals(expectedResults[i], (int) result.get(String.valueOf(i))); } }
@Test(timeout = 30000) public void testMapperReducerCollator() throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3); HazelcastInstance h1 = nodeFactory.newHazelcastInstance(); HazelcastInstance h2 = nodeFactory.newHazelcastInstance(); HazelcastInstance h3 = nodeFactory.newHazelcastInstance(); IMap<Integer, Integer> m1 = h1.getMap(MAP_NAME); for (int i = 0; i < 100; i++) { m1.put(i, i); } JobTracker tracker = h1.getJobTracker("default"); Job<Integer, Integer> job = tracker.newJob(KeyValueSource.fromMap(m1)); ICompletableFuture<Integer> future = job.mapper(new GroupingTestMapper()) .reducer(new TestReducerFactory()) .submit(new TestCollator()); int result = future.get(); // Precalculate result int expectedResult = 0; for (int i = 0; i < 100; i++) { expectedResult += i; } assertEquals(expectedResult, result); }
public Job Reclaim(int id) { Job newjob = new Job(id, 1); int id_out; if (FreeSlots > 0) { // put X into a free page slot return newjob; } else { if (A1in.getLength() > Kin) { Job A1inTail = A1in.DequeueFront(); id_out = A1inTail.JobID(); // delete A1inTail; ??? paged out of A1in buffer (but may still need to keep the job) Job headerJob = new Job(id_out, 1); A1out.EnqueueJob(0, headerJob); if (A1out.getLength() > Kout) { Job removedHeader = A1out.DequeueFront(); removedHeader = null; } // put X into the reclaimed page slot return newjob; } else { Job AmTail = Am.DequeueFront(); AmTail = null; // put X into the reclaimed page slot return newjob; } } }
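/*
 * A hedged sketch, not part of the original source: the access-side counterpart to Reclaim above,
 * following the standard 2Q policy. It reuses the queue API visible in Reclaim (EnqueueJob, getLength),
 * but the lookup helpers findInAm/findInA1out/findInA1in and moveToFront are hypothetical names that
 * may not exist in this codebase.
 */
public Job Access(int id) {
    Job hit = findInAm(id);
    if (hit != null) {
        // X is in Am: it is hot, so move it to the front of Am
        Am.moveToFront(hit);
        return hit;
    }
    if (findInA1out(id) != null) {
        // X was recently pushed out of A1in: reclaim a slot and promote it to Am
        Job promoted = Reclaim(id);
        Am.EnqueueJob(0, promoted);
        return promoted;
    }
    hit = findInA1in(id);
    if (hit != null) {
        // X is still in A1in: 2Q leaves it in place until it ages out
        return hit;
    }
    // First reference: reclaim a slot and queue X on A1in
    Job fresh = Reclaim(id);
    A1in.EnqueueJob(0, fresh);
    return fresh;
}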
/** * This method MUST be called each time a job is forked at a fork node * * @param job the newly forked job */ public void addForkedJob(Job job) { updateJobNumber(job); // Updates job number data structure only lastModifyNumberPerClass[job.getJobClass().getId()] = lastModifyNumber = NetSystem.getTime(); jobs++; jobsPerClass[job.getJobClass().getId()]++; }
@Test(timeout = 30000) public void testDataSerializableIntermediateObject() throws Exception { TestHazelcastInstanceFactory nodeFactory = createHazelcastInstanceFactory(3); HazelcastInstance h1 = nodeFactory.newHazelcastInstance(); HazelcastInstance h2 = nodeFactory.newHazelcastInstance(); HazelcastInstance h3 = nodeFactory.newHazelcastInstance(); IMap<Integer, Integer> m1 = h1.getMap(MAP_NAME); for (int i = 0; i < 100; i++) { m1.put(i, i); } JobTracker jobTracker = h1.getJobTracker("default"); Job<Integer, Integer> job = jobTracker.newJob(KeyValueSource.fromMap(m1)); ICompletableFuture<Integer> future = job.mapper(new TestMapper()) .combiner(new DataSerializableIntermediateCombinerFactory()) .reducer(new DataSerializableIntermediateReducerFactory()) .submit(new DataSerializableIntermediateCollator()); // Precalculate result int expectedResult = 0; for (int i = 0; i < 100; i++) { expectedResult += i; } expectedResult = (int) ((double) expectedResult / 100); assertEquals(expectedResult, (int) future.get()); }
/** * This method MUST be called each time a new job is added to the network * * @param job the newly created job */ public void addJob(Job job) { job.resetSystemEnteringTime(); updateJobNumber(job); // Updates job number data structures lastModifyNumberPerClass[job.getJobClass().getId()] = lastModifyNumber = NetSystem.getTime(); jobs++; jobsPerClass[job.getJobClass().getId()]++; }
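/*
 * A hedged sketch, not part of the original source: how the bookkeeping methods above
 * (addJob, addForkedJob, removeForkedJob) would typically be driven. The surrounding names
 * (the jobsList field, the onExternalArrival/onForkNodeFired/onJoinNodeFired handlers and the
 * Job constructor used here) are illustrative assumptions only.
 */
void onExternalArrival(Job newJob) {
    jobsList.addJob(newJob); // MUST be called when a new job enters the network
}

void onForkNodeFired(Job parentJob, int branches) {
    for (int i = 0; i < branches; i++) {
        Job forked = new Job(parentJob.getJobClass()); // one child job per output branch
        jobsList.addForkedJob(forked); // MUST be called for every forked job
        // ... route 'forked' to branch i ...
    }
}

void onJoinNodeFired(Job mergedJob) {
    jobsList.removeForkedJob(mergedJob); // MUST be called when the forked job is merged at the join
}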
@Override public boolean isFinished(String res, String rep) { synchronized (this.lock) { for (Job j : this.finished) if (j.getResource().equals(res) && j.getRepresentationTarget().equals(rep)) return true; } return false; }
// =================================================== // ObjHtmlPanel.Listener public void linkSelected(java.net.URL href, String target) { String url = href.toExternalForm(); int slash = url.lastIndexOf('/'); if (slash > 0) url = url.substring(slash + 1); Job t = actionMap.get(url); fapp.guiRun().run(this, new Job(t.getPermissions(), t.getCBRunnable())); }
@Test public void testExtractDataUris() throws Exception { expect(bigquery.create(EXTRACT_JOB_INFO)).andReturn(EXTRACT_JOB_INFO); replay(bigquery); Job job = table.extract("CSV", ImmutableList.of("URI")); assertSame(bigquery, job.bigquery()); assertEquals(EXTRACT_JOB_INFO, job.info()); }
@Test public void testCopyFromId() throws Exception { expect(bigquery.create(COPY_JOB_INFO)).andReturn(COPY_JOB_INFO); replay(bigquery); Job job = table.copy(TABLE_ID2); assertSame(bigquery, job.bigquery()); assertEquals(COPY_JOB_INFO, job.info()); }
@Test public void testLoadDataUris() throws Exception { expect(bigquery.create(LOAD_JOB_INFO)).andReturn(LOAD_JOB_INFO); replay(bigquery); Job job = table.load(FormatOptions.json(), ImmutableList.of("URI")); assertSame(bigquery, job.bigquery()); assertEquals(LOAD_JOB_INFO, job.info()); }
/** * Submit a list of jobs to the Cook scheduler. It will <br> * -- firstly associate each job with the provided {@link JobListener}<br> * -- secondly submit these jobs to the Cook scheduler and track them until they complete. * * @param jobs The list of jobs to submit. * @param listener specifies an instance of {@link JobListener} that listens for all job status updates. * @throws JobClientException if the underlying submission fails */ public void submit(List<Job> jobs, JobListener listener) throws JobClientException { // It is ok to change the listeners map even if the actual submission fails because it won't // update the internal status map {@code _activeUUIDTOJob}. for (Job job : jobs) { _activeUUIDToListener.put(job.getUUID(), listener); } submit(jobs); }
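/*
 * A hedged usage sketch, not part of the original source: submitting jobs with a status listener
 * via the submit(List, JobListener) method above. The Job.Builder setters and the JobListener
 * callback name used here are assumptions and may differ in the actual client API; 'client'
 * stands for an already-configured JobClient instance.
 */
void submitWithListener(JobClient client) throws JobClientException {
    Job job = new Job.Builder()
            .setUUID(UUID.randomUUID())
            .setCommand("echo hello")
            .build();
    List<Job> jobs = Collections.singletonList(job);
    JobListener listener = new JobListener() {
        @Override
        public void onStatusUpdate(Job updated) {
            // react to each status change pushed for the submitted job
            System.out.println(updated.getUUID() + " -> " + updated.getStatus());
        }
    };
    client.submit(jobs, listener);
}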