/**
 * Unschedules the given job by removing the trigger that was registered for its
 * execution id.
 *
 * @param job the job to unschedule
 * @throws JobSchedulerException thrown if an exception occurs during job unscheduling
 */
public void unschedule(final org.easybatch.core.job.Job job) throws JobSchedulerException {
    LOGGER.log(Level.INFO, "Unscheduling job {0} ", job);
    // The trigger was registered under TRIGGER_NAME_PREFIX + execution id at schedule time.
    final TriggerKey triggerKey = TriggerKey.triggerKey(TRIGGER_NAME_PREFIX + job.getExecutionId());
    try {
        scheduler.unscheduleJob(triggerKey);
    } catch (SchedulerException e) {
        throw new JobSchedulerException("Unable to unschedule job " + job, e);
    }
}
/**
 * Removes a scheduled job: first pauses its trigger, then unschedules the trigger,
 * and finally deletes the job definition itself.
 *
 * @param jobName name of the job to delete
 * @param jobGroupName group the job belongs to
 * @param triggerName name of the trigger to remove
 * @param triggerGroupName group the trigger belongs to
 * @throws SchedulerException if any of the scheduler operations fails
 */
public void removeJob(
    String jobName, String jobGroupName, String triggerName, String triggerGroupName)
    throws SchedulerException {
  Scheduler sched = sf.getScheduler();
  sched.pauseTrigger(triggerName, triggerGroupName); // pause the trigger so it stops firing
  sched.unscheduleJob(triggerName, triggerGroupName); // remove the trigger
  sched.deleteJob(jobName, jobGroupName); // delete the job itself
}
/**
 * Unschedules the trigger identified by the given key, if it exists.
 *
 * @param triggerKey key of the trigger to remove
 * @return {@code true} if the trigger existed and was successfully unscheduled,
 *     {@code false} if it did not exist or the scheduler threw an exception
 */
public boolean unscheduleJob(TriggerKey triggerKey) {
  boolean flag = false;
  try {
    // Only attempt removal when the trigger is actually registered.
    if (scheduler.checkExists(triggerKey)) {
      flag = scheduler.unscheduleJob(triggerKey);
    }
  } catch (SchedulerException e) {
    // Log the full exception (not just the message) so the stack trace and
    // failing trigger key are preserved for diagnosis.
    logger.error("Unable to unschedule trigger " + triggerKey, e);
  }
  return flag;
}
/**
 * Unschedules the trigger associated with the given job key, records the final
 * trigger/firing information into the job's stored {@code JobState}, and re-adds
 * the (now trigger-less) job detail so its state survives in the scheduler store.
 *
 * @param scheduler the scheduler holding the job
 * @param jobKey key of the job to unschedule
 * @throws Exception if any scheduler operation fails
 */
protected void unschedule(Scheduler scheduler, JobKey jobKey) throws Exception {
  JobDetail jobDetail = scheduler.getJobDetail(jobKey);
  // Convention here: the trigger shares the job's name and group (1:1 mapping).
  TriggerKey triggerKey = new TriggerKey(jobKey.getName(), jobKey.getGroup());
  if (jobDetail == null) {
    return; // no such job stored — nothing to unschedule
  }
  unregisterMessageListener(scheduler, jobKey);
  JobDataMap jobDataMap = jobDetail.getJobDataMap();
  JobState jobState = getJobState(jobDataMap);
  Trigger trigger = scheduler.getTrigger(triggerKey);
  if (trigger == null) {
    return; // job exists but is not currently scheduled
  }
  // Snapshot the trigger's firing history into the job state before removing it.
  jobState.setTriggerDate(SchedulerEngine.END_TIME, new Date());
  // NOTE(review): FINAL_FIRE_TIME and PREVIOUS_FIRE_TIME are both set from
  // getPreviousFireTime() — the last actual fire is treated as the final fire.
  jobState.setTriggerDate(SchedulerEngine.FINAL_FIRE_TIME, trigger.getPreviousFireTime());
  jobState.setTriggerDate(SchedulerEngine.NEXT_FIRE_TIME, null);
  jobState.setTriggerDate(SchedulerEngine.PREVIOUS_FIRE_TIME, trigger.getPreviousFireTime());
  jobState.setTriggerDate(SchedulerEngine.START_TIME, trigger.getStartTime());
  jobState.setTriggerState(TriggerState.UNSCHEDULED);
  jobState.clearExceptions();
  jobDataMap.put(SchedulerEngine.JOB_STATE, JobStateSerializeUtil.serialize(jobState));
  scheduler.unscheduleJob(triggerKey);
  // Re-store the job (replace=true) so the updated JOB_STATE is persisted.
  scheduler.addJob(jobDetail, true);
}
/**
 * Cancels a scheduled job (Quartz 1.x API). Pauses the job, rebuilds its trigger
 * with the same repeat settings but a start time pushed out by one repeat
 * interval, unschedules the old trigger, and then either reschedules (extract
 * jobs) or deletes-and-reschedules (XSLT jobs) depending on the trigger group.
 * Finally forwards the request to the given redirection page.
 *
 * @param request current HTTP request; page messages are set as an attribute on it
 * @param response current HTTP response, used for the forward
 * @param theJobName name of the job to cancel
 * @param theJobGroupName group of the job to cancel
 * @param triggerName name of the job's trigger
 * @param triggerGroupName group of the job's trigger; selects the cancel strategy
 * @param redirection page (under /pages/) to forward to afterwards
 * @param model Spring MVC model (unused here)
 * @return always {@code null} — the response is produced by the forward
 * @throws SchedulerException if any scheduler operation fails
 */
@RequestMapping("/cancelScheduledJob")
public String cancelScheduledJob(
    HttpServletRequest request,
    HttpServletResponse response,
    @RequestParam("theJobName") String theJobName,
    @RequestParam("theJobGroupName") String theJobGroupName,
    @RequestParam("theTriggerName") String triggerName,
    @RequestParam("theTriggerGroupName") String triggerGroupName,
    @RequestParam("redirection") String redirection,
    ModelMap model)
    throws SchedulerException {
  // NOTE(review): return value is discarded — presumably just probes that the job exists.
  scheduler.getJobDetail(theJobName, theJobGroupName);
  logger.debug("About to pause the job-->" + theJobName + "Job Group Name -->" + theJobGroupName);
  SimpleTrigger oldTrigger = (SimpleTrigger) scheduler.getTrigger(triggerName, triggerGroupName);
  if (oldTrigger != null) {
    // Push the replacement trigger's start out by one repeat interval.
    Date startTime = new Date(oldTrigger.getStartTime().getTime() + oldTrigger.getRepeatInterval());
    if (triggerGroupName.equals(ExtractController.TRIGGER_GROUP_NAME)) {
      // Extract jobs may be mid-run; interrupt the running instance first.
      interruptQuartzJob(scheduler, theJobName, theJobGroupName);
    }
    scheduler.pauseJob(theJobName, theJobGroupName);
    // Clone the old trigger's settings onto a fresh trigger with the delayed start.
    SimpleTrigger newTrigger = new SimpleTrigger(triggerName, triggerGroupName);
    newTrigger.setJobName(theJobName);
    newTrigger.setJobGroup(theJobGroupName);
    newTrigger.setJobDataMap(oldTrigger.getJobDataMap());
    newTrigger.setVolatility(false); // persist across scheduler restarts
    newTrigger.setRepeatCount(oldTrigger.getRepeatCount());
    newTrigger.setRepeatInterval(oldTrigger.getRepeatInterval());
    newTrigger.setMisfireInstruction(
        SimpleTrigger.MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_REMAINING_COUNT);
    newTrigger.setStartTime(startTime);
    newTrigger.setRepeatInterval(oldTrigger.getRepeatInterval());
    // Remove the old trigger; extract-data jobs do not need to be rescheduled here
    // (the branches below decide what happens next per trigger group).
    scheduler.unscheduleJob(triggerName, triggerGroupName);
    ArrayList<String> pageMessages = new ArrayList<String>();
    if (triggerGroupName.equals(ExtractController.TRIGGER_GROUP_NAME)) {
      scheduler.rescheduleJob(triggerName, triggerGroupName, newTrigger);
      pageMessages.add("The Job " + theJobName + " has been cancelled");
    } else if (triggerGroupName.equals(XsltTriggerService.TRIGGER_GROUP_NAME)) {
      // XSLT jobs: rebuild the job detail from scratch and schedule it with the new trigger.
      JobDetailBean jobDetailBean = new JobDetailBean();
      jobDetailBean.setGroup(XsltTriggerService.TRIGGER_GROUP_NAME);
      jobDetailBean.setName(newTrigger.getName());
      jobDetailBean.setJobClass(org.akaza.openclinica.job.XsltStatefulJob.class);
      jobDetailBean.setJobDataMap(newTrigger.getJobDataMap());
      jobDetailBean.setDurability(true); // need durability?
      jobDetailBean.setVolatility(false);
      scheduler.deleteJob(theJobName, theJobGroupName);
      scheduler.scheduleJob(jobDetailBean, newTrigger);
      pageMessages.add("The Job " + theJobName + " has been rescheduled");
    }
    request.setAttribute("pageMessages", pageMessages);
    logger.debug("jobDetails>" + scheduler.getJobDetail(theJobName, theJobGroupName));
  }
  sdvUtil.forwardRequestFromController(request, response, "/pages/" + redirection);
  return null;
}
@Test public void testBasicStorageFunctions() throws Exception { Scheduler sched = createScheduler("testBasicStorageFunctions", 2); // test basic storage functions of scheduler... JobDetail job = newJob().ofType(TestJob.class).withIdentity("j1").storeDurably().build(); assertFalse("Unexpected existence of job named 'j1'.", sched.checkExists(jobKey("j1"))); sched.addJob(job, false); assertTrue( "Expected existence of job named 'j1' but checkExists return false.", sched.checkExists(jobKey("j1"))); job = sched.getJobDetail(jobKey("j1")); assertNotNull("Stored job not found!", job); sched.deleteJob(jobKey("j1")); Trigger trigger = newTrigger() .withIdentity("t1") .forJob(job) .startNow() .withSchedule(simpleSchedule().repeatForever().withIntervalInSeconds(5)) .build(); assertFalse("Unexpected existence of trigger named 't1'.", sched.checkExists(triggerKey("t1"))); sched.scheduleJob(job, trigger); assertTrue( "Expected existence of trigger named 't1' but checkExists return false.", sched.checkExists(triggerKey("t1"))); job = sched.getJobDetail(jobKey("j1")); assertNotNull("Stored job not found!", job); trigger = sched.getTrigger(triggerKey("t1")); assertNotNull("Stored trigger not found!", trigger); job = newJob().ofType(TestJob.class).withIdentity("j2", "g1").build(); trigger = newTrigger() .withIdentity("t2", "g1") .forJob(job) .startNow() .withSchedule(simpleSchedule().repeatForever().withIntervalInSeconds(5)) .build(); sched.scheduleJob(job, trigger); job = newJob().ofType(TestJob.class).withIdentity("j3", "g1").build(); trigger = newTrigger() .withIdentity("t3", "g1") .forJob(job) .startNow() .withSchedule(simpleSchedule().repeatForever().withIntervalInSeconds(5)) .build(); sched.scheduleJob(job, trigger); List<String> jobGroups = sched.getJobGroupNames(); List<String> triggerGroups = sched.getTriggerGroupNames(); assertTrue("Job group list size expected to be = 2 ", jobGroups.size() == 2); assertTrue("Trigger group list size expected to be = 2 ", 
triggerGroups.size() == 2); Set<JobKey> jobKeys = sched.getJobKeys(GroupMatcher.jobGroupEquals(JobKey.DEFAULT_GROUP)); Set<TriggerKey> triggerKeys = sched.getTriggerKeys(GroupMatcher.triggerGroupEquals(TriggerKey.DEFAULT_GROUP)); assertTrue("Number of jobs expected in default group was 1 ", jobKeys.size() == 1); assertTrue("Number of triggers expected in default group was 1 ", triggerKeys.size() == 1); jobKeys = sched.getJobKeys(GroupMatcher.jobGroupEquals("g1")); triggerKeys = sched.getTriggerKeys(GroupMatcher.triggerGroupEquals("g1")); assertTrue("Number of jobs expected in 'g1' group was 2 ", jobKeys.size() == 2); assertTrue("Number of triggers expected in 'g1' group was 2 ", triggerKeys.size() == 2); TriggerState s = sched.getTriggerState(triggerKey("t2", "g1")); assertTrue("State of trigger t2 expected to be NORMAL ", s.equals(TriggerState.NORMAL)); sched.pauseTrigger(triggerKey("t2", "g1")); s = sched.getTriggerState(triggerKey("t2", "g1")); assertTrue("State of trigger t2 expected to be PAUSED ", s.equals(TriggerState.PAUSED)); sched.resumeTrigger(triggerKey("t2", "g1")); s = sched.getTriggerState(triggerKey("t2", "g1")); assertTrue("State of trigger t2 expected to be NORMAL ", s.equals(TriggerState.NORMAL)); Set<String> pausedGroups = sched.getPausedTriggerGroups(); assertTrue("Size of paused trigger groups list expected to be 0 ", pausedGroups.size() == 0); sched.pauseTriggers(GroupMatcher.triggerGroupEquals("g1")); // test that adding a trigger to a paused group causes the new trigger to be paused also... 
job = newJob().ofType(TestJob.class).withIdentity("j4", "g1").build(); trigger = newTrigger() .withIdentity("t4", "g1") .forJob(job) .startNow() .withSchedule(simpleSchedule().repeatForever().withIntervalInSeconds(5)) .build(); sched.scheduleJob(job, trigger); // TODO: nexus hack: JobStoreImpl DOES NOT "remember" paused groups sched.pauseJob(jobKey("j4", "g1")); pausedGroups = sched.getPausedTriggerGroups(); assertTrue( "Size of paused trigger groups list expected to be 1: " + pausedGroups, pausedGroups.size() == 1); s = sched.getTriggerState(triggerKey("t2", "g1")); assertTrue("State of trigger t2 expected to be PAUSED ", s.equals(TriggerState.PAUSED)); s = sched.getTriggerState(triggerKey("t4", "g1")); assertTrue("State of trigger t4 expected to be PAUSED ", s.equals(TriggerState.PAUSED)); sched.resumeTriggers(GroupMatcher.triggerGroupEquals("g1")); s = sched.getTriggerState(triggerKey("t2", "g1")); assertTrue("State of trigger t2 expected to be NORMAL ", s.equals(TriggerState.NORMAL)); s = sched.getTriggerState(triggerKey("t4", "g1")); assertTrue("State of trigger t4 expected to be NORMAL ", s.equals(TriggerState.NORMAL)); pausedGroups = sched.getPausedTriggerGroups(); assertTrue("Size of paused trigger groups list expected to be 0 ", pausedGroups.size() == 0); assertFalse( "Scheduler should have returned 'false' from attempt to unschedule non-existing trigger. ", sched.unscheduleJob(triggerKey("foasldfksajdflk"))); assertTrue( "Scheduler should have returned 'true' from attempt to unschedule existing trigger. 
", sched.unscheduleJob(triggerKey("t3", "g1"))); jobKeys = sched.getJobKeys(GroupMatcher.jobGroupEquals("g1")); triggerKeys = sched.getTriggerKeys(GroupMatcher.triggerGroupEquals("g1")); assertTrue( "Number of jobs expected in 'g1' group was 1 ", jobKeys.size() == 2); // job should have been deleted also, because it is non-durable assertTrue("Number of triggers expected in 'g1' group was 1 ", triggerKeys.size() == 2); assertTrue( "Scheduler should have returned 'true' from attempt to unschedule existing trigger. ", sched.unscheduleJob(triggerKey("t1"))); jobKeys = sched.getJobKeys(GroupMatcher.jobGroupEquals(JobKey.DEFAULT_GROUP)); triggerKeys = sched.getTriggerKeys(GroupMatcher.triggerGroupEquals(TriggerKey.DEFAULT_GROUP)); assertTrue( "Number of jobs expected in default group was 1 ", jobKeys.size() == 1); // job should have been left in place, because it is non-durable assertTrue("Number of triggers expected in default group was 0 ", triggerKeys.size() == 0); sched.shutdown(true); }
/**
 * Reads all the tasks from an XML configuration document and registers them with
 * the MifosScheduler.
 *
 * <p>First builds an in-memory Spring Batch infrastructure (map-based job
 * registry, JDBC-backed instance/execution/step/context DAOs with MySQL
 * incrementers, a synchronous job launcher, and a job explorer) wired to the
 * Hibernate session factory's data source, storing each piece into the
 * corresponding field of this class. Then it walks every scheduled-task element
 * of the document, validates the delay, unschedules any previously registered
 * job of the same name, and schedules the task. LoanArrearsTask and
 * PortfolioAtRiskTask are deferred so that, when both are configured, they can
 * be combined into a single scheduled job.
 *
 * @param document Task configuration document
 * @throws TaskSystemException when something goes wrong (any exception raised
 *     during setup or scheduling is wrapped)
 */
@Deprecated
private void registerTasksOldConfigurationFile(Document document) throws TaskSystemException {
  try {
    logger.warn(
        "Old format task.xml configuration file is deprecated. Please configure scheduler using spring managed beans.");
    NodeList rootSchedulerTasks = document.getElementsByTagName(SchedulerConstants.SCHEDULER_TASKS);
    Element rootNodeName = (Element) rootSchedulerTasks.item(0);
    NodeList collectionOfScheduledTasks =
        rootNodeName.getElementsByTagName(SchedulerConstants.SCHEDULER);
    // Reuse the Hibernate session factory's data source for the Spring Batch tables.
    DataSource dataSource =
        SessionFactoryUtils.getDataSource(StaticHibernateUtil.getSessionFactory());
    SimpleJdbcTemplate jdbcTemplate = new SimpleJdbcTemplate(dataSource);
    JobRegistry jobRegistry = new MapJobRegistry();
    this.jobLocator = jobRegistry;
    // JDBC DAOs for Spring Batch metadata, each with its MySQL sequence-table incrementer.
    JdbcJobInstanceDao jobInstanceDao = new JdbcJobInstanceDao();
    jobInstanceDao.setJdbcTemplate(jdbcTemplate);
    jobInstanceDao.setJobIncrementer(
        new MySQLMaxValueIncrementer(dataSource, "BATCH_JOB_SEQ", "id"));
    jobInstanceDao.afterPropertiesSet();
    JdbcJobExecutionDao jobExecutionDao = new JdbcJobExecutionDao();
    jobExecutionDao.setJdbcTemplate(jdbcTemplate);
    jobExecutionDao.setJobExecutionIncrementer(
        new MySQLMaxValueIncrementer(dataSource, "BATCH_JOB_EXECUTION_SEQ", "id"));
    jobExecutionDao.afterPropertiesSet();
    JdbcStepExecutionDao stepExecutionDao = new JdbcStepExecutionDao();
    stepExecutionDao.setJdbcTemplate(jdbcTemplate);
    stepExecutionDao.setStepExecutionIncrementer(
        new MySQLMaxValueIncrementer(dataSource, "BATCH_STEP_EXECUTION_SEQ", "id"));
    stepExecutionDao.afterPropertiesSet();
    JdbcExecutionContextDao executionContextDao = new JdbcExecutionContextDao();
    executionContextDao.setJdbcTemplate(jdbcTemplate);
    executionContextDao.afterPropertiesSet();
    JobRepository jobRepository =
        new SimpleJobRepository(
            jobInstanceDao, jobExecutionDao, stepExecutionDao, executionContextDao);
    this.jobRepository = jobRepository;
    // Synchronous launcher: jobs run in the scheduler thread that triggers them.
    SimpleJobLauncher jobLauncher = new SimpleJobLauncher();
    jobLauncher.setJobRepository(jobRepository);
    jobLauncher.setTaskExecutor(new SyncTaskExecutor());
    jobLauncher.afterPropertiesSet();
    this.jobLauncher = jobLauncher;
    JobExplorer jobExplorer =
        new SimpleJobExplorer(jobInstanceDao, jobExecutionDao, stepExecutionDao, executionContextDao);
    this.jobExplorer = jobExplorer;
    // Shared context handed to each scheduled job so it can reach the batch infrastructure.
    Map<String, Object> jobData = new HashMap<String, Object>();
    jobData.put("jobLocator", jobRegistry);
    jobData.put("jobLauncher", jobLauncher);
    jobData.put("jobExplorer", jobExplorer);
    jobData.put("jobRepository", jobRepository);
    JobRegistryBeanPostProcessor jobRegistryProcessor = new JobRegistryBeanPostProcessor();
    jobRegistryProcessor.setJobRegistry(jobRegistry);
    ResourcelessTransactionManager transactionManager = new ResourcelessTransactionManager();
    Date loanArrearsTaskInitialTime = null;
    Long loanArrearsTaskDelayTime = null;
    boolean portfolioAtRiskTaskExists = false;
    for (int i = 0; i < collectionOfScheduledTasks.getLength(); i++) {
      Element scheduledTask = (Element) collectionOfScheduledTasks.item(i);
      Element subNodeName1 =
          (Element) scheduledTask.getElementsByTagName(SchedulerConstants.TASK_CLASS_NAME).item(0);
      String taskName = ((Text) subNodeName1.getFirstChild()).getData().trim();
      Element subNodeName2 =
          (Element) scheduledTask.getElementsByTagName(SchedulerConstants.INITIAL_TIME).item(0);
      String initialTime = ((Text) subNodeName2.getFirstChild()).getData().trim();
      Element subNodeName3;
      String delayTime = null;
      // NOTE(review): getElementsByTagName never returns null, so this guard is
      // always true; .item(0) may still be null if the tag is absent — verify intent.
      if ((scheduledTask.getElementsByTagName(SchedulerConstants.DELAY_TIME)) != null) {
        subNodeName3 =
            (Element) scheduledTask.getElementsByTagName(SchedulerConstants.DELAY_TIME).item(0);
        if (subNodeName3.getFirstChild() != null) {
          delayTime = ((Text) subNodeName3.getFirstChild()).getData().trim();
        }
      }
      // NOTE(review): if delayTime is still null this throws NPE, which the outer
      // catch converts to TaskSystemException — presumably acceptable here.
      if (Long.parseLong(delayTime) < 86400) {
        throw new IllegalArgumentException("Please specify the delay time >= 86400(1 day)");
      }
      // Replace any previously scheduled job with the same task name.
      if (scheduler.getJobDetail(taskName, Scheduler.DEFAULT_GROUP) != null) {
        scheduler.unscheduleJob(taskName, Scheduler.DEFAULT_GROUP);
      }
      // Defer these two tasks: they may be merged into one combined job below.
      if ("LoanArrearsTask".equals(taskName)) {
        loanArrearsTaskInitialTime = parseInitialTime(initialTime);
        loanArrearsTaskDelayTime = Long.parseLong(delayTime) * 1000; // seconds -> millis
        continue;
      }
      if ("PortfolioAtRiskTask".equals(taskName)) {
        portfolioAtRiskTaskExists = true;
        continue;
      }
      schedule(
          taskName,
          parseInitialTime(initialTime),
          Long.parseLong(delayTime) * 1000,
          jobRegistry,
          jobRepository,
          jobData,
          transactionManager);
    }
    if (loanArrearsTaskInitialTime != null) {
      if (portfolioAtRiskTaskExists) {
        // Both tasks configured: schedule them as one combined job.
        scheduleLoanArrearsAndPortfolioAtRisk(
            loanArrearsTaskInitialTime,
            loanArrearsTaskDelayTime,
            jobRegistry,
            jobRepository,
            jobData,
            transactionManager);
      } else {
        schedule(
            "LoanArrearsTask",
            loanArrearsTaskInitialTime,
            loanArrearsTaskDelayTime,
            jobRegistry,
            jobRepository,
            jobData,
            transactionManager);
      }
    }
  } catch (Exception e) {
    throw new TaskSystemException(e);
  }
}