@Override
public final void execute(final JobExecutionContext context) throws JobExecutionException {
    log.debug("Elastic job: job execute begin, job execution context:{}.", context);
    shardingService.shardingIfNecessary();
    JobExecutionMultipleShardingContext shardingContext = executionContextService.getJobExecutionShardingContext();
    if (executionService.misfireIfNecessary(shardingContext.getShardingItems())) {
        log.info("Previous job is still running, the new trigger will run after the previous job completes. Misfire has been recorded.");
        return;
    }
    executionService.cleanPreviousExecutionInfo();
    executeJobInternal(shardingContext);
    log.debug("Elastic job: execute normal completed, sharding context:{}.", shardingContext);
    while (!executionService.getMisfiredJobItems(shardingContext.getShardingItems()).isEmpty() && !stoped && !shardingService.isNeedSharding()) {
        log.debug("Elastic job: execute misfired job, sharding context:{}.", shardingContext);
        executionService.clearMisfire(shardingContext.getShardingItems());
        executeJobInternal(shardingContext);
        log.debug("Elastic job: misfired job completed, sharding context:{}.", shardingContext);
    }
    if (configService.isFailover() && !stoped) {
        failoverService.failoverIfNecessary();
    }
    log.debug("Elastic job: execute all completed, job execution context:{}.", context);
}
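For orientation, a minimal sketch of a concrete job plugging into this template method; the subclass name and loop body are hypothetical, and it assumes the enclosing class is AbstractElasticJob with executeJob(...) as the abstract hook invoked by executeJobInternal below.

// Hypothetical concrete job: subclasses only supply executeJob, the lifecycle above is inherited.
public class MyElasticJob extends AbstractElasticJob {

    @Override
    protected void executeJob(final JobExecutionMultipleShardingContext shardingContext) {
        for (int item : shardingContext.getShardingItems()) {
            // process the data belonging to sharding item `item`
        }
    }
}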
/**
 * Mark the given sharding items as misfired.
 *
 * @param items sharding items to be marked as misfired
 */
public void setMisfire(final List<Integer> items) {
    if (!configService.isMonitorExecution()) {
        return;
    }
    for (int each : items) {
        jobNodeStorage.createJobNodeIfNeeded(ExecutionNode.getMisfireNode(each));
    }
}
private void registerElasticEnv() {
    listenerManager.startAllListeners();
    leaderElectionService.leaderElection();
    configService.persistJobConfiguration();
    serverService.persistServerOnline();
    serverService.clearJobStopedStatus();
    statisticsService.startProcessCountJob();
    shardingService.setReshardingFlag();
    monitorService.listen();
}
/**
 * Judge whether any of the given sharding items is still running.
 *
 * @param items sharding items to check
 * @return true if any of the given sharding items is still running
 */
public boolean hasRunningItems(final List<Integer> items) {
    if (!configService.isMonitorExecution()) {
        return false;
    }
    for (int each : items) {
        if (jobNodeStorage.isJobNodeExisted(ExecutionNode.getRunningNode(each))) {
            return true;
        }
    }
    return false;
}
private void executeJobInternal(final JobExecutionMultipleShardingContext shardingContext) {
    if (shardingContext.getShardingItems().isEmpty()) {
        log.debug("Elastic job: sharding item is empty, job execution context:{}.", shardingContext);
        return;
    }
    executionService.registerJobBegin(shardingContext);
    executeJob(shardingContext);
    executionService.registerJobCompleted(shardingContext);
    if (configService.isFailover()) {
        failoverService.updateFailoverComplete(shardingContext.getShardingItems());
    }
}
/**
 * Initialize the job monitor service.
 */
public void listen() {
    int port = configService.getMonitorPort();
    if (port < 0) {
        return;
    }
    try {
        log.info("Elastic job: monitor service is running, the port is '{}'", port);
        openSocketForMonitor(port);
    } catch (final IOException ex) {
        log.warn(ex.getMessage());
    }
}
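For reference, a client can probe the monitor port over a plain TCP socket. In the sketch below the host, port number, and the "dump" command string are placeholders; the actual protocol is defined by openSocketForMonitor, which is not shown here.

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.Socket;

public final class MonitorClientSketch {

    public static void main(final String[] args) throws IOException {
        // "localhost", 9888 and "dump" are assumptions; use the configured monitor port and actual command.
        try (Socket socket = new Socket("localhost", 9888);
             BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(socket.getOutputStream()));
             BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream()))) {
            writer.write("dump");
            writer.newLine();
            writer.flush();
            String line;
            while (null != (line = reader.readLine())) {
                System.out.println(line);
            }
        }
    }
}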
/**
 * Get the runtime sharding context for the current job server.
 *
 * @return runtime sharding context for the current job server
 */
public JobExecutionMultipleShardingContext getJobExecutionShardingContext() {
    JobExecutionMultipleShardingContext result = new JobExecutionMultipleShardingContext();
    result.setJobName(jobConfiguration.getJobName());
    result.setShardingTotalCount(configService.getShardingTotalCount());
    List<Integer> shardingItems = shardingService.getLocalHostShardingItems();
    if (configService.isFailover()) {
        List<Integer> failoverItems = failoverService.getLocalHostFailoverItems();
        if (!failoverItems.isEmpty()) {
            result.setShardingItems(failoverItems);
        } else {
            shardingItems.removeAll(failoverService.getLocalHostTakeOffItems());
            result.setShardingItems(shardingItems);
        }
    } else {
        result.setShardingItems(shardingItems);
    }
    boolean isMonitorExecution = configService.isMonitorExecution();
    if (isMonitorExecution) {
        removeRunningItems(shardingItems);
    }
    result.setJobParameter(configService.getJobParameter());
    result.setMonitorExecution(isMonitorExecution);
    result.setFetchDataCount(configService.getFetchDataCount());
    if (result.getShardingItems().isEmpty()) {
        return result;
    }
    Map<Integer, String> shardingItemParameters = configService.getShardingItemParameters();
    for (int each : result.getShardingItems()) {
        if (shardingItemParameters.containsKey(each)) {
            result.getShardingItemParameters().put(each, shardingItemParameters.get(each));
        }
    }
    result.setOffsets(offsetService.getOffsets(result.getShardingItems()));
    return result;
}
/**
 * Register job completion information.
 *
 * @param jobExecutionShardingContext runtime sharding context of the job
 */
public void registerJobCompleted(final JobExecutionMultipleShardingContext jobExecutionShardingContext) {
    if (!configService.isMonitorExecution()) {
        return;
    }
    serverService.updateServerStatus(ServerStatus.READY);
    for (int each : jobExecutionShardingContext.getShardingItems()) {
        jobNodeStorage.createJobNodeIfNeeded(ExecutionNode.getCompletedNode(each));
        jobNodeStorage.removeJobNodeIfExisted(ExecutionNode.getRunningNode(each));
        jobNodeStorage.replaceJobNode(ExecutionNode.getLastCompleteTimeNode(each), System.currentTimeMillis());
    }
}
private Properties getBaseQuartzProperties(final String jobName) {
    Properties result = new Properties();
    result.put("org.quartz.threadPool.class", "org.quartz.simpl.SimpleThreadPool");
    result.put("org.quartz.threadPool.threadCount", "1");
    result.put("org.quartz.scheduler.instanceName", Joiner.on("_").join(jobName, SCHEDULER_INSTANCE_NAME_SUFFIX));
    if (!configService.isMisfire()) {
        result.put("org.quartz.jobStore.misfireThreshold", "1");
    }
    prepareEnvironments(result);
    return result;
}
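These properties presumably feed the initializeScheduler call used in init() below. A minimal sketch of such wiring with Quartz's standard StdSchedulerFactory API follows; the method body is an assumption, not the project's actual implementation.

// Sketch only: build a Quartz Scheduler from the base properties above.
// Requires org.quartz.Scheduler, org.quartz.SchedulerException and org.quartz.impl.StdSchedulerFactory.
private Scheduler initializeSchedulerSketch(final String jobName) throws SchedulerException {
    StdSchedulerFactory factory = new StdSchedulerFactory();
    factory.initialize(getBaseQuartzProperties(jobName));
    return factory.getScheduler();
}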
private CronTrigger createTrigger(final String cronExpression) {
    CronScheduleBuilder cronScheduleBuilder = CronScheduleBuilder.cronSchedule(cronExpression);
    if (configService.isMisfire()) {
        cronScheduleBuilder = cronScheduleBuilder.withMisfireHandlingInstructionFireAndProceed();
    } else {
        cronScheduleBuilder = cronScheduleBuilder.withMisfireHandlingInstructionDoNothing();
    }
    return TriggerBuilder.newTrigger()
            .withIdentity(Joiner.on("_").join(jobConfiguration.getJobName(), CRON_TRIGGER_INDENTITY_SUFFIX))
            .withSchedule(cronScheduleBuilder)
            .build();
}
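For context: with fire-and-proceed, Quartz runs a missed trigger once as soon as the misfire is detected and then resumes the normal schedule, while do-nothing skips missed fires and waits for the next regular fire time. A minimal usage sketch follows, assuming the scheduleJob(...) call in init() ultimately pairs this trigger with the job detail via the standard Quartz API.

// Sketch only: direct Quartz calls; in this codebase the wiring goes through scheduleJob(...) in init().
scheduler.scheduleJob(jobDetail, createTrigger(configService.getCron()));
scheduler.start();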
/**
 * Initialize the job.
 */
public void init() {
    log.debug("Elastic job: job controller init, job name is: {}.", jobConfiguration.getJobName());
    coordinatorRegistryCenter.addCacheData("/" + jobConfiguration.getJobName());
    registerElasticEnv();
    jobDetail = createJobDetail();
    try {
        scheduler = initializeScheduler(jobDetail.getKey().toString());
        scheduleJob(createTrigger(configService.getCron()));
    } catch (final SchedulerException ex) {
        throw new JobException(ex);
    }
    JobRegistry.getInstance().addJob(jobConfiguration.getJobName(), this);
}
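A usage sketch for bootstrapping a job; the constructor signature is an assumption inferred from the fields used above (coordinatorRegistryCenter and jobConfiguration), not a confirmed API.

// Sketch only: construct the scheduler with a registry center and a job configuration, then initialize it.
JobScheduler jobScheduler = new JobScheduler(coordinatorRegistryCenter, jobConfiguration);
jobScheduler.init();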
private void fixExecutionInfo(final List<Integer> items) {
    int newShardingTotalCount = configService.getShardingTotalCount();
    int currentShardingTotalCount = items.size();
    if (newShardingTotalCount > currentShardingTotalCount) {
        for (int i = currentShardingTotalCount; i < newShardingTotalCount; i++) {
            jobNodeStorage.createJobNodeIfNeeded(ExecutionNode.ROOT + "/" + i);
        }
    } else if (newShardingTotalCount < currentShardingTotalCount) {
        for (int i = newShardingTotalCount; i < currentShardingTotalCount; i++) {
            jobNodeStorage.removeJobNodeIfExisted(ExecutionNode.ROOT + "/" + i);
        }
    }
    jobNodeStorage.removeJobNodeIfExisted(ExecutionNode.NECESSARY);
}
@Before
public void setUp() throws NoSuchFieldException {
    MockitoAnnotations.initMocks(this);
    when(configService.getJobName()).thenReturn(ElasticJobAssert.JOB_NAME);
    unstreamingIndividualThroughputDataFlowElasticJob = new FooUnstreamingIndividualThroughputDataFlowElasticJob(jobCaller);
    unstreamingIndividualThroughputDataFlowElasticJob.setConfigService(configService);
    unstreamingIndividualThroughputDataFlowElasticJob.setShardingService(shardingService);
    unstreamingIndividualThroughputDataFlowElasticJob.setExecutionContextService(executionContextService);
    unstreamingIndividualThroughputDataFlowElasticJob.setExecutionService(executionService);
    unstreamingIndividualThroughputDataFlowElasticJob.setFailoverService(failoverService);
    unstreamingIndividualThroughputDataFlowElasticJob.setOffsetService(offsetService);
    shardingContext = ElasticJobAssert.getShardingContext();
    ElasticJobAssert.prepareForIsNotMisfireAndIsNotFailover(configService, executionContextService, executionService, shardingContext);
}
@Test
public void assertExecuteWhenFetchDataIsNotEmptyAndDataIsOne() throws JobExecutionException {
    when(jobCaller.fetchData()).thenReturn(Collections.<Object>singletonList(1));
    when(jobCaller.processData(1)).thenReturn(true);
    when(configService.getConcurrentDataProcessThreadCount()).thenReturn(2);
    unstreamingIndividualThroughputDataFlowElasticJob.execute(null);
    verify(jobCaller).processData(1);
    verify(configService).getConcurrentDataProcessThreadCount();
    ElasticJobAssert.verifyForIsNotMisfireAndIsNotFailover(configService, shardingService, executionContextService, executionService, failoverService, shardingContext);
    ElasticJobAssert.assertProcessCountStatistics(1, 0);
}
/**
 * Register job start information.
 *
 * @param jobExecutionShardingContext runtime sharding context of the job
 */
public void registerJobBegin(final JobExecutionMultipleShardingContext jobExecutionShardingContext) {
    if (!jobExecutionShardingContext.getShardingItems().isEmpty() && configService.isMonitorExecution()) {
        serverService.updateServerStatus(ServerStatus.RUNNING);
        for (int each : jobExecutionShardingContext.getShardingItems()) {
            jobNodeStorage.fillEphemeralJobNode(ExecutionNode.getRunningNode(each), "");
            jobNodeStorage.replaceJobNode(ExecutionNode.getLastBeginTimeNode(each), System.currentTimeMillis());
            Date nextFireTime = JobRegistry.getInstance().getJob(jobConfiguration.getJobName()).getNextFireTime();
            if (null != nextFireTime) {
                jobNodeStorage.replaceJobNode(ExecutionNode.getNextFireTimeNode(each), nextFireTime.getTime());
            }
        }
    }
}
@Test
public void assertExecuteWhenFetchDataIsNotEmptyAndConcurrentDataProcessThreadCountIsOneAndProcessFailureWithException() throws JobExecutionException {
    when(jobCaller.fetchData()).thenReturn(Arrays.<Object>asList(1, 2));
    doThrow(RuntimeException.class).when(jobCaller).processData(any());
    when(configService.getConcurrentDataProcessThreadCount()).thenReturn(1);
    unstreamingIndividualThroughputDataFlowElasticJob.execute(null);
    verify(jobCaller).processData(1);
    verify(jobCaller).processData(2);
    verify(configService).getConcurrentDataProcessThreadCount();
    ElasticJobAssert.verifyForIsNotMisfireAndIsNotFailover(configService, shardingService, executionContextService, executionService, failoverService, shardingContext);
    ElasticJobAssert.assertProcessCountStatistics(0, 2);
}
@Test
public void testGetCron() {
    when(configService.getCron()).thenReturn("0 * * * * *");
    assertThat(schedulerFacade.getCron(), is("0 * * * * *"));
}
@Test
public void testIsMisfire() {
    when(configService.isMisfire()).thenReturn(true);
    assertTrue(schedulerFacade.isMisfire());
}