/**
 * Create the job detail.
 *
 * @return job detail
 */
private JobDetail createJobDetail() {
    JobDataMap jobDataMap = new JobDataMap();
    jobDataMap.put("jobConfiguration", jobConfiguration);
    JobDetail result = JobBuilder.newJob(jobConfiguration.getJobClass())
            .setJobData(jobDataMap)
            .withIdentity(jobConfiguration.getJobName())
            .build();
    return result;
}
/**
 * Create the job detail and expose the collaborating services through its JobDataMap.
 *
 * @return job detail
 */
private JobDetail createJobDetail() {
    JobDetail result = JobBuilder.newJob(jobConfiguration.getJobClass())
            .withIdentity(jobConfiguration.getJobName())
            .build();
    result.getJobDataMap().put("configService", configService);
    result.getJobDataMap().put("shardingService", shardingService);
    result.getJobDataMap().put("executionContextService", executionContextService);
    result.getJobDataMap().put("executionService", executionService);
    result.getJobDataMap().put("failoverService", failoverService);
    result.getJobDataMap().put("offsetService", offsetService);
    return result;
}
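// Both createJobDetail() variants above hand state to the job through Quartz's JobDataMap.
// Below is a minimal sketch (not part of the original source) of how a job class could read
// those entries back inside execute(); the class name ExampleElasticJob is a hypothetical
// stand-in, and the snippet assumes org.quartz.Job, JobExecutionContext, JobExecutionException
// and JobDataMap are imported.
public class ExampleElasticJob implements Job {

    @Override
    public void execute(final JobExecutionContext context) throws JobExecutionException {
        JobDataMap dataMap = context.getJobDetail().getJobDataMap();
        // key matches the one stored in createJobDetail(); the services put into the map by the
        // second variant ("configService", "shardingService", ...) can be fetched the same way
        JobConfiguration jobConfiguration = (JobConfiguration) dataMap.get("jobConfiguration");
        // ... run the job logic with the retrieved collaborators ...
    }
}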
/** Initialize the job. */
public void init() {
    log.debug("Elastic job: job controller init, job name is: {}.", jobConfiguration.getJobName());
    coordinatorRegistryCenter.addCacheData("/" + jobConfiguration.getJobName());
    registerElasticEnv();
    jobDetail = createJobDetail();
    try {
        scheduler = initializeScheduler(jobDetail.getKey().toString());
        scheduleJob(createTrigger(configService.getCron()));
    } catch (final SchedulerException ex) {
        throw new JobException(ex);
    }
    JobRegistry.getInstance().addJob(jobConfiguration.getJobName(), this);
}
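// initializeScheduler(...) is called above but not shown in this snippet. A minimal sketch of
// what it might look like, assuming a plain Quartz StdSchedulerFactory with the default
// in-memory job store and a single worker thread (an assumed configuration, not necessarily
// the project's actual one); requires org.quartz.impl.StdSchedulerFactory and java.util.Properties.
private Scheduler initializeScheduler(final String schedulerName) throws SchedulerException {
    Properties props = new Properties();
    props.put("org.quartz.scheduler.instanceName", schedulerName);
    props.put("org.quartz.threadPool.class", "org.quartz.simpl.SimpleThreadPool");
    props.put("org.quartz.threadPool.threadCount", "1");
    // StdSchedulerFactory builds a scheduler from the properties; start() lets triggers fire
    Scheduler result = new StdSchedulerFactory(props).getScheduler();
    result.start();
    return result;
}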
/**
 * fsCleanup: Cleanup the intermediate files and reducer files directories for a job.
 *
 * @param jobConf job's configuration
 */
public void fsCleanup(JobConfiguration jobConf) {
    Path path = Paths.get(jobConf.getJobDir() + "-intermediate");
    if (Files.exists(path)) {
        File dir = new File(jobConf.getJobDir() + "-intermediate");
        deleteDir(dir);
    }
    path = Paths.get(jobConf.getJobDir() + "-reducerfiles");
    if (Files.exists(path)) {
        File dir = new File(jobConf.getJobDir() + "-reducerfiles");
        deleteDir(dir);
    }
}
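// deleteDir(File) is used above but not defined in this snippet. A sketch of a recursive delete
// that would match the usage (assumption: the helper simply removes the whole directory tree):
private void deleteDir(File dir) {
    File[] children = dir.listFiles();
    if (children != null) {
        for (File child : children) {
            if (child.isDirectory()) {
                deleteDir(child); // descend into subdirectories first
            } else {
                child.delete(); // remove regular files
            }
        }
    }
    dir.delete(); // finally remove the (now empty) directory itself
}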
/**
 * Create the trigger.
 *
 * @param cronExpression cron expression
 * @return cron trigger
 */
private CronTrigger createTrigger(final String cronExpression) {
    CronScheduleBuilder cronScheduleBuilder = CronScheduleBuilder.cronSchedule(cronExpression);
    return TriggerBuilder.newTrigger()
            .withIdentity(jobConfiguration.getJobName())
            .withSchedule(cronScheduleBuilder)
            .build();
}
/** Stop the job. */
public void stopJob() {
    try {
        JobRegistry.getInstance().getJobInstance(jobConfiguration.getJobName()).stop();
        scheduler.pauseAll();
    } catch (final SchedulerException ex) {
        throw new JobException(ex);
    }
}
/** Reschedule the job. */
public void rescheduleJob(final String cronExpression) {
    try {
        scheduler.rescheduleJob(
                TriggerKey.triggerKey(
                        Joiner.on("_").join(jobConfiguration.getJobName(), CRON_TRIGGER_INDENTITY_SUFFIX)),
                createTrigger(cronExpression));
    } catch (final SchedulerException ex) {
        throw new JobException(ex);
    }
}
/**
 * performReduce: Add the reduceTask to the list of currently running reduceTasks, allocate a new
 * ReduceRunner and an executor service, and start the reduceTask.
 *
 * @param reduceTask reduce task to be added
 */
public void performReduce(ReduceTask reduceTask) {
    JobConfiguration jobConf = reduceTask.getJobConf();
    Path path = Paths.get(jobConf.getJobDir() + "/" + jobConf.getOutputDirectoryPath());
    // create an output directory for the final output if one doesn't exist
    if (!Files.exists(path)) {
        File dir = new File(jobConf.getJobDir() + "/" + jobConf.getOutputDirectoryPath());
        dir.mkdir();
    }
    reduceLock.lock();
    reduceTasks.add(reduceTask);
    reduceLock.unlock();
    ReduceRunner runner = new ReduceRunner(reduceTask);
    // add the DataNode as a listener for this task
    runner.addListener(this);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    addExecutorToMap(reduceTask.getJobConf().getJobID(), executor);
    executor.execute(runner);
}
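// addExecutorToMap(...) is called above so the DataNode can later look up (and shut down) the
// executors it spawned for a job. A hypothetical sketch, assuming an int job ID and a
// ConcurrentHashMap field; neither the field nor the real return type of getJobID() is shown
// in the original source. Requires java.util.* and java.util.concurrent.* imports.
private final Map<Integer, List<ExecutorService>> executorsByJob = new ConcurrentHashMap<>();

private void addExecutorToMap(int jobID, ExecutorService executor) {
    // a job may spawn several map/reduce executors, so collect them per job ID
    executorsByJob
            .computeIfAbsent(jobID, id -> Collections.synchronizedList(new ArrayList<>()))
            .add(executor);
}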
/** Resume a job that was stopped manually. */
public void resumeManualStopedJob() {
    try {
        if (scheduler.isShutdown()) {
            return;
        }
        JobRegistry.getInstance().getJobInstance(jobConfiguration.getJobName()).resume();
        scheduler.resumeAll();
    } catch (final SchedulerException ex) {
        throw new JobException(ex);
    }
    serverService.clearJobStopedStatus();
}
/**
 * Create the trigger, choosing the misfire handling instruction from the job configuration.
 *
 * @param cronExpression cron expression
 * @return cron trigger
 */
private CronTrigger createTrigger(final String cronExpression) {
    CronScheduleBuilder cronScheduleBuilder = CronScheduleBuilder.cronSchedule(cronExpression);
    if (configService.isMisfire()) {
        cronScheduleBuilder = cronScheduleBuilder.withMisfireHandlingInstructionFireAndProceed();
    } else {
        cronScheduleBuilder = cronScheduleBuilder.withMisfireHandlingInstructionDoNothing();
    }
    return TriggerBuilder.newTrigger()
            .withIdentity(
                    Joiner.on("_").join(jobConfiguration.getJobName(), CRON_TRIGGER_INDENTITY_SUFFIX))
            .withSchedule(cronScheduleBuilder)
            .build();
}
/**
 * Resume a job that stopped because its server crashed.
 *
 * <p>Jobs that were stopped manually will not be resumed.
 */
public void resumeCrashedJob() {
    serverService.persistServerOnline();
    executionService.clearRunningInfo(shardingService.getLocalHostShardingItems());
    if (serverService.isJobStopedManually()) {
        return;
    }
    JobRegistry.getInstance().getJobInstance(jobConfiguration.getJobName()).resume();
    try {
        scheduler.resumeAll();
    } catch (final SchedulerException ex) {
        throw new JobException(ex);
    }
}
/**
 * notifyOfMapperThreadCompletion: We come in here when a mapper task is successfully completed.
 * At this point the mapper has generated intermediate files. In this function we run the
 * partition function on the keys and decide which reducers the intermediate files need to be
 * sent to.
 *
 * <p>Sending the intermediate files as soon as a map task completes is better than sending them
 * all at the end, especially in situations where network bandwidth can be a bottleneck.
 *
 * @param mapRunner the runner of the map task that just finished
 */
@Override
public synchronized void notifyOfMapperThreadCompletion(MapRunner mapRunner) {
    MapTask finished = mapRunner.getMapTask();
    JobConfiguration jobConf = finished.getJobConf();
    final int chunkID = finished.getChunkID();
    File folder = new File(jobConf.getJobDir() + "-intermediate/");
    File[] intermediateFiles =
            folder.listFiles(
                    new FilenameFilter() {
                        @Override
                        public boolean accept(File dir, String name) {
                            return name.endsWith(Integer.toString(chunkID));
                        }
                    });
    // Iterate through all the intermediate files created for this particular chunkID.
    for (File file : intermediateFiles) {
        try {
            // The intermediate filename is key-chunkID. Extract the key from the filename.
            String[] arr = file.getName().split("-");
            String key = "";
            for (int i = 0; i < arr.length - 1; i++) {
                key += arr[i];
            }
            // create new partitioner object
            Partitioner partitioner = new Partitioner(jobConf.getNumberOfReducers(), key);
            // Run the partition function, which returns the reducer this intermediate file
            // should be sent to.
            int dataNodeIndex = partitioner.partition();
            // Get the list of all the data nodes, sort it, and use the dataNodeIndex returned
            // by the partition function to pick the actual reducer node.
            communicator.acquireDataNodesLock();
            List<String> allDataNodes = communicator.getAllDataNodes();
            Collections.sort(allDataNodes);
            String reducerNode = allDataNodes.get(dataNodeIndex);
            // Get the communicator object of the reducer node, read the intermediate file into
            // memory, and call receiveIntermediateFile() on the reducer node's communicator.
            String intermediateFilePath =
                    jobConf.getJobDir() + "-intermediate/" + key + "-" + chunkID;
            Registry registry;
            registry = LocateRegistry.getRegistry(reducerNode, communicator.getREGISTRY_PORT());
            CommunicatorInterface communicator =
                    (CommunicatorInterface) registry.lookup("communicator_" + reducerNode);
            communicator.receiveIntermediateFile(
                    jobConf, Files.readAllBytes(Paths.get(intermediateFilePath)), file.getName());
            // At the end of the task we send the master the list of reducer nodes that received
            // intermediate files, so remember this reducer node.
            finished.addReducer(reducerNode);
        } catch (RemoteException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } catch (NotBoundException e) {
            e.printStackTrace();
        } catch (IndexOutOfBoundsException e) {
            System.out.println("This job is about to be terminated");
        }
        communicator.releaseDataNodesLock();
    }
    // notify the master that the map task has successfully completed
    notifyMasterOfMapTaskCompletion(finished);
    // remove this task from the local data structure
    mapLock.lock();
    mapTasks.remove(finished);
    mapLock.unlock();
}
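// Illustrative sketch of what Partitioner(numberOfReducers, key).partition() might compute:
// a stable mapping from a key to a reducer index in [0, numberOfReducers). This is an assumption
// added for clarity; the project's actual partition function is not shown in this snippet.
public class Partitioner {

    private final int numberOfReducers;
    private final String key;

    public Partitioner(int numberOfReducers, String key) {
        this.numberOfReducers = numberOfReducers;
        this.key = key;
    }

    public int partition() {
        // mask the sign bit so the index is never negative; the same key always maps to the
        // same reducer, which is what notifyOfMapperThreadCompletion relies on
        return (key.hashCode() & Integer.MAX_VALUE) % numberOfReducers;
    }
}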
/**
 * Initialize the job.
 *
 * @throws SchedulerException if the job cannot be scheduled
 */
public void init() throws SchedulerException {
    jobDetail = createJobDetail();
    scheduler = initializeScheduler(jobDetail.getKey().toString());
    scheduleJob(createTrigger(jobConfiguration.getCron()));
}
/**
 * Reschedule the job.
 *
 * @throws SchedulerException if the job cannot be rescheduled
 */
public void rescheduleJob(final String cronExpression) throws SchedulerException {
    scheduler.rescheduleJob(
            TriggerKey.triggerKey(jobConfiguration.getJobName()), createTrigger(cronExpression));
}