@Override
public synchronized void handleEvent(Event event) {
  JobRunner runner = (JobRunner) event.getRunner();

  if (event.getType() == Type.JOB_STATUS_CHANGED) {
    updateFlow();
  } else if (event.getType() == Type.JOB_FINISHED) {
    synchronized (mainSyncObj) {
      ExecutableNode node = runner.getNode();
      activeJobRunners.remove(node.getJobId());
      logger.info("Job Finished " + node.getJobId() + " with status " + node.getStatus());
      if (runner.getOutputProps() != null) {
        logger.info("Job " + node.getJobId() + " had output props.");
        jobOutputProps.put(node.getJobId(), runner.getOutputProps());
      }

      updateFlow();

      if (node.getStatus() == Status.FAILED) {
        // Retry failure if conditions are met.
        if (!runner.isCancelled() && runner.getRetries() > node.getAttempt()) {
          logger.info("Job " + node.getJobId() + " will be retried. Attempt "
              + node.getAttempt() + " of " + runner.getRetries());
          node.setDelayedExecution(runner.getRetryBackoff());
          node.resetForRetry();
        } else {
          if (!runner.isCancelled() && runner.getRetries() > 0) {
            logger.info("Job " + node.getJobId() + " has run out of retry attempts");
            // Setting delayed execution to 0 in case this is manually re-tried.
            node.setDelayedExecution(0);
          }

          flowFailed = true;
          ExecutionOptions options = flow.getExecutionOptions();
          // The KILLED status occurs when cancel is invoked. We want to keep this
          // status even in failure conditions.
          if (flow.getStatus() != Status.KILLED && flow.getStatus() != Status.FAILED) {
            flow.setStatus(Status.FAILED_FINISHING);
            if (options.getFailureAction() == FailureAction.CANCEL_ALL && !flowCancelled) {
              logger.info("Flow failed. Failure option is Cancel All. Stopping execution.");
              cancel();
            }
          }
        }
      }

      interrupt();
      fireEventListeners(event);
    }
  }
}
private Status getImpliedStatus(ExecutableNode node) {
  // Nodes that have already started or finished keep their current status.
  switch (node.getStatus()) {
    case FAILED:
    case KILLED:
    case SKIPPED:
    case SUCCEEDED:
    case QUEUED:
    case RUNNING:
      return null;
    default:
      break;
  }

  boolean shouldKill = false;
  for (String dependency : node.getInNodes()) {
    ExecutableNode dependencyNode = flow.getExecutableNode(dependency);
    Status depStatus = dependencyNode.getStatus();

    switch (depStatus) {
      case FAILED:
      case KILLED:
        shouldKill = true;
        // Intentional fall-through: a failed or killed dependency still counts as finished.
      case SKIPPED:
      case SUCCEEDED:
        continue;
      case RUNNING:
      case QUEUED:
      case DISABLED:
        return null;
      default:
        // Returning null means the node is not ready to run.
        return null;
    }
  }

  ExecutionOptions options = flow.getExecutionOptions();
  if (shouldKill
      || flowCancelled
      || (flowFailed && options.getFailureAction() != FailureAction.FINISH_ALL_POSSIBLE)) {
    return Status.KILLED;
  }

  // If it's disabled but ready to run, we want to make sure it continues being disabled.
  if (node.getStatus() == Status.DISABLED) {
    return Status.DISABLED;
  }

  // All good to go, ready to run.
  return Status.READY;
}
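The fall-through in the dependency switch above is easy to misread, so here is a minimal, standalone sketch of the same decision rule. It is illustrative only: the enum and method names are hypothetical, null is replaced by an explicit NOT_READY value, and the flowCancelled and DISABLED carry-over cases are omitted for brevity.

import java.util.List;

public class ImpliedStatusSketch {
  enum DepStatus { FAILED, KILLED, SKIPPED, SUCCEEDED, RUNNING, QUEUED, DISABLED }
  enum Implied { KILLED, READY, NOT_READY }

  static Implied impliedStatus(List<DepStatus> deps, boolean flowFailed, boolean finishAllPossible) {
    boolean shouldKill = false;
    for (DepStatus dep : deps) {
      switch (dep) {
        case FAILED:
        case KILLED:
          shouldKill = true; // fall through: the parent is finished, but its failure marks this node for killing
        case SKIPPED:
        case SUCCEEDED:
          continue;
        default:
          return Implied.NOT_READY; // a parent is still RUNNING, QUEUED, or DISABLED
      }
    }
    if (shouldKill || (flowFailed && !finishAllPossible)) {
      return Implied.KILLED;
    }
    return Implied.READY;
  }

  public static void main(String[] args) {
    // One failed parent and one succeeded parent: the node is killed rather than run.
    System.out.println(impliedStatus(List.of(DepStatus.FAILED, DepStatus.SUCCEEDED), false, false));
  }
}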
public FlowRunner(
    ExecutableFlow flow,
    ExecutorLoader executorLoader,
    ProjectLoader projectLoader,
    JobTypeManager jobtypeManager) throws ExecutorManagerException {
  // Delegate to the full constructor; a null executorService makes FlowRunner
  // create its own thread pool.
  this(flow, executorLoader, projectLoader, jobtypeManager, null);
}
/**
 * Constructor. If executorService is null, then it will create its own for thread pools.
 *
 * @param flow
 * @param executorLoader
 * @param projectLoader
 * @param jobtypeManager
 * @param executorService
 * @throws ExecutorManagerException
 */
public FlowRunner(
    ExecutableFlow flow,
    ExecutorLoader executorLoader,
    ProjectLoader projectLoader,
    JobTypeManager jobtypeManager,
    ExecutorService executorService) throws ExecutorManagerException {
  this.execId = flow.getExecutionId();
  this.flow = flow;
  this.executorLoader = executorLoader;
  this.projectLoader = projectLoader;
  this.execDir = new File(flow.getExecutionPath());
  this.jobtypeManager = jobtypeManager;

  ExecutionOptions options = flow.getExecutionOptions();
  this.pipelineLevel = options.getPipelineLevel();
  this.pipelineExecId = options.getPipelineExecutionId();
  this.failureAction = options.getFailureAction();
  this.proxyUsers = flow.getProxyUsers();

  this.executorService = executorService;
  this.finishedNodes = new SwapQueue<ExecutableNode>();
}
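A minimal wiring sketch for this constructor is shown next. It assumes the collaborators (flow, loaders, jobtypeManager) are built elsewhere and that FlowRunner is Runnable, as it is in Azkaban; the helper class, pool size, and thread name are hypothetical, not part of the source.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FlowRunnerWiringSketch {
  // Builds a FlowRunner with a shared job pool and starts it on its own thread.
  public static void startFlow(
      ExecutableFlow flow,
      ExecutorLoader executorLoader,
      ProjectLoader projectLoader,
      JobTypeManager jobtypeManager) throws ExecutorManagerException {
    // A shared pool for the flow's job runners; passing null instead makes
    // FlowRunner create its own pool, per the constructor javadoc above.
    ExecutorService jobPool = Executors.newFixedThreadPool(10);
    FlowRunner runner =
        new FlowRunner(flow, executorLoader, projectLoader, jobtypeManager, jobPool);
    new Thread(runner, "flow-" + flow.getExecutionId()).start();
  }
}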
private JobRunner createJobRunner(ExecutableNode node, Props previousOutput) {
  String source = node.getJobPropsSource();
  String propsSource = node.getPropsSource();

  // If no properties are set, we just set the global properties.
  Props parentProps = propsSource == null ? globalProps : sharedProps.get(propsSource);

  // Set up overrides.
  ExecutionOptions options = flow.getExecutionOptions();
  @SuppressWarnings("unchecked")
  Props flowProps = new Props(null, options.getFlowParameters());
  flowProps.putAll(commonProps);
  flowProps.setParent(parentProps);
  parentProps = flowProps;

  // We add the previous job output and put into this props.
  if (previousOutput != null) {
    Props earliestParent = previousOutput.getEarliestAncestor();
    earliestParent.setParent(parentProps);
    parentProps = previousOutput;
  }

  // Load job file.
  File path = new File(execDir, source);
  Props prop = null;

  // Load the override props, if any.
  try {
    prop = projectLoader.fetchProjectProperty(
        flow.getProjectId(), flow.getVersion(), node.getJobId() + ".jor");
  } catch (ProjectManagerException e) {
    e.printStackTrace();
    logger.error("Error loading job override property for job " + node.getJobId());
  }

  if (prop == null) {
    // If there is no override prop, load the original one on disk.
    try {
      prop = new Props(null, path);
    } catch (IOException e) {
      e.printStackTrace();
      logger.error("Error loading job file " + source + " for job " + node.getJobId());
    }
  }

  // Setting this fake source as this will be used to determine the location of log files.
  prop.setSource(path.getPath());
  prop.setParent(parentProps);

  JobRunner jobRunner =
      new JobRunner(node, prop, path.getParentFile(), executorLoader, jobtypeManager);
  if (watcher != null) {
    jobRunner.setPipeline(watcher, pipelineLevel);
  }
  if (validateUserProxy) {
    jobRunner.setValidatedProxyUsers(proxyUsers);
  }

  jobRunner.setDelayStart(node.getDelayedExecution());
  jobRunner.setLogSettings(logger, jobLogFileSize, jobLogNumFiles);
  jobRunner.addListener(listener);

  return jobRunner;
}
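To make the parent chain that createJobRunner assembles concrete, here is a small, self-contained lookup sketch. It is not Azkaban's Props class (the class and property names are hypothetical); it only illustrates the resolution order built above: the job's own file or override first, then the previous job's output, then flow parameters and common props, then the shared/global layer.

import java.util.HashMap;
import java.util.Map;

public class LayeredPropsSketch {
  private final Map<String, String> values = new HashMap<>();
  private final LayeredPropsSketch parent;

  LayeredPropsSketch(LayeredPropsSketch parent) { this.parent = parent; }

  void put(String key, String value) { values.put(key, value); }

  // Nearest layer wins; otherwise the lookup falls back up the parent chain,
  // mirroring how the parentProps chain in the method above is consulted.
  String get(String key) {
    if (values.containsKey(key)) {
      return values.get(key);
    }
    return parent == null ? null : parent.get(key);
  }

  public static void main(String[] args) {
    LayeredPropsSketch globalProps = new LayeredPropsSketch(null);           // shared/global layer
    globalProps.put("retries", "0");
    LayeredPropsSketch flowParams = new LayeredPropsSketch(globalProps);     // flow parameters + commonProps
    flowParams.put("retries", "2");
    LayeredPropsSketch previousOutput = new LayeredPropsSketch(flowParams);  // previous job's output
    LayeredPropsSketch jobProps = new LayeredPropsSketch(previousOutput);    // the job's own props
    System.out.println(jobProps.get("retries")); // prints "2": the flow parameter overrides the global default
  }
}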