/**
 * Creates a workflow instance for the given application definition, leaving it in
 * {@link Status#PREP} until it is started.
 *
 * @param def workflow application definition; must not be {@code null}
 * @param conf job configuration; must not be {@code null}
 * @param instanceId unique identifier of this instance; must not be {@code null}
 */
public LiteWorkflowInstance(LiteWorkflowApp def, Configuration conf, String instanceId) {
  this();
  // Validation order matters for which parameter is reported first on failure.
  this.def = ParamChecker.notNull(def, "def");
  this.instanceId = ParamChecker.notNull(instanceId, "instanceId");
  this.conf = ParamChecker.notNull(conf, "conf");
  refreshLog();
  status = Status.PREP;
}
/** * Calculate the difference of timezone offset in minutes between dataset and coordinator job. * * <p>Depends on: * * <p>1. Timezone of both dataset and job * * <p>2. Action creation Time * * @return difference in minutes (DataSet TZ Offset - Application TZ offset) */ public static int ph2_coord_tzOffset() { Date actionCreationTime = getActionCreationtime(); TimeZone dsTZ = ParamChecker.notNull(getDatasetTZ(), "DatasetTZ"); TimeZone jobTZ = ParamChecker.notNull(getJobTZ(), "JobTZ"); // Apply the TZ into Calendar object Calendar dsTime = Calendar.getInstance(dsTZ); dsTime.setTime(actionCreationTime); Calendar jobTime = Calendar.getInstance(jobTZ); jobTime.setTime(actionCreationTime); return (dsTime.get(Calendar.ZONE_OFFSET) - jobTime.get(Calendar.ZONE_OFFSET)) / (1000 * 60); }
/**
 * Constructor to create the Coordinator Submit Command.
 *
 * @param conf : Configuration for Coordinator job
 * @param authToken : To be used for authentication
 */
public CoordSubmitXCommand(Configuration conf, String authToken) {
  super("coord_submit", "coord_submit", 1);
  // Validation order determines which argument is reported first when both are bad.
  this.conf = ParamChecker.notNull(conf, "conf");
  this.authToken = ParamChecker.notEmpty(authToken, "authToken");
  // Not a bundle-embedded coordinator: no bundle id and no preset name.
  this.bundleId = null;
  this.coordName = null;
}
/**
 * Return Job Name.
 *
 * <p>
 *
 * @return coordinator name
 */
public static String ph2_coord_name() throws Exception {
  ELEvaluator evaluator = ELEvaluator.getCurrent();
  // The coordinator action must have been bound into the current evaluator context.
  SyncCoordAction coordAction =
      ParamChecker.notNull(
          (SyncCoordAction) evaluator.getVariable(COORD_ACTION), "Coordinator Action");
  return coordAction.getName();
}
/**
 * Return nominal time or Action Creation Time.
 *
 * <p>
 *
 * @return coordinator action creation or materialization date time
 * @throws Exception if unable to format the Date object to String
 */
public static String ph2_coord_nominalTime() throws Exception {
  ELEvaluator evaluator = ELEvaluator.getCurrent();
  // The coordinator action must have been bound into the current evaluator context.
  SyncCoordAction coordAction =
      ParamChecker.notNull(
          (SyncCoordAction) evaluator.getVariable(COORD_ACTION), "Coordinator Action");
  // Nominal time is rendered as a UTC date string.
  return DateUtils.formatDateUTC(coordAction.getNominalTime());
}
/**
 * Check if all given actions are eligible to rerun.
 *
 * <p>An action is eligible when it is in a terminal status; an empty list yields
 * {@code false}.
 *
 * @param coordActions list of CoordinatorActionBean
 * @return true if all actions are eligible to rerun
 */
private static boolean checkAllActionsRunnable(List<CoordinatorActionBean> coordActions) {
  ParamChecker.notNull(coordActions, "Coord actions to be rerun");
  boolean sawAction = false;
  for (CoordinatorActionBean coordAction : coordActions) {
    // A single non-terminal action disqualifies the whole batch.
    if (!coordAction.isTerminalStatus()) {
      return false;
    }
    sawAction = true;
  }
  // False when the list was empty, true when every action was terminal.
  return sawAction;
}
/**
 * Creates the notification command for the given coordinator action.
 *
 * @param actionBean coordinator action to send the notification for; must not be {@code null}
 */
public CoordActionNotificationXCommand(CoordinatorActionBean actionBean) {
  super("coord_action_notification", "coord_action_notification", 0);
  // ParamChecker.notNull returns its argument, so validate and assign in one step.
  this.actionBean = ParamChecker.notNull(actionBean, "Action Bean");
}
/**
 * Delivers a signal to the node at the given execution path, driving the workflow
 * state machine forward: enters the node if not yet started, and on exit records the
 * transition, updates the job status, spawns/updates child execution paths, and
 * recursively signals newly created synchronous transitions.
 *
 * <p>Side effects: mutates {@code status}, {@code executionPaths}, {@code persistentVars},
 * and terminates running nodes when the job reaches an end state. Declared
 * {@code synchronized}, so concurrent signals on the same instance are serialized.
 *
 * @param executionPath execution path of the node being signaled; must not be empty
 * @param signalValue signal payload handed to the node handler; must not be null
 * @return true if the workflow job finishes (reaches an end state) as a result
 * @throws WorkflowException if the job is not RUNNING (E0716) or a node handler fails
 */
public synchronized boolean signal(String executionPath, String signalValue)
    throws WorkflowException {
  ParamChecker.notEmpty(executionPath, "executionPath");
  ParamChecker.notNull(signalValue, "signalValue");
  log.debug(
      XLog.STD,
      "Signaling job execution path [{0}] signal value [{1}]",
      executionPath,
      signalValue);
  // Signals are only valid while the job is running.
  if (status != Status.RUNNING) {
    throw new WorkflowException(ErrorCode.E0716);
  }
  NodeInstance nodeJob = executionPaths.get(executionPath);
  if (nodeJob == null) {
    // Unknown path: fail the job. FAILED is an end state, so the guarded blocks
    // below are skipped and nodeJob is never dereferenced.
    status = Status.FAILED;
    log.error("invalid execution path [{0}]", executionPath);
  }
  NodeDef nodeDef = null;
  if (!status.isEndState()) {
    nodeDef = def.getNode(nodeJob.nodeName);
    if (nodeDef == null) {
      // Path references a node no longer in the app definition.
      status = Status.FAILED;
      log.error("invalid transition [{0}]", nodeJob.nodeName);
    }
  }
  if (!status.isEndState()) {
    NodeHandler nodeHandler = newInstance(nodeDef.getHandlerClass());
    // Already-started nodes skip enter() and go straight to exit processing.
    boolean exiting = true;
    Context context = new Context(nodeDef, executionPath, signalValue);
    if (!nodeJob.started) {
      try {
        nodeHandler.loopDetection(context);
        // enter() returning false means the node is waiting (e.g. join not complete).
        exiting = nodeHandler.enter(context);
        nodeJob.started = true;
      } catch (WorkflowException ex) {
        // Entering failed: fail the job, kill whatever is still running, rethrow.
        status = Status.FAILED;
        List<String> killedNodes = terminateNodes(Status.KILLED);
        // NOTE(review): threshold is > 1, not > 0 — only logs when 2+ nodes were
        // killed; mirrors the end-state branch below. Confirm this is intentional.
        if (killedNodes.size() > 1) {
          log.warn(
              XLog.STD,
              "Workflow completed [{0}], killing [{1}] running nodes",
              status,
              killedNodes.size());
        }
        throw ex;
      }
    }
    if (exiting) {
      List<String> pathsToStart = new ArrayList<String>();
      List<String> fullTransitions;
      try {
        fullTransitions = nodeHandler.multiExit(context);
        int last = fullTransitions.size() - 1;
        // TEST THIS
        if (last >= 0) {
          // Persist the (last) transition taken so it can be recovered later.
          String transitionTo = getTransitionNode(fullTransitions.get(last));
          if (nodeDef instanceof ForkNodeDef) {
            transitionTo = "*"; // WF action cannot hold all transitions for a fork.
            // transitions are hardcoded in the WF app.
          }
          persistentVars.put(
              nodeDef.getName() + WorkflowInstance.NODE_VAR_SEPARATOR + TRANSITION_TO,
              transitionTo);
        }
      } catch (WorkflowException ex) {
        status = Status.FAILED;
        throw ex;
      }
      // Map the node's exit status onto the job status; the final else branch is the
      // normal "keep going" case that fans out to the next node(s).
      if (context.status == Status.KILLED) {
        status = Status.KILLED;
        log.debug(XLog.STD, "Completing job, kill node [{0}]", nodeJob.nodeName);
      } else {
        if (context.status == Status.FAILED) {
          status = Status.FAILED;
          log.debug(XLog.STD, "Completing job, fail node [{0}]", nodeJob.nodeName);
        } else {
          if (context.status == Status.SUCCEEDED) {
            status = Status.SUCCEEDED;
            log.debug(XLog.STD, "Completing job, end node [{0}]", nodeJob.nodeName);
          }
          /* else if (context.status == Status.SUSPENDED) { status = Status.SUSPENDED;
          log.debug(XLog.STD, "Completing job, end node [{0}]", nodeJob.nodeName); } */
          else {
            for (String fullTransition : fullTransitions) {
              // this is the whole trick for forking, we need the executionpath and
              // the transition
              // in the case of no forking last element of executionpath is different
              // from transition
              // in the case of forking they are the same
              log.debug(
                  XLog.STD,
                  "Exiting node [{0}] with transition[{1}]",
                  nodeJob.nodeName,
                  fullTransition);
              String execPathFromTransition = getExecutionPath(fullTransition);
              String transition = getTransitionNode(fullTransition);
              def.validateTransition(nodeJob.nodeName, transition);
              NodeInstance nodeJobInPath = executionPaths.get(execPathFromTransition);
              if ((nodeJobInPath == null) || (!transition.equals(nodeJobInPath.nodeName))) {
                // TODO explain this IF better
                // If the WfJob is signaled with the parent execution executionPath
                // again
                // The Fork node will execute again.. and replace the Node
                // WorkflowJobBean
                // so this is required to prevent that..
                // Question : Should we throw an error in this case ??
                executionPaths.put(execPathFromTransition, new NodeInstance(transition));
                pathsToStart.add(execPathFromTransition);
              }
            }
            // signal all new synch transitions
            for (String pathToStart : pathsToStart) {
              signal(pathToStart, "::synch::");
            }
          }
        }
      }
    }
  }
  if (status.isEndState()) {
    // Job finished (by success, failure, or kill): tear down any nodes still running.
    if (status == Status.FAILED) {
      List<String> failedNodes = terminateNodes(status);
      log.warn(
          XLog.STD,
          "Workflow completed [{0}], failing [{1}] running nodes",
          status,
          failedNodes.size());
    } else {
      List<String> killedNodes = terminateNodes(Status.KILLED);
      // NOTE(review): same > 1 threshold as above — logs only when 2+ nodes killed.
      if (killedNodes.size() > 1) {
        log.warn(
            XLog.STD,
            "Workflow completed [{0}], killing [{1}] running nodes",
            status,
            killedNodes.size());
      }
    }
  }
  return status.isEndState();
}
/**
 * Creates a paged bulk-query executor over the given filter criteria.
 *
 * @param bulkFilter filter-name to filter-values map; must not be {@code null}
 * @param start offset of the first record to fetch
 * @param len maximum number of records to fetch
 */
public BulkJPAExecutor(Map<String, List<String>> bulkFilter, int start, int len) {
  // ParamChecker.notNull returns its argument, so validate and assign in one step.
  this.bulkFilter = ParamChecker.notNull(bulkFilter, "bulkFilter");
  this.start = start;
  this.len = len;
}