@Override
  public RepeatStatus execute(final StepContribution sc, final ChunkContext context)
      throws Exception {

    log.info("First simple task ..... execute !!! ");
    log.info("+++ StepContribution :  {} ", sc);
    log.info(
        "+++  ChunkContext  :  {}  -> jobName  : {} ",
        context,
        context.getStepContext().getJobName());
    log.info(
        "+++  StepContext :  jobParameters :  {} , stepExecution : {} , stepName :  {} ",
        context.getStepContext().getJobParameters(),
        context.getStepContext().getStepExecution(),
        context.getStepContext().getStepName());
    ExecutionContext jobExecutionContext =
        context.getStepContext().getStepExecution().getJobExecution().getExecutionContext();
    JobParameters jobParams =
        context.getStepContext().getStepExecution().getJobExecution().getJobParameters();
    log.info("time : {}", jobParams.getDate("time"));
    log.info("test : {}", jobParams.getString("test"));
    log.info("message : {}", message);
    jobExecutionContext.put("x", "y");
    // promote
    // promote
    ExecutionContext stepExecutionContext =
        context.getStepContext().getStepExecution().getJobExecution().getExecutionContext();
    stepExecutionContext.put("login", "przodownikR1");
    Thread.sleep(4000);

    return FINISHED;
  }
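The "promote" comment above refers to Spring Batch's ExecutionContextPromotionListener pattern: a key written into the step ExecutionContext is copied into the job ExecutionContext when the step completes. A minimal sketch of that wiring, assuming Spring Batch 4.x-style Java config; the configuration class, step, listener and tasklet bean names are illustrative:

import org.springframework.batch.core.Step;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.core.listener.ExecutionContextPromotionListener;
import org.springframework.batch.core.step.tasklet.Tasklet;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class PromotionConfig {

  @Bean
  public ExecutionContextPromotionListener promotionListener() {
    // copy the "login" key from the step ExecutionContext into the job ExecutionContext
    ExecutionContextPromotionListener listener = new ExecutionContextPromotionListener();
    listener.setKeys(new String[] {"login"});
    return listener;
  }

  @Bean
  public Step firstSimpleStep(StepBuilderFactory steps, Tasklet firstSimpleTasklet) {
    return steps.get("firstSimpleStep")
        .tasklet(firstSimpleTasklet)
        .listener(promotionListener())
        .build();
  }
}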
  @Override
  public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext)
      throws Exception {
    RepeatStatus status = RepeatStatus.CONTINUABLE;

    if (chunkContext.getStepContext().getStepExecution().getReadCount() < resources.length) {
      // On each iteration we add one resource as an attachment
      Resource resource =
          resources[chunkContext.getStepContext().getStepExecution().getReadCount()];

      helper.addAttachment(resource.getFilename(), resource.getFile());
      // We confirm that we read one resource
      contribution.incrementReadCount();
    } else {
      // We send the e-mail on the last iteration
      this.mailSender.send(helper.getMimeMessage());
      // Record the number of attachments written
      contribution.incrementWriteCount(resources.length);
      status = RepeatStatus.FINISHED;
    }

    return status;
  }
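The tasklet above relies on a mailSender, a MimeMessageHelper and a resources array injected elsewhere. A minimal sketch of the helper setup, with illustrative addresses and subject (not from the original source); the helper must be created in multipart mode, otherwise addAttachment() is rejected:

import javax.mail.internet.MimeMessage;
import org.springframework.mail.javamail.JavaMailSender;
import org.springframework.mail.javamail.MimeMessageHelper;

  // inside a method that may throw MessagingException
  MimeMessage mimeMessage = mailSender.createMimeMessage();
  MimeMessageHelper helper = new MimeMessageHelper(mimeMessage, true); // true = multipart
  helper.setTo("recipient@example.com");
  helper.setSubject("Batch report");
  helper.setText("Report attached.");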
Example No. 3
  protected String[] getCmd(StepContribution contribution, ChunkContext context) throws Exception {
    if (script == null || script.isEmpty()) {
      throw new UnexpectedJobExecutionException("No script is given to the HiveTasklet to execute");
    }

    String name = context.getStepContext().getStepName();
    Properties sysProps = System.getProperties();
    // use the standard JVM temp-dir property ("tmp" is not a standard system property)
    String paramFile =
        String.format("%s/hive_param_%s.ini", sysProps.getProperty("java.io.tmpdir"), name);
    if (params == null) {
      params = new HashMap<>();
    }
    try (PrintWriter out = new PrintWriter(new File(paramFile))) {
      out.println("-- autogen");
      out.println(String.format("SET mapred.job.queue.name=%s;", queue));
      out.println(String.format("SET hive.metastore.warehouse.dir=%s;", warehouse));
      for (String key : params.keySet()) {
        out.println(String.format("SET %s=%s;", key, params.get(key)));
      }
    }

    log.info(String.format("Generating parameter file at %s", paramFile));

    return new String[] {"hive", "-i", paramFile, "-f", script};
  }
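A short sketch of how the command array returned by getCmd() could be run (illustrative usage, not from the original source):

    String[] cmd = getCmd(contribution, chunkContext);
    // run the generated hive command and fail the step on a non-zero exit code
    Process process = new ProcessBuilder(cmd).inheritIO().start();
    int exitCode = process.waitFor();
    if (exitCode != 0) {
      throw new UnexpectedJobExecutionException("hive exited with code " + exitCode);
    }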
  @Override
  public RepeatStatus executeStep(
      ChunkContext chunkContext, JobExecutionStatusHolder jobExecutionStatusHolder)
      throws Exception {

    String targetName =
        getJobParameters(chunkContext).getString(JobConstants.TARGET_NAME_JOB_PARAM);
    String clusterName =
        getJobParameters(chunkContext).getString(JobConstants.CLUSTER_NAME_JOB_PARAM);
    if (targetName == null) {
      targetName = clusterName;
    }
    String jobName = chunkContext.getStepContext().getJobName();
    logger.info(
        "target : " + targetName + ", operation: " + managementOperation + ", jobname: " + jobName);

    logger.debug("Try to sync up service status for cluster " + clusterName);
    serviceSyncup.syncUp(clusterName);

    boolean vmPowerOn = false;
    String vmPowerOnStr = getJobParameters(chunkContext).getString(JobConstants.IS_VM_POWER_ON);
    if (vmPowerOnStr != null) {
      logger.info("vm original status is power on? " + vmPowerOnStr);
      vmPowerOn = Boolean.parseBoolean(vmPowerOnStr);
    }

    if (checkVMStatus && targetName.split("-").length == 3 && !vmPowerOn) {
      return RepeatStatus.FINISHED;
    }

    // Only check host time for cluster config, disk fix, scale up (management
    // operation configure), start (management operation start) and create
    // (resume only)
    SoftwareManager softwareMgr = null;
    try {
      softwareMgr = softwareMgrs.getSoftwareManagerByClusterName(clusterName);
    } catch (SoftwareManagerCollectorException e) {
      if (ManagementOperation.PRE_DESTROY.equals(managementOperation)
          || ManagementOperation.DESTROY.equals(managementOperation)) {
        return RepeatStatus.FINISHED;
      }
      throw e;
    }
    if (ManagementOperation.CONFIGURE.equals(managementOperation)
        || ManagementOperation.START.equals(managementOperation)
        || JobConstants.RESUME_CLUSTER_JOB_NAME.equals(jobName)) {
      logger.info("Start to check host time.");
      List<NodeEntity> nodes = lockClusterEntityMgr.getClusterEntityMgr().findAllNodes(clusterName);
      Set<String> hostnames = new HashSet<String>();
      for (NodeEntity node : nodes) {
        // for software operation, we can only handle VMs who are already VM_READY
        // Add this filter to tolerate some vm failures in cluster start
        boolean force = JobUtils.getJobParameterForceClusterOperation(chunkContext);
        if (force && (node.getStatus() != NodeStatus.VM_READY)) {
          continue;
        }
        hostnames.add(node.getHostName());
      }
      ClusterCreate clusterSpec = clusterManager.getClusterSpec(clusterName);
      SyncHostsUtils.SyncHosts(clusterSpec, hostnames, softwareMgr);
    }

    StatusUpdater statusUpdater =
        new DefaultStatusUpdater(jobExecutionStatusHolder, getJobExecutionId(chunkContext));

    ISoftwareManagementTask task = null;
    String appMgrName = softwareMgr.getName();
    validateUserExistense();
    if (!Constants.IRONFAN.equals(appMgrName)) {
      task = createExternalTask(chunkContext, targetName, clusterName, statusUpdater);
    } else {
      task = createThriftTask(chunkContext, targetName, statusUpdater);
    }

    if (task != null) {
      Map<String, Object> ret = task.call();

      if (!(Boolean) ret.get("succeed")) {
        String errorMessage = (String) ret.get("errorMessage");
        putIntoJobExecutionContext(chunkContext, JobConstants.CURRENT_ERROR_MESSAGE, errorMessage);
        putIntoJobExecutionContext(chunkContext, JobConstants.SOFTWARE_MANAGEMENT_STEP_FAILE, true);
        throw TaskException.EXECUTION_FAILED(errorMessage);
      }
    }

    return RepeatStatus.FINISHED;
  }
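The step above stores its failure details through a putIntoJobExecutionContext helper that is not shown here. A plausible shape for it, assumed by analogy with the getJobExecutionContext() helper in Example No. 5:

  protected void putIntoJobExecutionContext(ChunkContext chunkContext, String key, Object value) {
    // write directly into the job-level ExecutionContext (assumed implementation)
    chunkContext.getStepContext().getStepExecution().getJobExecution()
        .getExecutionContext().put(key, value);
  }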
Example No. 5
 private static ExecutionContext getJobExecutionContext(ChunkContext chunkContext) {
   return chunkContext.getStepContext().getStepExecution().getJobExecution().getExecutionContext();
 }
Example No. 6
 public static long getJobExecutionId(ChunkContext chunkContext) {
   return chunkContext.getStepContext().getStepExecution().getJobExecution().getId();
 }
Example No. 7
 public static JobParameters getJobParameters(ChunkContext chunkContext) {
   return chunkContext.getStepContext().getStepExecution().getJobParameters();
 }