@Override
  protected void invokePostStepArtifacts() {
    // Invoke the reducer after all parallel steps are done
    if (this.partitionReducerProxy != null) {

      try {
        if ((BatchStatus.COMPLETED).equals(stepContext.getBatchStatus())) {
          this.partitionReducerProxy.afterPartitionedStepCompletion(PartitionStatus.COMMIT);
        } else {
          this.partitionReducerProxy.afterPartitionedStepCompletion(PartitionStatus.ROLLBACK);
        }
      } catch (Exception e) {
        ExceptionConfig.wrapBatchException(e);
      }
    }
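    // For reference, the reducer invoked above is a user-provided
    // javax.batch.api.partition.PartitionReducer. A minimal sketch (class name and bodies are
    // illustrative only, not part of this module):
    //
    //   public class SampleReducer implements javax.batch.api.partition.PartitionReducer {
    //       public void beginPartitionedStep() { }             // open a logical unit of work
    //       public void beforePartitionedStepCompletion() { }  // all partitions succeeded
    //       public void rollbackPartitionedStep() { }          // undo work from any partition
    //       public void afterPartitionedStepCompletion(PartitionStatus status) {
    //           // status is COMMIT when the step completed, ROLLBACK otherwise (see above)
    //       }
    //   }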

    // Called in the order required by the spec (see Sec. 11.7)
    if (stepListeners != null) {
      for (StepListener listenerProxy : stepListeners) {
        // Call afterStep on all the step listeners
        try {
          listenerProxy.afterStep();
        } catch (Exception e) {
          ExceptionConfig.wrapBatchException(e);
        }
      }
    }
  }
  private void checkCompletedWork() {
    /*
     * Check the batch status of each sub-job after it completes to decide whether we need to
     * issue a rollback; a rollback is started if any sub-job failed.
     */
    boolean rollback = false;

    for (final BatchWorkUnit subJob : completedWork) {
      final List<StepExecution> steps =
          persistenceManagerService.getStepExecutionsForJobExecution(
              subJob.getJobExecutionImpl().getExecutionId());
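      // Roll each sub-job's metrics up into the top-level step context so the partitioned
      // step reports aggregate totals.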
      if (steps.size() == 1) {
        for (final Metric metric : steps.iterator().next().getMetrics()) {
          stepContext.getMetric(metric.getType()).incValueBy(metric.getValue());
        }
      } /* else {
            // TODO: possible?
        }*/

      final BatchStatus batchStatus = subJob.getJobExecutionImpl().getJobContext().getBatchStatus();
      if (batchStatus.equals(BatchStatus.FAILED)) {
        rollback = true;

        // Keep track of the failing status and throw an exception to propagate after the rest of
        // the partitions are complete
        stepContext.setBatchStatus(BatchStatus.FAILED);
      }
    }

    // If rollback is false we never issued a rollback, so we can issue a
    // logicalTXSynchronizationBeforeCompletion.
    // NOTE: this will get issued even if a sub-job fails or stops, provided no
    // logicalTXSynchronizationRollback method was supplied.
    // We are assuming that not providing a rollback was intentional.
    if (rollback) {
      if (this.partitionReducerProxy != null) {
        try {
          this.partitionReducerProxy.rollbackPartitionedStep();
        } catch (Exception e) {
          ExceptionConfig.wrapBatchException(e);
        }
      }
      throw new BatchContainerRuntimeException("One or more partitions failed");
    } else {
      if (this.partitionReducerProxy != null) {
        try {
          this.partitionReducerProxy.beforePartitionedStepCompletion();
        } catch (Exception e) {
          ExceptionConfig.wrapBatchException(e);
        }
      }
    }
  }
  @Override
  protected void invokePreStepArtifacts() {

    if (stepListeners != null) {
      for (StepListener listenerProxy : stepListeners) {
        // Call beforeStep on all the step listeners
        try {
          listenerProxy.beforeStep();
        } catch (Exception e) {
          ExceptionConfig.wrapBatchException(e);
        }
      }
    }

    // Invoke the reducer before all parallel steps start (must occur
    // before mapper as well)
    if (this.partitionReducerProxy != null) {
      try {
        this.partitionReducerProxy.beginPartitionedStep();
      } catch (Exception e) {
        ExceptionConfig.wrapBatchException(e);
      }
    }
  }
  @Override
  protected void invokeCoreStep()
      throws JobRestartException, JobStartException, JobExecutionAlreadyCompleteException,
          JobExecutionNotMostRecentException {

    this.plan = this.generatePartitionPlan();

    // persist the partition plan so on restart we have the same plan to reuse
    stepStatus.setNumPartitions(plan.getPartitions());

    /* When true is specified, the partition count from the current run
     * is used and all results from past partitions are discarded. Any
     * resource cleanup or back out of work done in the previous run is the
     * responsibility of the application. The PartitionReducer artifact's
     * rollbackPartitionedStep method is invoked during restart before any
     * partitions begin processing to provide a cleanup hook.
     */
    if (plan.getPartitionsOverride()) {
      if (this.partitionReducerProxy != null) {
        try {
          this.partitionReducerProxy.rollbackPartitionedStep();
        } catch (Exception e) {
          ExceptionConfig.wrapBatchException(e);
        }
      }
    }

    // Set up a blocking queue to pick up collector data from a partitioned thread
    if (this.analyzerProxy != null) {
      this.analyzerStatusQueue = new LinkedBlockingQueue<PartitionDataWrapper>();
    }
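    // Finished partition work units are handed back on this queue so the parent thread can
    // collect them and inspect their outcome in checkCompletedWork().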
    this.completedWorkQueue = new LinkedBlockingQueue<BatchPartitionWorkUnit>();

    // Build all sub jobs from partitioned step
    buildSubJobBatchWorkUnits();

    // kick off the threads
    executeAndWaitForCompletion();

    // Deal with the results.
    checkCompletedWork();
  }
  private void executeAndWaitForCompletion() throws JobRestartException {

    if (jobExecutionImpl.getJobContext().getBatchStatus().equals(BatchStatus.STOPPING)) {
      return;
    }

    int numTotalForThisExecution = parallelBatchWorkUnits.size();
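    // On a restart, partitions that completed in a previous execution are not re-run, so
    // parallelBatchWorkUnits may hold fewer units than the total partition count.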
    this.numPreviouslyCompleted = partitions - numTotalForThisExecution;
    int numCurrentCompleted = 0;
    int numCurrentSubmitted = 0;

    // Start up to the max number we are allowed by the threads attribute
    for (int i = 0; i < this.threads && i < numTotalForThisExecution; i++, numCurrentSubmitted++) {
      final BatchWorkUnit workUnit = parallelBatchWorkUnits.get(i);
      if (stepStatus.getStartCount() > 1 && !plan.getPartitionsOverride()) {
        kernelService.restartGeneratedJob(workUnit);
      } else {
        kernelService.startGeneratedJob(workUnit);
      }
    }

    while (true) {
      try {
        if (analyzerProxy != null) {
          PartitionDataWrapper dataWrapper = analyzerStatusQueue.take();
          if (PartitionEventType.ANALYZE_COLLECTOR_DATA.equals(dataWrapper.getEventType())) {
            try {
              analyzerProxy.analyzeCollectorData(dataWrapper.getCollectorData());
            } catch (Exception e) {
              ExceptionConfig.wrapBatchException(e);
            }
            continue; // without being ready to submit another
          } else if (PartitionEventType.ANALYZE_STATUS.equals(dataWrapper.getEventType())) {
            try {
              analyzerProxy.analyzeStatus(
                  dataWrapper.getBatchstatus(), dataWrapper.getExitStatus());
            } catch (Exception e) {
              ExceptionConfig.wrapBatchException(e);
            }
            completedWork.add(completedWorkQueue.take()); // Shouldn't be a long wait.
          } else {
            throw new IllegalStateException("Invalid partition state");
          }
        } else {
          // Block until at least one thread has finished before submitting more batch work.
          // Hold on to the finished work to examine later.
          completedWork.add(completedWorkQueue.take());
        }
      } catch (final InterruptedException e) {
        throw new BatchContainerRuntimeException(e);
      }

      numCurrentCompleted++;
      if (numCurrentCompleted < numTotalForThisExecution) {
        if (numCurrentSubmitted < numTotalForThisExecution) {
          // Submit the next partition using the same start-vs-restart decision as the
          // initial submission loop above.
          if (stepStatus.getStartCount() > 1 && !plan.getPartitionsOverride()) {
            kernelService.restartGeneratedJob(parallelBatchWorkUnits.get(numCurrentSubmitted++));
          } else {
            kernelService.startGeneratedJob(parallelBatchWorkUnits.get(numCurrentSubmitted++));
          }
        }
      } else {
        break;
      }
    }
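
    // For reference, the analyzer called above is a user-provided
    // javax.batch.api.partition.PartitionAnalyzer. A minimal sketch (class name and bodies are
    // illustrative only):
    //
    //   public class SampleAnalyzer extends javax.batch.api.partition.AbstractPartitionAnalyzer {
    //       public void analyzeCollectorData(Serializable data) { }  // one call per collector payload
    //       public void analyzeStatus(BatchStatus batchStatus, String exitStatus) { }  // one call per finished partition
    //   }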
  }
  private PartitionPlan generatePartitionPlan() {
    // Determine the number of partitions

    PartitionPlan plan = null;
    Integer previousNumPartitions = null;
    final org.apache.batchee.jaxb.PartitionMapper partitionMapper = step.getPartition().getMapper();

    // from persisted plan from previous run
    if (stepStatus.getNumPartitions() != null) {
      previousNumPartitions = stepStatus.getNumPartitions();
    }

    if (partitionMapper != null) { // from partition mapper

      final List<Property> propertyList =
          partitionMapper.getProperties() == null
              ? null
              : partitionMapper.getProperties().getPropertyList();

      // Set all the contexts associated with this controller.
      // Some of them may be null
      final InjectionReferences injectionRef =
          new InjectionReferences(jobExecutionImpl.getJobContext(), stepContext, propertyList);
      final PartitionMapper partitionMapperProxy =
          ProxyFactory.createPartitionMapperProxy(
              factory, partitionMapper.getRef(), injectionRef, jobExecutionImpl);

      PartitionPlan mapperPlan = null;
      try {
        mapperPlan = partitionMapperProxy.mapPartitions();
      } catch (Exception e) {
        ExceptionConfig.wrapBatchException(e);
      }
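
      // For reference, mapPartitions() above comes from a user-provided
      // javax.batch.api.partition.PartitionMapper. A minimal sketch (class name and values are
      // illustrative only):
      //
      //   public class SampleMapper implements javax.batch.api.partition.PartitionMapper {
      //       public PartitionPlan mapPartitions() {
      //           PartitionPlanImpl plan = new PartitionPlanImpl();
      //           plan.setPartitions(4);
      //           plan.setThreads(2); // 0 means "one thread per partition", handled below
      //           return plan;
      //       }
      //   }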

      // Set up the new partition plan
      plan = new BatchPartitionPlan();
      plan.setPartitionsOverride(mapperPlan.getPartitionsOverride());

      // When true is specified, the partition count from the current run
      // is used and all results from past partitions are discarded.
      if (mapperPlan.getPartitionsOverride() || previousNumPartitions == null) {
        plan.setPartitions(mapperPlan.getPartitions());
      } else {
        plan.setPartitions(previousNumPartitions);
      }

      if (mapperPlan.getThreads() == 0) {
        plan.setThreads(plan.getPartitions());
      } else {
        plan.setThreads(mapperPlan.getThreads());
      }

      plan.setPartitionProperties(mapperPlan.getPartitionProperties());
    } else if (step.getPartition().getPlan() != null) { // from static partition element in jsl

      final String partitionsAttr = step.getPartition().getPlan().getPartitions();
      String threadsAttr;

      int numPartitions = Integer.MIN_VALUE;
      int numThreads;
      Properties[] partitionProps = null;

      if (partitionsAttr != null) {
        try {
          numPartitions = Integer.parseInt(partitionsAttr);
        } catch (final NumberFormatException e) {
          throw new IllegalArgumentException(
              "Could not parse partition instances value in stepId: "
                  + step.getId()
                  + ", with instances="
                  + partitionsAttr,
              e);
        }
        // Validate before allocating so a bad value surfaces as the intended exception
        // rather than a NegativeArraySizeException.
        if (numPartitions < 1) {
          throw new IllegalArgumentException(
              "Partition instances value must be 1 or greater in stepId: "
                  + step.getId()
                  + ", with instances="
                  + partitionsAttr);
        }
        partitionProps = new Properties[numPartitions];
      }

      threadsAttr = step.getPartition().getPlan().getThreads();
      if (threadsAttr != null) {
        try {
          numThreads = Integer.parseInt(threadsAttr);
          if (numThreads == 0) {
            numThreads = numPartitions;
          }
        } catch (final NumberFormatException e) {
          throw new IllegalArgumentException(
              "Could not parse partition threads value in stepId: "
                  + step.getId()
                  + ", with threads="
                  + threadsAttr,
              e);
        }
        if (numThreads < 0) {
          throw new IllegalArgumentException(
              "Threads value must be 0 or greater in stepId: "
                  + step.getId()
                  + ", with threads="
                  + threadsAttr);
        }
      } else { // default to number of partitions if threads isn't set
        numThreads = numPartitions;
      }

      if (step.getPartition().getPlan().getProperties() != null) {

        List<JSLProperties> jslProperties = step.getPartition().getPlan().getProperties();
        for (JSLProperties props : jslProperties) {
          int targetPartition = Integer.parseInt(props.getPartition());

          try {
            partitionProps[targetPartition] = CloneUtility.jslPropertiesToJavaProperties(props);
          } catch (ArrayIndexOutOfBoundsException e) {
            throw new BatchContainerRuntimeException(
                "There are only "
                    + numPartitions
                    + " partition instances, but there are "
                    + jslProperties.size()
                    + " partition properties lists defined. Remember that partition indexing is 0 based like Java arrays.",
                e);
          }
        }
      }
      plan = new BatchPartitionPlan();
      plan.setPartitions(numPartitions);
      plan.setThreads(numThreads);
      plan.setPartitionProperties(partitionProps);
      plan.setPartitionsOverride(false); // a static JSL <plan> has no override attribute, so default to false
    }
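
    // For reference, a static plan of the kind parsed above is declared in the JSL like this
    // (values are illustrative only):
    //
    //   <partition>
    //     <plan partitions="2" threads="2">
    //       <properties partition="0">
    //         <property name="start" value="1"/>
    //       </properties>
    //       <properties partition="1">
    //         <property name="start" value="101"/>
    //       </properties>
    //     </plan>
    //   </partition>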

    // Set the other instance variables for convenience.
    this.partitions = plan.getPartitions();
    this.threads = plan.getThreads();
    this.partitionProperties = plan.getPartitionProperties();

    return plan;
  }