Example #1
  @BeforeMethod
  public void setUp() {
    // mock the Job and stub getName() so the executor under test sees a stable name
    job = mock(Job.class);
    given(job.getName()).willReturn("mock");
    jobExecutor = createJobExecutor(job);

    context = new HashMap<>();
  }
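
A minimal sketch (not part of the original snippet) of a test that consumes this fixture; it assumes the same TestNG/BDDMockito imports and that the executor created in setUp() exposes an execute(Map) method, which is a guess about the class under test:

  @Test
  public void executorReadsTheJobName() {
    // execute(...) is a hypothetical method on the executor built in setUp()
    jobExecutor.execute(context);

    // the stub in setUp() makes getName() return "mock"; verify it was consulted
    then(job).should(atLeastOnce()).getName();
  }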
Example #2
  void addJobsToSpinner(List<Job> listOfJobs) {
    // collect each job's name into an array that backs the spinner adapter
    int counter = 0;
    String[] jobNames = new String[listOfJobs.size()];

    for (Job job : listOfJobs) {
      jobNames[counter] = job.getName();
      counter++;
    }
    jobsSpinnerAdapter =
        new ArrayAdapter<>(getApplicationContext(), R.layout.spinner_item, jobNames);
    jobsSpinner.setAdapter(jobsSpinnerAdapter);
  }
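
A hedged follow-up sketch (not from the original source) showing how a selection made through this adapter might be read back; it assumes the job list is also kept in a field, here called jobs, so a spinner position can be mapped back to a Job:

  jobsSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
    @Override
    public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
      // positions in the adapter line up with the list passed to addJobsToSpinner
      Job selected = jobs.get(position);
      Log.d("Jobs", "Selected job: " + selected.getName());
    }

    @Override
    public void onNothingSelected(AdapterView<?> parent) {
      // nothing selected; keep the previous choice
    }
  });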
Example #3
  /**
   * @param job the job for which the next parameters should be determined
   * @return the next job parameters if they can be located
   * @throws JobParametersNotFoundException if the job has no incrementer or the incrementer
   *     cannot provide bootstrap parameters
   */
  private JobParameters getNextJobParameters(Job job) throws JobParametersNotFoundException {
    String jobIdentifier = job.getName();
    JobParameters jobParameters;
    List<JobInstance> lastInstances = jobExplorer.getJobInstances(jobIdentifier, 0, 1);

    JobParametersIncrementer incrementer = job.getJobParametersIncrementer();
    if (incrementer == null) {
      throw new JobParametersNotFoundException(
          "No job parameters incrementer found for job=" + jobIdentifier);
    }

    if (lastInstances.isEmpty()) {
      jobParameters = incrementer.getNext(new JobParameters());
      if (jobParameters == null) {
        throw new JobParametersNotFoundException(
            "No bootstrap parameters found from incrementer for job=" + jobIdentifier);
      }
    } else {
      jobParameters = incrementer.getNext(lastInstances.get(0).getJobParameters());
    }
    return jobParameters;
  }
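
A hedged sketch of how such a helper is typically consumed in Spring Batch: the derived parameters are handed to a JobLauncher. The jobLauncher field below is an assumption, not part of the original class:

  private void launchNextInstance(Job job) throws Exception {
    // jobLauncher is assumed to be an injected org.springframework.batch.core.launch.JobLauncher
    JobParameters nextParameters = getNextJobParameters(job);
    jobLauncher.run(job, nextParameters);
  }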
Example #4
 /** Creates a context with the given settings and Hadoop configuration, using empty default settings. */
 public JobContext(Job job, Map<String, String> settings, Configuration conf) {
   this.job = job;
   this.jobSettings = toJobSettings(job.getName(), Maps.<String, String>newHashMap(), settings);
   this.conf = toJobHadoopConf(this.jobSettings, conf);
 }
Example #5
 /** Creates a context for the given stream description with the given settings and Hadoop configuration. */
 public JobContext(
     StreamDescription descrip, Job job, Map<String, String> settings, Configuration conf) {
   this.job = job;
   this.jobSettings = toJobSettings(job.getName(), getDefaultSettings(descrip), settings);
   this.conf = toJobHadoopConf(this.jobSettings, conf);
 }
Example #6
  /**
   * Calculate and update the job timings. If the job lagged too much or took too long to run,
   * spit out a warning (and, if it's really excessive, kill the router).
   */
  void updateStats(Job job, long doStart, long origStartAfter, long duration) {
    if (_context.router() == null) return;
    String key = job.getName();
    long lag = doStart - origStartAfter; // how long were we ready and waiting?
    MessageHistory hist = _context.messageHistory();
    long uptime = _context.router().getUptime();

    if (lag < 0) lag = 0;
    if (duration < 0) duration = 0;

    JobStats stats = _jobStats.get(key);
    if (stats == null) {
      stats = new JobStats(key);
      _jobStats.put(key, stats);
      // yes, if two runners finish the same job at the same time, this could
      // create an extra object, but who cares, it's pushed out of the map
      // immediately anyway.
    }
    stats.jobRan(duration, lag);

    String dieMsg = null;

    if (lag > _lagWarning) {
      dieMsg =
          "Lag too long for job "
              + key
              + " ["
              + lag
              + "ms and a run time of "
              + duration
              + "ms]";
    } else if (duration > _runWarning) {
      dieMsg =
          "Job run too long for job "
              + key
              + " ["
              + lag
              + "ms lag and run time of "
              + duration
              + "ms]";
    }

    if (dieMsg != null) {
      if (_log.shouldLog(Log.WARN)) _log.warn(dieMsg);
      if (hist != null) hist.messageProcessingError(-1, JobQueue.class.getName(), dieMsg);
    }

    if ((lag > _lagFatal) && (uptime > _warmupTime)) {
      // this is fscking bad - the network at this size shouldn't have this much real contention
      // so we're going to DIE DIE DIE
      if (_log.shouldLog(Log.WARN))
        _log.log(
            Log.WARN,
            "The router is either incredibly overloaded or (more likely) there's an error.",
            new Exception("ttttooooo mmmuuuccccchhhh llllaaagggg"));
      // try { Thread.sleep(5000); } catch (InterruptedException ie) {}
      // Router.getInstance().shutdown();
      return;
    }

    if ((uptime > _warmupTime) && (duration > _runFatal)) {
      // slow CPUs can get hosed with ElGamal, but 10s is too much.
      if (_log.shouldLog(Log.WARN))
        _log.log(
            Log.WARN,
            "The router is incredibly overloaded - either you have a 386, or (more likely) there's an error. ",
            new Exception("ttttooooo sssllloooowww"));
      // try { Thread.sleep(5000); } catch (InterruptedException ie) {}
      // Router.getInstance().shutdown();
      return;
    }
  }