Example #1
 /**
  * Creates a new instance given a configuration and a load balancer.
  *
  * @param config The configuration details for this job scheduler.
  * @param balancer The load balancer to use for distributing jobs.
  */
 public MeandreJobScheduler(MeandreJobSchedulerConfig config, MeandreLoadBalancer balancer) {
   loadBalancer = balancer;
   workers = config.getServers();
   for (MeandreServerProxy server : workers) {
     loadBalancer.addServer(server);
   }
 }
Example #2
 /** Initializes this instance. */
 @PostConstruct
 public void init() {
   assert config != null : "No configuration was provided to the job scheduler.";
   workers = config.getServers();
   for (MeandreServerProxy server : workers) {
     loadBalancer.addServer(server);
   }
 }
Example #3
 /**
  * Remove a server from the job scheduler.
  *
  * @param server Server to remove.
  */
 public void removeServer(MeandreServerProxy server) {
   workersLock.lock();
   try {
     workers.remove(server);
     loadBalancer.removeServer(server);
   } finally {
     workersLock.unlock();
   }
 }
Example #4
  /** Attempt to run any queued jobs. */
  public void runJobs() {
    Session session = null;
    JobDao jobDao = daoFactory.getJobDao();
    try {
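      // Hold both the queue lock and the worker lock for the entire pass so
      // the job queue and worker list cannot change while jobs are dispatched.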
      queueLock.lock();
      workersLock.lock();
      if (jobQueue.isEmpty()) {
        logger.fine("No queued jobs.");
        return;
      }

      session = jobDao.getSessionFactory().openSession();
      jobDao.startManagedSession(session);

      // Dispatch queued jobs until the queue drains or no server is free.
      while (!jobQueue.isEmpty()) {
        logger.fine("Found " + jobQueue.size() + " queued jobs.");
        MeandreServerProxy server = loadBalancer.nextAvailableServer();
        if (server == null) {
          logger.info("All servers are busy. Will try again in " + POLL_PERIOD + " seconds.");
          return;
        }
        logger.fine("Server " + server + " is available for processing.");

        // Peek rather than remove: the job is only dequeued after a successful
        // hand-off, or once it has exhausted its execution retries.
        Job job = jobQueue.peek();

        job.incrementNumTries();
        job.setJobStatus(JobStatus.SUBMITTED);
        job.setSubmitTimestamp(new Date());

        logger.fine("Preparing to update job " + job.getId() + " as submitted.");

        // Persist the job's new status in its own short transaction.
        Transaction transaction = session.beginTransaction();
        try {
          jobDao.makePersistent(job);
          transaction.commit();
          logger.fine("Job " + job.getId() + " updated.");
        } catch (HibernateException e) {
          logger.warning("Data access exception: " + e.getMessage());
          rollback(transaction);
          return;
        } catch (DataAccessException e) {
          logger.warning("Data access exception: " + e.getMessage());
          rollback(transaction);
          return;
        } catch (Exception e) {
          logger.warning(e.getMessage());
          rollback(transaction);
          return;
        }

        // Hand the job off to the selected server and record the result.
        try {
          logger.fine("Attempting to contact server " + server + " to execute job.");

          ExecResponse response = server.executeJob(job);
          logger.fine("Execution response received.");

          // If the executeJob() method above succeeded, the Meandre
          // server will have (most likely) changed the job status to
          // "started". If the status changes, the NEMA probe running
          // on the Meandre server will have written the new status to
          // the same database the flow service uses. Therefore, we
          // want to refresh the state of the job here to pick up the
          // status change. Otherwise, the old status (submitted) will
          // be rewritten to the database, and the "started" state
          // will be lost.
          session.refresh(job);

          logger.fine("Attempting to record job execution response.");
          job.setHost(server.getMeandreServerProxyConfig().getHost());
          job.setPort(server.getMeandreServerProxyConfig().getPort());
          job.setExecPort(response.getPort());
          job.setExecutionInstanceId(response.getUri());

          // Record the execution details in a second transaction.
          transaction = session.beginTransaction();
          try {
            jobDao.makePersistent(job);
            transaction.commit();
            logger.fine("Job execution response recorded in database.");
            jobQueue.remove();
          } catch (HibernateException e) {
            logger.warning("Data access exception: " + e.getMessage());
            rollback(transaction);
            return;
          } catch (DataAccessException e) {
            logger.warning("Data access exception: " + e.getMessage());
            rollback(transaction);
            return;
          } catch (Exception e) {
            logger.warning(e.getMessage());
            rollback(transaction);
            return;
          }
        } catch (MeandreServerException serverException) {
          logger.warning(serverException.getMessage());
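          // The hand-off failed: clear the submit timestamp and put the job
          // back into the scheduled state so it can be retried.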
          job.setSubmitTimestamp(null);
          job.setJobStatus(JobStatus.SCHEDULED);

          if (job.getNumTries() == MAX_EXECUTION_TRIES) {
            logger.info(
                "Unsuccessfully tried "
                    + MAX_EXECUTION_TRIES
                    + " times to execute job "
                    + job.getId()
                    + ". Will mark job as failed.");
            job.setJobStatus(JobStatus.FAILED);
            job.setEndTimestamp(new Date());
            job.setUpdateTimestamp(new Date());
            jobQueue.remove();
          }

          // Persist the rescheduled (or failed) job state.
          transaction = session.beginTransaction();
          try {
            jobDao.makePersistent(job);
            transaction.commit();
          } catch (HibernateException e) {
            logger.warning("Data access exception: " + e.getMessage());
            rollback(transaction);
            return;
          } catch (DataAccessException e) {
            logger.warning("Data access exception: " + e.getMessage());
            rollback(transaction);
            return;
          } catch (Exception e) {
            logger.warning(e.getMessage());
            rollback(transaction);
            return;
          }
        }
      }
    } catch (Exception e) {
      logger.severe("Unexpected error while running queued jobs: " + e.getMessage());
    } finally {
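      // Always release both locks and close the managed session, even if an
      // error cut the pass short.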
      workersLock.unlock();
      queueLock.unlock();
      if (session != null) {
        try {
          jobDao.endManagedSession();
          session.close();
        } catch (HibernateException e) {
          logger.warning(e.getMessage());
        }
      }
    }
  }