@Test
  public void testIndexRebuilds() throws Exception {
    IOHelper.deleteFile(schedulerStoreDir);

    JobSchedulerStoreImpl schedulerStore = createScheduler();
    broker = createBroker(schedulerStore);
    broker.start();
    ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost");
    Connection connection = cf.createConnection();
    connection.start();
    for (int i = 0; i < NUM_JOBS; ++i) {
      scheduleRepeating(connection);
    }
    connection.close();

    JobScheduler scheduler = schedulerStore.getJobScheduler("JMS");
    assertNotNull(scheduler);
    assertEquals(NUM_JOBS, scheduler.getAllJobs().size());

    broker.stop();

    IOHelper.delete(new File(schedulerStoreDir, "scheduleDB.data"));

    schedulerStore = createScheduler();
    broker = createBroker(schedulerStore);
    broker.start();

    scheduler = schedulerStore.getJobScheduler("JMS");
    assertNotNull(scheduler);
    assertEquals(NUM_JOBS, scheduler.getAllJobs().size());
  }
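The helper scheduleRepeating is not shown in this listing; a minimal sketch of what it could look like, assuming the standard org.apache.activemq.ScheduledMessage message properties and a hypothetical "test.queue" destination:

  private void scheduleRepeating(Connection connection) throws Exception {
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageProducer producer = session.createProducer(session.createQueue("test.queue"));
    TextMessage message = session.createTextMessage("repeating job");
    // Delay the first delivery, then redeliver on a fixed period; a repeat
    // count of -1 keeps the job in the scheduler store indefinitely.
    message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, TimeUnit.MINUTES.toMillis(6));
    message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_PERIOD, 500);
    message.setIntProperty(ScheduledMessage.AMQ_SCHEDULED_REPEAT, -1);
    producer.send(message);
    session.close();
  }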
 public void stop() {
   LOGGER.info("Stopping BIMserver");
   executorService.shutdown();
   if (bimDatabase != null) {
     bimDatabase.close();
   }
   if (bimScheduler != null) {
     bimScheduler.close();
   }
   if (longActionManager != null) {
     longActionManager.shutdown();
   }
   if (notificationsManager != null) {
     notificationsManager.shutdown();
   }
   if (embeddedWebServer != null) {
     embeddedWebServer.shutdown();
   }
   if (protocolBuffersServer != null) {
     protocolBuffersServer.shutdown();
   }
   if (commandLine != null) {
     commandLine.shutdown();
   }
   LOGGER.info("BIMserver stopped");
 }
Example #3
  /** @see FlowService#executeJob(Credentials, String, String, String, long, long, String) */
  @Override
  public Job executeJob(
      Credentials credentials,
      String token,
      String name,
      String description,
      long flowInstanceId,
      long userId,
      String userEmail) {
    FlowDao flowDao = daoFactory.getFlowDao();
    JobDao jobDao = daoFactory.getJobDao();

    Flow flowInstance = flowDao.findById(flowInstanceId, false);

    Job job = new Job();
    job.setToken(token);
    job.setName(name);
    job.setDescription(description);
    job.setFlow(flowInstance);
    job.setOwnerId(userId);
    job.setOwnerEmail(userEmail);
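    // Serialize the credentials as plain "user:password" text for storage with the job.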
    SimpleCredentials simpleCreds = (SimpleCredentials) credentials;
    String serializedCreds = simpleCreds.getUserID() + ":" + new String(simpleCreds.getPassword());
    job.setCredentials(serializedCreds);
    jobDao.makePersistent(job);

    jobScheduler.scheduleJob(job);
    job.setJobStatus(JobStatus.SCHEDULED);
    job.setScheduleTimestamp(new Date());
    jobDao.makePersistent(job);

    jobStatusMonitor.start(job, notificationCreator);

    return job;
  }
  private void assignConnectorPolicy(ActivityCluster ac, Map<ActivityId, ActivityPlan> taskMap) {
    Map<ConnectorDescriptorId, IConnectorPolicy> cPolicyMap =
        new HashMap<ConnectorDescriptorId, IConnectorPolicy>();
    Set<ActivityId> activities = ac.getActivityMap().keySet();
    BitSet targetBitmap = new BitSet();
    for (ActivityId a1 : activities) {
      Task[] ac1TaskStates = taskMap.get(a1).getTasks();
      int nProducers = ac1TaskStates.length;
      List<IConnectorDescriptor> outputConns = ac.getActivityOutputMap().get(a1);
      if (outputConns != null) {
        for (IConnectorDescriptor c : outputConns) {
          ConnectorDescriptorId cdId = c.getConnectorId();
          ActivityId a2 = ac.getConsumerActivity(cdId);
          Task[] ac2TaskStates = taskMap.get(a2).getTasks();
          int nConsumers = ac2TaskStates.length;

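          // fanouts[i] = number of consumer partitions fed by producer
          // partition i, used to select the connector policy below.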
          int[] fanouts = new int[nProducers];
          if (c.allProducersToAllConsumers()) {
            for (int i = 0; i < nProducers; ++i) {
              fanouts[i] = nConsumers;
            }
          } else {
            for (int i = 0; i < nProducers; ++i) {
              c.indicateTargetPartitions(nProducers, nConsumers, i, targetBitmap);
              fanouts[i] = targetBitmap.cardinality();
            }
          }
          IConnectorPolicy cp = assignConnectorPolicy(ac, c, nProducers, nConsumers, fanouts);
          cPolicyMap.put(cdId, cp);
        }
      }
    }
    scheduler.getJobRun().getConnectorPolicyMap().putAll(cPolicyMap);
  }
Example #5
  public org.neo4j.server.database.RrdDbWrapper createRrdDbAndSampler(
      final Database db, JobScheduler scheduler) throws IOException {
    NodeManager nodeManager =
        db.getGraph().getDependencyResolver().resolveDependency(NodeManager.class);

    Sampleable[] primitives = {
      new NodeIdsInUseSampleable(nodeManager),
      new PropertyCountSampleable(nodeManager),
      new RelationshipCountSampleable(nodeManager)
    };

    Sampleable[] usage = {};

    final String rrdPath =
        config.getString(RRDB_LOCATION_PROPERTY_KEY, getDefaultRrdFile(db.getGraph()));
    final RrdDbWrapper rrdb =
        createRrdb(rrdPath, isEphemereal(db.getGraph()), join(primitives, usage));

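    // Sample the primitive counts into the RRD every 3 seconds, starting
    // immediately (0 ms initial delay).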
    scheduler.scheduleAtFixedRate(
        new RrdJob(new RrdSamplerImpl(rrdb.get(), primitives)),
        RRD_THREAD_NAME + "[primitives]",
        SECONDS.toMillis(0),
        SECONDS.toMillis(3));

    return rrdb;
  }
 @Override
 public String getActiveProgramsList() throws RemoteException {
   String list =
       String.format(
           "%-20s %-10s %-10s %-20s %-20s%n", "Job", "ID", "Status", "Mappers", "Reducers");
   list = list.concat(scheduler.getActiveProgramsList());
   return list;
 }
 @Override
 public synchronized void jobFinished(boolean success, NodeJob job) throws RemoteException {
   if (!success) {
     dispatcher.enqueue(job);
   } else {
     scheduler.jobFinished(job);
   }
 }
 @Override
 public int dispatchJob(Class<?> clazz, String filename) throws RemoteException {
   Map<Integer, Set<Integer>> blockLocations = fsTable.get(filename);
   if (blockLocations == null) {
     return -1;
   }
   return scheduler.issueJob(clazz, filename, blockLocations.size());
 }
  @Test
  public void testIndexRebuildsAfterSomeJobsExpire() throws Exception {
    IOHelper.deleteFile(schedulerStoreDir);

    JobSchedulerStoreImpl schedulerStore = createScheduler();
    broker = createBroker(schedulerStore);
    broker.start();
    ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost");
    Connection connection = cf.createConnection();
    connection.start();
    for (int i = 0; i < NUM_JOBS; ++i) {
      scheduleRepeating(connection);
      scheduleOneShot(connection);
    }
    connection.close();

    JobScheduler scheduler = schedulerStore.getJobScheduler("JMS");
    assertNotNull(scheduler);
    assertEquals(NUM_JOBS * 2, scheduler.getAllJobs().size());

    final JobScheduler awaitingOneShotTimeout = scheduler;
    assertTrue(
        "One shot jobs should time out",
        Wait.waitFor(
            new Wait.Condition() {

              @Override
              public boolean isSatisified() throws Exception {
                return awaitingOneShotTimeout.getAllJobs().size() == NUM_JOBS;
              }
            },
            TimeUnit.MINUTES.toMillis(2)));

    broker.stop();

    IOHelper.delete(new File(schedulerStoreDir, "scheduleDB.data"));

    schedulerStore = createScheduler();
    broker = createBroker(schedulerStore);
    broker.start();

    scheduler = schedulerStore.getJobScheduler("JMS");
    assertNotNull(scheduler);
    assertEquals(NUM_JOBS, scheduler.getAllJobs().size());
  }
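scheduleOneShot is likewise not shown; under the same assumptions, a one-shot job is a delayed message with no period or repeat, so it leaves the scheduler store once it fires (which is what the Wait condition above relies on):

  private void scheduleOneShot(Connection connection) throws Exception {
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageProducer producer = session.createProducer(session.createQueue("test.queue"));
    TextMessage message = session.createTextMessage("one-shot job");
    // A bare delay fires exactly once; the job is then removed from the store.
    message.setLongProperty(ScheduledMessage.AMQ_SCHEDULED_DELAY, TimeUnit.SECONDS.toMillis(30));
    producer.send(message);
    session.close();
  }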
 private TaskCluster getTaskCluster(TaskId tid) {
   JobRun run = scheduler.getJobRun();
   ActivityCluster ac = run.getActivityClusterGraph().getActivityMap().get(tid.getActivityId());
   ActivityClusterPlan acp = run.getActivityClusterPlanMap().get(ac.getId());
   Task[] tasks = acp.getActivityPlanMap().get(tid.getActivityId()).getTasks();
   Task task = tasks[tid.getPartition()];
   assert task.getTaskId().equals(tid);
   return task.getTaskCluster();
 }
Example #11
 /** @see FlowService#abortJob(long) */
 @Override
 public void abortJob(long jobId) throws IllegalStateException {
   Job job = daoFactory.getJobDao().findById(jobId, false);
   if (!job.isRunning()) {
     throw new IllegalStateException(
         "Cannot abort job " + jobId + " because it has already completed.");
   }
   try {
     jobScheduler.abortJob(job);
   } catch (MeandreServerException e) {
     throw new ServiceException("Job " + jobId + " could not be aborted.", e);
   }
 }
  public FacilityManagerMasterImpl(Config config)
      throws IOException, AlreadyBoundException, InterruptedException {
    super(config);

    currentNode = 0;
    String[] participants = config.getParticipantIps();
    int expectedNumParticipants = participants.length;

    managers = new FacilityManager[expectedNumParticipants];
    managers[getNodeId()] = this;

    clusterName = config.getClusterName();
    rmiPort = config.getRmiPort();

    /* FILESYSTEM INIT */
    fsTable = Collections.synchronizedMap(new HashMap<String, Map<Integer, Set<Integer>>>());

    Registry r = LocateRegistry.createRegistry(rmiPort);
    r.bind(clusterName + REGISTRY_MASTER_KEY, UnicastRemoteObject.exportObject(this, 0));
    connectParticipants();

    System.out.println("Waiting for slaves to connect...");
    // The array length is fixed, so wait until every slot has actually been
    // filled by a connecting slave.
    while (Arrays.stream(managers).filter(Objects::nonNull).count() < expectedNumParticipants) {
      Thread.sleep(1000);
    }

    healthChecker = new HealthChecker(this, expectedNumParticipants);
    scheduler = new JobScheduler(this, config);
    dispatcher = new JobDispatcher(this, config);
    dispatcher.setScheduler(scheduler);
    scheduler.setDispatcher(dispatcher);
    scheduler.setHealthChecker(healthChecker);

    healthChecker.start();
    dispatcher.start();

    System.out.println("All slaves connected.");
  }
Example #13
 /** @see FlowService#getWorkerStatus(String, int) */
 @Override
 public MeandreServerProxyStatus getWorkerStatus(String host, int port) {
   if (host == null) {
     throw new IllegalArgumentException("Host must not be null.");
   }
   Map<MeandreServerProxyConfig, MeandreServerProxyStatus> status = jobScheduler.getWorkerStatus();
   Iterator<MeandreServerProxyConfig> configIterator = status.keySet().iterator();
   while (configIterator.hasNext()) {
     MeandreServerProxyConfig config = configIterator.next();
     if (host.equals(config.getHost()) && port == config.getPort()) {
       return status.get(config);
     }
   }
   return null;
 }
  public ActivityClusterPlan planActivityCluster(ActivityCluster ac) throws HyracksException {
    JobRun jobRun = scheduler.getJobRun();
    Map<ActivityId, ActivityPartitionDetails> pcMap = computePartitionCounts(ac);

    Map<ActivityId, ActivityPlan> activityPlanMap = buildActivityPlanMap(ac, jobRun, pcMap);

    assignConnectorPolicy(ac, activityPlanMap);

    TaskCluster[] taskClusters = computeTaskClusters(ac, jobRun, activityPlanMap);

    if (LOGGER.isLoggable(Level.INFO)) {
      LOGGER.info("Plan for " + ac);
      LOGGER.info("Built " + taskClusters.length + " Task Clusters");
      for (TaskCluster tc : taskClusters) {
        LOGGER.info("Tasks: " + Arrays.toString(tc.getTasks()));
      }
    }

    return new ActivityClusterPlan(taskClusters, activityPlanMap);
  }
Example #15
  @PostConstruct
  public void init() {
    logger.info("Initializing NEMA Flow Service...");

    notificationCreator = new JobStatusNotificationCreator(daoFactory);
    try {
      headServer =
          meandreServerProxyFactory.getServerProxyInstance(flowServiceConfig.getHeadConfig(), true);
    } catch (MeandreServerException e) {
      throw new RuntimeException("Could not instantiate head server.", e);
    }
    flowServiceConfig.addChangeListener(this);

    // Any jobs marked as scheduled in the database will be put back in the
    // queue for execution.
    JobDao jobDao = daoFactory.getJobDao();
    List<Job> scheduledJobs;
    try {
      Session session = jobDao.getSessionFactory().openSession();
      jobDao.startManagedSession(session);
      scheduledJobs = jobDao.getJobsByStatus(Job.JobStatus.SCHEDULED);
      jobDao.endManagedSession();
      session.close();
    } catch (HibernateException e) {
      throw new RuntimeException("Problem searching for scheduled jobs" + " in the database.", e);
    }

    if (scheduledJobs != null && !scheduledJobs.isEmpty()) {
      logger.info(
          scheduledJobs.size()
              + " scheduled jobs found in the database. "
              + "Jobs will be rescheduled for execution.");
      for (Job job : scheduledJobs) {
        jobScheduler.scheduleJob(job);
        jobStatusMonitor.start(job, notificationCreator);
      }
    }
  }
Example #16
  /** @see ConfigChangeListener#configChanged() */
  @Override
  public void configChanged() {
    logger.info("Received configuration change notification.");

    if (!headServer.getConfig().equals(flowServiceConfig.getHeadConfig())) {
      MeandreServerProxy newHead;
      try {
        newHead =
            meandreServerProxyFactory.getServerProxyInstance(
                flowServiceConfig.getHeadConfig(), true);
      } catch (MeandreServerException e) {
        throw new RuntimeException("Could not instantiate head server.", e);
      }
      meandreServerProxyFactory.release(headServer);
      headServer = newHead;
      logger.info(
          "Head server configuration has changed. New head "
              + "server is "
              + headServer.toString());
    } else {
      logger.info("Head server has not changed.");
    }
    jobScheduler.setWorkerConfigs(flowServiceConfig.getWorkerConfigs());
  }
 private void executePhase(ExecutionContext context, Set<ExecutionScript> executions)
     throws InterruptedException, IOException {
   assert context != null;
   assert executions != null;
   YSLOG.info(
       "I03000",
       context.getBatchId(),
       context.getFlowId(),
       context.getExecutionId(),
       context.getPhase());
   long start = System.currentTimeMillis();
   try {
     if (skipFlows.contains(context.getFlowId())) {
       YSLOG.info(
           "I03002",
           context.getBatchId(),
           context.getFlowId(),
           context.getExecutionId(),
           context.getPhase());
       return;
     }
     List<? extends Job> jobs;
     ErrorHandler handler;
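      // SETUP and normal execution phases fail fast (STRICT); CLEANUP and
      // FINALIZE keep going past individual job failures (BEST_EFFORT).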
     switch (context.getPhase()) {
       case SETUP:
         jobs = buildSetupJobs(context);
         handler = JobScheduler.STRICT;
         break;
       case CLEANUP:
         jobs = buildCleanupJobs(context);
         handler = JobScheduler.BEST_EFFORT;
         break;
       case FINALIZE:
         jobs = buildExecutionJobs(context, executions);
         handler = JobScheduler.BEST_EFFORT;
         break;
       default:
         jobs = buildExecutionJobs(context, executions);
         handler = JobScheduler.STRICT;
         break;
     }
     PhaseMonitor monitor = obtainPhaseMonitor(context);
     try {
       scheduler.execute(monitor, context, jobs, handler);
     } finally {
       monitor.close();
     }
     YSLOG.info(
         "I03001",
         context.getBatchId(),
         context.getFlowId(),
         context.getExecutionId(),
         context.getPhase());
   } catch (IOException e) {
     YSLOG.error(
         e,
         "E03001",
         context.getBatchId(),
         context.getFlowId(),
         context.getExecutionId(),
         context.getPhase());
     throw e;
   } catch (InterruptedException e) {
     YSLOG.warn(
         e,
         "W03001",
         context.getBatchId(),
         context.getFlowId(),
         context.getExecutionId(),
         context.getPhase());
     throw e;
   } finally {
     long end = System.currentTimeMillis();
     YSLOG.info(
         "I03999",
         context.getBatchId(),
         context.getFlowId(),
         context.getExecutionId(),
         context.getPhase(),
         end - start);
   }
 }
 private Map<ActivityId, ActivityPartitionDetails> computePartitionCounts(ActivityCluster ac)
     throws HyracksException {
   PartitionConstraintSolver solver = scheduler.getSolver();
   Set<LValueConstraintExpression> lValues = new HashSet<LValueConstraintExpression>();
   for (ActivityId anId : ac.getActivityMap().keySet()) {
     lValues.add(new PartitionCountExpression(anId.getOperatorDescriptorId()));
   }
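    // One partition-count variable is solved per operator; all activities of
    // that operator share the resulting count.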
   solver.solve(lValues);
   Map<OperatorDescriptorId, Integer> nPartMap = new HashMap<OperatorDescriptorId, Integer>();
   for (LValueConstraintExpression lv : lValues) {
     Object value = solver.getValue(lv);
     if (value == null) {
       throw new HyracksException("No value found for " + lv);
     }
     if (!(value instanceof Number)) {
       throw new HyracksException(
           "Unexpected type of value bound to "
               + lv
               + ": "
               + value.getClass()
               + "("
               + value
               + ")");
     }
     int nParts = ((Number) value).intValue();
     if (nParts <= 0) {
       throw new HyracksException("Unsatisfiable number of partitions for " + lv + ": " + nParts);
     }
     nPartMap.put(
         ((PartitionCountExpression) lv).getOperatorDescriptorId(), Integer.valueOf(nParts));
   }
   Map<ActivityId, ActivityPartitionDetails> activityPartsMap =
       new HashMap<ActivityId, ActivityPartitionDetails>();
   for (ActivityId anId : ac.getActivityMap().keySet()) {
     int nParts = nPartMap.get(anId.getOperatorDescriptorId());
     int[] nInputPartitions = null;
     List<IConnectorDescriptor> inputs = ac.getActivityInputMap().get(anId);
     if (inputs != null) {
       nInputPartitions = new int[inputs.size()];
       for (int i = 0; i < nInputPartitions.length; ++i) {
         ConnectorDescriptorId cdId = inputs.get(i).getConnectorId();
         ActivityId aid = ac.getProducerActivity(cdId);
         Integer nPartInt = nPartMap.get(aid.getOperatorDescriptorId());
         nInputPartitions[i] = nPartInt;
       }
     }
     int[] nOutputPartitions = null;
     List<IConnectorDescriptor> outputs = ac.getActivityOutputMap().get(anId);
     if (outputs != null) {
       nOutputPartitions = new int[outputs.size()];
       for (int i = 0; i < nOutputPartitions.length; ++i) {
         ConnectorDescriptorId cdId = outputs.get(i).getConnectorId();
         ActivityId aid = ac.getConsumerActivity(cdId);
         Integer nPartInt = nPartMap.get(aid.getOperatorDescriptorId());
         nOutputPartitions[i] = nPartInt;
       }
     }
     ActivityPartitionDetails apd =
         new ActivityPartitionDetails(nParts, nInputPartitions, nOutputPartitions);
     activityPartsMap.put(anId, apd);
   }
   return activityPartsMap;
 }
  public void start()
      throws DatabaseInitException, BimserverDatabaseException, PluginException,
          DatabaseRestartRequiredException, ServerException {
    try {
      LOGGER.debug("Starting BIMserver");
      SVersion localVersion = versionChecker.getLocalVersion();
      if (localVersion != null) {
        LOGGER.info(
            "Version: "
                + localVersion.getMajor()
                + "."
                + localVersion.getMinor()
                + "."
                + localVersion.getRevision()
                + " - "
                + localVersion.getDate());
      } else {
        LOGGER.info("Unknown version");
      }

      try {
        pluginManager.addPluginChangeListener(
            new PluginChangeListener() {
              @Override
              public void pluginStateChanged(PluginContext pluginContext, boolean enabled) {
                // Reflect this change also in the database
                Condition pluginCondition =
                    new AttributeCondition(
                        StorePackage.eINSTANCE.getPluginDescriptor_PluginClassName(),
                        new StringLiteral(pluginContext.getPlugin().getClass().getName()));
                DatabaseSession session = bimDatabase.createSession();
                try {
                  Map<Long, PluginDescriptor> pluginsFound =
                      session.query(pluginCondition, PluginDescriptor.class, Query.getDefault());
                  if (pluginsFound.size() == 0) {
                    LOGGER.error(
                        "Error changing plugin-state in database, plugin "
                            + pluginContext.getPlugin().getClass().getName()
                            + " not found");
                  } else if (pluginsFound.size() == 1) {
                    PluginDescriptor pluginConfiguration = pluginsFound.values().iterator().next();
                    pluginConfiguration.setEnabled(pluginContext.isEnabled());
                    session.store(pluginConfiguration);
                  } else {
                    LOGGER.error(
                        "Error, too many plugin-objects found in database for name "
                            + pluginContext.getPlugin().getClass().getName());
                  }
                  session.commit();
                } catch (BimserverDatabaseException e) {
                  LOGGER.error("", e);
                } catch (ServiceException e) {
                  LOGGER.error("", e);
                } finally {
                  session.close();
                }
              }
            });
        pluginManager.loadPlugin(
            ObjectIDMPlugin.class,
            new File(".").toURI(),
            "Internal",
            new SchemaFieldObjectIDMPlugin(),
            getClass().getClassLoader(),
            PluginSourceType.INTERNAL,
            null);
      } catch (Exception e) {
        LOGGER.error("", e);
      }

      try {
        metaDataManager.init();
        pluginManager.initAllLoadedPlugins();
      } catch (PluginException e) {
        LOGGER.error("", e);
      }
      serverStartTime = new GregorianCalendar();

      longActionManager = new LongActionManager();

      Set<EPackage> packages = new LinkedHashSet<>();
      packages.add(Ifc2x3tc1Package.eINSTANCE);
      packages.add(Ifc4Package.eINSTANCE);
      templateEngine = new TemplateEngine();
      templateEngine.init(config.getResourceFetcher().getResource("templates/"));
      Path databaseDir = config.getHomeDir().resolve("database");
      BerkeleyKeyValueStore keyValueStore = new BerkeleyKeyValueStore(databaseDir);

      schemaConverterManager.registerConverter(new Ifc2x3tc1ToIfc4SchemaConverterFactory());
      schemaConverterManager.registerConverter(new Ifc4ToIfc2x3tc1SchemaConverterFactory());

      metricsRegistry = new MetricsRegistry();

      Query.setPackageMetaDataForDefaultQuery(metaDataManager.getPackageMetaData("store"));

      bimDatabase = new Database(this, packages, keyValueStore, metaDataManager);
      try {
        bimDatabase.init();
      } catch (DatabaseRestartRequiredException e) {
        bimDatabase.close();
        keyValueStore = new BerkeleyKeyValueStore(databaseDir);
        bimDatabase = new Database(this, packages, keyValueStore, metaDataManager);
        try {
          bimDatabase.init();
        } catch (InconsistentModelsException e1) {
          LOGGER.error("", e);
          serverInfoManager.setServerState(ServerState.FATAL_ERROR);
          serverInfoManager.setErrorMessage("Inconsistent models");
        }
      } catch (InconsistentModelsException e) {
        LOGGER.error("", e);
        serverInfoManager.setServerState(ServerState.FATAL_ERROR);
        serverInfoManager.setErrorMessage("Inconsistent models");
      }

      DatabaseSession encsession = bimDatabase.createSession();
      try {
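        // Generate a random 128-bit AES key on first startup and persist it in
        // the registry; subsequent startups reuse the stored key.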
        byte[] encryptionkeyBytes = null;
        if (!bimDatabase.getRegistry().has(ENCRYPTIONKEY, encsession)) {
          encryptionkeyBytes = new byte[16];
          new SecureRandom().nextBytes(encryptionkeyBytes);
          bimDatabase.getRegistry().save(ENCRYPTIONKEY, encryptionkeyBytes, encsession);
          encsession.commit();
        } else {
          encryptionkeyBytes = bimDatabase.getRegistry().readByteArray(ENCRYPTIONKEY, encsession);
        }
        encryptionkey = new SecretKeySpec(encryptionkeyBytes, "AES");
      } finally {
        encsession.close();
      }

      protocolBuffersMetaData = new ProtocolBuffersMetaData();
      protocolBuffersMetaData.load(servicesMap, ProtocolBuffersBimServerClientFactory.class);

      serverInfoManager.init(this);

      webModuleManager = new WebModuleManager(this);

      jsonHandler = new JsonHandler(this);

      serializerFactory = new SerializerFactory();
      deserializerFactory = new DeserializerFactory();

      serverSettingsCache = new ServerSettingsCache(bimDatabase);

      serverInfoManager.update();

      if (serverInfoManager.getServerState() == ServerState.MIGRATION_REQUIRED) {
        serverInfoManager.registerStateChangeListener(
            new StateChangeListener() {
              @Override
              public void stateChanged(ServerState oldState, ServerState newState) {
                if (oldState == ServerState.MIGRATION_REQUIRED && newState == ServerState.RUNNING) {
                  try {
                    initDatabaseDependantItems();
                  } catch (BimserverDatabaseException e) {
                    LOGGER.error("", e);
                  }
                }
              }
            });
      } else {
        initDatabaseDependantItems();
      }

      mailSystem = new MailSystem(this);

      diskCacheManager = new DiskCacheManager(this, config.getHomeDir().resolve("cache"));

      mergerFactory = new MergerFactory(this);

      FileBasedReflectorFactoryBuilder factoryBuilder = new FileBasedReflectorFactoryBuilder();
      reflectorFactory = factoryBuilder.newReflectorFactory();
      if (reflectorFactory == null) {
        throw new RuntimeException("No reflector factory!");
      }
      servicesMap.setReflectorFactory(reflectorFactory);

      bimScheduler = new JobScheduler(this);
      bimScheduler.start();

      if (config.isStartEmbeddedWebServer()) {
        embeddedWebServer.start();
      }

      if (config.isStartCommandLine()) {
        commandLine = new CommandLine(this);
        commandLine.start();
      }
    } catch (Throwable e) {
      LOGGER.error("", e);
      serverInfoManager.setErrorMessage(e.getMessage());
    }
  }
 public void slaveDied(int id) {
   managers[id] = null;
   scheduler.nodeDied(id);
 }
  private TaskCluster[] buildConnectorPolicyAwareTaskClusters(
      ActivityCluster ac,
      Map<ActivityId, ActivityPlan> activityPlanMap,
      Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<TaskId, Set<TaskId>>();
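    // Seed each task with a singleton cluster containing just itself; the
    // co-scheduling constraints below merge these clusters.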
    for (ActivityId anId : ac.getActivityMap().keySet()) {
      ActivityPlan ap = activityPlanMap.get(anId);
      Task[] tasks = ap.getTasks();
      for (Task t : tasks) {
        Set<TaskId> cluster = new HashSet<TaskId>();
        TaskId tid = t.getTaskId();
        cluster.add(tid);
        taskClusterMap.put(tid, cluster);
      }
    }

    JobRun jobRun = scheduler.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e :
        taskConnectivity.entrySet()) {
      Set<TaskId> cluster = taskClusterMap.get(e.getKey());
      for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
        IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
        if (cPolicy.requiresProducerConsumerCoscheduling()) {
          cluster.add(p.getLeft());
        }
      }
    }

    /*
     * taskClusterMap contains for every TID x, x -> { coscheduled consumer TIDs U x }
     * We compute the transitive closure of this relation to find the largest set of
     * tasks that need to be co-scheduled
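     * (e.g. if T1 must be co-scheduled with T2 and T2 with T3, the closure
     * puts T1, T2, and T3 into a single task cluster).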
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<TaskId, Integer>();
    for (TaskId tid : taskClusterMap.keySet()) {
      ordinalList[counter] = tid;
      ordinalMap.put(tid, counter);
      ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
      int i = ordinalMap.get(e.getKey());
      BitSet bsi = paths[i];
      if (bsi == null) {
        bsi = new BitSet(n);
        paths[i] = bsi;
      }
      for (TaskId ttid : e.getValue()) {
        int j = ordinalMap.get(ttid);
        paths[i].set(j);
        BitSet bsj = paths[j];
        if (bsj == null) {
          bsj = new BitSet(n);
          paths[j] = bsj;
        }
        bsj.set(i);
      }
    }
    for (int k = 0; k < n; ++k) {
      for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
        for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
          paths[i].set(j, paths[k].get(j));
          paths[j].set(i, paths[i].get(j));
        }
      }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<List<TaskId>>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
      List<TaskId> cluster = new ArrayList<TaskId>();
      for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
        cluster.add(ordinalList[j]);
        pending.clear(j);
      }
      clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<TaskCluster>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
      List<Task> taskStates = new ArrayList<Task>();
      for (TaskId tid : cluster) {
        taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
      }
      TaskCluster tc =
          new TaskCluster(
              new TaskClusterId(ac.getId(), counter++),
              ac,
              taskStates.toArray(new Task[taskStates.size()]));
      tcSet.add(tc);
      for (TaskId tid : cluster) {
        activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
      }
    }
    TaskCluster[] taskClusters = tcSet.toArray(new TaskCluster[tcSet.size()]);
    return taskClusters;
  }
 @Override
 public String getCompletedProgramsList() throws RemoteException {
   return scheduler.getCompletedProgramsList();
 }
Example #23
 /** @see FlowService#getWorkerStatus() */
 @Override
 public Map<MeandreServerProxyConfig, MeandreServerProxyStatus> getWorkerStatus() {
   return jobScheduler.getWorkerStatus();
 }
Example #24
 /** @see FlowService#getScheduledJobs() */
 @Override
 public List<Job> getScheduledJobs() {
   return jobScheduler.getScheduledJobs();
 }
 @Override
 public void stopProgram(String classname, String filename) throws RemoteException {
   scheduler.stopProgram(classname, filename);
 }