/**
 * Starts monitoring hibernate by attaching a statistics mbean to the entity manager injected by
 * ejb3.
 *
 * @throws RuntimeException if hibernate statistics could not be enabled
 */
private void startHibernateStatistics() throws RuntimeException {
    log.info("Starting hibernate statistics monitoring...");

    try {
        systemManager.enableHibernateStatistics();
    } catch (Exception cause) {
        // Surface the failure to the caller; startup should not silently lose monitoring.
        throw new RuntimeException("Cannot start hibernate statistics monitoring!", cause);
    }
}
@TransactionAttribute(TransactionAttributeType.NEVER) public void calculateAutoBaselines() { Properties conf = systemManager.getSystemConfiguration(subjectManager.getOverlord()); // frequency is how often the baselines are recalculated // data set is how far back for a particular scheduled measurement is included in the baseline // calcs // frequency of 3 days and data set of 10 days means "every 3 days, recalculate baselines // automatically. // For each scheduled measurement, take their last 10 days worth of data and use that data set // as the portion that will be used to get the min/max/average". String baselineFrequencyString = conf.getProperty(RHQConstants.BaselineFrequency); String baselineDataSetString = conf.getProperty(RHQConstants.BaselineDataSet); log.debug( "Found baseline defaults: " + "frequency=" + baselineFrequencyString + " dataset=" + baselineDataSetString); // Its time to auto-calculate the baselines again. // Determine how much data we need to calculate baselines for by determining the oldest and // youngest // measurement data to include in the calculations. long amountOfData = Long.parseLong(baselineDataSetString); long baselinesOlderThanTime = System.currentTimeMillis() - Long.parseLong(baselineFrequencyString); measurementBaselineManager.calculateAutoBaselines(amountOfData, baselinesOlderThanTime); // everything was calculated successfully, remember this time conf = systemManager.getSystemConfiguration( subjectManager .getOverlord()); // reload the config in case it was changed since we started try { systemManager.setSystemConfiguration(subjectManager.getOverlord(), conf, true); } catch (Exception e) { log.error( "Failed to remember the time when we just calc'ed baselines - it may recalculate again soon.", e); } }
/**
 * Returns {@code true} if LDAP authentication is enabled, or {@code false} otherwise
 * (including when the setting is absent).
 *
 * @throws RuntimeException wrapping any underlying failure, converted for the client via
 *     {@code getExceptionToThrowToClient}
 */
public Boolean checkLdapConfiguredStatus() throws RuntimeException {
    try {
        SystemSettings systemSettings = systemManager.getUnmaskedSystemSettings(true);
        String value = systemSettings.get(SystemSetting.LDAP_BASED_JAAS_PROVIDER);
        // Boolean.parseBoolean is null-safe ("true".equalsIgnoreCase(null) is false), so the
        // previous explicit (value != null) ternary and Boolean.valueOf boxing are unnecessary.
        return Boolean.parseBoolean(value);
    } catch (Throwable t) {
        throw getExceptionToThrowToClient(t);
    }
}
private void logServerStartedMessage() { Subject overlord = subjectManager.getOverlord(); ProductInfo productInfo = systemManager.getProductInfo(overlord); log.info("--------------------------------------------------"); // 50 dashes log.info( productInfo.getFullName() + " " + productInfo.getVersion() + " (build " + productInfo.getBuildNumber() + ") Server started."); log.info("--------------------------------------------------"); // 50 dashes }
/** * This will make sure all jobs that need to periodically run are scheduled. * * @throws RuntimeException if unable to schedule a job */ private void scheduleJobs() throws RuntimeException { log.info("Scheduling asynchronous jobs..."); /* * All jobs need to be set as non-volatile since a volatile job in a clustered environment is effectively * non-volatile; */ // TODO [mazz]: make all of the intervals here configurable via something like SystemManagerBean serverManager.scheduleServerHeartbeat(); cacheConsistencyManager.scheduleServerCacheReloader(); systemManager.scheduleConfigCacheReloader(); try { // Do not check until we are up at least 1 min, and every minute thereafter. final long initialDelay = 1000L * 60; final long interval = 1000L * 60; schedulerBean.scheduleSimpleRepeatingJob( SavedSearchResultCountRecalculationJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule asynchronous resource deletion job.", e); } try { // Do not check until we are up at least 1 min, and every 5 minutes thereafter. final long initialDelay = 1000L * 60; final long interval = 1000L * 60 * 5; schedulerBean.scheduleSimpleRepeatingJob( AsyncResourceDeleteJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule asynchronous resource deletion job.", e); } try { // Do not check until we are up at least 1 min, and every 5 minutes thereafter. final long initialDelay = 1000L * 60; final long interval = 1000L * 60 * 5; schedulerBean.scheduleSimpleRepeatingJob( PurgeResourceTypesJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule purge resource types job.", e); } try { // Do not check until we are up at least 1 min, and every 5 minutes thereafter. 
final long initialDelay = 1000L * 60; final long interval = 1000L * 60 * 5; schedulerBean.scheduleSimpleRepeatingJob( PurgePluginsJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule purge plugins job.", e); } // DynaGroup Auto-Recalculation Job try { // Do not check until we are up at least 1 min, and every minute thereafter. final long initialDelay = 1000L * 60; final long interval = 1000L * 60; schedulerBean.scheduleSimpleRepeatingJob( DynaGroupAutoRecalculationJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule DynaGroup auto-recalculation job.", e); } // Cluster Manager Job try { String oldJobName = "org.rhq.enterprise.server.scheduler.jobs.ClusterManagerJob"; boolean foundAndDeleted = schedulerBean.deleteJob(oldJobName, oldJobName); if (foundAndDeleted) { log.info("Unscheduling deprecated job references for " + oldJobName + "..."); } else { log.debug("No deprecated job references found for " + oldJobName + "."); } // Wait long enough to allow the Server instance jobs to start executing first. final long initialDelay = 1000L * 60 * 2; // 2 mins final long interval = 1000L * 30; // 30 secs schedulerBean.scheduleSimpleRepeatingJob( CloudManagerJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule cloud management job.", e); } // Suspected Agents Job try { // Do not check until we are up at least 10 mins, but check every 60 secs thereafter. 
final long initialDelay = 1000L * 60 * 10; // 10 mins final long interval = 1000L * 60; // 60 secs schedulerBean.scheduleSimpleRepeatingJob( CheckForSuspectedAgentsJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule suspected Agents job.", e); } // Timed Out Operations Job try { final long initialDelay = 1000L * 60 * 3; // 3 min final long interval = 1000L * 60 * 10; // 10 minutes schedulerBean.scheduleSimpleRepeatingJob( CheckForTimedOutOperationsJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule check-for-timed-out-operations job.", e); } // Timed Out Resource Configuration Update Requests Job // (NOTE: We don't need to check for timed out plugin Cofiguration updates, since those are // executed synchronously.) try { final long initialDelay = 1000L * 60 * 4; // 4 mins final long interval = 1000L * 60 * 10; // 10 mins schedulerBean.scheduleSimpleRepeatingJob( CheckForTimedOutConfigUpdatesJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule check-for-timed-out-configuration-update-requests job.", e); } // Timed Out Content Requests Job try { final long initialDelay = 1000L * 60 * 5; // 5 mins final long interval = 1000L * 60 * 15; // 15 mins schedulerBean.scheduleSimpleRepeatingJob( CheckForTimedOutContentRequestsJob.class, true, false, initialDelay, interval); } catch (Exception e) { log.error("Cannot schedule check-for-timed-out-artifact-requests job.", e); } // Data Purge Job try { // TODO [mazz]: make the data purge job's cron string configurable via SystemManagerBean // For Quartz cron syntax, see: // http://www.quartz-scheduler.org/documentation/quartz-2.1.x/tutorials/crontrigger String cronString = "0 0 * * * ?"; // every hour, on the hour schedulerBean.scheduleSimpleCronJob(DataPurgeJob.class, true, false, cronString); } catch (Exception e) { log.error("Cannot schedule data purge job.", e); } // Server Plugin Jobs 
try { ServerPluginServiceMBean mbean = LookupUtil.getServerPluginService(); MasterServerPluginContainer masterPC = mbean.getMasterPluginContainer(); masterPC.scheduleAllPluginJobs(); } catch (Exception e) { log.error("Cannot schedule server plugin jobs.", e); } // Alerting Availability Duration Job (create only, nothing actually scheduled here) try { schedulerBean.scheduleTriggeredJob(AlertAvailabilityDurationJob.class, false, null); } catch (Exception e) { log.error("Cannot create alert availability duration job.", e); } return; }