@Override
@RequiredPermission(Permission.MANAGE_SETTINGS)
public ServerDetails getServerDetails(Subject subject) {
    CoreServerMBean coreServerMBean = LookupUtil.getCoreServer();

    ServerDetails serverDetails = new ServerDetails();
    serverDetails.setProductInfo(getProductInfo(subject));

    HashMap<Detail, String> details = serverDetails.getDetails();

    DateFormat localTimeFormatter = DateFormat.getDateTimeInstance(DateFormat.LONG, DateFormat.FULL);
    details.put(ServerDetails.Detail.SERVER_LOCAL_TIME, localTimeFormatter.format(new Date()));
    details.put(ServerDetails.Detail.SERVER_TIMEZONE, TimeZone.getDefault().getDisplayName());
    details.put(ServerDetails.Detail.SERVER_HOME_DIR, coreServerMBean.getJBossServerHomeDir().getAbsolutePath());
    details.put(ServerDetails.Detail.SERVER_INSTALL_DIR, coreServerMBean.getInstallDir().getAbsolutePath());

    SystemDatabaseInformation dbInfo = SystemDatabaseInformation.getInstance();
    details.put(ServerDetails.Detail.DATABASE_CONNECTION_URL, dbInfo.getDatabaseConnectionURL());
    details.put(ServerDetails.Detail.DATABASE_DRIVER_NAME, dbInfo.getDatabaseDriverName());
    details.put(ServerDetails.Detail.DATABASE_DRIVER_VERSION, dbInfo.getDatabaseDriverVersion());
    details.put(ServerDetails.Detail.DATABASE_PRODUCT_NAME, dbInfo.getDatabaseProductName());
    details.put(ServerDetails.Detail.DATABASE_PRODUCT_VERSION, dbInfo.getDatabaseProductVersion());

    details.put(ServerDetails.Detail.SERVER_IDENTITY, serverManager.getServer().getName());

    return serverDetails;
}
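// Usage sketch (not part of the bean): how a caller holding MANAGE_SETTINGS might dump the
// collected details. The "systemManager" and "subject" references below are hypothetical;
// only getProductInfo()/getDetails() and the Detail-keyed map come from the code above.
ServerDetails serverDetails = systemManager.getServerDetails(subject);
System.out.println("Product: " + serverDetails.getProductInfo());
for (Map.Entry<ServerDetails.Detail, String> entry : serverDetails.getDetails().entrySet()) {
    System.out.println(entry.getKey() + " = " + entry.getValue());
}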
public void markStaleServersDown(Subject subject) {
    if (!authorizationManager.isOverlord(subject)) {
        throw new IllegalArgumentException("The markStaleServersDown method must be called by the overlord");
    }

    long staleTime = System.currentTimeMillis() - SERVER_DOWN_INTERVAL;
    String serverName = null;
    try {
        serverName = serverManager.getIdentity();
        if (log.isDebugEnabled()) {
            log.debug(serverName + " is marking stale servers DOWN");
        }
    } catch (Exception e) {
        log.error("Could not determine which instance is marking stale servers DOWN", e);
    }

    Query query = entityManager.createNamedQuery(Server.QUERY_UPDATE_SET_STALE_DOWN);
    query.setParameter("downMode", Server.OperationMode.DOWN);
    query.setParameter("normalMode", Server.OperationMode.NORMAL);
    query.setParameter("staleTime", staleTime);
    query.setParameter("thisServerName", serverName); // might be null
    int resultCount = query.executeUpdate();
    if (log.isDebugEnabled()) {
        log.debug(String.valueOf(resultCount) + " stale servers were marked DOWN");
    }

    // Perform requested partition events. Note that we only need to execute one cloud partition
    // regardless of the number of pending requests, as the work would be duplicated.
    partitionEventManager.processRequestedPartitionEvents();
}
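// Plausible shape of Server.QUERY_UPDATE_SET_STALE_DOWN, inferred only from the parameters
// bound above. The real JPQL lives on the Server entity; the field names used here
// (operationMode, mtime, name) are assumptions for illustration.
@NamedQuery(name = Server.QUERY_UPDATE_SET_STALE_DOWN, query = "" //
    + "UPDATE Server s " //
    + "   SET s.operationMode = :downMode " //
    + " WHERE s.operationMode = :normalMode " //
    + "   AND s.mtime < :staleTime " //
    + "   AND (:thisServerName IS NULL OR s.name <> :thisServerName)")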
private void initializeServer() {
    // Ensure the class is loaded and the dbType is set for our current db
    Connection conn = null;
    try {
        conn = dataSource.getConnection();
        DatabaseTypeFactory.setDefaultDatabaseType(DatabaseTypeFactory.getDatabaseType(conn));
    } catch (Exception e) {
        log.error("Could not initialize server.", e);
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (Exception e) {
                log.error("Failed to close temporary connection used for server initialization.", e);
            }
        }
    }

    // Ensure that this server is registered in the database.
    createDefaultServerIfNecessary();

    // Immediately put the server into MAINTENANCE mode if configured to do so.
    if (ServerCommunicationsServiceUtil.getService().getMaintenanceModeAtStartup()) {
        log.info("Server is configured to start up in MAINTENANCE mode.");
        Server server = serverManager.getServer();
        Integer[] serverId = new Integer[] { server.getId() };
        topologyManager.updateServerMode(LookupUtil.getSubjectManager().getOverlord(), serverId,
            OperationMode.MAINTENANCE);
    }

    // Establish the current server mode for the server. This will move the server to NORMAL
    // mode from DOWN if necessary. This can also affect comm layer behavior.
    serverManager.establishCurrentServerMode();

    if ("true".equals(System.getProperty("rhq.sync.endpoint-address", "false"))) {
        try {
            serverManager.syncEndpointAddress();
        } catch (SyncEndpointAddressException e) {
            log.error("Failed to sync server endpoint address.", e);
        }
    }
}
/**
 * For developer builds that don't use the HA installer to write a localhost entry into the
 * {@link Server} table, we will create a default one here. Then, if the
 * "rhq.high-availability.name" property is missing, the {@link ServerManagerLocal} will return
 * this localhost entry.
 *
 * <p>If the installer was already run, then this method should be a no-op because a row would
 * already exist in the {@link Server} table.
 */
private void createDefaultServerIfNecessary() {
    String identity = serverManager.getIdentity();
    Server server = topologyManager.getServerByName(identity);
    if (server == null) {
        server = new Server();
        server.setName(identity);

        String address;
        try {
            address = InetAddress.getLocalHost().getCanonicalHostName();
        } catch (UnknownHostException e) {
            address = "localhost";
        }
        server.setAddress(address);
        server.setPort(7080);
        server.setSecurePort(7443);
        server.setComputePower(1);
        server.setOperationMode(Server.OperationMode.INSTALLED);
        serverManager.create(server);
        log.info("Default HA server created: " + server);
    }
}
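// Minimal sketch of the identity lookup assumed by the Javadoc above: prefer the
// "rhq.high-availability.name" property, otherwise fall back to the local hostname,
// mirroring the address fallback in the method. Illustration only - not the actual
// ServerManagerLocal implementation.
private static String resolveServerIdentity() {
    String haName = System.getProperty("rhq.high-availability.name");
    if (haName != null && !haName.trim().isEmpty()) {
        return haName;
    }
    try {
        return InetAddress.getLocalHost().getCanonicalHostName();
    } catch (UnknownHostException e) {
        return "localhost"; // same last-resort value used for the address above
    }
}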
/**
 * Starts the embedded agent, but only if the embedded agent is installed and it is enabled.
 *
 * @throws RuntimeException if the agent is installed and enabled but failed to start
 * @deprecated we don't have an embedded agent anymore; leaving this in case we resurrect it
 */
private void startEmbeddedAgent() throws RuntimeException {
    // We can't use EmbeddedAgentBootstrapServiceMBean because if the embedded agent
    // isn't installed, that class will not be available; we must use the JMX API.
    final ObjectName agentBootstrapMBean = ObjectNameFactory.create("rhq:service=EmbeddedAgentBootstrap");
    final String agentEnabledAttribute = "AgentEnabled";
    final String startAgentMethod = "startAgent";
    final String configurationOverridesAttribute = "ConfigurationOverrides";
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

    try {
        // This will fail if the embedded agent isn't installed.
        String enabled = (String) mbs.getAttribute(agentBootstrapMBean, agentEnabledAttribute);

        // If we got this far, the embedded agent is at least installed.
        // Now check to see if it's enabled - if so, start it; any startup exceptions from here on are thrown.
        try {
            if (Boolean.valueOf(enabled)) {
                log.info("The embedded Agent is installed and enabled - it will now be started...");

                // NOTE: we cannot directly import AgentConfigurationConstants, so we hardcode the
                // actual constant values here - need to keep an eye on these in the unlikely event
                // the constant values change.
                String AgentConfigurationConstants_SERVER_TRANSPORT = "rhq.agent.server.transport";
                String AgentConfigurationConstants_SERVER_BIND_ADDRESS = "rhq.agent.server.bind-address";
                String AgentConfigurationConstants_SERVER_BIND_PORT = "rhq.agent.server.bind-port";

                // Get the configuration overrides as set in the configuration file.
                // If the agent's bind address isn't overridden with a non-empty value,
                // then we need to get the Server bind address and use it for the agent's bind address.
                // If the agent's server endpoint address/port are empty, we again use the values
                // appropriate for the Server this agent is embedded in.
                // Note that we don't look for the values in persisted preferences - we assume they
                // are always present in the configuration overrides (which they should always be).
                Properties overrides;
                String serverTransport;
                String serverAddress;
                String serverPort;
                String agentAddress;

                overrides = (Properties) mbs.getAttribute(agentBootstrapMBean, configurationOverridesAttribute);
                serverTransport = overrides.getProperty(AgentConfigurationConstants_SERVER_TRANSPORT);
                serverAddress = overrides.getProperty(AgentConfigurationConstants_SERVER_BIND_ADDRESS);
                serverPort = overrides.getProperty(AgentConfigurationConstants_SERVER_BIND_PORT);
                agentAddress = overrides.getProperty(ServiceContainerConfigurationConstants.CONNECTOR_BIND_ADDRESS);

                Server server = serverManager.getServer();

                if (agentAddress == null || agentAddress.trim().equals("")) {
                    overrides.setProperty(ServiceContainerConfigurationConstants.CONNECTOR_BIND_ADDRESS,
                        server.getAddress());
                }
                if (serverAddress == null || serverAddress.trim().equals("")) {
                    overrides.setProperty(AgentConfigurationConstants_SERVER_BIND_ADDRESS, server.getAddress());
                }
                if (serverPort == null || serverPort.trim().equals("")) {
                    if (SecurityUtil.isTransportSecure(serverTransport)) {
                        overrides.setProperty(AgentConfigurationConstants_SERVER_BIND_PORT,
                            Integer.toString(server.getSecurePort()));
                    } else {
                        overrides.setProperty(AgentConfigurationConstants_SERVER_BIND_PORT,
                            Integer.toString(server.getPort()));
                    }
                }

                mbs.setAttribute(agentBootstrapMBean, new Attribute(configurationOverridesAttribute, overrides));

                // We need to do the agent startup in a separate thread so we do not hang
                // this startup servlet. JBossAS 4.2 will not begin accepting HTTP requests
                // until this startup servlet has finished (this is different from JBossAS 4.0).
                // The agent needs to submit an HTTP request in order to complete its startup
                // (it needs to register with the server).
                // The side effect of this is the RHQ Server will still start even if the embedded
                // agent fails to start - this may not be a bad thing. We probably do not want
                // the entire RHQ Server to go down if its agent fails to start.
                Runnable agentStartRunnable = new Runnable() {
                    public void run() {
                        // This returns only when the agent has started and is registered
                        // (it sends an HTTP request to the server).
                        try {
                            mbs.invoke(agentBootstrapMBean, startAgentMethod, new Object[0], new String[0]);
                        } catch (Throwable t) {
                            log.error("Failed to start the embedded Agent - it will not be available!", t);
                        }
                    }
                };

                Thread agentStartThread = new Thread(agentStartRunnable, "Embedded Agent Startup");
                agentStartThread.setDaemon(true);
                agentStartThread.start();
            } else {
                log.debug("The embedded Agent is not enabled, so it will not be started.");
            }
        } catch (Throwable t) {
            throw new RuntimeException("Failed to start the embedded Agent.", t);
        }
    } catch (RuntimeException se) {
        throw se;
    } catch (Throwable t) {
        log.info("The embedded Agent is not installed, so it will not be started (" + t + ").");
    }

    return;
}
/**
 * This will make sure all jobs that need to periodically run are scheduled.
 *
 * @throws RuntimeException if unable to schedule a job
 */
private void scheduleJobs() throws RuntimeException {
    log.info("Scheduling asynchronous jobs...");

    /*
     * All jobs need to be set as non-volatile since a volatile job in a clustered environment is effectively
     * non-volatile.
     */

    // TODO [mazz]: make all of the intervals here configurable via something like SystemManagerBean

    serverManager.scheduleServerHeartbeat();
    cacheConsistencyManager.scheduleServerCacheReloader();
    systemManager.scheduleConfigCacheReloader();

    try {
        // Do not check until we are up at least 1 min, and every minute thereafter.
        final long initialDelay = 1000L * 60;
        final long interval = 1000L * 60;
        schedulerBean.scheduleSimpleRepeatingJob(SavedSearchResultCountRecalculationJob.class, true, false,
            initialDelay, interval);
    } catch (Exception e) {
        log.error("Cannot schedule saved search result count recalculation job.", e);
    }

    try {
        // Do not check until we are up at least 1 min, and every 5 minutes thereafter.
        final long initialDelay = 1000L * 60;
        final long interval = 1000L * 60 * 5;
        schedulerBean.scheduleSimpleRepeatingJob(AsyncResourceDeleteJob.class, true, false, initialDelay, interval);
    } catch (Exception e) {
        log.error("Cannot schedule asynchronous resource deletion job.", e);
    }

    try {
        // Do not check until we are up at least 1 min, and every 5 minutes thereafter.
        final long initialDelay = 1000L * 60;
        final long interval = 1000L * 60 * 5;
        schedulerBean.scheduleSimpleRepeatingJob(PurgeResourceTypesJob.class, true, false, initialDelay, interval);
    } catch (Exception e) {
        log.error("Cannot schedule purge resource types job.", e);
    }

    try {
        // Do not check until we are up at least 1 min, and every 5 minutes thereafter.
        final long initialDelay = 1000L * 60;
        final long interval = 1000L * 60 * 5;
        schedulerBean.scheduleSimpleRepeatingJob(PurgePluginsJob.class, true, false, initialDelay, interval);
    } catch (Exception e) {
        log.error("Cannot schedule purge plugins job.", e);
    }

    // DynaGroup Auto-Recalculation Job
    try {
        // Do not check until we are up at least 1 min, and every minute thereafter.
        final long initialDelay = 1000L * 60;
        final long interval = 1000L * 60;
        schedulerBean.scheduleSimpleRepeatingJob(DynaGroupAutoRecalculationJob.class, true, false, initialDelay,
            interval);
    } catch (Exception e) {
        log.error("Cannot schedule DynaGroup auto-recalculation job.", e);
    }

    // Cluster Manager Job
    try {
        String oldJobName = "org.rhq.enterprise.server.scheduler.jobs.ClusterManagerJob";
        boolean foundAndDeleted = schedulerBean.deleteJob(oldJobName, oldJobName);
        if (foundAndDeleted) {
            log.info("Unscheduling deprecated job references for " + oldJobName + "...");
        } else {
            log.debug("No deprecated job references found for " + oldJobName + ".");
        }

        // Wait long enough to allow the Server instance jobs to start executing first.
        final long initialDelay = 1000L * 60 * 2; // 2 mins
        final long interval = 1000L * 30; // 30 secs
        schedulerBean.scheduleSimpleRepeatingJob(CloudManagerJob.class, true, false, initialDelay, interval);
    } catch (Exception e) {
        log.error("Cannot schedule cloud management job.", e);
    }

    // Suspected Agents Job
    try {
        // Do not check until we are up at least 10 mins, but check every 60 secs thereafter.
        final long initialDelay = 1000L * 60 * 10; // 10 mins
        final long interval = 1000L * 60; // 60 secs
        schedulerBean.scheduleSimpleRepeatingJob(CheckForSuspectedAgentsJob.class, true, false, initialDelay,
            interval);
    } catch (Exception e) {
        log.error("Cannot schedule suspected Agents job.", e);
    }

    // Timed Out Operations Job
    try {
        final long initialDelay = 1000L * 60 * 3; // 3 mins
        final long interval = 1000L * 60 * 10; // 10 mins
        schedulerBean.scheduleSimpleRepeatingJob(CheckForTimedOutOperationsJob.class, true, false, initialDelay,
            interval);
    } catch (Exception e) {
        log.error("Cannot schedule check-for-timed-out-operations job.", e);
    }

    // Timed Out Resource Configuration Update Requests Job
    // (NOTE: We don't need to check for timed out plugin configuration updates, since those are
    // executed synchronously.)
    try {
        final long initialDelay = 1000L * 60 * 4; // 4 mins
        final long interval = 1000L * 60 * 10; // 10 mins
        schedulerBean.scheduleSimpleRepeatingJob(CheckForTimedOutConfigUpdatesJob.class, true, false, initialDelay,
            interval);
    } catch (Exception e) {
        log.error("Cannot schedule check-for-timed-out-configuration-update-requests job.", e);
    }

    // Timed Out Content Requests Job
    try {
        final long initialDelay = 1000L * 60 * 5; // 5 mins
        final long interval = 1000L * 60 * 15; // 15 mins
        schedulerBean.scheduleSimpleRepeatingJob(CheckForTimedOutContentRequestsJob.class, true, false, initialDelay,
            interval);
    } catch (Exception e) {
        log.error("Cannot schedule check-for-timed-out-artifact-requests job.", e);
    }

    // Data Purge Job
    try {
        // TODO [mazz]: make the data purge job's cron string configurable via SystemManagerBean
        // For Quartz cron syntax, see:
        // http://www.quartz-scheduler.org/documentation/quartz-2.1.x/tutorials/crontrigger
        String cronString = "0 0 * * * ?"; // every hour, on the hour
        schedulerBean.scheduleSimpleCronJob(DataPurgeJob.class, true, false, cronString);
    } catch (Exception e) {
        log.error("Cannot schedule data purge job.", e);
    }

    // Server Plugin Jobs
    try {
        ServerPluginServiceMBean mbean = LookupUtil.getServerPluginService();
        MasterServerPluginContainer masterPC = mbean.getMasterPluginContainer();
        masterPC.scheduleAllPluginJobs();
    } catch (Exception e) {
        log.error("Cannot schedule server plugin jobs.", e);
    }

    // Alerting Availability Duration Job (create only, nothing actually scheduled here)
    try {
        schedulerBean.scheduleTriggeredJob(AlertAvailabilityDurationJob.class, false, null);
    } catch (Exception e) {
        log.error("Cannot create alert availability duration job.", e);
    }

    return;
}
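// For orientation only: each scheduleSimpleRepeatingJob(...) call above boils down to registering
// a Quartz job with a simple repeating trigger. This is an assumption about the underlying
// Quartz 1.x calls, not the RHQ scheduler bean itself; the job class and timings are copied from
// the AsyncResourceDeleteJob block (its package is inferred from the ClusterManagerJob name above).
import java.util.Date;

import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SimpleTrigger;
import org.quartz.impl.StdSchedulerFactory;

import org.rhq.enterprise.server.scheduler.jobs.AsyncResourceDeleteJob;

public class SimpleRepeatingJobSketch {
    public static void main(String[] args) throws Exception {
        final long initialDelay = 1000L * 60; // wait 1 min after startup
        final long interval = 1000L * 60 * 5; // then run every 5 mins

        Scheduler scheduler = new StdSchedulerFactory().getScheduler();

        // Non-volatile job and trigger survive a scheduler restart, matching the
        // "all jobs need to be non-volatile" note at the top of scheduleJobs().
        JobDetail job = new JobDetail(AsyncResourceDeleteJob.class.getName(), Scheduler.DEFAULT_GROUP,
            AsyncResourceDeleteJob.class);
        job.setVolatility(false);

        SimpleTrigger trigger = new SimpleTrigger(AsyncResourceDeleteJob.class.getName(), Scheduler.DEFAULT_GROUP,
            new Date(System.currentTimeMillis() + initialDelay), null, SimpleTrigger.REPEAT_INDEFINITELY, interval);
        trigger.setVolatility(false);

        scheduler.scheduleJob(job, trigger);
        scheduler.start();
    }
}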