@BeforeClass
public static void setup() throws AmbariException {
  injector = Guice.createInjector(new InMemoryDefaultTestModule());
  injector.getInstance(GuiceJpaInitializer.class);
  configHelper = injector.getInstance(ConfigHelper.class);
  configFactory = injector.getInstance(ConfigFactory.class);
  clusters = injector.getInstance(Clusters.class);

  clusters.addHost(HOST1);
  clusters.getHost(HOST1).persist();
  clusters.addCluster(CLUSTER1);
  Cluster cluster1 = clusters.getCluster(CLUSTER1);

  SERVICE_SITE_CLUSTER = new HashMap<String, String>();
  SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME1, SERVICE_SITE_VAL1);
  SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME2, SERVICE_SITE_VAL2);
  SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME3, SERVICE_SITE_VAL3);
  SERVICE_SITE_CLUSTER.put(SERVICE_SITE_NAME4, SERVICE_SITE_VAL4);

  SERVICE_SITE_SERVICE = new HashMap<String, String>();
  SERVICE_SITE_SERVICE.put(SERVICE_SITE_NAME1, SERVICE_SITE_VAL1_S);
  SERVICE_SITE_SERVICE.put(SERVICE_SITE_NAME2, SERVICE_SITE_VAL2_S);
  SERVICE_SITE_SERVICE.put(SERVICE_SITE_NAME5, SERVICE_SITE_VAL5_S);

  SERVICE_SITE_HOST = new HashMap<String, String>();
  SERVICE_SITE_HOST.put(SERVICE_SITE_NAME2, SERVICE_SITE_VAL2_H);
  SERVICE_SITE_HOST.put(SERVICE_SITE_NAME6, SERVICE_SITE_VAL6_H);

  GLOBAL_CLUSTER = new HashMap<String, String>();
  GLOBAL_CLUSTER.put(GLOBAL_NAME1, GLOBAL_CLUSTER_VAL1);
  GLOBAL_CLUSTER.put(GLOBAL_NAME2, GLOBAL_CLUSTER_VAL2);

  CONFIG_ATTRIBUTES = new HashMap<String, Map<String, String>>();

  // Cluster level global config
  Config globalConfig = configFactory.createNew(cluster1, GLOBAL_CONFIG,
      GLOBAL_CLUSTER, CONFIG_ATTRIBUTES);
  globalConfig.setTag(CLUSTER_VERSION_TAG);
  cluster1.addConfig(globalConfig);

  // Cluster level service config
  Config serviceSiteConfigCluster = configFactory.createNew(cluster1,
      SERVICE_SITE_CONFIG, SERVICE_SITE_CLUSTER, CONFIG_ATTRIBUTES);
  serviceSiteConfigCluster.setTag(CLUSTER_VERSION_TAG);
  cluster1.addConfig(serviceSiteConfigCluster);

  // Service level service config
  Config serviceSiteConfigService = configFactory.createNew(cluster1,
      SERVICE_SITE_CONFIG, SERVICE_SITE_SERVICE, CONFIG_ATTRIBUTES);
  serviceSiteConfigService.setTag(SERVICE_VERSION_TAG);
  cluster1.addConfig(serviceSiteConfigService);

  // Host level service config
  Config serviceSiteConfigHost = configFactory.createNew(cluster1,
      SERVICE_SITE_CONFIG, SERVICE_SITE_HOST, CONFIG_ATTRIBUTES);
  serviceSiteConfigHost.setTag(HOST_VERSION_TAG);
  cluster1.addConfig(serviceSiteConfigHost);

  ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
  createTask(db, 1, 1, HOST1, CLUSTER1);
}
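// A minimal, self-contained sketch (hypothetical helper, not part of the test
// above) of the override precedence the three SERVICE_SITE_* maps exercise:
// host-level values win over service-level values, which win over
// cluster-level values. The class and parameter names are illustrative only;
// it assumes java.util.Map/HashMap, nothing else.
final class ServiceSitePrecedenceSketch {
  static java.util.Map<String, String> merge(
      java.util.Map<String, String> clusterLevel,
      java.util.Map<String, String> serviceLevel,
      java.util.Map<String, String> hostLevel) {
    java.util.Map<String, String> merged =
        new java.util.HashMap<String, String>(clusterLevel);
    // apply from least specific to most specific; later puts overwrite earlier
    merged.putAll(serviceLevel);
    merged.putAll(hostLevel);
    return merged;
  }
}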
protected void deleteHosts(Set<HostRequest> requests) throws AmbariException {
  AmbariManagementController controller = getManagementController();
  Clusters clusters = controller.getClusters();

  List<HostRequest> okToRemove = new ArrayList<HostRequest>();

  for (HostRequest hostRequest : requests) {
    String hostName = hostRequest.getHostname();
    if (null == hostName) {
      continue;
    }

    // Resolve which clusters the host belongs to, either from the request
    // or from the current host-to-cluster mappings.
    Set<String> clusterNamesForHost = new HashSet<String>();
    if (null != hostRequest.getClusterName()) {
      clusterNamesForHost.add(hostRequest.getClusterName());
    } else {
      Set<Cluster> clustersForHost = clusters.getClustersForHost(hostName);
      if (null != clustersForHost) {
        for (Cluster c : clustersForHost) {
          clusterNamesForHost.add(c.getClusterName());
        }
      }
    }

    for (String clusterName : clusterNamesForHost) {
      Cluster cluster = clusters.getCluster(clusterName);

      List<ServiceComponentHost> list = cluster.getServiceComponentHosts(hostName);

      if (!list.isEmpty()) {
        List<String> componentsToRemove = new ArrayList<String>();
        for (ServiceComponentHost sch : list) {
          Service s = cluster.getService(sch.getServiceName());
          ServiceComponent sc = s.getServiceComponent(sch.getServiceComponentName());

          // Masters and slaves must be deleted first. Clients are ok.
          if (!sc.isClientComponent()) {
            componentsToRemove.add(sch.getServiceComponentName());
          }
        }

        if (!componentsToRemove.isEmpty()) {
          StringBuilder reason = new StringBuilder("Cannot remove host ")
              .append(hostName)
              .append(" from ")
              .append(clusterName)
              .append(". The following roles exist, and these components must be stopped if running, and then deleted: ");

          reason.append(StringUtils.join(componentsToRemove, ", "));
          throw new AmbariException(reason.toString());
        }
      }
    }

    okToRemove.add(hostRequest);
  }

  for (HostRequest hostRequest : okToRemove) {
    // Assume the user also wants to delete it entirely, including all clusters.
    clusters.deleteHost(hostRequest.getHostname());

    if (null != hostRequest.getClusterName()) {
      clusters.getCluster(hostRequest.getClusterName()).recalculateAllClusterVersionStates();
    }
  }
}
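// A hedged usage sketch for deleteHosts(). The three-argument HostRequest
// constructor shown here is an assumption about this class's API; the behavior
// matches the method above: deletion fails fast while any non-client component
// is still mapped to the host.
//
//   Set<HostRequest> requests = new HashSet<HostRequest>();
//   requests.add(new HostRequest("h1.example.com", "c1", null));
//   deleteHosts(requests); // AmbariException if h1 still hosts masters/slaves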
protected static Set<HostResponse> getHosts(AmbariManagementController controller,
    HostRequest request) throws AmbariException {
  // TODO/FIXME host can only belong to a single cluster so get host directly from Cluster
  // TODO/FIXME what is the requirement for filtering on host attributes?

  List<Host> hosts;
  Set<HostResponse> response = new HashSet<HostResponse>();
  Cluster cluster = null;

  Clusters clusters = controller.getClusters();

  String clusterName = request.getClusterName();
  String hostName = request.getHostname();

  if (clusterName != null) {
    // validate that cluster exists, throws exception if it doesn't.
    try {
      cluster = clusters.getCluster(clusterName);
    } catch (ObjectNotFoundException e) {
      throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
    }
  }

  if (hostName == null) {
    hosts = clusters.getHosts();
  } else {
    hosts = new ArrayList<Host>();
    try {
      hosts.add(clusters.getHost(hostName));
    } catch (HostNotFoundException e) {
      // rethrow with the cluster name added for context
      throw new HostNotFoundException(clusterName, hostName);
    }
  }

  for (Host h : hosts) {
    if (clusterName != null) {
      if (clusters.getClustersForHost(h.getHostName()).contains(cluster)) {
        HostResponse r = h.convertToResponse();
        r.setClusterName(clusterName);
        r.setDesiredHostConfigs(h.getDesiredHostConfigs(cluster));
        r.setMaintenanceState(h.getMaintenanceState(cluster.getClusterId()));

        response.add(r);
      } else if (hostName != null) {
        throw new HostNotFoundException(clusterName, hostName);
      }
    } else {
      HostResponse r = h.convertToResponse();

      Set<Cluster> clustersForHost = clusters.getClustersForHost(h.getHostName());
      // todo: host can only belong to a single cluster
      if (clustersForHost != null && !clustersForHost.isEmpty()) {
        r.setClusterName(clustersForHost.iterator().next().getClusterName());
      }

      response.add(r);
    }
  }
  return response;
}
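// A hedged usage sketch for getHosts(), illustrating its two lookup modes.
// Field values and the HostRequest constructor are hypothetical:
//
//   // 1) hostname == null: list every host, filtered to cluster "c1" when set
//   Set<HostResponse> all = getHosts(controller, new HostRequest(null, "c1", null));
//
//   // 2) hostname set: single-host lookup; a miss is rethrown as a
//   //    HostNotFoundException that carries the cluster name for context
//   Set<HostResponse> one = getHosts(controller, new HostRequest("h1", "c1", null));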
protected synchronized void updateHosts(Set<HostRequest> requests) throws AmbariException {
  if (requests.isEmpty()) {
    LOG.warn("Received an empty requests set");
    return;
  }

  AmbariManagementController controller = getManagementController();
  Clusters clusters = controller.getClusters();

  // Validate all requests up front so that none is partially applied.
  for (HostRequest request : requests) {
    if (request.getHostname() == null || request.getHostname().isEmpty()) {
      throw new IllegalArgumentException("Invalid arguments, hostname should be provided");
    }
  }

  for (HostRequest request : requests) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Received an updateHost request"
          + ", hostname=" + request.getHostname()
          + ", request=" + request);
    }

    Host host = clusters.getHost(request.getHostname());

    String clusterName = request.getClusterName();

    try {
      // The method call below throws an exception when trying to create a
      // duplicate mapping in the clusterhostmapping table. This is done to
      // detect duplicates during host create. In order to be robust, handle
      // these gracefully.
      clusters.mapHostToCluster(request.getHostname(), clusterName);
    } catch (DuplicateResourceException e) {
      // do nothing
    }

    if (null != request.getHostAttributes()) {
      host.setHostAttributes(request.getHostAttributes());
    }

    String rackInfo = host.getRackInfo();
    String requestRackInfo = request.getRackInfo();
    boolean rackChange = requestRackInfo != null && !requestRackInfo.equals(rackInfo);

    if (rackChange) {
      host.setRackInfo(requestRackInfo);
    }

    if (null != request.getPublicHostName()) {
      host.setPublicHostName(request.getPublicHostName());
    }

    if (null != clusterName && null != request.getMaintenanceState()) {
      Cluster c = clusters.getCluster(clusterName);
      MaintenanceState newState = MaintenanceState.valueOf(request.getMaintenanceState());
      MaintenanceState oldState = host.getMaintenanceState(c.getClusterId());
      if (!newState.equals(oldState)) {
        // The IMPLIED_* states are computed, never set directly.
        if (newState.equals(MaintenanceState.IMPLIED_FROM_HOST)
            || newState.equals(MaintenanceState.IMPLIED_FROM_SERVICE)) {
          throw new IllegalArgumentException("Invalid arguments, can only set "
              + "maintenance state to one of "
              + EnumSet.of(MaintenanceState.OFF, MaintenanceState.ON));
        } else {
          host.setMaintenanceState(c.getClusterId(), newState);
        }
      }
    }

    // Create configurations
    if (null != clusterName && null != request.getDesiredConfigs()) {
      Cluster c = clusters.getCluster(clusterName);

      if (clusters.getHostsForCluster(clusterName).containsKey(host.getHostName())) {
        for (ConfigurationRequest cr : request.getDesiredConfigs()) {
          if (null != cr.getProperties() && !cr.getProperties().isEmpty()) {
            LOG.info(MessageFormat.format(
                "Applying configuration with tag ''{0}'' to host ''{1}'' in cluster ''{2}''",
                cr.getVersionTag(), request.getHostname(), clusterName));

            cr.setClusterName(c.getClusterName());
            controller.createConfiguration(cr);
          }

          Config baseConfig = c.getConfig(cr.getType(), cr.getVersionTag());
          if (null != baseConfig) {
            String authName = controller.getAuthName();
            DesiredConfig oldConfig = host.getDesiredConfigs(c.getClusterId()).get(cr.getType());

            if (host.addDesiredConfig(c.getClusterId(), cr.isSelected(), authName, baseConfig)) {
              Logger logger = LoggerFactory.getLogger("configchange");
              logger.info("cluster '" + c.getClusterName() + "', "
                  + "host '" + host.getHostName() + "' "
                  + "changed by: '" + authName + "'; "
                  + "type='" + baseConfig.getType() + "' "
                  + "version='" + baseConfig.getVersion() + "' "
                  + "tag='" + baseConfig.getTag() + "'"
                  + (null == oldConfig ? "" : ", from='" + oldConfig.getTag() + "'"));
            }
          }
        }
      }
    }

    if (clusterName != null && !clusterName.isEmpty()) {
      clusters.getCluster(clusterName).recalculateAllClusterVersionStates();
      if (rackChange) {
        controller.registerRackChange(clusterName);
      }
    }

    // todo: if an attempt was made to update a property other than those
    // todo: allowed above, an exception should be thrown
  }
}
@Transactional
public void populateData(Cluster cluster) throws Exception {
  // remove any definitions and start over
  List<AlertDefinitionEntity> definitions = m_definitionDao.findAll();
  for (AlertDefinitionEntity definition : definitions) {
    m_definitionDao.remove(definition);
  }

  // create two notification targets
  AlertTargetEntity administrators = new AlertTargetEntity();
  administrators.setDescription("The Administrators");
  administrators.setNotificationType("EMAIL");
  administrators.setTargetName("Administrators");
  m_dispatchDAO.create(administrators);

  AlertTargetEntity operators = new AlertTargetEntity();
  operators.setDescription("The Operators");
  operators.setNotificationType("EMAIL");
  operators.setTargetName("Operators");
  m_dispatchDAO.create(operators);

  // create some definitions
  AlertDefinitionEntity namenode = new AlertDefinitionEntity();
  namenode.setDefinitionName("NAMENODE");
  namenode.setServiceName("HDFS");
  namenode.setComponentName("NAMENODE");
  namenode.setClusterId(cluster.getClusterId());
  namenode.setHash(UUID.randomUUID().toString());
  namenode.setScheduleInterval(Integer.valueOf(60));
  namenode.setScope(Scope.ANY);
  namenode.setSource("{\"type\" : \"SCRIPT\"}");
  namenode.setSourceType(SourceType.SCRIPT);
  m_definitionDao.create(namenode);

  AlertDefinitionEntity datanode = new AlertDefinitionEntity();
  datanode.setDefinitionName("DATANODE");
  datanode.setServiceName("HDFS");
  datanode.setComponentName("DATANODE");
  datanode.setClusterId(cluster.getClusterId());
  datanode.setHash(UUID.randomUUID().toString());
  datanode.setScheduleInterval(Integer.valueOf(60));
  datanode.setScope(Scope.HOST);
  datanode.setSource("{\"type\" : \"SCRIPT\"}");
  datanode.setSourceType(SourceType.SCRIPT);
  m_definitionDao.create(datanode);

  AlertDefinitionEntity aggregate = new AlertDefinitionEntity();
  aggregate.setDefinitionName("YARN_AGGREGATE");
  aggregate.setServiceName("YARN");
  aggregate.setComponentName(null);
  aggregate.setClusterId(cluster.getClusterId());
  aggregate.setHash(UUID.randomUUID().toString());
  aggregate.setScheduleInterval(Integer.valueOf(60));
  aggregate.setScope(Scope.SERVICE);
  aggregate.setSource("{\"type\" : \"SCRIPT\"}");
  aggregate.setSourceType(SourceType.SCRIPT);
  m_definitionDao.create(aggregate);

  // create some history
  AlertHistoryEntity nnHistory = new AlertHistoryEntity();
  nnHistory.setAlertState(AlertState.OK);
  nnHistory.setServiceName(namenode.getServiceName());
  nnHistory.setComponentName(namenode.getComponentName());
  nnHistory.setClusterId(cluster.getClusterId());
  nnHistory.setAlertDefinition(namenode);
  nnHistory.setAlertLabel(namenode.getDefinitionName());
  nnHistory.setAlertText(namenode.getDefinitionName());
  nnHistory.setAlertTimestamp(calendar.getTimeInMillis());
  nnHistory.setHostName(HOSTNAME);
  m_alertsDAO.create(nnHistory);

  AlertHistoryEntity dnHistory = new AlertHistoryEntity();
  dnHistory.setAlertState(AlertState.WARNING);
  dnHistory.setServiceName(datanode.getServiceName());
  dnHistory.setComponentName(datanode.getComponentName());
  dnHistory.setClusterId(cluster.getClusterId());
  dnHistory.setAlertDefinition(datanode);
  dnHistory.setAlertLabel(datanode.getDefinitionName());
  dnHistory.setAlertText(datanode.getDefinitionName());
  dnHistory.setAlertTimestamp(calendar.getTimeInMillis());
  dnHistory.setHostName(HOSTNAME);
  m_alertsDAO.create(dnHistory);

  // the aggregate alert is service-scoped, so no host name is set
  AlertHistoryEntity aggregateHistory = new AlertHistoryEntity();
  aggregateHistory.setAlertState(AlertState.CRITICAL);
  aggregateHistory.setServiceName(aggregate.getServiceName());
  aggregateHistory.setComponentName(aggregate.getComponentName());
  aggregateHistory.setClusterId(cluster.getClusterId());
  aggregateHistory.setAlertDefinition(aggregate);
  aggregateHistory.setAlertLabel(aggregate.getDefinitionName());
  aggregateHistory.setAlertText(aggregate.getDefinitionName());
  aggregateHistory.setAlertTimestamp(calendar.getTimeInMillis());
  m_alertsDAO.create(aggregateHistory);

  // create notices in various dispatch states
  AlertNoticeEntity nnPendingNotice = new AlertNoticeEntity();
  nnPendingNotice.setAlertHistory(nnHistory);
  nnPendingNotice.setAlertTarget(administrators);
  nnPendingNotice.setNotifyState(NotificationState.PENDING);
  nnPendingNotice.setUuid(UUID.randomUUID().toString());
  m_dispatchDAO.create(nnPendingNotice);

  AlertNoticeEntity dnFailedNotice = new AlertNoticeEntity();
  dnFailedNotice.setAlertHistory(dnHistory);
  dnFailedNotice.setAlertTarget(administrators);
  dnFailedNotice.setNotifyState(NotificationState.FAILED);
  dnFailedNotice.setUuid(UUID.randomUUID().toString());
  m_dispatchDAO.create(dnFailedNotice);

  AlertNoticeEntity aggregateFailedNotice = new AlertNoticeEntity();
  aggregateFailedNotice.setAlertHistory(aggregateHistory);
  aggregateFailedNotice.setAlertTarget(operators);
  aggregateFailedNotice.setNotifyState(NotificationState.FAILED);
  aggregateFailedNotice.setUuid(UUID.randomUUID().toString());
  m_dispatchDAO.create(aggregateFailedNotice);

  List<AlertHistoryEntity> histories = m_alertsDAO.findAll();
  assertEquals(3, histories.size());
}
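// A hedged refactoring sketch for the three near-identical notice blocks
// above. Each AlertNoticeEntity starts PENDING and is later resolved by the
// dispatcher to DELIVERED or FAILED; this hypothetical helper only factors
// out the construction pattern used in populateData().
//
//   private AlertNoticeEntity createNotice(AlertHistoryEntity history,
//       AlertTargetEntity target, NotificationState state) {
//     AlertNoticeEntity notice = new AlertNoticeEntity();
//     notice.setAlertHistory(history);
//     notice.setAlertTarget(target);
//     notice.setNotifyState(state);
//     notice.setUuid(UUID.randomUUID().toString());
//     m_dispatchDAO.create(notice);
//     return notice;
//   }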