@Test
public void testUpdateHostsForDownscaleWhenRemainingSpaceIsNotEnough() throws Exception {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(5_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(10_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(6_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
            .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(any(TLSClientConfig.class), any(String.class), any(String.class)))
            .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
            .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
            .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
            .thenReturn(hostsMetadataList);
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
            .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1")).thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2")).thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3")).thenReturn(instanceMetaData3);

    Exception result = null;
    try {
        underTest.updateHosts(stack.getId(), json);
    } catch (BadRequestException e) {
        result = e;
    }

    assertEquals("Trying to move '10000' bytes worth of data to nodes with '11000' bytes of capacity is not allowed",
            result.getMessage());
}
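// The assertion above falls out of the DFS space map: each single-entry inner map is keyed by a
// host's remaining (free) bytes and holds the bytes of data stored on it. node1 stores the least
// data (10 000 bytes) and so becomes the removal candidate, while the surviving nodes offer
// 5 000 + 6 000 = 11 000 free bytes. The sketch below illustrates that capacity check; the helper
// name and the safety factor are assumptions for illustration, not the service's actual code
// (a strict used <= remaining comparison alone would not reject 10 000 vs. 11 000).
private static final double ASSUMED_SAFETY_FACTOR = 1.2;

private static void checkRemainingDfsSpace(Map<String, Map<Long, Long>> dfsSpace, String removableHost) {
    long usedOnRemovable = 0L;
    long remainingOnOthers = 0L;
    for (Map.Entry<String, Map<Long, Long>> entry : dfsSpace.entrySet()) {
        // single-entry inner map: key = remaining bytes, value = used bytes
        Map.Entry<Long, Long> space = entry.getValue().entrySet().iterator().next();
        if (entry.getKey().equals(removableHost)) {
            usedOnRemovable += space.getValue();
        } else {
            remainingOnOthers += space.getKey();
        }
    }
    if (remainingOnOthers < usedOnRemovable * ASSUMED_SAFETY_FACTOR) {
        throw new BadRequestException(String.format(
                "Trying to move '%d' bytes worth of data to nodes with '%d' bytes of capacity is not allowed",
                usedOnRemovable, remainingOnOthers));
    }
}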
@Test
public void testUpdateHostsForDownscaleSelectNodesWithLessData() throws ConnectException, CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(85_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(90_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(80_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
            .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(any(TLSClientConfig.class), any(String.class), any(String.class)))
            .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
            .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
            .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
            .thenReturn(hostsMetadataList);
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
            .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1")).thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2")).thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3")).thenReturn(instanceMetaData3);
    doNothing().when(flowManager).triggerClusterDownscale(anyObject());

    underTest.updateHosts(stack.getId(), json);

    verify(flowManager, times(1)).triggerClusterDownscale(anyObject());
}
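// This test covers the passing branch: with 85 000 + 80 000 free bytes on the remaining nodes, the
// capacity check succeeds and the downscale flow is triggered. As the test name suggests, the host
// storing the least data (node1, 10 000 bytes) is the one expected to be picked for removal; the
// helper below is a hypothetical illustration of that ordering, not the service's actual code.
private static String selectHostWithLeastData(Map<String, Map<Long, Long>> dfsSpace) {
    String candidate = null;
    long leastUsed = Long.MAX_VALUE;
    for (Map.Entry<String, Map<Long, Long>> entry : dfsSpace.entrySet()) {
        // single-entry inner map: key = remaining bytes, value = used bytes
        long used = entry.getValue().values().iterator().next();
        if (used < leastUsed) {
            leastUsed = used;
            candidate = entry.getKey();
        }
    }
    return candidate;
}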