@Test
  public void testUpdateHostsForDownscaleWhenRemainingSpaceIsNotEnough() throws Exception {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
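    // DFS space per host: hostname -> singleton map of (free bytes -> used bytes).
    // This matches the assertion below: removing node1 moves its 10_000 used bytes
    // onto node2 + node3, whose free space sums to 5_000 + 6_000 = 11_000 bytes.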
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(5_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(10_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(6_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
        .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(
            any(TLSClientConfig.class), any(String.class), any(String.class)))
        .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
        .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
        .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
        .thenReturn(hostsMetadataList);
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
        .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1"))
        .thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2"))
        .thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3"))
        .thenReturn(instanceMetaData3);

    BadRequestException result = null;
    try {
      underTest.updateHosts(stack.getId(), json);
      fail("Expected BadRequestException: the remaining nodes cannot hold the removed node's data");
    } catch (BadRequestException e) {
      result = e;
    }

    assertEquals(
        "Trying to move '10000' bytes worth of data to nodes with '11000' bytes of capacity is not allowed",
        result.getMessage());
  }
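
  /**
   * Downscale should succeed when every candidate's data fits into the
   * remaining nodes' free DFS space; the cluster downscale flow must be
   * triggered exactly once.
   */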
  @Test
  public void testUpdateHostsForDownscaleSelectNodesWithLessData()
      throws ConnectException, CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
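    // Plenty of free space on every host (keys), so any single node's data fits
    // into the remaining capacity; node1 holds the least data (10_000 bytes used).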
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(85_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(90_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(80_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
        .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(
            any(TLSClientConfig.class), any(String.class), any(String.class)))
        .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
        .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
        .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
        .thenReturn(hostsMetadataList);
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
        .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1"))
        .thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2"))
        .thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3"))
        .thenReturn(instanceMetaData3);
    doNothing().when(flowManager).triggerClusterDownscale(anyObject());

    underTest.updateHosts(stack.getId(), json);

    verify(flowManager, times(1)).triggerClusterDownscale(anyObject());
  }
  public void terminateFailedNodes(
      ContainerOrchestrator orchestrator, Stack stack, GatewayConfig gatewayConfig, Set<Node> nodes)
      throws CloudbreakOrchestratorFailedException {
    List<String> allAvailableNodes = orchestrator.getAvailableNodes(gatewayConfig, nodes);
    List<Node> missingNodes = selectMissingNodes(nodes, allAvailableNodes);
    if (!missingNodes.isEmpty()) {
      String message =
          String.format(
              "Bootstrap failed on %d nodes. These nodes will be terminated.", missingNodes.size());
      LOGGER.info(message);
      eventService.fireCloudbreakEvent(stack.getId(), Status.UPDATE_IN_PROGRESS.name(), message);
      for (Node missingNode : missingNodes) {
        InstanceMetaData instanceMetaData =
            instanceMetaDataRepository.findNotTerminatedByPrivateAddress(
                stack.getId(), missingNode.getPrivateIp());
        InstanceGroup ig =
            instanceGroupRepository.findOneByGroupNameInStack(
                stack.getId(), instanceMetaData.getInstanceGroup().getGroupName());
        // Shrink the group, but never below one node; an empty instance group
        // means the cluster cannot be created.
        ig.setNodeCount(ig.getNodeCount() - 1);
        if (ig.getNodeCount() < 1) {
          throw new CloudbreakOrchestratorFailedException(
              String.format(
                  "Node count of instance group '%s' dropped below 1; cluster creation failed.",
                  ig.getGroupName()));
        }
        instanceGroupRepository.save(ig);
        message =
            String.format(
                "Deleting node '%s' and decreasing the node count of instance group '%s'.",
                instanceMetaData.getInstanceId(), ig.getGroupName());
        LOGGER.info(message);
        eventService.fireCloudbreakEvent(stack.getId(), Status.UPDATE_IN_PROGRESS.name(), message);
        deleteResourceAndDependencies(stack, instanceMetaData);
        deleteInstanceResourceFromDatabase(stack, instanceMetaData);
        instanceMetaData.setInstanceStatus(InstanceStatus.TERMINATED);
        instanceMetaDataRepository.save(instanceMetaData);
        LOGGER.info(
            String.format(
                "Status of instanceMetaData with id '%s' and instance id '%s' set to TERMINATED.",
                instanceMetaData.getId(), instanceMetaData.getInstanceId()));
      }
    }
  }
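
  /*
   * selectMissingNodes is called above but not shown in this excerpt. A
   * minimal sketch of what it could look like, assuming getAvailableNodes
   * returns the private IPs of the reachable nodes and java.util.ArrayList
   * is imported; the real implementation may differ.
   */
  private List<Node> selectMissingNodes(Set<Node> clusterNodes, List<String> availableNodeAddresses) {
    List<Node> missingNodes = new ArrayList<>();
    for (Node node : clusterNodes) {
      // A node that never reported back to the orchestrator is considered missing.
      if (!availableNodeAddresses.contains(node.getPrivateIp())) {
        missingNodes.add(node);
      }
    }
    return missingNodes;
  }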