@Test
  public void testUpdateHostsForDownscaleWhenRemainingSpaceIsNotEnough() throws Exception {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
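    // Per-host DFS report; each singleton entry is assumed to map remaining bytes -> used bytes
    // (the 11 000-byte capacity in the expected message is exactly node2's 5 000 + node3's 6 000).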
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(5_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(10_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(6_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
        .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(
            any(TLSClientConfig.class), any(String.class), any(String.class)))
        .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
        .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
        .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
        .thenReturn(hostsMetadataList);
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
        .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1"))
        .thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2"))
        .thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3"))
        .thenReturn(instanceMetaData3);

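    // node1 stores 10 000 bytes while node2 and node3 only have 11 000 bytes free in
    // total, so the service is expected to refuse the decommission.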
    Exception result = null;
    try {
      underTest.updateHosts(stack.getId(), json);
    } catch (BadRequestException e) {
      result = e;
    }

    assertNotNull("Expected a BadRequestException", result);
    assertEquals(
        "Trying to move '10000' bytes worth of data to nodes with '11000' bytes of capacity is not allowed",
        result.getMessage());
  }
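
  // With ample free space the downscale should go through, and the host storing the
  // least data (node1, 10 000 used bytes) should be the one selected for removal.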
  @Test
  public void testUpdateHostsForDownscaleSelectNodesWithLessData()
      throws ConnectException, CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(85_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(90_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(80_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
        .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(
            any(TLSClientConfig.class), any(String.class), any(String.class)))
        .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
        .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
        .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
        .thenReturn(hostsMetadataList);
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
        .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1"))
        .thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2"))
        .thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3"))
        .thenReturn(instanceMetaData3);
    doNothing().when(flowManager).triggerClusterDownscale(anyObject());

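    // The request should be accepted and a cluster downscale flow triggered exactly once.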
    underTest.updateHosts(stack.getId(), json);

    verify(flowManager, times(1)).triggerClusterDownscale(anyObject());
  }
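
  // Ephemeral (instance-store) volumes lose their data when an AWS instance is
  // stopped, so stopping such a stack is expected to be rejected.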
  @Test(expected = BadRequestException.class)
  public void testStopWhenAwsHasEphemeralVolume() {
    cluster =
        TestUtil.cluster(
            TestUtil.blueprint(), TestUtil.stack(Status.AVAILABLE, TestUtil.awsCredential()), 1L);
    stack = TestUtil.setEphemeral(cluster.getStack());
    cluster.setStatus(Status.AVAILABLE);
    cluster.setStack(stack);
    stack.setCluster(cluster);

    when(stackService.get(anyLong())).thenReturn(stack);
    when(stackService.getById(anyLong())).thenReturn(stack);

    underTest.updateStatus(1L, StatusRequest.STOPPED);
  }
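
  // dfs.replication is 2 here, so removing one of the few remaining DataNode
  // hosts would make the replication factor unsatisfiable.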
  @Test
  public void testUpdateHostsForDownscaleCannotGoBelowReplication()
      throws ConnectException, CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
        .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(
            any(TLSClientConfig.class), any(String.class), any(String.class)))
        .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
        .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
        .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "2"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
        .thenReturn(asList(metadata2, metadata3));

    Exception result = null;
    try {
      underTest.updateHosts(stack.getId(), json);
    } catch (BadRequestException e) {
      result = e;
    }

    assertNotNull("Expected a BadRequestException", result);
    assertTrue(result.getMessage().startsWith("There is not enough node to downscale."));
  }
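
  /**
   * Returns the Cloudbreak-managed Ambari password when one was generated,
   * falling back to the password supplied in the cluster request.
   */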
  public String getAmbariPassword(Cluster cluster) {
    if (Strings.isNullOrEmpty(cluster.getCloudbreakAmbariPassword())) {
      return cluster.getPassword();
    }
    return cluster.getCloudbreakAmbariPassword();
  }
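
  /**
   * Returns the Cloudbreak-managed Ambari user when one is set, falling back
   * to the user name supplied in the cluster request.
   */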
  public String getAmbariUserName(Cluster cluster) {
    if (Strings.isNullOrEmpty(cluster.getCloudbreakAmbariUser())) {
      return cluster.getUserName();
    }
    return cluster.getCloudbreakAmbariUser();
  }
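
  /**
   * Builds a new Cluster entity in REQUESTED state from the incoming request,
   * generating dedicated Cloudbreak Ambari credentials along the way.
   */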
  @Override
  public Cluster convert(ClusterRequest source) {
    Cluster cluster = new Cluster();
    cluster.setName(source.getName());
    cluster.setStatus(REQUESTED);
    cluster.setDescription(source.getDescription());
    cluster.setEmailNeeded(source.getEmailNeeded());
    cluster.setUserName(source.getUserName());
    cluster.setPassword(source.getPassword());
    Boolean enableSecurity = source.getEnableSecurity();
    cluster.setSecure(enableSecurity == null ? false : enableSecurity);
    KerberosRequest kerberos = source.getKerberos();
    // An empty KerberosConfig is attached even for non-secure clusters.
    KerberosConfig kerberosConfig = new KerberosConfig();
    if (kerberos != null) {
      kerberosConfig.setKerberosMasterKey(kerberos.getMasterKey());
      kerberosConfig.setKerberosAdmin(kerberos.getAdmin());
      kerberosConfig.setKerberosPassword(kerberos.getPassword());
      kerberosConfig.setKerberosUrl(kerberos.getUrl());
      kerberosConfig.setKerberosRealm(kerberos.getRealm());
      kerberosConfig.setKerberosTcpAllowed(kerberos.getTcpAllowed());
      kerberosConfig.setKerberosPrincipal(kerberos.getPrincipal());
      kerberosConfig.setKerberosLdapUrl(kerberos.getLdapUrl());
      kerberosConfig.setKerberosContainerDn(kerberos.getContainerDn());
    }
    cluster.setKerberosConfig(kerberosConfig);
    cluster.setLdapRequired(source.getLdapRequired());
    cluster.setConfigStrategy(source.getConfigStrategy());
    cluster.setEnableShipyard(source.getEnableShipyard());
    cluster.setEmailTo(source.getEmailTo());
    FileSystemBase fileSystem = source.getFileSystem();
    cluster.setCloudbreakAmbariPassword(PasswordUtil.generatePassword());
    cluster.setCloudbreakAmbariUser("cloudbreak");
    if (fileSystem != null) {
      cluster.setFileSystem(getConversionService().convert(fileSystem, FileSystem.class));
    }
    try {
      cluster.setBlueprintInputs(source.getBlueprintInputs() == null
          ? new Json(new HashMap<>())
          : new Json(convertBlueprintInputJsons(source.getBlueprintInputs())));
    } catch (JsonProcessingException e) {
      cluster.setBlueprintInputs(null);
    }
    cluster.setBlueprintCustomProperties(source.getBlueprintCustomProperties());
    return cluster;
  }