/**
 * Maps a persisted {@link InstanceMetaData} entity onto the cloud-layer {@link CloudInstance}
 * model, translating the stored instance status via {@code getInstanceStatus}.
 *
 * @param metaDataEntity the persisted instance metadata to convert (typo "Enity" fixed)
 * @return a CloudInstance carrying the instance id and its rebuilt instance template
 */
@Override
public CloudInstance convert(InstanceMetaData metaDataEntity) {
    InstanceGroup group = metaDataEntity.getInstanceGroup();
    // Reuse the already-fetched group instead of navigating the association a second time.
    Template template = group.getTemplate();
    InstanceStatus status = getInstanceStatus(metaDataEntity);
    InstanceTemplate instanceTemplate = stackToCloudStackConverter.buildInstanceTemplate(
            template, group.getGroupName(), metaDataEntity.getPrivateId(), status);
    return new CloudInstance(metaDataEntity.getInstanceId(), instanceTemplate);
}
/**
 * Asks the cloud-platform connector to remove the single instance backing the given
 * metadata entry, as part of a rollback.
 *
 * @param stack            the stack owning the instance
 * @param instanceMetaData metadata of the instance to remove on the provider side
 */
private void deleteResourceAndDependencies(Stack stack, InstanceMetaData instanceMetaData) {
    // Parameterized SLF4J logging, consistent with the rest of the file, instead of String.format.
    LOGGER.info("Instance {} rollback started.", instanceMetaData.getInstanceId());
    CloudPlatformConnector cloudPlatformConnector = cloudPlatformConnectors.get(stack.cloudPlatform());
    // The connector API takes a set even for a single instance id.
    Set<String> instanceIds = new HashSet<>();
    instanceIds.add(instanceMetaData.getInstanceId());
    cloudPlatformConnector.removeInstances(
            stack, instanceIds, instanceMetaData.getInstanceGroup().getGroupName());
    LOGGER.info(
            "Instance deleted with {} id and {} name.",
            instanceMetaData.getId(),
            instanceMetaData.getInstanceId());
}
/**
 * Removes the persisted {@code Resource} record that tracks this instance for the stack,
 * if one exists for the stack's cloud platform.
 *
 * @param stack            the stack the instance belongs to
 * @param instanceMetaData metadata identifying the instance whose resource row is dropped
 */
private void deleteInstanceResourceFromDatabase(Stack stack, InstanceMetaData instanceMetaData) {
    // The platform-specific metadata setup knows which resource type represents an instance.
    ResourceType resourceType =
            metadataSetups.get(stack.cloudPlatform()).getInstanceResourceType();
    Resource instanceResource = resourceRepository.findByStackIdAndNameAndType(
            stack.getId(), instanceMetaData.getInstanceId(), resourceType);
    // No row means nothing to clean up; only delete when the lookup found a record.
    if (instanceResource != null) {
        resourceRepository.delete(instanceResource.getId());
    }
}
/**
 * Downscale must be rejected when the data held by the candidate node does not fit into the
 * remaining nodes' free DFS capacity: node1 holds 10 000 bytes, while node2 + node3 only offer
 * 11 000 bytes of headroom in total, which the service deems insufficient.
 */
@Test
public void testUpdateHostsForDownscaleWhenRemainingSpaceIsNotEnough() throws Exception {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    // DFS space per node: key = used bytes, value = remaining bytes.
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(5_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(10_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(6_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
            .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(
            any(TLSClientConfig.class), any(String.class), any(String.class)))
            .thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1"))
            .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambariClient, "slave_1"))
            .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostsMetaData, "slave_1"))
            .thenReturn(hostsMetadataList);
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
            .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1"))
            .thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2"))
            .thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3"))
            .thenReturn(instanceMetaData3);
    Exception result = null;
    try {
        underTest.updateHosts(stack.getId(), json);
    } catch (BadRequestException e) {
        result = e;
    }
    // Fail explicitly when no exception was thrown; previously result.getMessage() would NPE
    // and mask the real failure mode.
    if (result == null) {
        throw new AssertionError("Expected BadRequestException was not thrown by updateHosts");
    }
    assertEquals(
            "Trying to move '10000' bytes worth of data to nodes with '11000' bytes of capacity is not allowed",
            result.getMessage());
}
/**
 * Triggers an asynchronous downscale of the stack for the given instance ids and blocks until
 * the result arrives on the event bus.
 *
 * @param stack           the stack being downscaled
 * @param gateWayUserData gateway user data passed to the cloud-stack conversion
 * @param coreUserData    core user data passed to the cloud-stack conversion
 * @param instanceIds     provider-side ids of the instances to remove
 * @param instanceGroup   name of the group the instances belong to
 * @return the same set of instance ids on success
 * @throws OperationException if the downscale fails or the wait is interrupted
 */
@Override
public Set<String> removeInstances(
        Stack stack,
        String gateWayUserData,
        String coreUserData,
        Set<String> instanceIds,
        String instanceGroup) {
    LOGGER.debug("Assembling downscale stack event for stack: {}", stack);
    CloudContext cloudContext = new CloudContext(stack);
    CloudCredential cloudCredential = credentialConverter.convert(stack.getCredential());
    List<CloudResource> resources = cloudResourceConverter.convert(stack.getResources());
    // Collect the cloud-level representation of only the instances selected for removal.
    List<CloudInstance> instances = new ArrayList<>();
    InstanceGroup group = stack.getInstanceGroupByInstanceGroupName(instanceGroup);
    for (InstanceMetaData metaData : group.getAllInstanceMetaData()) {
        if (instanceIds.contains(metaData.getInstanceId())) {
            instances.add(metadataConverter.convert(metaData));
        }
    }
    // NOTE(review): convert(...) receives coreUserData before gateWayUserData — the reverse of
    // this method's parameter order. Verify against the converter's signature.
    CloudStack cloudStack = cloudStackConverter.convert(stack, coreUserData, gateWayUserData, instanceIds);
    DownscaleStackRequest<DownscaleStackResult> downscaleRequest = new DownscaleStackRequest<>(
            cloudContext, cloudCredential, cloudStack, resources, instances);
    LOGGER.info("Triggering downscale stack event: {}", downscaleRequest);
    eventBus.notify(downscaleRequest.selector(), Event.wrap(downscaleRequest));
    try {
        DownscaleStackResult res = downscaleRequest.await();
        LOGGER.info("Downscale stack result: {}", res);
        // Enums are singletons: identity comparison is null-safe and idiomatic.
        if (res.getStatus() == EventStatus.FAILED) {
            LOGGER.error("Failed to downscale the stack", res.getErrorDetails());
            throw new OperationException(res.getErrorDetails());
        }
        return instanceIds;
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        LOGGER.error("Error while downscaling the stack", e);
        throw new OperationException(e);
    }
}
/**
 * Downscale by one node succeeds and triggers the downscale flow when every remaining node
 * has enough free DFS space; the mocked DFS report gives node3 the least stored data.
 */
@Test
public void testUpdateHostsForDownscaleSelectNodesWithLessData()
        throws ConnectException, CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson adjustment = new HostGroupAdjustmentJson();
    adjustment.setHostGroup("slave_1");
    adjustment.setScalingAdjustment(-1);
    AmbariClient ambari = mock(AmbariClient.class);
    HostMetadata hostMeta1 = mock(HostMetadata.class);
    HostMetadata hostMeta2 = mock(HostMetadata.class);
    HostMetadata hostMeta3 = mock(HostMetadata.class);
    InstanceMetaData instanceMeta1 = mock(InstanceMetaData.class);
    InstanceMetaData instanceMeta2 = mock(InstanceMetaData.class);
    InstanceMetaData instanceMeta3 = mock(InstanceMetaData.class);
    List<HostMetadata> hostList = asList(hostMeta1, hostMeta2, hostMeta3);
    Set<HostMetadata> hostSet = new HashSet<>(hostList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostSet);
    hostGroup.setName("slave_1");
    // DFS space per node: key = used bytes, value = remaining bytes.
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node1", singletonMap(90_000L, 10_000L));
    dfsSpace.put("node2", singletonMap(85_000L, 15_000L));
    dfsSpace.put("node3", singletonMap(80_000L, 20_000L));
    when(hostMeta1.getHostName()).thenReturn("node1");
    when(hostMeta2.getHostName()).thenReturn("node2");
    when(hostMeta3.getHostName()).thenReturn("node3");
    // None of the candidates hosts the Ambari server, so all are eligible for removal.
    when(instanceMeta1.getAmbariServer()).thenReturn(false);
    when(instanceMeta2.getAmbariServer()).thenReturn(false);
    when(instanceMeta3.getAmbariServer()).thenReturn(false);
    when(hostGroupRepository.findHostGroupInClusterByName(cluster.getId(), "slave_1"))
            .thenReturn(hostGroup);
    when(ambariClientProvider.getAmbariClient(
            any(TLSClientConfig.class), any(String.class), any(String.class)))
            .thenReturn(ambari);
    when(ambari.getComponentsCategory("multi-node-yarn", "slave_1"))
            .thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(configurationService.getConfiguration(ambari, "slave_1"))
            .thenReturn(singletonMap(ConfigParam.DFS_REPLICATION.key(), "1"));
    when(hostFilterService.filterHostsForDecommission(stack, hostSet, "slave_1"))
            .thenReturn(hostList);
    when(ambari.getBlueprintMap(cluster.getBlueprint().getBlueprintName()))
            .thenReturn(singletonMap("slave_1", asList("DATANODE")));
    when(ambari.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1"))
            .thenReturn(instanceMeta1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2"))
            .thenReturn(instanceMeta2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3"))
            .thenReturn(instanceMeta3);
    doNothing().when(flowManager).triggerClusterDownscale(anyObject());

    underTest.updateHosts(stack.getId(), adjustment);

    verify(flowManager, times(1)).triggerClusterDownscale(anyObject());
}
/**
 * Translates the persisted instance state read from {@link InstanceMetaData} into the
 * cloud-layer {@code InstanceStatus} used by the connector model.
 *
 * @param metaData the metadata entity whose stored status is mapped
 * @return the corresponding cloud-layer status, or {@code UNKNOWN} for unmapped states
 */
private InstanceStatus getInstanceStatus(InstanceMetaData metaData) {
    switch (metaData.getInstanceStatus()) {
        case REQUESTED:
            return InstanceStatus.CREATE_REQUESTED;
        case CREATED:
            return InstanceStatus.CREATED;
        // Both unregistered and registered instances map to STARTED: the VM is running either way.
        case UNREGISTERED:
        case REGISTERED:
            return InstanceStatus.STARTED;
        case DECOMMISSIONED:
            return InstanceStatus.DELETE_REQUESTED;
        case TERMINATED:
            return InstanceStatus.TERMINATED;
        default:
            return InstanceStatus.UNKNOWN;
    }
}
/**
 * Detects nodes that failed to bootstrap, shrinks their instance group's node count, removes
 * each failed instance on the provider side and in the database, and marks its metadata
 * TERMINATED. Fires Cloudbreak events so the progress is visible to the user.
 *
 * @param orchestrator  container orchestrator used to list the reachable nodes
 * @param stack         the stack being created
 * @param gatewayConfig gateway used by the orchestrator for the availability check
 * @param nodes         the full set of nodes that were expected to bootstrap
 * @throws CloudbreakOrchestratorFailedException if removing a node would leave its
 *         instance group with fewer than one node
 */
public void terminateFailedNodes(
        ContainerOrchestrator orchestrator, Stack stack, GatewayConfig gatewayConfig, Set<Node> nodes)
        throws CloudbreakOrchestratorFailedException {
    List<String> allAvailableNode = orchestrator.getAvailableNodes(gatewayConfig, nodes);
    List<Node> missingNodes = selectMissingNodes(nodes, allAvailableNode);
    // Idiomatic emptiness check instead of size() > 0.
    if (!missingNodes.isEmpty()) {
        String message = String.format(
                "Bootstrap failed on %s nodes. These nodes will be terminated.", missingNodes.size());
        LOGGER.info(message);
        eventService.fireCloudbreakEvent(stack.getId(), Status.UPDATE_IN_PROGRESS.name(), message);
        for (Node missingNode : missingNodes) {
            // NOTE(review): this lookup may return null for an unknown private address, which
            // would NPE below — confirm the repository contract before relying on it.
            InstanceMetaData instanceMetaData = instanceMetaDataRepository.findNotTerminatedByPrivateAddress(
                    stack.getId(), missingNode.getPrivateIp());
            InstanceGroup ig = instanceGroupRepository.findOneByGroupNameInStack(
                    stack.getId(), instanceMetaData.getInstanceGroup().getGroupName());
            ig.setNodeCount(ig.getNodeCount() - 1);
            // A group must keep at least one node; otherwise the whole cluster creation fails.
            if (ig.getNodeCount() < 1) {
                throw new CloudbreakOrchestratorFailedException(
                        String.format(
                                "%s instancegroup nodecount was lower than 1 cluster creation failed.",
                                ig.getGroupName()));
            }
            instanceGroupRepository.save(ig);
            message = String.format(
                    "Delete '%s' node. and Decrease the nodecount on %s instancegroup",
                    instanceMetaData.getInstanceId(), ig.getGroupName());
            LOGGER.info(message);
            eventService.fireCloudbreakEvent(stack.getId(), Status.UPDATE_IN_PROGRESS.name(), message);
            // Remove the instance on the provider side first, then purge its DB resource record.
            deleteResourceAndDependencies(stack, instanceMetaData);
            deleteInstanceResourceFromDatabase(stack, instanceMetaData);
            instanceMetaData.setInstanceStatus(InstanceStatus.TERMINATED);
            instanceMetaDataRepository.save(instanceMetaData);
            // Parameterized SLF4J logging, consistent with the rest of the file.
            LOGGER.info(
                    "The status of instanceMetadata with {} id and {} name setted to TERMINATED.",
                    instanceMetaData.getId(),
                    instanceMetaData.getInstanceId());
        }
    }
}