@Override
public void handleStart(Operation start) {
  ServiceUtils.logInfo(this, "Starting service %s", getSelfLink());
  try {
    // Initialize the task state.
    State s = start.getBody(State.class);
    if (s.taskInfo == null || s.taskInfo.stage == TaskState.TaskStage.CREATED) {
      s.taskInfo = new TaskState();
      s.taskInfo.stage = TaskState.TaskStage.STARTED;
      s.taskInfo.subStage = TaskState.SubStage.UPDATE_DATASTORE_COUNTS;
    }

    if (s.documentExpirationTimeMicros <= 0) {
      s.documentExpirationTimeMicros =
          ServiceUtils.computeExpirationTime(ServiceUtils.DEFAULT_DOC_EXPIRATION_TIME);
    }

    if (s.queryPollDelay == null) {
      s.queryPollDelay = DEFAULT_QUERY_POLL_DELAY;
    }

    validateState(s);
    start.setBody(s).complete();

    sendStageProgressPatch(s, s.taskInfo.stage, s.taskInfo.subStage);
  } catch (Throwable e) {
    ServiceUtils.logSevere(this, e);
    if (!OperationUtils.isCompleted(start)) {
      start.fail(e);
    }
  }
}
/**
 * Handle service periodic maintenance calls.
 */
@Override
public void handleMaintenance(Operation post) {
  post.complete();

  Operation.CompletionHandler handler = (Operation op, Throwable failure) -> {
    if (null != failure) {
      // Query failed, so abort and retry on the next maintenance interval.
      logFailure(failure);
      return;
    }

    NodeSelectorService.SelectOwnerResponse rsp =
        op.getBody(NodeSelectorService.SelectOwnerResponse.class);
    if (!getHost().getId().equals(rsp.ownerNodeId)) {
      ServiceUtils.logInfo(TaskTriggerService.this,
          "Host[%s]: Not owner of scheduler [%s] (Owner Info [%s])",
          getHost().getId(), getSelfLink(), Utils.toJson(rsp));
      return;
    }

    State state = new State();
    sendSelfPatch(state);
  };

  Operation selectOwnerOp = Operation
      .createPost(null)
      .setExpiration(ServiceUtils.computeExpirationTime(OWNER_SELECTION_TIMEOUT_MILLIS))
      .setCompletion(handler);
  getHost().selectOwner(null, getSelfLink(), selectOwnerOp);
}
@Override
public void handleStart(Operation start) {
  ServiceUtils.logInfo(this, "Starting service %s", getSelfLink());
  State startState = start.getBody(State.class);
  InitializationUtils.initialize(startState);

  if (null == startState.queryCreateVmTaskInterval) {
    startState.queryCreateVmTaskInterval = HostUtils.getDeployerContext(this).getTaskPollDelay();
  }

  validateState(startState);

  if (TaskState.TaskStage.CREATED == startState.taskState.stage) {
    startState.taskState.stage = TaskState.TaskStage.STARTED;
  }

  start.setBody(startState).complete();

  try {
    if (ControlFlags.isOperationProcessingDisabled(startState.controlFlags)) {
      ServiceUtils.logInfo(this, "Skipping start operation processing (disabled)");
    } else if (TaskState.TaskStage.STARTED == startState.taskState.stage) {
      TaskUtils.sendSelfPatch(this, buildPatch(startState.taskState.stage, (Throwable) null));
    }
  } catch (Throwable t) {
    failTask(t);
  }
}
/**
 * Applies the patch to the current document state.
 *
 * @param currentState Supplies the current state object.
 * @param patchState Supplies the patch state object.
 */
protected void applyPatch(State currentState, State patchState) {
  if (patchState.taskInfo != null) {
    if (patchState.taskInfo.stage != currentState.taskInfo.stage
        || patchState.taskInfo.subStage != currentState.taskInfo.subStage) {
      ServiceUtils.logInfo(this, "moving stage to %s:%s",
          patchState.taskInfo.stage, patchState.taskInfo.subStage);
    }

    if (patchState.taskInfo.subStage != null) {
      adjustStat(patchState.taskInfo.subStage.toString(), 1);
    }

    currentState.taskInfo = patchState.taskInfo;
  }

  if (patchState.dataStoreCount != null) {
    currentState.dataStoreCount = patchState.dataStoreCount;
  }

  if (patchState.finishedCopies != null) {
    currentState.finishedCopies = patchState.finishedCopies;
  }

  if (patchState.failedOrCanceledCopies != null) {
    currentState.failedOrCanceledCopies = patchState.failedOrCanceledCopies;
  }
}
/**
 * Patch operation handler. Implements all logic to drive the state machine.
 *
 * @param patch Supplies the patch operation.
 */
@Override
public void handlePatch(Operation patch) {
  State currentState = getState(patch);
  State patchState = patch.getBody(State.class);

  try {
    validatePatch(currentState, patchState);
    applyPatch(currentState, patchState);
    validateState(currentState);
    patch.complete();

    switch (currentState.taskInfo.stage) {
      case STARTED:
        handleStartedStage(currentState, patchState);
        break;
      case FAILED:
      case FINISHED:
      case CANCELLED:
        break;
      default:
        throw new IllegalStateException(
            String.format("Invalid stage %s", currentState.taskInfo.stage));
    }
  } catch (Throwable e) {
    ServiceUtils.logSevere(this, e);
    if (!OperationUtils.isCompleted(patch)) {
      patch.fail(e);
    }
  }
}
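// The validatePatch helper referenced above is not shown in this section. A minimal sketch of
// the usual check, assuming Guava Preconditions (checkNotNull/checkState) are statically
// imported and that State exposes the taskInfo field used in applyPatch above; the real service
// may enforce additional per-field rules.
protected void validatePatch(State currentState, State patchState) {
  checkNotNull(patchState, "patch body cannot be null");
  checkNotNull(patchState.taskInfo, "patch taskInfo cannot be null");

  // Disallow moving the task backwards through the stage progression
  // (CREATED -> STARTED -> FINISHED/FAILED/CANCELLED).
  checkState(patchState.taskInfo.stage.ordinal() >= currentState.taskInfo.stage.ordinal(),
      "stage cannot move backwards");
}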
@Test
public void testPrepareVmDiskOperation() throws Throwable {
  com.vmware.photon.controller.api.Operation operation =
      com.vmware.photon.controller.api.Operation.DETACH_DISK;

  DiskService.State diskState = new DiskService.State();
  diskState.name = "test-vm-disk-1";
  diskState.projectId = projectId;
  diskState.flavorId =
      flavorDcpBackend.getEntityByNameAndKind("core-100", PersistentDisk.KIND).getId();
  diskState.capacityGb = 64;
  diskState.diskType = DiskType.PERSISTENT;
  diskState.state = DiskState.ATTACHED;

  Operation result = dcpClient.post(DiskServiceFactory.SELF_LINK, diskState);
  DiskService.State createdDiskState = result.getBody(DiskService.State.class);
  String diskId = ServiceUtils.getIDFromDocumentSelfLink(createdDiskState.documentSelfLink);

  List<String> disks = new ArrayList<>();
  disks.add(diskId);

  TaskEntity task = vmDcpBackend.prepareVmDiskOperation(vmId, disks, operation);

  assertThat(task, is(notNullValue()));
  assertThat(task.getState(), is(TaskEntity.State.QUEUED));
  assertThat(task.getSteps().size(), is(1));
  assertThat(task.getSteps().get(0).getOperation(), is(operation));

  try {
    vmDcpBackend.prepareVmDiskOperation(
        vmId, disks, com.vmware.photon.controller.api.Operation.MOCK_OP);
    fail("vmDcpBackend.prepareVmDiskOperation with an invalid operation should have failed");
  } catch (NotImplementedException e) {
    // Expected: MOCK_OP is not a supported disk operation.
  }
}
@BeforeMethod
public void setUp() throws Throwable {
  commonHostAndClientSetup(basicServiceHost, apiFeDcpRestClient);
  commonDataSetup(
      tenantDcpBackend,
      resourceTicketDcpBackend,
      projectDcpBackend,
      flavorDcpBackend,
      flavorLoader);

  vm = new VmService.State();
  vm.name = UUID.randomUUID().toString();
  flavorEntity = flavorDcpBackend.getEntityByNameAndKind("core-100", Vm.KIND);
  vm.flavorId = flavorEntity.getId();
  vm.imageId = UUID.randomUUID().toString();
  vm.projectId = projectId;
  vm.vmState = VmState.CREATING;

  vm.affinities = new ArrayList<>();
  vm.affinities.add(new LocalitySpec("id1", "kind1"));

  Iso iso = new Iso();
  iso.setName(UUID.randomUUID().toString());
  iso.setSize(-1L);
  vm.isos = new ArrayList<>();
  vm.isos.add(iso);

  vm.metadata = new HashMap<>();
  vm.metadata.put("key1", UUID.randomUUID().toString());

  vm.networks = new ArrayList<>();
  vm.networks.add(UUID.randomUUID().toString());
  vm.agent = UUID.randomUUID().toString();
  vm.host = UUID.randomUUID().toString();
  vm.datastore = UUID.randomUUID().toString();
  vm.datastoreName = UUID.randomUUID().toString();

  vm.tags = new HashSet<>();
  vm.tags.add("namespace1:predicate1=value1");
  vm.tags.add("namespace2:predicate2=value2");

  Operation result = dcpClient.post(VmServiceFactory.SELF_LINK, vm);
  createdVm = result.getBody(VmService.State.class);
  vmId = ServiceUtils.getIDFromDocumentSelfLink(createdVm.documentSelfLink);

  DeploymentCreateSpec deploymentCreateSpec = new DeploymentCreateSpec();
  deploymentCreateSpec.setImageDatastores(Collections.singleton(UUID.randomUUID().toString()));
  TaskEntity deploymentTask = deploymentBackend.prepareCreateDeployment(deploymentCreateSpec);

  HostCreateSpec hostCreateSpec = new HostCreateSpec();
  hostCreateSpec.setAddress(vm.host);
  hostCreateSpec.setUsageTags(ImmutableList.of(UsageTag.CLOUD));
  hostCreateSpec.setUsername(UUID.randomUUID().toString());
  hostCreateSpec.setPassword(UUID.randomUUID().toString());
  TaskEntity hostTask = hostBackend.prepareHostCreate(hostCreateSpec, deploymentTask.getEntityId());
  hostId = hostTask.getEntityId();
}
@Override
public void handleStart(Operation startOperation) {
  ServiceUtils.logInfo(this, "Starting service %s", getSelfLink());
  State startState = startOperation.getBody(State.class);
  InitializationUtils.initialize(startState);
  ValidationUtils.validateState(startState);
  startOperation.complete();
}
/**
 * This method performs document state updates in response to an operation which sets the state
 * to STARTED.
 *
 * @param currentState Supplies the current state object.
 * @param vmState Supplies the state object of the VmService entity.
 * @param hostState Supplies the state object of the HostService entity.
 * @param imageState Supplies the state object of the ImageService entity.
 * @param projectState Supplies the state object of the ProjectService entity.
 * @param vmFlavorState Supplies the state object of the FlavorService entity for the VM flavor.
 * @param diskFlavorState Supplies the state object of the FlavorService entity for the boot disk flavor.
 */
private void processStartedStage(
    final State currentState,
    final VmService.State vmState,
    final HostService.State hostState,
    final ImageService.State imageState,
    final ProjectService.State projectState,
    final FlavorService.State vmFlavorState,
    final FlavorService.State diskFlavorState) throws IOException {

  final Service service = this;

  FutureCallback<CreateVmTaskService.State> callback =
      new FutureCallback<CreateVmTaskService.State>() {
        @Override
        public void onSuccess(@Nullable CreateVmTaskService.State result) {
          switch (result.taskState.stage) {
            case FINISHED:
              updateVmId(currentState, result.vmId);
              break;
            case CANCELLED:
              TaskUtils.sendSelfPatch(service,
                  buildPatch(TaskState.TaskStage.CANCELLED, (ServiceErrorResponse) null));
              break;
            case FAILED:
              ServiceErrorResponse failed = new ServiceErrorResponse();
              failed.message = String.format(
                  "CreateVmTaskService failed to create the vm. documentSelfLink: %s. failureReason: %s",
                  result.documentSelfLink, result.taskState.failure.message);
              TaskUtils.sendSelfPatch(service, buildPatch(TaskState.TaskStage.FAILED, failed));
              break;
          }
        }

        @Override
        public void onFailure(Throwable t) {
          failTask(t);
        }
      };

  CreateVmTaskService.State startState = new CreateVmTaskService.State();
  startState.projectId = ServiceUtils.getIDFromDocumentSelfLink(projectState.documentSelfLink);
  startState.vmCreateSpec =
      composeVmCreateSpec(vmState, hostState, imageState, vmFlavorState, diskFlavorState);
  startState.queryCreateVmTaskInterval = currentState.queryCreateVmTaskInterval;

  TaskUtils.startTaskAsync(
      this,
      CreateVmTaskFactoryService.SELF_LINK,
      startState,
      (state) -> TaskUtils.finalTaskStages.contains(state.taskState.stage),
      CreateVmTaskService.State.class,
      HostUtils.getDeployerContext(this).getTaskPollDelay(),
      callback);
}
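// updateVmId (referenced in the FINISHED branch above) is not shown in this section. A minimal
// sketch, assuming it records the newly created vmId on the VmService document and then advances
// this task; the vmServiceLink and vmId field names and the next stage are assumptions, not the
// confirmed implementation.
private void updateVmId(State currentState, String vmId) {
  VmService.State vmPatchState = new VmService.State();
  vmPatchState.vmId = vmId;  // assumed field on the VmService document

  sendRequest(Operation
      .createPatch(UriUtils.buildUri(getHost(), currentState.vmServiceLink))  // assumed field
      .setBody(vmPatchState)
      .setCompletion((op, ex) -> {
        if (ex != null) {
          failTask(ex);
          return;
        }
        // Advance this task once the VM id has been recorded.
        TaskUtils.sendSelfPatch(this, buildPatch(TaskState.TaskStage.FINISHED, (Throwable) null));
      }));
}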
/**
 * This function creates a set of ImageCopyService instances parented to the current service
 * instance, one per target datastore.
 *
 * @param targetDataStoreSet Supplies the set of target datastores.
 * @param current Supplies the current state object.
 */
private void triggerCopyServices(Set<Datastore> targetDataStoreSet, State current) {
  if (targetDataStoreSet.isEmpty()) {
    ServiceUtils.logInfo(this, "No copies to trigger!");
    return;
  }

  for (Datastore targetDataStore : targetDataStoreSet) {
    triggerCopyService(current, targetDataStore.getId());
  }
}
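// The per-datastore triggerCopyService helper is not included in this section. A minimal sketch
// of the pattern, assuming an ImageCopyService child task started through a factory self link;
// the start-state field names below (image, destinationDataStoreId, parentLink) are assumptions
// about the ImageCopyService document, not confirmed by this section.
private void triggerCopyService(final State current, String datastoreId) {
  ImageCopyService.State copyState = new ImageCopyService.State();
  copyState.parentLink = getSelfLink();            // parent the child task to this service
  copyState.image = current.image;                 // assumed field: image being replicated
  copyState.destinationDataStoreId = datastoreId;  // target datastore for this copy

  // Start the child task by posting its start state to the factory service.
  sendRequest(Operation
      .createPost(UriUtils.buildUri(getHost(), ImageCopyServiceFactory.SELF_LINK))
      .setBody(copyState));
}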
@Override
public void handlePatch(Operation patchOperation) {
  ServiceUtils.logInfo(this, "Handling patch for service %s", getSelfLink());
  State currentState = getState(patchOperation);
  State patchState = patchOperation.getBody(State.class);
  ValidationUtils.validatePatch(currentState, patchState);
  PatchUtils.patchState(currentState, patchState);
  ValidationUtils.validateState(currentState);
  patchOperation.complete();
}
@Override
public void handlePatch(Operation patchOperation) {
  ServiceUtils.logInfo(this, "Handling patch for service %s", getSelfLink());
  State startState = getState(patchOperation);
  State patchState = patchOperation.getBody(State.class);
  validatePatchState(startState, patchState);
  State currentState = applyPatch(startState, patchState);
  validateState(currentState);
  patchOperation.complete();

  try {
    if (ControlFlags.isOperationProcessingDisabled(currentState.controlFlags)) {
      ServiceUtils.logInfo(this, "Skipping patch operation processing (disabled)");
    } else if (TaskState.TaskStage.STARTED == currentState.taskState.stage) {
      createTenant(currentState);
    }
  } catch (Throwable t) {
    failTask(t);
  }
}
@Override
public void handlePatch(Operation patch) {
  ServiceUtils.logInfo(this, "Handling patch for service %s", getSelfLink());
  KubernetesClusterCreateTask currentState = getState(patch);
  KubernetesClusterCreateTask patchState = patch.getBody(KubernetesClusterCreateTask.class);
  validatePatchState(currentState, patchState);
  PatchUtils.patchState(currentState, patchState);
  validateStartState(currentState);
  patch.complete();

  try {
    if (ControlFlags.isOperationProcessingDisabled(currentState.controlFlags)) {
      ServiceUtils.logInfo(this, "Skipping patch handling (disabled)");
    } else if (TaskState.TaskStage.STARTED == currentState.taskState.stage) {
      processStateMachine(currentState);
    }
  } catch (Throwable t) {
    failTask(t);
  }
}
/**
 * This method applies a patch to a state object.
 *
 * @param startState Supplies the start state object.
 * @param patchState Supplies the patch state object.
 */
private State applyPatch(State startState, State patchState) {
  if (patchState.taskState != null) {
    if (patchState.taskState.stage != startState.taskState.stage) {
      ServiceUtils.logInfo(this, "Moving to stage %s", patchState.taskState.stage);
    }

    startState.taskState = patchState.taskState;
  }

  return startState;
}
private State applyPatch(State currentState, State patchState) {
  if (patchState.taskState.stage != currentState.taskState.stage) {
    ServiceUtils.logInfo(this, "Moving to stage %s", patchState.taskState.stage);
    currentState.taskState = patchState.taskState;
  }

  if (patchState.tenantServiceLink != null) {
    currentState.tenantServiceLink = patchState.tenantServiceLink;
  }

  return currentState;
}
@Override
public void handleStart(Operation start) {
  ServiceUtils.logInfo(this, "Starting service %s", getSelfLink());
  State s = start.getBody(State.class);
  this.initializeState(s);
  this.validateState(s);

  // Set the maintenance interval to match the value in the state.
  this.setMaintenanceIntervalMicros(TimeUnit.MILLISECONDS.toMicros(s.triggerIntervalMillis));

  start.complete();
}
/**
 * This method creates a VmCreateSpec object for creating a VM.
 *
 * @param vmState Supplies the state object of the VmService entity.
 * @param hostState Supplies the state object of the HostService entity.
 * @param imageState Supplies the state object of the ImageService entity.
 * @param vmFlavorState Supplies the state object of the FlavorService entity for the VM flavor.
 * @param diskFlavorState Supplies the state object of the FlavorService entity for the boot disk flavor.
 * @return Returns the VmCreateSpec object.
 */
private VmCreateSpec composeVmCreateSpec(
    final VmService.State vmState,
    final HostService.State hostState,
    final ImageService.State imageState,
    final FlavorService.State vmFlavorState,
    final FlavorService.State diskFlavorState) {

  // Craft the VM creation spec.
  VmCreateSpec spec = new VmCreateSpec();
  spec.setName(vmState.name);
  spec.setFlavor(vmFlavorState.name);
  spec.setSourceImageId(ServiceUtils.getIDFromDocumentSelfLink(imageState.documentSelfLink));

  List<AttachedDiskCreateSpec> attachedDisks = new ArrayList<>();
  AttachedDiskCreateSpec bootDisk = new AttachedDiskCreateSpec();
  bootDisk.setName(vmState.name + "-bootdisk");
  bootDisk.setBootDisk(true);
  bootDisk.setFlavor(diskFlavorState.name);
  bootDisk.setKind(EphemeralDisk.KIND);
  attachedDisks.add(bootDisk);
  spec.setAttachedDisks(attachedDisks);

  Map<String, String> environment = new HashMap<>();
  spec.setEnvironment(environment);

  List<LocalitySpec> affinities = new ArrayList<>();

  LocalitySpec hostSpec = new LocalitySpec();
  hostSpec.setId(hostState.hostAddress);
  hostSpec.setKind("host");
  affinities.add(hostSpec);

  LocalitySpec datastoreSpec = new LocalitySpec();
  datastoreSpec.setId(hostState.metadata.get(HostService.State.METADATA_KEY_NAME_MANAGEMENT_DATASTORE));
  datastoreSpec.setKind("datastore");
  affinities.add(datastoreSpec);

  LocalitySpec portGroupSpec = new LocalitySpec();
  portGroupSpec.setId(hostState.metadata.get(HostService.State.METADATA_KEY_NAME_MANAGEMENT_PORTGROUP));
  portGroupSpec.setKind("portGroup");
  affinities.add(portGroupSpec);

  spec.setAffinities(affinities);
  return spec;
}
/**
 * Does any additional processing after the patch operation has been completed.
 *
 * @param current Supplies the current state object.
 */
private void processPatch(final State current) {
  try {
    Type stateType = Class.forName(current.triggerStateClassName);
    ServiceDocument postState = Utils.fromJson(current.serializedTriggerState, stateType);
    postState.documentExpirationTimeMicros =
        ServiceUtils.computeExpirationTime(current.taskExpirationAgeMillis);

    Operation post = Operation
        .createPost(UriUtils.buildUri(getHost(), current.factoryServiceLink))
        .setBody(postState);
    this.sendRequest(post);
  } catch (ClassNotFoundException ex) {
    logFailure(ex);
  }
}
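// For context, a hypothetical caller sketch (not taken from this codebase) showing how a trigger
// document could be populated so that processPatch above can deserialize and post it. The
// MyCleanerService / MyCleanerFactoryService names are illustrative placeholders; the field
// names come directly from processPatch.
private State buildExampleTriggerState() {
  State triggerState = new State();
  triggerState.factoryServiceLink = MyCleanerFactoryService.SELF_LINK;             // hypothetical factory link
  triggerState.triggerStateClassName = MyCleanerService.State.class.getName();      // class to deserialize into
  triggerState.serializedTriggerState = Utils.toJson(new MyCleanerService.State()); // body to post
  triggerState.taskExpirationAgeMillis = 5 * 60 * 60 * 1000;                        // assumed: five hours, in millis
  return triggerState;
}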
@Override
public void handleStart(Operation start) {
  ServiceUtils.logInfo(this, "Starting service %s", getSelfLink());
  KubernetesClusterCreateTask startState = start.getBody(KubernetesClusterCreateTask.class);
  InitializationUtils.initialize(startState);
  validateStartState(startState);

  if (startState.taskState.stage == TaskState.TaskStage.CREATED) {
    startState.taskState.stage = TaskState.TaskStage.STARTED;
    startState.taskState.subStage = TaskState.SubStage.ALLOCATE_RESOURCES;
  }

  start.setBody(startState).complete();

  try {
    if (ControlFlags.isOperationProcessingDisabled(startState.controlFlags)) {
      ServiceUtils.logInfo(this, "Skipping start operation processing (disabled)");
    } else if (TaskState.TaskStage.STARTED == startState.taskState.stage) {
      TaskUtils.sendSelfPatch(
          this, buildPatch(startState.taskState.stage, TaskState.SubStage.ALLOCATE_RESOURCES));
    }
  } catch (Throwable t) {
    failTask(t);
  }
}
private void failTaskAndPatchDocument(
    final KubernetesClusterCreateTask currentState,
    final NodeType nodeType,
    final Throwable throwable) {
  ServiceUtils.logSevere(this, throwable);
  KubernetesClusterCreateTask patchState = buildPatch(
      TaskState.TaskStage.FAILED, null,
      new IllegalStateException(String.format(
          "Failed to rollout %s. Error: %s", nodeType.toString(), throwable.toString())));

  ClusterService.State document = new ClusterService.State();
  document.clusterState = ClusterState.ERROR;
  updateStates(currentState, patchState, document);
}
/**
 * This method queries the list of data stores available in this ESX cloud instance and, on query
 * completion, creates a set of ImageCopyService instances and transitions the current service
 * instance to the AWAIT_COMPLETION sub-stage.
 *
 * @param current Supplies the current state object.
 */
protected void handleTriggerCopies(final State current) {
  try {
    Set<Datastore> datastoreSet = getZookeeperHostMonitor().getAllDatastores();
    ServiceUtils.logInfo(this, "All target datastores: %s", Utils.toJson(datastoreSet));
    triggerCopyServices(datastoreSet, current);

    // Move to the next stage.
    if (!current.isSelfProgressionDisabled) {
      State patch = ImageReplicatorService.this.buildPatch(
          TaskState.TaskStage.STARTED, TaskState.SubStage.AWAIT_COMPLETION, null);
      patch.dataStoreCount = datastoreSet.size();

      sendSelfPatch(patch);
    }
  } catch (Exception e) {
    failTask(e);
  }
}
private void createTestEnvironment() throws Throwable {
  ZookeeperClientFactory zkFactory = mock(ZookeeperClientFactory.class);

  sourceEnvironment = new TestEnvironment.Builder()
      .listeningExecutorService(listeningExecutorService)
      .apiClientFactory(apiClientFactory)
      .cloudServerSet(sourceCloudStore.getServerSet())
      .hostCount(1)
      .build();

  destinationEnvironment = new TestEnvironment.Builder()
      .hostCount(1)
      .apiClientFactory(apiClientFactory)
      .cloudServerSet(destinationCloudStore.getServerSet())
      .httpFileServiceClientFactory(httpFileServiceClientFactory)
      .zookeeperServersetBuilderFactory(zkFactory)
      .build();

  ZookeeperClient zkBuilder = mock(ZookeeperClient.class);
  doReturn(zkBuilder).when(zkFactory).create();
  doReturn(Collections.singleton(
      new InetSocketAddress("127.0.0.1", sourceEnvironment.getHosts()[0].getState().httpPort)))
      .when(zkBuilder)
      .getServers(Matchers.startsWith("127.0.0.1:2181"), eq("cloudstore"));

  ServiceHost sourceHost = sourceEnvironment.getHosts()[0];
  startState.sourceLoadBalancerAddress = sourceHost.getPublicUri().toString();

  TestHelper.createHostService(sourceCloudStore, Collections.singleton(UsageTag.MGMT.name()));
  TestHelper.createHostService(sourceCloudStore, Collections.singleton(UsageTag.CLOUD.name()));

  DeploymentService.State deploymentService = TestHelper.createDeploymentService(destinationCloudStore);
  startState.destinationDeploymentId =
      ServiceUtils.getIDFromDocumentSelfLink(deploymentService.documentSelfLink);
}
private void failTask(Throwable e) {
  ServiceUtils.logSevere(this, e);
  TaskUtils.sendSelfPatch(this, buildPatch(TaskState.TaskStage.FAILED, null, e));
}
private static void commonVmAndImageSetup(
    VmDcpBackend vmDcpBackend, NetworkDcpBackend networkDcpBackend) throws Throwable {
  AttachedDiskCreateSpec disk1 =
      new AttachedDiskCreateSpecBuilder().name("disk1").flavor("core-100").bootDisk(true).build();
  AttachedDiskCreateSpec disk2 =
      new AttachedDiskCreateSpecBuilder().name("disk2").flavor("core-200").capacityGb(10).build();

  List<LocalitySpec> affinities = new ArrayList<>();
  affinities.add(new LocalitySpec("disk-id1", "disk"));
  affinities.add(new LocalitySpec("disk-id2", "disk"));

  ImageService.State imageServiceState = new ImageService.State();
  imageServiceState.name = "image-1";
  imageServiceState.state = ImageState.READY;
  imageServiceState.size = 1024L * 1024L;
  imageServiceState.replicationType = ImageReplicationType.EAGER;
  imageServiceState.imageSettings = new ArrayList<>();
  ImageService.State.ImageSetting imageSetting = new ImageService.State.ImageSetting();
  imageSetting.name = "n1";
  imageSetting.defaultValue = "v1";
  imageServiceState.imageSettings.add(imageSetting);
  imageSetting = new ImageService.State.ImageSetting();
  imageSetting.name = "n2";
  imageSetting.defaultValue = "v2";
  imageServiceState.imageSettings.add(imageSetting);

  Operation result = dcpClient.post(ImageServiceFactory.SELF_LINK, imageServiceState);
  createdImageState = result.getBody(ImageService.State.class);
  imageId = ServiceUtils.getIDFromDocumentSelfLink(createdImageState.documentSelfLink);

  vmCreateSpec = new VmCreateSpec();
  vmCreateSpec.setName("test-vm");
  vmCreateSpec.setFlavor("core-100");
  vmCreateSpec.setSourceImageId(imageId);
  vmCreateSpec.setAttachedDisks(ImmutableList.of(disk1, disk2));
  vmCreateSpec.setAffinities(affinities);
  vmCreateSpec.setTags(ImmutableSet.of("value1", "value2"));

  List<String> networks = new ArrayList<>();

  List<String> portGroups = new ArrayList<>();
  portGroups.add("p1");
  NetworkCreateSpec networkCreateSpec = new NetworkCreateSpec();
  networkCreateSpec.setName("n1");
  networkCreateSpec.setPortGroups(portGroups);
  TaskEntity networkTask = networkDcpBackend.createNetwork(networkCreateSpec);
  networks.add(networkTask.getEntityId());

  portGroups = new ArrayList<>();
  portGroups.add("p2");
  networkCreateSpec.setName("n2");
  networkCreateSpec.setPortGroups(portGroups);
  networkTask = networkDcpBackend.createNetwork(networkCreateSpec);
  networks.add(networkTask.getEntityId());
  vmCreateSpec.setNetworks(networks);

  createdVmTaskEntity = vmDcpBackend.prepareVmCreate(projectId, vmCreateSpec);
}
private void sendStageProgressPatch(final TaskState.TaskStage stage) {
  ServiceUtils.logInfo(this, "Sending stage progress patch %s", stage);
  TaskUtils.sendSelfPatch(this, buildPatch(stage, null));
}
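// buildPatch is called throughout these task services but is not shown in this section. A
// minimal sketch of the common pattern, assuming a two-argument variant that carries a stage and
// an optional failure; individual services add sub-stage or ServiceErrorResponse overloads, so
// the exact signature here is an assumption.
protected State buildPatch(TaskState.TaskStage stage, @Nullable Throwable t) {
  State patchState = new State();
  patchState.taskState = new TaskState();
  patchState.taskState.stage = stage;

  if (null != t) {
    // Record the failure so that it is visible on the task document.
    patchState.taskState.failure = Utils.toServiceErrorResponse(t);
  }

  return patchState;
}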
private void failTask(Map<Long, Throwable> exs) {
  exs.values().forEach(e -> ServiceUtils.logSevere(this, e));
  TaskUtils.sendSelfPatch(
      this, buildPatch(TaskState.TaskStage.FAILED, exs.values().iterator().next()));
}
/**
 * Logs a failed query.
 *
 * @param e Supplies the failure that caused the query to fail.
 */
private void logFailure(Throwable e) {
  ServiceUtils.logSevere(this, e);
}