/**
 * Handles APIChangeDiskOfferingStateMsg: maps the requested state event to
 * Enabled/Disabled, persists the change, and publishes the resulting inventory.
 */
private void handle(APIChangeDiskOfferingStateMsg msg) {
    DiskOfferingStateEvent sevt = DiskOfferingStateEvent.valueOf(msg.getStateEvent());
    if (sevt == DiskOfferingStateEvent.disable) {
        self.setState(DiskOfferingState.Disabled);
    } else {
        self.setState(DiskOfferingState.Enabled);
    }
    self = dbf.updateAndRefresh(self);

    DiskOfferingInventory inv = DiskOfferingInventory.valueOf(self);
    APIChangeDiskOfferingStateEvent evt = new APIChangeDiskOfferingStateEvent(msg.getId());
    evt.setInventory(inv);
    bus.publish(evt);
}
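/**
 * Handles APIUpdateDiskOfferingMsg: applies the name and/or description from the
 * message, persists the change only if something was actually modified, and
 * publishes an APIUpdateDiskOfferingEvent carrying the resulting inventory.
 */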
private void handle(APIUpdateDiskOfferingMsg msg) {
    boolean update = false;
    if (msg.getName() != null) {
        self.setName(msg.getName());
        update = true;
    }
    if (msg.getDescription() != null) {
        self.setDescription(msg.getDescription());
        update = true;
    }
    if (update) {
        self = dbf.updateAndRefresh(self);
    }

    APIUpdateDiskOfferingEvent evt = new APIUpdateDiskOfferingEvent(msg.getId());
    evt.setInventory(DiskOfferingInventory.valueOf(self));
    bus.publish(evt);
}
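/**
 * Handles APIDeleteDiskOfferingMsg: builds a flow chain that drives the cascade
 * framework. In Permissive mode a deletion check runs before the delete; otherwise
 * a force delete is issued directly. On success the cleanup cascade is triggered and
 * the event is published; on failure the event is published with DELETE_RESOURCE_ERROR.
 */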
private void handle(APIDeleteDiskOfferingMsg msg) {
    final APIDeleteDiskOfferingEvent evt = new APIDeleteDiskOfferingEvent(msg.getId());
    final String issuer = DiskOfferingVO.class.getSimpleName();
    final List<DiskOfferingInventory> ctx = DiskOfferingInventory.valueOf(Arrays.asList(self));

    FlowChain chain = FlowChainBuilder.newSimpleFlowChain();
    chain.setName(String.format("delete-disk-offering-%s", msg.getUuid()));
    if (msg.getDeletionMode() == APIDeleteMessage.DeletionMode.Permissive) {
        chain.then(new NoRollbackFlow() {
            @Override
            public void run(final FlowTrigger trigger, Map data) {
                casf.asyncCascade(CascadeConstant.DELETION_CHECK_CODE, issuer, ctx, new Completion(trigger) {
                    @Override
                    public void success() {
                        trigger.next();
                    }

                    @Override
                    public void fail(ErrorCode errorCode) {
                        trigger.fail(errorCode);
                    }
                });
            }
        }).then(new NoRollbackFlow() {
            @Override
            public void run(final FlowTrigger trigger, Map data) {
                casf.asyncCascade(CascadeConstant.DELETION_DELETE_CODE, issuer, ctx, new Completion(trigger) {
                    @Override
                    public void success() {
                        trigger.next();
                    }

                    @Override
                    public void fail(ErrorCode errorCode) {
                        trigger.fail(errorCode);
                    }
                });
            }
        });
    } else {
        chain.then(new NoRollbackFlow() {
            @Override
            public void run(final FlowTrigger trigger, Map data) {
                casf.asyncCascade(CascadeConstant.DELETION_FORCE_DELETE_CODE, issuer, ctx, new Completion(trigger) {
                    @Override
                    public void success() {
                        trigger.next();
                    }

                    @Override
                    public void fail(ErrorCode errorCode) {
                        trigger.fail(errorCode);
                    }
                });
            }
        });
    }

    chain.done(new FlowDoneHandler(msg) {
        @Override
        public void handle(Map data) {
            casf.asyncCascadeFull(CascadeConstant.DELETION_CLEANUP_CODE, issuer, ctx, new NopeCompletion());
            bus.publish(evt);
        }
    }).error(new FlowErrorHandler(msg) {
        @Override
        public void handle(ErrorCode errCode, Map data) {
            evt.setErrorCode(errf.instantiateErrorCode(SysErrors.DELETE_RESOURCE_ERROR, errCode));
            bus.publish(evt);
        }
    }).start();
}
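/**
 * Starts a newly created appliance VM: validates the VM state, builds the
 * VmInstanceSpec (L3 networks, data disk offerings, image), then runs the appliance
 * VM creation flow chain. On success the destination host and root volume are
 * recorded on the VO and the VM is marked running; on failure the VO is removed and
 * the error is replied. The taskChain is always advanced, either by the flow
 * handlers or by the finally block.
 */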
@Override
protected void startVmFromNewCreate(final StartNewCreatedVmInstanceMsg msg, final SyncTaskChain taskChain) {
    boolean callNext = true;
    try {
        refreshVO();
        ErrorCode allowed = validateOperationByState(msg, self.getState(), null);
        if (allowed != null) {
            bus.replyErrorByMessageType(msg, allowed);
            return;
        }

        ErrorCode preCreated = extEmitter.preStartNewCreatedVm(msg.getVmInstanceInventory());
        if (preCreated != null) {
            bus.replyErrorByMessageType(msg, preCreated);
            return;
        }

        StartNewCreatedApplianceVmMsg smsg = (StartNewCreatedApplianceVmMsg) msg;
        ApplianceVmSpec aspec = smsg.getApplianceVmSpec();

        final VmInstanceSpec spec = new VmInstanceSpec();
        spec.setVmInventory(msg.getVmInstanceInventory());
        if (msg.getL3NetworkUuids() != null && !msg.getL3NetworkUuids().isEmpty()) {
            SimpleQuery<L3NetworkVO> nwquery = dbf.createQuery(L3NetworkVO.class);
            nwquery.add(L3NetworkVO_.uuid, SimpleQuery.Op.IN, msg.getL3NetworkUuids());
            List<L3NetworkVO> vos = nwquery.list();
            List<L3NetworkInventory> nws = L3NetworkInventory.valueOf(vos);
            spec.setL3Networks(nws);
        } else {
            spec.setL3Networks(new ArrayList<L3NetworkInventory>(0));
        }

        if (msg.getDataDiskOfferingUuids() != null && !msg.getDataDiskOfferingUuids().isEmpty()) {
            SimpleQuery<DiskOfferingVO> dquery = dbf.createQuery(DiskOfferingVO.class);
            dquery.add(DiskOfferingVO_.uuid, SimpleQuery.Op.IN, msg.getDataDiskOfferingUuids());
            List<DiskOfferingVO> vos = dquery.list();

            // allow creating multiple data volumes from the same disk offering
            List<DiskOfferingInventory> disks = new ArrayList<DiskOfferingInventory>();
            for (final String duuid : msg.getDataDiskOfferingUuids()) {
                DiskOfferingVO dvo = CollectionUtils.find(vos, new Function<DiskOfferingVO, DiskOfferingVO>() {
                    @Override
                    public DiskOfferingVO call(DiskOfferingVO arg) {
                        if (duuid.equals(arg.getUuid())) {
                            return arg;
                        }
                        return null;
                    }
                });
                disks.add(DiskOfferingInventory.valueOf(dvo));
            }
            spec.setDataDiskOfferings(disks);
        } else {
            spec.setDataDiskOfferings(new ArrayList<DiskOfferingInventory>(0));
        }

        ImageVO imvo = dbf.findByUuid(spec.getVmInventory().getImageUuid(), ImageVO.class);
        spec.getImageSpec().setInventory(ImageInventory.valueOf(imvo));

        spec.putExtensionData(ApplianceVmConstant.Params.applianceVmSpec.toString(), aspec);
        spec.setCurrentVmOperation(VmInstanceConstant.VmOperation.NewCreate);
        spec.putExtensionData(ApplianceVmConstant.Params.applianceVmSubType.toString(), getSelf().getApplianceVmType());

        changeVmStateInDb(VmInstanceStateEvent.starting);

        extEmitter.beforeStartNewCreatedVm(VmInstanceInventory.valueOf(self));
        FlowChain chain = apvmf.getCreateApplianceVmWorkFlowBuilder().build();

        chain.setName(String.format("create-appliancevm-%s", msg.getVmInstanceUuid()));
        chain.getData().put(VmInstanceConstant.Params.VmInstanceSpec.toString(), spec);
        chain.getData().put(ApplianceVmConstant.Params.applianceVmFirewallRules.toString(), aspec.getFirewallRules());

        addBootstrapFlows(chain, VolumeFormat.getMasterHypervisorTypeByVolumeFormat(imvo.getFormat()));

        List<Flow> subCreateFlows = getPostCreateFlows();
        if (subCreateFlows != null) {
            for (Flow f : subCreateFlows) {
                chain.then(f);
            }
        }

        chain.then(new NoRollbackFlow() {
            String __name__ = "change-appliancevm-status-to-connected";

            @Override
            public void run(FlowTrigger trigger, Map data) {
                // must reload here, otherwise it will override changes made by previous flows
                self = dbf.reload(self);
                getSelf().setStatus(ApplianceVmStatus.Connected);
                dbf.update(self);
                trigger.next();
            }
        });

        boolean noRollbackOnFailure = ApplianceVmGlobalProperty.NO_ROLLBACK_ON_POST_FAILURE;
        chain.noRollback(noRollbackOnFailure);

        chain.done(new FlowDoneHandler(msg, taskChain) {
            @Override
            public void handle(Map data) {
                VmInstanceSpec spec = (VmInstanceSpec) data.get(VmInstanceConstant.Params.VmInstanceSpec.toString());
                self = dbf.reload(self);
                self.setLastHostUuid(spec.getDestHost().getUuid());
                self.setHostUuid(spec.getDestHost().getUuid());
                self.setClusterUuid(spec.getDestHost().getClusterUuid());
                self.setZoneUuid(spec.getDestHost().getZoneUuid());
                self.setHypervisorType(spec.getDestHost().getHypervisorType());
                self.setRootVolumeUuid(spec.getDestRootVolume().getUuid());
                changeVmStateInDb(VmInstanceStateEvent.running);

                logger.debug(String.format("appliance vm[uuid:%s, name:%s, type:%s] is running ..",
                        self.getUuid(), self.getName(), getSelf().getApplianceVmType()));

                VmInstanceInventory inv = VmInstanceInventory.valueOf(self);
                extEmitter.afterStartNewCreatedVm(inv);

                StartNewCreatedVmInstanceReply reply = new StartNewCreatedVmInstanceReply();
                reply.setVmInventory(inv);
                bus.reply(msg, reply);
                taskChain.next();
            }
        }).error(new FlowErrorHandler(msg, taskChain) {
            @Override
            public void handle(ErrorCode errCode, Map data) {
                extEmitter.failedToStartNewCreatedVm(VmInstanceInventory.valueOf(self), errCode);
                dbf.remove(self);

                StartNewCreatedVmInstanceReply reply = new StartNewCreatedVmInstanceReply();
                reply.setError(errCode);
                reply.setSuccess(false);
                bus.reply(msg, reply);
                taskChain.next();
            }
        }).start();

        callNext = false;
    } finally {
        if (callNext) {
            taskChain.next();
        }
    }
}