@Override
public void allocate(Allocation allocInfo) throws Exception {
  try {
    final VmInstanceLifecycleHelper helper = VmInstanceLifecycleHelpers.get();
    final PrepareNetworkResourcesType request = new PrepareNetworkResourcesType();
    request.setAvailabilityZone(allocInfo.getPartition().getName());
    request.setVpc(allocInfo.getSubnet() == null
        ? null
        : CloudMetadatas.toDisplayName().apply(allocInfo.getSubnet().getVpc()));
    request.setSubnet(CloudMetadatas.toDisplayName().apply(allocInfo.getSubnet()));
    request.setFeatures(Lists.<NetworkFeature>newArrayList(new DnsHostNamesFeature()));

    helper.prepareNetworkAllocation(allocInfo, request);

    final PrepareNetworkResourcesResultType result = Networking.getInstance().prepare(request);

    for (final ResourceToken token : allocInfo.getAllocationTokens()) {
      for (final NetworkResource networkResource : result.getResources()) {
        if (token.getInstanceId().equals(networkResource.getOwnerId())) {
          token
              .getAttribute(NetworkResourceVmInstanceLifecycleHelper.NetworkResourcesKey)
              .add(networkResource);
        }
      }
    }
  } catch (Exception e) {
    throw Objects.firstNonNull(Exceptions.findCause(e, NotEnoughResourcesException.class), e);
  }
}
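/*
 * The nested loop above pairs each prepared network resource with the allocation token whose
 * instance id matches the resource owner id. Below is a minimal, standalone sketch of the same
 * matching step, using Guava's Multimaps.index to group resources by owner once instead of
 * rescanning the result list for every token. The Resource and Token classes here are simplified
 * stand-ins for illustration only, not the Eucalyptus types used above.
 */
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.Multimaps;

class OwnerMatchSketch {
  static final class Resource {
    final String ownerId;
    Resource(String ownerId) { this.ownerId = ownerId; }
  }

  static final class Token {
    final String instanceId;
    final List<Resource> resources = new ArrayList<Resource>();
    Token(String instanceId) { this.instanceId = instanceId; }
  }

  static void assign(List<Token> tokens, List<Resource> prepared) {
    // Index prepared resources by owner id, then attach each token's resources with one lookup.
    ImmutableListMultimap<String, Resource> byOwner = Multimaps.index(prepared,
        new Function<Resource, String>() {
          @Override public String apply(Resource resource) { return resource.ownerId; }
        });
    for (Token token : tokens) {
      token.resources.addAll(byOwner.get(token.instanceId));
    }
  }

  public static void main(String[] args) {
    Token token = new Token("i-001");
    assign(Arrays.asList(token), Arrays.asList(new Resource("i-001"), new Resource("i-002")));
    System.out.println(token.resources.size()); // prints 1
  }
}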
@Override
public void allocate(Allocation allocInfo) throws Exception {
  Partition reqPartition = allocInfo.getPartition();
  String zoneName = reqPartition.getName();
  String vmTypeName = allocInfo.getVmType().getName();

  /* Validate min and max amount */
  final int minAmount = allocInfo.getMinCount();
  final int maxAmount = allocInfo.getMaxCount();
  if (minAmount > maxAmount) {
    throw new RuntimeException(
        "Maximum instance count must not be smaller than minimum instance count");
  }

  /* Retrieve our context and list of clusters associated with this zone */
  List<Cluster> authorizedClusters = this.doPrivilegedLookup(zoneName, vmTypeName);

  int remaining = maxAmount;
  int allocated = 0;
  int available;

  LOG.info("Found authorized clusters: "
      + Iterables.transform(authorizedClusters, HasName.GET_NAME));

  /* Do we have any VMs available across our clusters? */
  if ((available = checkAvailability(vmTypeName, authorizedClusters)) < minAmount) {
    throw new NotEnoughResourcesException(
        "Not enough resources (" + available + " in " + zoneName + " < " + minAmount
            + "): vm instances.");
  } else {
    for (Cluster cluster : authorizedClusters) {
      if (remaining <= 0) {
        break;
      } else {
        ResourceState state = cluster.getNodeState();
        Partition partition = cluster.getConfiguration().lookupPartition();

        /* Has a partition been set if the AZ was not specified? */
        if (allocInfo.getPartition().equals(Partition.DEFAULT)) {
          /*
           * Do we have enough slots in this partition to support our request? We should have at
           * least the minimum. The list is sorted by resource availability, from the cluster with
           * the most available to the cluster with the least. This is why we don't check against
           * maxAmount: it is a best effort at this point. If we select this partition and it
           * can't fit maxAmount then, given the sort order, the next partition will not fit
           * maxAmount either.
           */
          int zoneAvailable = checkZoneAvailability(vmTypeName, partition, authorizedClusters);
          if (zoneAvailable < minAmount) {
            continue;
          }

          /* Let's use this partition */
          allocInfo.setPartition(partition);
        } else if (!allocInfo.getPartition().equals(partition)) {
          /* We should only pick clusters that are part of the selected AZ */
          continue;
        }

        if (allocInfo.getBootSet().getMachine() instanceof BlockStorageImageInfo) {
          try {
            Topology.lookup(Storage.class, partition);
          } catch (Exception ex) {
            allocInfo.abort();
            allocInfo.setPartition(reqPartition);
            throw new NotEnoughResourcesException(
                "Not enough resources: Cannot run EBS instances in a partition without a storage controller: "
                    + ex.getMessage(), ex);
          }
        }

        try {
          int tryAmount = (remaining > state.getAvailability(vmTypeName).getAvailable())
              ? state.getAvailability(vmTypeName).getAvailable()
              : remaining;

          List<ResourceToken> tokens = this.requestResourceToken(allocInfo, tryAmount, maxAmount);
          remaining -= tokens.size();
          allocated += tokens.size();
        } catch (Exception t) {
          LOG.error(t);
          Logs.extreme().error(t, t);

          allocInfo.abort();
          allocInfo.setPartition(reqPartition);

          /* If we still have some allocation remaining AND no more resources are available */
          if (((available = checkZoneAvailability(vmTypeName, partition, authorizedClusters)) < remaining)
              && (remaining > 0)) {
            throw new NotEnoughResourcesException(
                "Not enough resources (" + available + " in " + zoneName + " < " + minAmount
                    + "): vm instances.", t);
          } else {
            throw new NotEnoughResourcesException(t.getMessage(), t);
          }
        }
      }
    }

    /* Were we able to meet our minimum requirements? */
    if ((allocated < minAmount) && (remaining > 0)) {
      allocInfo.abort();
      allocInfo.setPartition(reqPartition);

      if (reqPartition.equals(Partition.DEFAULT)) {
        throw new NotEnoughResourcesException(
            "Not enough resources available in any zone for " + minAmount + " vm instances.");
      } else {
        available = checkZoneAvailability(vmTypeName, reqPartition, authorizedClusters);
        throw new NotEnoughResourcesException(
            "Not enough resources (" + available + " in " + zoneName + " < " + minAmount
                + "): vm instances.");
      }
    }
  }
}
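/*
 * checkAvailability(...) and checkZoneAvailability(...) are called above but their bodies are not
 * part of this excerpt. The sketch below shows one plausible shape for them, assuming they simply
 * sum per-cluster availability for the requested vm type (and, for the zone variant, only over
 * clusters in the candidate partition). The ClusterView class is a simplified stand-in for
 * illustration, not the Cluster/ResourceState types used above.
 */
import java.util.Arrays;
import java.util.List;

class AvailabilitySketch {
  static final class ClusterView {
    final String partition;
    final int available; // slots available for the vm type in question
    ClusterView(String partition, int available) {
      this.partition = partition;
      this.available = available;
    }
  }

  /* Total availability across all authorized clusters. */
  static int checkAvailability(List<ClusterView> clusters) {
    int total = 0;
    for (ClusterView cluster : clusters) {
      total += cluster.available;
    }
    return total;
  }

  /* Availability restricted to clusters in the candidate partition. */
  static int checkZoneAvailability(String partition, List<ClusterView> clusters) {
    int total = 0;
    for (ClusterView cluster : clusters) {
      if (cluster.partition.equals(partition)) {
        total += cluster.available;
      }
    }
    return total;
  }

  public static void main(String[] args) {
    List<ClusterView> clusters = Arrays.asList(
        new ClusterView("one", 4), new ClusterView("two", 2));
    System.out.println(checkAvailability(clusters));             // 6
    System.out.println(checkZoneAvailability("one", clusters));  // 4
  }
}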
private List<ResourceToken> requestResourceToken(
    final Allocation allocInfo,
    final int tryAmount,
    final int maxAmount
) throws Exception {
  ServiceConfiguration config = Topology.lookup(ClusterController.class, allocInfo.getPartition());
  Cluster cluster = Clusters.lookup(config);

  /*
   * TODO:GRZE: this is the call path which needs to trigger gating. It shouldn't be handled
   * directly here, but instead be handled in {@link ResourceState#requestResourceAllocation}.
   */
  if (cluster.getGateLock().readLock().tryLock(60, TimeUnit.SECONDS)) {
    try {
      final ResourceState state = cluster.getNodeState();

      /*
       * NOTE: If the defined instance type has an ordering conflict with some other type then it
       * isn't safe to service TWO requests which use differing types during the same resource
       * refresh duty cycle. This determines whether an asynchronous allocation is safe to do for
       * the requested instance type, or whether a synchronous resource availability refresh is
       * needed first.
       */
      boolean unorderedType = VmTypes.isUnorderedType(allocInfo.getVmType());
      boolean forceResourceRefresh = state.hasUnorderedTokens() || unorderedType;

      /*
       * GRZE: if the vm type is not "nicely" ordered then we force a refresh of the actual
       * cluster state. Note: we already hold the cluster gating lock here, so this update is
       * mutually exclusive with respect to both resource allocations and cluster state updates.
       */
      if (forceResourceRefresh) {
        cluster.refreshResources();
      }

      final List<ResourceToken> tokens = state.requestResourceAllocation(allocInfo, tryAmount, maxAmount);
      final Iterator<ResourceToken> tokenIterator = tokens.iterator();
      try {
        final Supplier<ResourceToken> allocator = new Supplier<ResourceToken>() {
          @Override
          public ResourceToken get() {
            final ResourceToken ret = tokenIterator.next();
            allocInfo.getAllocationTokens().add(ret);
            return ret;
          }
        };

        RestrictedTypes.allocateUnitlessResources(tokens.size(), allocator);
      } finally {
        // release any tokens that were not allocated
        Iterators.all(tokenIterator, new Predicate<ResourceToken>() {
          @Override
          public boolean apply(final ResourceToken resourceToken) {
            state.releaseToken(resourceToken);
            return true;
          }
        });
      }

      return allocInfo.getAllocationTokens();
    } finally {
      cluster.getGateLock().readLock().unlock();
    }
  } else {
    throw new ServiceStateException(
        "Failed to allocate resources in the zone " + cluster.getPartition()
            + ", it is currently locked for maintenance.");
  }
}
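/*
 * The finally block above depends on the allocator Supplier and the cleanup Predicate sharing one
 * iterator: RestrictedTypes.allocateUnitlessResources(...) pulls as many tokens as quota allows,
 * and the Iterators.all(...) pass drains whatever is left and releases it. Below is a standalone
 * sketch of that hand-off pattern; takeUpTo(...) is a simplified stand-in for the quota-checked
 * allocation, and plain Strings stand in for ResourceToken.
 */
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.Iterators;

class LeftoverReleaseSketch {
  /* Stand-in for the quota-limited allocation: consume at most 'quota' of the requested items. */
  static <T> void takeUpTo(int quota, int requested, Supplier<T> allocator) {
    for (int i = 0; i < Math.min(quota, requested); i++) {
      allocator.get();
    }
  }

  public static void main(String[] args) {
    final List<String> granted = new ArrayList<String>();
    final List<String> released = new ArrayList<String>();
    List<String> tokens = Arrays.asList("t1", "t2", "t3");
    final Iterator<String> tokenIterator = tokens.iterator();
    try {
      takeUpTo(2, tokens.size(), new Supplier<String>() {
        @Override public String get() {
          String token = tokenIterator.next();
          granted.add(token);
          return token;
        }
      });
    } finally {
      // Anything the allocator did not pull from the shared iterator gets released here.
      Iterators.all(tokenIterator, new Predicate<String>() {
        @Override public boolean apply(String token) {
          released.add(token);
          return true;
        }
      });
    }
    System.out.println(granted);  // [t1, t2]
    System.out.println(released); // [t3]
  }
}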