Example 1
 private List<Cluster> doPrivilegedLookup(String partitionName, String vmTypeName)
     throws NotEnoughResourcesException {
   if (Partition.DEFAULT_NAME.equals(partitionName)) {
     Iterable<Cluster> authorizedClusters =
         Iterables.filter(
             Clusters.getInstance().listValues(),
             RestrictedTypes.filterPrivilegedWithoutOwner());
     Multimap<VmTypeAvailability, Cluster> sorted = TreeMultimap.create();
     for (Cluster c : authorizedClusters) {
       sorted.put(c.getNodeState().getAvailability(vmTypeName), c);
     }
     if (sorted.isEmpty()) {
       throw new NotEnoughResourcesException(
           "Not enough resources: no availability zone is available in which you have permissions to run instances.");
     } else {
       return Lists.newArrayList(sorted.values());
     }
   } else {
     ServiceConfiguration ccConfig =
         Topology.lookup(ClusterController.class, Partitions.lookupByName(partitionName));
     Cluster cluster = Clusters.lookup(ccConfig);
     if (cluster == null) {
       throw new NotEnoughResourcesException("Can't find cluster " + partitionName);
     }
     if (!RestrictedTypes.filterPrivilegedWithoutOwner().apply(cluster)) {
       throw new NotEnoughResourcesException("Not authorized to use cluster " + partitionName);
     }
     return Lists.newArrayList(cluster);
   }
 }
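
The zone ordering above hinges on Guava's TreeMultimap: each authorized cluster is filed under its VmTypeAvailability, and values() then walks the clusters in key order. Below is a minimal, self-contained sketch of that pattern, using hypothetical Integer/String stand-ins for VmTypeAvailability and Cluster; whether the most- or least-available zone ends up first in the real code depends on VmTypeAvailability's compareTo.

import com.google.common.collect.Lists;
import com.google.common.collect.TreeMultimap;
import java.util.List;

public class AvailabilitySortSketch {
  public static void main(String[] args) {
    // Keys are a Comparable "availability" measure, values are cluster names;
    // TreeMultimap keeps its keys (and the values under each key) sorted.
    TreeMultimap<Integer, String> sorted = TreeMultimap.create();
    sorted.put(2, "cluster-a");
    sorted.put(5, "cluster-b");
    sorted.put(2, "cluster-c");

    // values() iterates in key order, so the clusters come out grouped and
    // ordered by availability, which is the list doPrivilegedLookup returns.
    List<String> ordered = Lists.newArrayList(sorted.values());
    System.out.println(ordered); // [cluster-a, cluster-c, cluster-b]
  }
}
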
Example 2
    private List<ResourceToken> requestResourceToken(
        final Allocation allocInfo, final int tryAmount, final int maxAmount) throws Exception {
      ServiceConfiguration config =
          Topology.lookup(ClusterController.class, allocInfo.getPartition());
      Cluster cluster = Clusters.lookup(config);
      /**
       * TODO:GRZE: this is the call path which needs to trigger gating. It shouldn't be handled
       * directly here, but instead be handled in {@link ResourceState#requestResourceAllocation()}.
       */
      if (cluster.getGateLock().readLock().tryLock(60, TimeUnit.SECONDS)) {
        try {
          final ResourceState state = cluster.getNodeState();
          /**
           * NOTE: If the defined instance type has an ordering conflict with some other type, then
           * it isn't safe to service TWO requests which use differing types during the same
           * resource refresh duty cycle. This determines whether an asynchronous allocation is
           * safe for the requested instance type, or whether a synchronous resource availability
           * refresh is needed.
           */
          boolean unorderedType = VmTypes.isUnorderedType(allocInfo.getVmType());
          boolean forceResourceRefresh = state.hasUnorderedTokens() || unorderedType;
          /**
           * GRZE: if the vm type is not "nicely" ordered then we force a refresh of the actual
           * cluster state. Note: we already hold the cluster gating lock here, so this update will
           * be mutually exclusive with respect to both resource allocations and cluster state
           * updates.
           */
          if (forceResourceRefresh) {
            cluster.refreshResources();
          }
          final List<ResourceToken> tokens =
              state.requestResourceAllocation(allocInfo, tryAmount, maxAmount);
          final Iterator<ResourceToken> tokenIterator = tokens.iterator();
          try {
            final Supplier<ResourceToken> allocator =
                new Supplier<ResourceToken>() {
                  @Override
                  public ResourceToken get() {
                    final ResourceToken ret = tokenIterator.next();
                    allocInfo.getAllocationTokens().add(ret);
                    return ret;
                  }
                };

            RestrictedTypes.allocateUnitlessResources(tokens.size(), allocator);
          } finally {
            // release any tokens that were not allocated
            Iterators.all(
                tokenIterator,
                new Predicate<ResourceToken>() {
                  @Override
                  public boolean apply(final ResourceToken resourceToken) {
                    state.releaseToken(resourceToken);
                    return true;
                  }
                });
          }
          return allocInfo.getAllocationTokens();
        } finally {
          cluster.getGateLock().readLock().unlock();
        }
      } else {
        throw new ServiceStateException(
            "Failed to allocate resources in the zone "
                + cluster.getPartition()
                + ", it is currently locked for maintenance.");
      }
    }
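
The token hand-off in this example is worth calling out: the Supplier only records a token on allocInfo once RestrictedTypes.allocateUnitlessResources actually pulls it, and the finally block drains whatever is left on the iterator and releases it. Below is a minimal, self-contained sketch of that pattern, with hypothetical names, String tokens standing in for ResourceToken, and a simulated consumer failure.

import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.Iterators;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class TokenHandoffSketch {
  public static void main(String[] args) {
    List<String> tokens = Arrays.asList("t1", "t2", "t3");
    final Iterator<String> tokenIterator = tokens.iterator();
    final List<String> accepted = new ArrayList<String>();

    // The Supplier hands out one token per call and records it as accepted,
    // mirroring how allocInfo.getAllocationTokens() is populated above.
    final Supplier<String> allocator =
        new Supplier<String>() {
          @Override
          public String get() {
            final String next = tokenIterator.next();
            accepted.add(next);
            return next;
          }
        };
    try {
      // Simulate a consumer that stops early, e.g. after a failed allocation.
      allocator.get();
      allocator.get();
      throw new RuntimeException("simulated allocation failure");
    } catch (RuntimeException e) {
      // In the real code path the failure propagates; here we just swallow it.
    } finally {
      // Anything still on the iterator was never handed out, so release it.
      Iterators.all(
          tokenIterator,
          new Predicate<String>() {
            @Override
            public boolean apply(final String leftover) {
              System.out.println("releasing " + leftover); // releasing t3
              return true;
            }
          });
    }
    System.out.println("accepted " + accepted); // accepted [t1, t2]
  }
}
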
Example 3
  public DetachVolumeResponseType detach(DetachVolumeType request) throws EucalyptusCloudException {
    DetachVolumeResponseType reply = (DetachVolumeResponseType) request.getReply();
    Context ctx = Contexts.lookup();

    Volume vol;
    try {
      vol = Volumes.lookup(ctx.getUserFullName().asAccountFullName(), request.getVolumeId());
    } catch (Exception ex1) {
      throw new EucalyptusCloudException("Volume does not exist: " + request.getVolumeId());
    }
    if (!RestrictedTypes.filterPrivileged().apply(vol)) {
      throw new EucalyptusCloudException(
          "Not authorized to detach volume "
              + request.getVolumeId()
              + " by "
              + ctx.getUser().getName());
    }

    VmInstance vm = null;
    AttachedVolume volume = null;
    try {
      VmVolumeAttachment vmVolAttach = VmInstances.lookupVolumeAttachment(request.getVolumeId());
      volume = VmVolumeAttachment.asAttachedVolume(vmVolAttach.getVmInstance()).apply(vmVolAttach);
      vm = vmVolAttach.getVmInstance();
    } catch (NoSuchElementException ex) {
      // no such attachment
    }
    if (volume == null) {
      throw new EucalyptusCloudException("Volume is not attached: " + request.getVolumeId());
    }
    if (!RestrictedTypes.filterPrivileged().apply(vm)) {
      throw new EucalyptusCloudException(
          "Not authorized to detach volume from instance "
              + request.getInstanceId()
              + " by "
              + ctx.getUser().getName());
    }
    if (!vm.getInstanceId().equals(request.getInstanceId())
        && request.getInstanceId() != null
        && !request.getInstanceId().equals("")) {
      throw new EucalyptusCloudException(
          "Volume is not attached to instance: " + request.getInstanceId());
    }
    if (request.getDevice() != null
        && !request.getDevice().equals("")
        && !volume.getDevice().equals(request.getDevice())) {
      throw new EucalyptusCloudException(
          "Volume is not attached to device: " + request.getDevice());
    }

    Cluster cluster = null;
    ServiceConfiguration ccConfig = null;
    try {
      ccConfig = Topology.lookup(ClusterController.class, vm.lookupPartition());
      cluster = Clusters.lookup(ccConfig);
    } catch (NoSuchElementException e) {
      LOG.debug(e, e);
      throw new EucalyptusCloudException(
          "Cluster does not exist in partition: " + vm.getPartition());
    }
    ServiceConfiguration scVm;
    try {
      scVm = Topology.lookup(Storage.class, vm.lookupPartition());
    } catch (Exception ex) {
      LOG.error(ex, ex);
      throw new EucalyptusCloudException(
          "Failed to lookup SC for partition: " + vm.getPartition(), ex);
    }
    request.setVolumeId(volume.getVolumeId());
    request.setRemoteDevice(volume.getRemoteDevice());
    request.setDevice(volume.getDevice().replaceAll("unknown,requested:", ""));
    request.setInstanceId(vm.getInstanceId());
    VolumeDetachCallback ncDetach = new VolumeDetachCallback(request);
    try {
      AsyncRequests.sendSync(scVm, new DetachStorageVolumeType(volume.getVolumeId()));
    } catch (Exception e) {
      LOG.debug(e);
      Logs.extreme().debug(e, e);
      // GRZE: attach is idempotent, failure here is ok.
      // throw new EucalyptusCloudException( e.getMessage( ) );
    }
    AsyncRequests.newRequest(ncDetach).dispatch(cluster.getConfiguration());
    EventRecord.here(VolumeManager.class, EventClass.VOLUME, EventType.VOLUME_DETACH)
        .withDetails(vm.getOwner().toString(), volume.getVolumeId(), "instance", vm.getInstanceId())
        .withDetails("cluster", ccConfig.getFullName().toString())
        .info();
    volume.setStatus("detaching");
    reply.setDetachedVolume(volume);
    return reply;
  }
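
The instanceId and device checks in this example treat the request fields as optional constraints: a null or empty value is ignored, while a non-empty value must match what is recorded on the actual attachment. A small sketch of that check follows; the helper name is hypothetical and not part of the Eucalyptus code.

public class OptionalConstraintSketch {

  // Null or empty means the caller did not specify the field, so it constrains
  // nothing; otherwise it must match the value recorded on the attachment.
  static boolean matchesIfGiven(String requested, String actual) {
    if (requested == null || requested.isEmpty()) {
      return true;
    }
    return requested.equals(actual);
  }

  public static void main(String[] args) {
    System.out.println(matchesIfGiven(null, "i-12345678"));     // true: no constraint given
    System.out.println(matchesIfGiven("", "/dev/sdb"));         // true: no constraint given
    System.out.println(matchesIfGiven("/dev/sdc", "/dev/sdb")); // false: request rejected
  }
}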