private List<Cluster> doPrivilegedLookup(String partitionName, String vmTypeName)
     throws NotEnoughResourcesException {
   if (Partition.DEFAULT_NAME.equals(partitionName)) {
     Iterable<Cluster> authorizedClusters =
         Iterables.filter(
             Clusters.getInstance().listValues(),
             RestrictedTypes.filterPrivilegedWithoutOwner());
     Multimap<VmTypeAvailability, Cluster> sorted = TreeMultimap.create();
     for (Cluster c : authorizedClusters) {
       sorted.put(c.getNodeState().getAvailability(vmTypeName), c);
     }
     if (sorted.isEmpty()) {
       throw new NotEnoughResourcesException(
           "Not enough resources: no availability zone is available in which you have permissions to run instances.");
     } else {
       return Lists.newArrayList(sorted.values());
     }
   } else {
     ServiceConfiguration ccConfig =
         Topology.lookup(ClusterController.class, Partitions.lookupByName(partitionName));
     Cluster cluster = Clusters.lookup(ccConfig);
     if (cluster == null) {
       throw new NotEnoughResourcesException("Can't find cluster " + partitionName);
     }
     if (!RestrictedTypes.filterPrivilegedWithoutOwner().apply(cluster)) {
       throw new NotEnoughResourcesException("Not authorized to use cluster " + partitionName);
     }
     return Lists.newArrayList(cluster);
   }
 }
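  /*
   * Ordering note (illustrative, not part of the original flow): TreeMultimap uses the
   * natural ordering of its keys, and the allocate() comments below assume
   * VmTypeAvailability sorts clusters from most to least available. Under that
   * assumption:
   *
   *   Multimap<VmTypeAvailability, Cluster> sorted = TreeMultimap.create();
   *   sorted.put(clusterA.getNodeState().getAvailability("m1.small"), clusterA); // 8 free
   *   sorted.put(clusterB.getNodeState().getAvailability("m1.small"), clusterB); // 2 free
   *   // sorted.values() iterates [clusterA, clusterB], most available first
   */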
 private int checkAvailability(String vmTypeName, List<Cluster> authorizedClusters)
     throws NotEnoughResourcesException {
   int available = 0;
   for (Cluster authorizedCluster : authorizedClusters) {
     VmTypeAvailability vmAvailability =
         authorizedCluster.getNodeState().getAvailability(vmTypeName);
     available += vmAvailability.getAvailable();
     LOG.info(
         "Availability: "
             + authorizedCluster.getName()
             + " -> "
             + vmAvailability.getAvailable());
   }
   return available;
 }
 public static void handleOrphan(Cluster cluster, ClusterAddressInfo address) {
    // Bump this address's orphan counter; the stored value is the count for the
    // *next* sighting, while orphanCount below holds the current one. Note that
    // putIfAbsent followed by put is not atomic (see the sketch after this method).
    Integer orphanCount = 1;
    orphanCount = orphans.putIfAbsent(address, orphanCount);
    orphanCount = (orphanCount == null) ? 1 : orphanCount;
    orphans.put(address, orphanCount + 1);
   EventRecord.caller(
           ClusterState.class,
           EventType.ADDRESS_STATE,
           "Updated orphaned public ip address: "
               + LogUtil.dumpObject(address)
               + " count="
               + orphanCount)
       .debug();
   if (orphanCount > AddressingConfiguration.getInstance().getMaxKillOrphans()) {
     EventRecord.caller(
             ClusterState.class,
             EventType.ADDRESS_STATE,
             "Unassigning orphaned public ip address: "
                 + LogUtil.dumpObject(address)
                 + " count="
                 + orphanCount)
         .warn();
     try {
       final Address addr = Addresses.getInstance().lookup(address.getAddress());
       if (addr.isPending()) {
         try {
           addr.clearPending();
          } catch (Exception ex) {
            // best effort: clearing the pending state may legitimately fail here
          }
       }
       try {
         if (addr.isAssigned() && "0.0.0.0".equals(address.getInstanceIp())) {
           addr.unassign().clearPending();
           if (addr.isSystemOwned()) {
             addr.release();
           }
         } else if (addr.isAssigned() && !"0.0.0.0".equals(address.getInstanceIp())) {
           AsyncRequests.newRequest(new UnassignAddressCallback(address))
               .sendSync(cluster.getConfiguration());
           if (addr.isSystemOwned()) {
             addr.release();
           }
         } else if (!addr.isAssigned() && addr.isAllocated() && addr.isSystemOwned()) {
           addr.release();
         }
       } catch (ExecutionException ex) {
         if (!addr.isAssigned() && addr.isAllocated() && addr.isSystemOwned()) {
           addr.release();
         }
       }
     } catch (InterruptedException ex) {
       Exceptions.maybeInterrupted(ex);
      } catch (NoSuchElementException ex) {
        // the address is already gone from the registry; nothing left to clean up
     } finally {
       orphans.remove(address);
     }
   }
 }
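  /*
   * The putIfAbsent/put pair in handleOrphan above is not atomic, so two threads can
   * interleave and lose an increment. A minimal sketch of an atomic variant, assuming
   * orphans is a java.util.concurrent.ConcurrentMap<ClusterAddressInfo, Integer>;
   * incrementOrphanCount is a hypothetical helper, not part of the original code.
   */
  private static int incrementOrphanCount(
      final ConcurrentMap<ClusterAddressInfo, Integer> orphans,
      final ClusterAddressInfo address) {
    for (;;) {
      final Integer current = orphans.putIfAbsent(address, 1);
      if (current == null) {
        return 1; // first sighting of this orphaned address
      }
      if (orphans.replace(address, current, current + 1)) {
        return current + 1; // compare-and-swap style increment succeeded
      }
      // lost a race with a concurrent updater; retry
    }
  }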
    private int checkZoneAvailability(
        String vmTypeName, Partition partition, List<Cluster> authorizedClusters)
        throws NotEnoughResourcesException {
      int available = 0;
      for (Cluster authorizedCluster : authorizedClusters) {
        if (!authorizedCluster.getConfiguration().lookupPartition().equals(partition)) continue;

        VmTypeAvailability vmAvailability =
            authorizedCluster.getNodeState().getAvailability(vmTypeName);
        available += vmAvailability.getAvailable();
        LOG.info(
            "Availability: "
                + authorizedCluster.getName()
                + " -> "
                + vmAvailability.getAvailable());
      }
      return available;
    }
    @Override
    public void allocate(Allocation allocInfo) throws Exception {
      Partition reqPartition = allocInfo.getPartition();
      String zoneName = reqPartition.getName();
      String vmTypeName = allocInfo.getVmType().getName();

      /* Validate min and max amount */
      final int minAmount = allocInfo.getMinCount();
      final int maxAmount = allocInfo.getMaxCount();
       if (minAmount > maxAmount) {
         throw new RuntimeException(
             "Maximum instance count must not be smaller than minimum instance count");
       }

       /* Retrieve the list of clusters we are authorized to use in this zone */
      List<Cluster> authorizedClusters = this.doPrivilegedLookup(zoneName, vmTypeName);

      int remaining = maxAmount;
      int allocated = 0;
      int available;

      LOG.info(
          "Found authorized clusters: "
              + Iterables.transform(authorizedClusters, HasName.GET_NAME));

       /* Do we have any VMs available across our clusters? */
      if ((available = checkAvailability(vmTypeName, authorizedClusters)) < minAmount) {
        throw new NotEnoughResourcesException(
            "Not enough resources ("
                + available
                + " in "
                + zoneName
                + " < "
                + minAmount
                + "): vm instances.");
      } else {
        for (Cluster cluster : authorizedClusters) {
          if (remaining <= 0) {
            break;
          } else {
            ResourceState state = cluster.getNodeState();
            Partition partition = cluster.getConfiguration().lookupPartition();

             /* No AZ was specified, so pick a suitable partition for this request. */
            if (allocInfo.getPartition().equals(Partition.DEFAULT)) {
              /*
               * Ok, do we have enough slots in this partition to support our request? We should have at least
               * the minimum. The list is sorted in order of resource availability from the cluster with the most
               * available to the cluster with the least amount available. This is why we don't check against the
               * maxAmount value since it's best effort at this point. If we select the partition here and we
               * can't fit maxAmount, based on the sorting order, the next partition will not fit maxAmount anyway.
               */
              int zoneAvailable = checkZoneAvailability(vmTypeName, partition, authorizedClusters);
              if (zoneAvailable < minAmount) continue;

               /* Let's use this partition */
              allocInfo.setPartition(partition);
            } else if (!allocInfo.getPartition().equals(partition)) {
              /* We should only pick clusters that are part of the selected AZ */
              continue;
            }

            if (allocInfo.getBootSet().getMachine() instanceof BlockStorageImageInfo) {
              try {
                Topology.lookup(Storage.class, partition);
              } catch (Exception ex) {
                allocInfo.abort();
                allocInfo.setPartition(reqPartition);
                throw new NotEnoughResourcesException(
                    "Not enough resources: Cannot run EBS instances in partition w/o a storage controller: "
                        + ex.getMessage(),
                    ex);
              }
            }

            try {
              int tryAmount =
                  Math.min(remaining, state.getAvailability(vmTypeName).getAvailable());

              List<ResourceToken> tokens =
                  this.requestResourceToken(allocInfo, tryAmount, maxAmount);
              remaining -= tokens.size();
              allocated += tokens.size();
            } catch (Exception t) {
              LOG.error(t);
              Logs.extreme().error(t, t);

              allocInfo.abort();
              allocInfo.setPartition(reqPartition);

              /* if we still have some allocation remaining AND no more resources are available */
              if (((available = checkZoneAvailability(vmTypeName, partition, authorizedClusters))
                      < remaining)
                  && (remaining > 0)) {
                throw new NotEnoughResourcesException(
                    "Not enough resources ("
                        + available
                        + " in "
                        + zoneName
                        + " < "
                        + remaining
                        + "): vm instances.",
                    t);
              } else {
                throw new NotEnoughResourcesException(t.getMessage(), t);
              }
            }
          }
        }

        /* Were we able to meet our minimum requirements? */
        if ((allocated < minAmount) && (remaining > 0)) {
          allocInfo.abort();
          allocInfo.setPartition(reqPartition);

          if (reqPartition.equals(Partition.DEFAULT)) {
            throw new NotEnoughResourcesException(
                "Not enough resources available in all zone for " + minAmount + "): vm instances.");
          } else {
            available = checkZoneAvailability(vmTypeName, reqPartition, authorizedClusters);
            throw new NotEnoughResourcesException(
                "Not enough resources ("
                    + available
                    + " in "
                    + zoneName
                    + " < "
                    + minAmount
                    + "): vm instances.");
          }
        }
      }
    }
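     /*
      * Net effect of allocate(...): clusters are visited in most-available-first order;
      * each pass requests at most min(remaining, cluster availability) tokens, so
      * maxAmount is satisfied on a best-effort basis while minAmount is enforced both
      * before the loop (checkAvailability) and after it (allocated < minAmount).
      */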
    private List<ResourceToken> requestResourceToken(
        final Allocation allocInfo, final int tryAmount, final int maxAmount) throws Exception {
      ServiceConfiguration config =
          Topology.lookup(ClusterController.class, allocInfo.getPartition());
      Cluster cluster = Clusters.lookup(config);
      /**
       * TODO:GRZE: this is the call path which needs to trigger gating. It shouldn't be handled
       * directly here, but instead in {@link ResourceState#requestResourceAllocation}.
       */
      if (cluster.getGateLock().readLock().tryLock(60, TimeUnit.SECONDS)) {
        try {
          final ResourceState state = cluster.getNodeState();
          /**
           * NOTE: If the defined instance type has an ordering conflict w/ some other type then it
           * isn't safe to service TWO requests which use differing types during the same resource
           * refresh duty cycle. This determines whether or not an asynchronous allocation is safe
            * to do for the requested instance type or whether a synchronous resource availability
           * refresh is needed.
           */
          boolean unorderedType = VmTypes.isUnorderedType(allocInfo.getVmType());
          boolean forceResourceRefresh = state.hasUnorderedTokens() || unorderedType;
          /**
           * GRZE: if the vm type is not "nicely" ordered then we force a refresh of the actual
           * cluster state. Note: we already hold the cluster gating lock here so this update will
           * be mutual exclusive wrt both resource allocations and cluster state updates.
           */
          if (forceResourceRefresh) {
            cluster.refreshResources();
          }
          final List<ResourceToken> tokens =
              state.requestResourceAllocation(allocInfo, tryAmount, maxAmount);
          final Iterator<ResourceToken> tokenIterator = tokens.iterator();
          try {
            final Supplier<ResourceToken> allocator =
                new Supplier<ResourceToken>() {
                  @Override
                  public ResourceToken get() {
                    final ResourceToken ret = tokenIterator.next();
                    allocInfo.getAllocationTokens().add(ret);
                    return ret;
                  }
                };

            RestrictedTypes.allocateUnitlessResources(tokens.size(), allocator);
          } finally {
            // release any tokens that were not allocated
            Iterators.all(
                tokenIterator,
                new Predicate<ResourceToken>() {
                  @Override
                  public boolean apply(final ResourceToken resourceToken) {
                    state.releaseToken(resourceToken);
                    return true;
                  }
                });
          }
          return allocInfo.getAllocationTokens();
        } finally {
          cluster.getGateLock().readLock().unlock();
        }
      } else {
        throw new ServiceStateException(
            "Failed to allocate resources in the zone "
                + cluster.getPartition()
                + ", it is currently locked for maintenance.");
      }
    }
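     /*
      * Cleanup note: the Iterators.all(...) call in the finally block above is used only
      * to drain the iterator, releasing every token that allocateUnitlessResources did
      * not consume. A plain loop expresses the same cleanup:
      *
      *   while (tokenIterator.hasNext()) {
      *     state.releaseToken(tokenIterator.next());
      *   }
      */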
 protected static Address lookupOrCreate(
     final Cluster cluster, final ClusterAddressInfo addrInfo) {
   Address addr = null;
   VmInstance vm = null;
   try {
     addr = Addresses.getInstance().lookupDisabled(addrInfo.getAddress());
     LOG.trace("Found address in the inactive set cache: " + addr);
   } catch (final NoSuchElementException e1) {
     try {
       addr = Addresses.getInstance().lookup(addrInfo.getAddress());
       LOG.trace("Found address in the active set cache: " + addr);
      } catch (final NoSuchElementException e) {
        // not present in either cache; a new Address may be created below
      }
   }
   if (addrInfo.hasMapping()) {
     vm =
         Helper.maybeFindVm(
             addr != null ? addr.getInstanceId() : null,
             addrInfo.getAddress(),
             addrInfo.getInstanceIp());
      if (addr != null && !addr.isPending() && vm != null && VmStateSet.DONE.apply(vm)) {
        // the mapped instance has already terminated, so the mapping is stale: orphan.
        // This check must precede the generic (addr != null && vm != null) case below,
        // which would otherwise make it unreachable.
        handleOrphan(cluster, addrInfo);
      } else if ((addr != null) && (vm != null)) {
        Helper.ensureAllocated(addr, vm);
        clearOrphan(addrInfo);
      } else if ((addr != null && addr.isAssigned() && !addr.isPending()) && (vm == null)) {
        handleOrphan(cluster, addrInfo);
     } else if ((addr == null) && (vm != null)) {
       addr =
           new Address(
               Principals.systemFullName(),
               addrInfo.getAddress(),
               vm.getInstanceUuid(),
               vm.getInstanceId(),
               vm.getPrivateAddress());
       clearOrphan(addrInfo);
     } else if ((addr == null) && (vm == null)) {
       addr = new Address(addrInfo.getAddress(), cluster.getPartition());
       handleOrphan(cluster, addrInfo);
     }
   } else {
     if ((addr != null) && addr.isAssigned() && !addr.isPending()) {
       handleOrphan(cluster, addrInfo);
     } else if ((addr != null)
         && !addr.isAssigned()
         && !addr.isPending()
         && addr.isSystemOwned()) {
       try {
         addr.release();
       } catch (final Exception ex) {
         LOG.error(ex);
       }
     } else if ((addr != null) && Address.Transition.system.equals(addr.getTransition())) {
       handleOrphan(cluster, addrInfo);
     } else if (addr == null) {
       addr = new Address(addrInfo.getAddress(), cluster.getPartition());
       Helper.clearVmState(addrInfo);
     }
   }
   return addr;
 }
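  /*
   * Decision summary for lookupOrCreate's mapped case (is the Address cached? was a
   * backing VmInstance found?):
   *
   *   addr && vm terminated -> stale mapping: count it as an orphan
   *   addr && vm live       -> reconcile the mapping, clear any orphan count
   *   addr && !vm           -> address claims an instance that no longer exists: orphan
   *   !addr && vm           -> rebuild a system-owned Address from the instance state
   *   !addr && !vm          -> cluster reported a mapping with no backing state: orphan
   */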
  public DetachVolumeResponseType detach(DetachVolumeType request) throws EucalyptusCloudException {
    DetachVolumeResponseType reply = (DetachVolumeResponseType) request.getReply();
    Context ctx = Contexts.lookup();

    Volume vol;
    try {
      vol = Volumes.lookup(ctx.getUserFullName().asAccountFullName(), request.getVolumeId());
    } catch (Exception ex1) {
      throw new EucalyptusCloudException("Volume does not exist: " + request.getVolumeId());
    }
    if (!RestrictedTypes.filterPrivileged().apply(vol)) {
      throw new EucalyptusCloudException(
          "Not authorized to detach volume "
              + request.getVolumeId()
              + " by "
              + ctx.getUser().getName());
    }

    VmInstance vm = null;
    AttachedVolume volume = null;
    try {
      VmVolumeAttachment vmVolAttach = VmInstances.lookupVolumeAttachment(request.getVolumeId());
      volume = VmVolumeAttachment.asAttachedVolume(vmVolAttach.getVmInstance()).apply(vmVolAttach);
      vm = vmVolAttach.getVmInstance();
    } catch (NoSuchElementException ex) {
      /* no such attachment */
    }
    if (volume == null) {
      throw new EucalyptusCloudException("Volume is not attached: " + request.getVolumeId());
    }
    if (!RestrictedTypes.filterPrivileged().apply(vm)) {
      throw new EucalyptusCloudException(
          "Not authorized to detach volume from instance "
              + request.getInstanceId()
              + " by "
              + ctx.getUser().getName());
    }
    if (request.getInstanceId() != null
        && !request.getInstanceId().isEmpty()
        && !vm.getInstanceId().equals(request.getInstanceId())) {
      throw new EucalyptusCloudException(
          "Volume is not attached to instance: " + request.getInstanceId());
    }
    if (request.getDevice() != null
        && !request.getDevice().equals("")
        && !volume.getDevice().equals(request.getDevice())) {
      throw new EucalyptusCloudException(
          "Volume is not attached to device: " + request.getDevice());
    }

    Cluster cluster = null;
    ServiceConfiguration ccConfig = null;
    try {
      ccConfig = Topology.lookup(ClusterController.class, vm.lookupPartition());
      cluster = Clusters.lookup(ccConfig);
    } catch (NoSuchElementException e) {
      LOG.debug(e, e);
      throw new EucalyptusCloudException(
          "Cluster does not exist in partition: " + vm.getPartition());
    }
    ServiceConfiguration scVm;
    try {
      scVm = Topology.lookup(Storage.class, vm.lookupPartition());
    } catch (Exception ex) {
      LOG.error(ex, ex);
      throw new EucalyptusCloudException(
          "Failed to lookup SC for partition: " + vm.getPartition(), ex);
    }
    request.setVolumeId(volume.getVolumeId());
    request.setRemoteDevice(volume.getRemoteDevice());
    request.setDevice(volume.getDevice().replaceAll("unknown,requested:", ""));
    request.setInstanceId(vm.getInstanceId());
    VolumeDetachCallback ncDetach = new VolumeDetachCallback(request);
    try {
      AsyncRequests.sendSync(scVm, new DetachStorageVolumeType(volume.getVolumeId()));
    } catch (Exception e) {
      LOG.debug(e);
      Logs.extreme().debug(e, e);
      // GRZE: detach is idempotent at the storage controller, so a failure here is ok.
    }
    AsyncRequests.newRequest(ncDetach).dispatch(cluster.getConfiguration());
    EventRecord.here(VolumeManager.class, EventClass.VOLUME, EventType.VOLUME_DETACH)
        .withDetails(vm.getOwner().toString(), volume.getVolumeId(), "instance", vm.getInstanceId())
        .withDetails("cluster", ccConfig.getFullName().toString())
        .info();
    volume.setStatus("detaching");
    reply.setDetachedVolume(volume);
    return reply;
  }
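  /*
   * Flow summary for detach(...) above: after ownership and attachment checks, the
   * detach is a two-step operation: a synchronous DetachStorageVolumeType request to
   * the storage controller (failures tolerated, since detach is treated as idempotent),
   * followed by an asynchronous VolumeDetachCallback dispatched to the cluster
   * controller, after which the volume is reported to the caller as "detaching".
   */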
  @Override
  public void fireEvent(final ClockTick event) {
    if (Bootstrap.isFinished() && Hosts.isCoordinator()) {

      final List<ResourceAvailabilityEvent> resourceAvailabilityEvents = Lists.newArrayList();
      final Map<ResourceAvailabilityEvent.ResourceType, AvailabilityAccumulator> availabilities =
          Maps.newEnumMap(ResourceAvailabilityEvent.ResourceType.class);
      final Iterable<VmType> vmTypes = Lists.newArrayList(VmTypes.list());
      // Create the per-resource-type accumulators once, before iterating clusters;
      // re-creating them inside the loop would discard every cluster's roll-up
      // except the last one's.
      availabilities.put(Core, new AvailabilityAccumulator(VmType.SizeProperties.Cpu));
      availabilities.put(Disk, new AvailabilityAccumulator(VmType.SizeProperties.Disk));
      availabilities.put(Memory, new AvailabilityAccumulator(VmType.SizeProperties.Memory));
      for (final Cluster cluster : Clusters.getInstance().listValues()) {

        for (final VmType vmType : vmTypes) {
          final ResourceState.VmTypeAvailability va =
              cluster.getNodeState().getAvailability(vmType.getName());

          resourceAvailabilityEvents.add(
              new ResourceAvailabilityEvent(
                  Instance,
                  new ResourceAvailabilityEvent.Availability(
                      va.getMax(),
                      va.getAvailable(),
                      Lists.<ResourceAvailabilityEvent.Tag>newArrayList(
                          new ResourceAvailabilityEvent.Dimension(
                              "availabilityZone", cluster.getPartition()),
                          new ResourceAvailabilityEvent.Dimension("cluster", cluster.getName()),
                          new ResourceAvailabilityEvent.Type("vm-type", vmType.getName())))));

          for (final AvailabilityAccumulator availability : availabilities.values()) {
            availability.total =
                Math.max(
                    availability.total, va.getMax() * availability.valueExtractor.apply(vmType));
            availability.available =
                Math.max(
                    availability.available,
                    va.getAvailable() * availability.valueExtractor.apply(vmType));
          }
        }

        for (final AvailabilityAccumulator availability : availabilities.values()) {
          availability.rollUp(
              Lists.<ResourceAvailabilityEvent.Tag>newArrayList(
                  new ResourceAvailabilityEvent.Dimension(
                      "availabilityZone", cluster.getPartition()),
                  new ResourceAvailabilityEvent.Dimension("cluster", cluster.getName())));
        }
      }

      for (final Map.Entry<ResourceAvailabilityEvent.ResourceType, AvailabilityAccumulator> entry :
          availabilities.entrySet()) {
        resourceAvailabilityEvents.add(
            new ResourceAvailabilityEvent(entry.getKey(), entry.getValue().availabilities));
      }

      for (final ResourceAvailabilityEvent resourceAvailabilityEvent : resourceAvailabilityEvents) {
        try {
          ListenerRegistry.getInstance().fireEvent(resourceAvailabilityEvent);
        } catch (Exception ex) {
          logger.error(ex, ex);
        }
      }
    }
  }
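  /*
   * Assumption behind the accumulator handling in fireEvent: rollUp(tags) is expected
   * to snapshot the current totals as a per-cluster Availability and then reset the
   * counters for the next cluster. A minimal sketch of that assumed contract (the real
   * AvailabilityAccumulator is defined elsewhere in this module):
   *
   *   void rollUp(Iterable<ResourceAvailabilityEvent.Tag> tags) {
   *     availabilities.add(new ResourceAvailabilityEvent.Availability(total, available, tags));
   *     total = 0;
   *     available = 0;
   *   }
   */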