Example #1
 private List<Cluster> doPrivilegedLookup(String partitionName, String vmTypeName)
     throws NotEnoughResourcesException {
   if (Partition.DEFAULT_NAME.equals(partitionName)) {
     Iterable<Cluster> authorizedClusters =
         Iterables.filter(
             Clusters.getInstance().listValues(),
             RestrictedTypes.filterPrivilegedWithoutOwner());
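      // rank the authorized clusters by the reported availability of the requested VM type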
     Multimap<VmTypeAvailability, Cluster> sorted = TreeMultimap.create();
     for (Cluster c : authorizedClusters) {
       sorted.put(c.getNodeState().getAvailability(vmTypeName), c);
     }
     if (sorted.isEmpty()) {
       throw new NotEnoughResourcesException(
           "Not enough resources: no availability zone is available in which you have permissions to run instances.");
     } else {
       return Lists.newArrayList(sorted.values());
     }
   } else {
     ServiceConfiguration ccConfig =
         Topology.lookup(ClusterController.class, Partitions.lookupByName(partitionName));
     Cluster cluster = Clusters.lookup(ccConfig);
     if (cluster == null) {
       throw new NotEnoughResourcesException("Can't find cluster " + partitionName);
     }
     if (!RestrictedTypes.filterPrivilegedWithoutOwner().apply(cluster)) {
       throw new NotEnoughResourcesException("Not authorized to use cluster " + partitionName);
     }
     return Lists.newArrayList(cluster);
   }
 }
Example #2
 public static void dispatchAssignAddress(Address address, VmInstance vm) {
   try {
     AssignAddressCallback callback = new AssignAddressCallback(address, vm);
     Clusters.dispatchClusterEvent(address.getCluster(), callback);
   } catch (Throwable e) {
     LOG.debug(e, e);
   }
 }
Example #3
 public static void dispatchUnassignAddress(Address address) {
   if (VmInstance.DEFAULT_IP.equals(address.getInstanceAddress())) {
     return;
   }
   try {
     UnassignAddressCallback callback = new UnassignAddressCallback(address);
     Clusters.dispatchClusterEvent(address.getCluster(), callback);
   } catch (Throwable e) {
     LOG.debug(e, e);
   }
 }
Example #4
 private static void handleOrphan(String cluster, Address address) {
    // putIfAbsent returns the previous count (null the first time this address is seen);
    // the read-then-put sequence below is not atomic, so concurrent callers can race
    Integer orphanCount = 1;
    orphanCount = orphans.putIfAbsent(address.getName(), orphanCount);
    orphanCount = (orphanCount == null) ? 1 : orphanCount;
    orphans.put(address.getName(), orphanCount + 1);
   LOG.warn("Found orphaned public ip address: " + address + " count=" + orphanCount);
   if (orphanCount > 10) {
     orphans.remove(address.getName());
     Clusters.dispatchClusterEvent(cluster, new UnassignAddressCallback(address));
   }
 }
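The counting in Example #4 is easy to misread: the value that is logged and compared against the threshold is the count before the increment, and the separate putIfAbsent/put calls are not atomic. A minimal sketch of the same bookkeeping using ConcurrentMap.merge (JDK 8+) is shown below; it assumes the same orphans map, LOG, Clusters, and UnassignAddressCallback as the snippet above and is an illustration, not the project's actual code.

 private static void handleOrphan(String cluster, Address address) {
   // merge() increments the per-address count atomically and returns the new value
   final int orphanCount = orphans.merge(address.getName(), 1, Integer::sum);
   LOG.warn("Found orphaned public ip address: " + address + " count=" + orphanCount);
   if (orphanCount > 10) {
     // stop tracking the address and ask the cluster to unassign it
     orphans.remove(address.getName());
     Clusters.dispatchClusterEvent(cluster, new UnassignAddressCallback(address));
   }
 }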
Example #5
 private static ConcurrentMap<String, AtomicBoolean> getClusterAddressMap() {
   synchronized (AddressUtil.class) {
     if (clusterInit == null) {
       clusterInit = new ConcurrentHashMap<String, AtomicBoolean>();
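        // seed a flag for every cluster known at first use; clusters registered
        // later are not added to this map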
       for (String cluster : Clusters.getInstance().listKeys()) {
         clusterInit.put(cluster, new AtomicBoolean(false));
       }
     }
   }
   return clusterInit;
 }
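Because the map in Example #5 is populated only once, a cluster registered after the first call never gets an init flag. A minimal alternative sketch that creates the flags on demand is shown below; getClusterInitFlag is a hypothetical helper for illustration, not part of AddressUtil.

 private static final ConcurrentMap<String, AtomicBoolean> clusterInit =
     new ConcurrentHashMap<String, AtomicBoolean>();

 private static AtomicBoolean getClusterInitFlag(String cluster) {
   // computeIfAbsent creates the flag atomically the first time a cluster is seen,
   // so no class-level synchronization is needed
   return clusterInit.computeIfAbsent(cluster, c -> new AtomicBoolean(false));
 }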
Example #6
 @Override
 public boolean apply(Allocation allocInfo) throws MetadataException {
   RunInstancesType request = allocInfo.getRequest();
   String zoneName = request.getAvailabilityZone();
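    // resolve the requested availability zone to a partition, failing fast when no
    // cluster controllers are currently enabled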
   if (Clusters.getInstance().listValues().isEmpty()) {
     LOG.debug("enabled values: " + Joiner.on("\n").join(Clusters.getInstance().listValues()));
     LOG.debug("disabled values: " + Joiner.on("\n").join(Clusters.getInstance().listValues()));
     throw new VerificationException(
         "Not enough resources: no cluster controller is currently available to run instances.");
   } else if (Partitions.exists(zoneName)) {
     Partition partition = Partitions.lookupByName(zoneName);
     allocInfo.setPartition(partition);
   } else if (Partition.DEFAULT_NAME.equals(zoneName)) {
     Partition partition = Partition.DEFAULT;
     allocInfo.setPartition(partition);
   } else {
     throw new VerificationException(
         "Not enough resources: no cluster controller is currently available to run instances.");
   }
   return true;
 }
Example #7
  @SuppressWarnings("UnnecessaryQualifiedReference")
  static void broadcastNetworkInfo() {
    try (final SemaphoreResource semaphore = SemaphoreResource.acquire(activeBroadcastSemaphore)) {
      // populate with info directly from configuration
      final Optional<NetworkConfiguration> networkConfiguration =
          NetworkConfigurations.getNetworkConfiguration();
      final List<com.eucalyptus.cluster.Cluster> clusters = Clusters.getInstance().listValues();

      final NetworkInfoSource source = cacheSource();
      final Set<String> dirtyPublicAddresses = PublicAddresses.dirtySnapshot();
      final Set<RouteKey> invalidStateRoutes = Sets.newHashSetWithExpectedSize(50);
      final int sourceFingerprint =
          fingerprint(source, clusters, dirtyPublicAddresses, NetworkGroups.NETWORK_CONFIGURATION);
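      // the fingerprint of the broadcast inputs is encoded as the version stamp below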
      final NetworkInfo info =
          NetworkInfoBroadcasts.buildNetworkConfiguration(
              networkConfiguration,
              source,
              Suppliers.ofInstance(clusters),
              new Supplier<String>() {
                @Override
                public String get() {
                  return Topology.lookup(Eucalyptus.class).getInetAddress().getHostAddress();
                }
              },
              new Function<List<String>, List<String>>() {
                @Nullable
                @Override
                public List<String> apply(final List<String> defaultServers) {
                  return NetworkConfigurations.loadSystemNameservers(defaultServers);
                }
              },
              dirtyPublicAddresses,
              invalidStateRoutes);
      info.setVersion(
          BaseEncoding.base16().lowerCase().encode(Ints.toByteArray(sourceFingerprint)));

      if (!invalidStateRoutes.isEmpty()) {
        vpcRouteStateInvalidator.accept(invalidStateRoutes);
      }

      Applicators.apply(clusters, info);

    } catch (ApplicatorException e) {
      logger.error("Error during network broadcast", e);
    }
  }
Example #8
    private List<ResourceToken> requestResourceToken(
        final Allocation allocInfo, final int tryAmount, final int maxAmount) throws Exception {
      ServiceConfiguration config =
          Topology.lookup(ClusterController.class, allocInfo.getPartition());
      Cluster cluster = Clusters.lookup(config);
      /**
       * TODO:GRZE: this is the call path which needs to trigger gating. It shouldn't be handled
       * directly here, but instead be handled in {@link ResourceState#requestResourceAllocation()}.
       */
      if (cluster.getGateLock().readLock().tryLock(60, TimeUnit.SECONDS)) {
        try {
          final ResourceState state = cluster.getNodeState();
          /**
           * NOTE: If the defined instance type has an ordering conflict w/ some other type then it
           * isn't safe to service TWO requests which use differing types during the same resource
           * refresh duty cycle. This determines whether or not an asynchronous allocation is safe
           * to do for the request instance type or whether a synchronous resource availability
           * refresh is needed.
           */
          boolean unorderedType = VmTypes.isUnorderedType(allocInfo.getVmType());
          boolean forceResourceRefresh = state.hasUnorderedTokens() || unorderedType;
          /**
           * GRZE: if the vm type is not "nicely" ordered then we force a refresh of the actual
           * cluster state. Note: we already hold the cluster gating lock here so this update will
           * be mutual exclusive wrt both resource allocations and cluster state updates.
           */
          if (forceResourceRefresh) {
            cluster.refreshResources();
          }
          final List<ResourceToken> tokens =
              state.requestResourceAllocation(allocInfo, tryAmount, maxAmount);
          final Iterator<ResourceToken> tokenIterator = tokens.iterator();
          try {
            final Supplier<ResourceToken> allocator =
                new Supplier<ResourceToken>() {
                  @Override
                  public ResourceToken get() {
                    final ResourceToken ret = tokenIterator.next();
                    allocInfo.getAllocationTokens().add(ret);
                    return ret;
                  }
                };

            RestrictedTypes.allocateUnitlessResources(tokens.size(), allocator);
          } finally {
            // release any tokens that were not allocated
            Iterators.all(
                tokenIterator,
                new Predicate<ResourceToken>() {
                  @Override
                  public boolean apply(final ResourceToken resourceToken) {
                    state.releaseToken(resourceToken);
                    return true;
                  }
                });
          }
          return allocInfo.getAllocationTokens();
        } finally {
          cluster.getGateLock().readLock().unlock();
        }
      } else {
        throw new ServiceStateException(
            "Failed to allocate resources in the zone "
                + cluster.getPartition()
                + ", it is currently locked for maintenance.");
      }
    }
Example #9
  public DetachVolumeResponseType detach(DetachVolumeType request) throws EucalyptusCloudException {
    DetachVolumeResponseType reply = (DetachVolumeResponseType) request.getReply();
    Context ctx = Contexts.lookup();

    Volume vol;
    try {
      vol = Volumes.lookup(ctx.getUserFullName().asAccountFullName(), request.getVolumeId());
    } catch (Exception ex1) {
      throw new EucalyptusCloudException("Volume does not exist: " + request.getVolumeId());
    }
    if (!RestrictedTypes.filterPrivileged().apply(vol)) {
      throw new EucalyptusCloudException(
          "Not authorized to detach volume "
              + request.getVolumeId()
              + " by "
              + ctx.getUser().getName());
    }

    VmInstance vm = null;
    AttachedVolume volume = null;
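    // resolve the instance and the volume's current attachment, if any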
    try {
      VmVolumeAttachment vmVolAttach = VmInstances.lookupVolumeAttachment(request.getVolumeId());
      volume = VmVolumeAttachment.asAttachedVolume(vmVolAttach.getVmInstance()).apply(vmVolAttach);
      vm = vmVolAttach.getVmInstance();
    } catch (NoSuchElementException ex) {
      /* no such attachment */
    }
    if (volume == null) {
      throw new EucalyptusCloudException("Volume is not attached: " + request.getVolumeId());
    }
    if (!RestrictedTypes.filterPrivileged().apply(vm)) {
      throw new EucalyptusCloudException(
          "Not authorized to detach volume from instance "
              + request.getInstanceId()
              + " by "
              + ctx.getUser().getName());
    }
    if (!vm.getInstanceId().equals(request.getInstanceId())
        && request.getInstanceId() != null
        && !request.getInstanceId().equals("")) {
      throw new EucalyptusCloudException(
          "Volume is not attached to instance: " + request.getInstanceId());
    }
    if (request.getDevice() != null
        && !request.getDevice().equals("")
        && !volume.getDevice().equals(request.getDevice())) {
      throw new EucalyptusCloudException(
          "Volume is not attached to device: " + request.getDevice());
    }

    Cluster cluster = null;
    ServiceConfiguration ccConfig = null;
    try {
      ccConfig = Topology.lookup(ClusterController.class, vm.lookupPartition());
      cluster = Clusters.lookup(ccConfig);
    } catch (NoSuchElementException e) {
      LOG.debug(e, e);
      throw new EucalyptusCloudException(
          "Cluster does not exist in partition: " + vm.getPartition());
    }
    ServiceConfiguration scVm;
    try {
      scVm = Topology.lookup(Storage.class, vm.lookupPartition());
    } catch (Exception ex) {
      LOG.error(ex, ex);
      throw new EucalyptusCloudException(
          "Failed to lookup SC for partition: " + vm.getPartition(), ex);
    }
    request.setVolumeId(volume.getVolumeId());
    request.setRemoteDevice(volume.getRemoteDevice());
    request.setDevice(volume.getDevice().replaceAll("unknown,requested:", ""));
    request.setInstanceId(vm.getInstanceId());
    VolumeDetachCallback ncDetach = new VolumeDetachCallback(request);
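    // detach on the storage controller synchronously first, then dispatch the
    // detach to the cluster controller asynchronously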
    try {
      AsyncRequests.sendSync(scVm, new DetachStorageVolumeType(volume.getVolumeId()));
    } catch (Exception e) {
      LOG.debug(e);
      Logs.extreme().debug(e, e);
      // GRZE: attach is idempotent, failure here is ok.
      // throw new EucalyptusCloudException( e.getMessage( ) );
    }
    AsyncRequests.newRequest(ncDetach).dispatch(cluster.getConfiguration());
    EventRecord.here(VolumeManager.class, EventClass.VOLUME, EventType.VOLUME_DETACH)
        .withDetails(vm.getOwner().toString(), volume.getVolumeId(), "instance", vm.getInstanceId())
        .withDetails("cluster", ccConfig.getFullName().toString())
        .info();
    volume.setStatus("detaching");
    reply.setDetachedVolume(volume);
    return reply;
  }
Example #10
  @Override
  public void fireEvent(final ClockTick event) {
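    // only the coordinator host publishes availability events, and only after bootstrap has finished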
    if (Bootstrap.isFinished() && Hosts.isCoordinator()) {

      final List<ResourceAvailabilityEvent> resourceAvailabilityEvents = Lists.newArrayList();
      final Map<ResourceAvailabilityEvent.ResourceType, AvailabilityAccumulator> availabilities =
          Maps.newEnumMap(ResourceAvailabilityEvent.ResourceType.class);
      final Iterable<VmType> vmTypes = Lists.newArrayList(VmTypes.list());
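      // for each cluster, compute core/disk/memory availability across all VM types
      // and publish both per-vm-type and rolled-up availability events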
      for (final Cluster cluster : Clusters.getInstance().listValues()) {
        availabilities.put(Core, new AvailabilityAccumulator(VmType.SizeProperties.Cpu));
        availabilities.put(Disk, new AvailabilityAccumulator(VmType.SizeProperties.Disk));
        availabilities.put(Memory, new AvailabilityAccumulator(VmType.SizeProperties.Memory));

        for (final VmType vmType : vmTypes) {
          final ResourceState.VmTypeAvailability va =
              cluster.getNodeState().getAvailability(vmType.getName());

          resourceAvailabilityEvents.add(
              new ResourceAvailabilityEvent(
                  Instance,
                  new ResourceAvailabilityEvent.Availability(
                      va.getMax(),
                      va.getAvailable(),
                      Lists.<ResourceAvailabilityEvent.Tag>newArrayList(
                          new ResourceAvailabilityEvent.Dimension(
                              "availabilityZone", cluster.getPartition()),
                          new ResourceAvailabilityEvent.Dimension("cluster", cluster.getName()),
                          new ResourceAvailabilityEvent.Type("vm-type", vmType.getName())))));

          for (final AvailabilityAccumulator availability : availabilities.values()) {
            availability.total =
                Math.max(
                    availability.total, va.getMax() * availability.valueExtractor.apply(vmType));
            availability.available =
                Math.max(
                    availability.available,
                    va.getAvailable() * availability.valueExtractor.apply(vmType));
          }
        }

        for (final AvailabilityAccumulator availability : availabilities.values()) {
          availability.rollUp(
              Lists.<ResourceAvailabilityEvent.Tag>newArrayList(
                  new ResourceAvailabilityEvent.Dimension(
                      "availabilityZone", cluster.getPartition()),
                  new ResourceAvailabilityEvent.Dimension("cluster", cluster.getName())));
        }
      }

      for (final Map.Entry<ResourceAvailabilityEvent.ResourceType, AvailabilityAccumulator> entry :
          availabilities.entrySet()) {
        resourceAvailabilityEvents.add(
            new ResourceAvailabilityEvent(entry.getKey(), entry.getValue().availabilities));
      }

      for (final ResourceAvailabilityEvent resourceAvailabilityEvent : resourceAvailabilityEvents) {
        try {
          ListenerRegistry.getInstance().fireEvent(resourceAvailabilityEvent);
        } catch (Exception ex) {
          logger.error(ex, ex);
        }
      }
    }
  }