protected static Address lookupOrCreate(
     final Cluster cluster, final ClusterAddressInfo addrInfo) {
   Address addr = null;
   VmInstance vm = null;
   try {
     addr = Addresses.getInstance().lookupDisabled(addrInfo.getAddress());
     LOG.trace("Found address in the inactive set cache: " + addr);
   } catch (final NoSuchElementException e1) {
     try {
       addr = Addresses.getInstance().lookup(addrInfo.getAddress());
       LOG.trace("Found address in the active set cache: " + addr);
      } catch (final NoSuchElementException e) {
        // Not cached in either set; fall through with addr == null so that a
        // new Address can be created below.
      }
   }
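    // The cluster reports a public -> private mapping for this address:
    // reconcile it against the cached Address and any VM claiming the mapping.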
   if (addrInfo.hasMapping()) {
     vm =
         Helper.maybeFindVm(
             addr != null ? addr.getInstanceId() : null,
             addrInfo.getAddress(),
             addrInfo.getInstanceIp());
      if ((addr != null) && (vm != null) && !addr.isPending() && VmStateSet.DONE.apply(vm)) {
        // The mapped VM has already terminated, so the reported mapping is
        // stale. This case must precede the generic (addr != null && vm != null)
        // branch below, which would otherwise shadow it and make it unreachable.
        handleOrphan(cluster, addrInfo);
      } else if ((addr != null) && (vm != null)) {
        Helper.ensureAllocated(addr, vm);
        clearOrphan(addrInfo);
      } else if ((addr != null) && addr.isAssigned() && !addr.isPending() && (vm == null)) {
        handleOrphan(cluster, addrInfo);
     } else if ((addr == null) && (vm != null)) {
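        // A VM claims the address but no Address object is cached: recreate it
        // as system-owned and assigned to that VM.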
       addr =
           new Address(
               Principals.systemFullName(),
               addrInfo.getAddress(),
               vm.getInstanceUuid(),
               vm.getInstanceId(),
               vm.getPrivateAddress());
       clearOrphan(addrInfo);
     } else if ((addr == null) && (vm == null)) {
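        // Nothing is known locally about the reported mapping: recreate the
        // Address and let the orphan handler track the inconsistency.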
       addr = new Address(addrInfo.getAddress(), cluster.getPartition());
       handleOrphan(cluster, addrInfo);
     }
   } else {
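      // The cluster reports no mapping for this address.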
     if ((addr != null) && addr.isAssigned() && !addr.isPending()) {
       handleOrphan(cluster, addrInfo);
     } else if ((addr != null)
         && !addr.isAssigned()
         && !addr.isPending()
         && addr.isSystemOwned()) {
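        // Idle, system-owned address: release it back to the pool.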
       try {
         addr.release();
       } catch (final Exception ex) {
         LOG.error(ex);
       }
     } else if ((addr != null) && Address.Transition.system.equals(addr.getTransition())) {
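        // The address is mid system-initiated transition; track it as a
        // potential orphan until its state settles.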
       handleOrphan(cluster, addrInfo);
     } else if (addr == null) {
       addr = new Address(addrInfo.getAddress(), cluster.getPartition());
       Helper.clearVmState(addrInfo);
     }
   }
   return addr;
 }
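
  // Illustrative sketch (not part of the original source): how a cluster state
  // callback might drive lookupOrCreate over every address the cluster reports.
  // The reported-address list parameter is an assumption for illustration.
  protected static void reconcile(
      final Cluster cluster, final List<ClusterAddressInfo> reportedAddresses) {
    for (final ClusterAddressInfo addrInfo : reportedAddresses) {
      final Address addr = lookupOrCreate(cluster, addrInfo);
      LOG.trace("Reconciled address: " + addr);
    }
  }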
Example 2
    private List<ResourceToken> requestResourceToken(
        final Allocation allocInfo, final int tryAmount, final int maxAmount) throws Exception {
      ServiceConfiguration config =
          Topology.lookup(ClusterController.class, allocInfo.getPartition());
      Cluster cluster = Clusters.lookup(config);
      /**
       * TODO:GRZE: this is the call path which needs to trigger gating. It shouldn't be handled
       * directly here, but instead in {@link ResourceState#requestResourceAllocation}.
       */
      if (cluster.getGateLock().readLock().tryLock(60, TimeUnit.SECONDS)) {
        try {
          final ResourceState state = cluster.getNodeState();
           /**
            * NOTE: If the requested instance type has an ordering conflict with some other type,
            * then it isn't safe to service two requests that use differing types during the same
            * resource refresh duty cycle. This flag determines whether an asynchronous allocation
            * is safe for the requested instance type or whether a synchronous resource
            * availability refresh is needed first.
            */
          boolean unorderedType = VmTypes.isUnorderedType(allocInfo.getVmType());
          boolean forceResourceRefresh = state.hasUnorderedTokens() || unorderedType;
           /**
            * GRZE: if the vm type is not "nicely" ordered then we force a refresh of the actual
            * cluster state. Note: we already hold the cluster gating lock here, so this update is
            * mutually exclusive with respect to both resource allocations and cluster state
            * updates.
            */
          if (forceResourceRefresh) {
            cluster.refreshResources();
          }
          final List<ResourceToken> tokens =
              state.requestResourceAllocation(allocInfo, tryAmount, maxAmount);
          final Iterator<ResourceToken> tokenIterator = tokens.iterator();
          try {
            final Supplier<ResourceToken> allocator =
                new Supplier<ResourceToken>() {
                  @Override
                  public ResourceToken get() {
                    final ResourceToken ret = tokenIterator.next();
                    allocInfo.getAllocationTokens().add(ret);
                    return ret;
                  }
                };

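            // allocateUnitlessResources is expected to pull one token per requested
            // unit from the supplier (applying quota checks along the way); whatever
            // it does not consume stays on tokenIterator for the finally block below.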
            RestrictedTypes.allocateUnitlessResources(tokens.size(), allocator);
          } finally {
            // release any tokens that were not allocated
            Iterators.all(
                tokenIterator,
                new Predicate<ResourceToken>() {
                  @Override
                  public boolean apply(final ResourceToken resourceToken) {
                    state.releaseToken(resourceToken);
                    return true;
                  }
                });
          }
          return allocInfo.getAllocationTokens();
        } finally {
          cluster.getGateLock().readLock().unlock();
        }
      } else {
        throw new ServiceStateException(
            "Failed to allocate resources in the zone "
                + cluster.getPartition()
                + "; it is currently locked for maintenance.");
      }
    }
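
  // Minimal, self-contained sketch (an assumption, not original code) of the
  // acquire-then-release-remainder pattern used above: tokens consumed through
  // the shared iterator are kept; anything the consumer never reached is handed
  // back in the finally block, even when allocation fails part way through.
  // (Uses java.util.Iterator, java.util.List, java.util.function.Consumer.)
  private static <T> void acquireOrRelease(
      final List<T> tokens, final Consumer<T> acquire, final Consumer<T> release) {
    final Iterator<T> it = tokens.iterator();
    try {
      while (it.hasNext()) {
        acquire.accept(it.next()); // may throw, e.g. on a quota failure
      }
    } finally {
      it.forEachRemaining(release); // return the unconsumed remainder
    }
  }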
  @Override
  public void fireEvent(final ClockTick event) {
    if (Bootstrap.isFinished() && Hosts.isCoordinator()) {

      final List<ResourceAvailabilityEvent> resourceAvailabilityEvents = Lists.newArrayList();
      final Map<ResourceAvailabilityEvent.ResourceType, AvailabilityAccumulator> availabilities =
          Maps.newEnumMap(ResourceAvailabilityEvent.ResourceType.class);
      final Iterable<VmType> vmTypes = Lists.newArrayList(VmTypes.list());
      // One accumulator per resource type, shared across all clusters: rollUp()
      // (see the sketch after this method) snapshots each cluster's totals into
      // the accumulator's list, so creating fresh accumulators inside the loop
      // would discard every cluster's roll-up except the last.
      availabilities.put(Core, new AvailabilityAccumulator(VmType.SizeProperties.Cpu));
      availabilities.put(Disk, new AvailabilityAccumulator(VmType.SizeProperties.Disk));
      availabilities.put(Memory, new AvailabilityAccumulator(VmType.SizeProperties.Memory));
      for (final Cluster cluster : Clusters.getInstance().listValues()) {

        for (final VmType vmType : vmTypes) {
          final ResourceState.VmTypeAvailability va =
              cluster.getNodeState().getAvailability(vmType.getName());

          resourceAvailabilityEvents.add(
              new ResourceAvailabilityEvent(
                  Instance,
                  new ResourceAvailabilityEvent.Availability(
                      va.getMax(),
                      va.getAvailable(),
                      Lists.<ResourceAvailabilityEvent.Tag>newArrayList(
                          new ResourceAvailabilityEvent.Dimension(
                              "availabilityZone", cluster.getPartition()),
                          new ResourceAvailabilityEvent.Dimension("cluster", cluster.getName()),
                          new ResourceAvailabilityEvent.Type("vm-type", vmType.getName())))));

          for (final AvailabilityAccumulator availability : availabilities.values()) {
            availability.total =
                Math.max(
                    availability.total, va.getMax() * availability.valueExtractor.apply(vmType));
            availability.available =
                Math.max(
                    availability.available,
                    va.getAvailable() * availability.valueExtractor.apply(vmType));
          }
        }

        for (final AvailabilityAccumulator availability : availabilities.values()) {
          availability.rollUp(
              Lists.<ResourceAvailabilityEvent.Tag>newArrayList(
                  new ResourceAvailabilityEvent.Dimension(
                      "availabilityZone", cluster.getPartition()),
                  new ResourceAvailabilityEvent.Dimension("cluster", cluster.getName())));
        }
      }

      for (final Map.Entry<ResourceAvailabilityEvent.ResourceType, AvailabilityAccumulator> entry :
          availabilities.entrySet()) {
        resourceAvailabilityEvents.add(
            new ResourceAvailabilityEvent(entry.getKey(), entry.getValue().availabilities));
      }

      for (final ResourceAvailabilityEvent resourceAvailabilityEvent : resourceAvailabilityEvents) {
        try {
          ListenerRegistry.getInstance().fireEvent(resourceAvailabilityEvent);
        } catch (final Exception ex) {
          logger.error(ex, ex);
        }
      }
    }
  }
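
  // Hedged reconstruction (assumed, not shown in this excerpt) of the accumulator
  // fireEvent relies on: rollUp() snapshots the running per-cluster totals under
  // the given tags and resets the counters, which is why the accumulators must be
  // created once and shared across the cluster loop above. The Availability
  // constructor signature here is likewise an assumption.
  private static final class AvailabilityAccumulator {
    private long total;
    private long available;
    private final Function<VmType, Integer> valueExtractor;
    private final List<ResourceAvailabilityEvent.Availability> availabilities =
        Lists.newArrayList();

    private AvailabilityAccumulator(final Function<VmType, Integer> valueExtractor) {
      this.valueExtractor = valueExtractor;
    }

    private void rollUp(final Iterable<ResourceAvailabilityEvent.Tag> tags) {
      availabilities.add(
          new ResourceAvailabilityEvent.Availability(total, available, tags));
      total = 0;
      available = 0;
    }
  }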