/** Binding module for authentication of users with special capabilities for admin functions. */
public class ThriftAuthModule extends AbstractModule {

  private static final Map<Capability, String> DEFAULT_CAPABILITIES =
      ImmutableMap.of(Capability.ROOT, "mesos");

  @NotEmpty
  @CmdLine(
      name = "user_capabilities",
      help = "Concrete name mappings for administration capabilities.")
  private static final Arg<Map<Capability, String>> USER_CAPABILITIES =
      Arg.create(DEFAULT_CAPABILITIES);

  private Map<Capability, String> capabilities;

  public ThriftAuthModule() {
    this(USER_CAPABILITIES.get());
  }

  @VisibleForTesting
  public ThriftAuthModule(Map<Capability, String> capabilities) {
    this.capabilities = Preconditions.checkNotNull(capabilities);
  }

  @Override
  protected void configure() {
    Preconditions.checkArgument(
        capabilities.containsKey(Capability.ROOT),
        "A ROOT capability must be provided with --user_capabilities");
    bind(new TypeLiteral<Map<Capability, String>>() {}).toInstance(capabilities);

    requireBinding(SessionValidator.class);
    requireBinding(CapabilityValidator.class);
  }
}
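/*
 * Usage sketch (illustrative, not part of the original source): tests can bypass the
 * --user_capabilities flag by handing a capability map straight to the @VisibleForTesting
 * constructor; configure() rejects any map without a ROOT entry. The "admin" value is made up.
 */
class ThriftAuthModuleExample {
  static ThriftAuthModule withAdminRoot() {
    return new ThriftAuthModule(ImmutableMap.of(Capability.ROOT, "admin"));
  }
}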
  /**
   * Extend this to add custom options. TODO(dmitriy): generate the getters/setters that default to
   * statically injected values, but can be overridden.
   */
  public static class IndexConfig {
    @NotNull
    @CmdLine(name = "input", help = "one or more paths to input data, comma separated")
    public static final Arg<List<String>> input = Arg.create();

    @NotNull
    @CmdLine(name = "index", help = "index location")
    public static final Arg<String> index = Arg.create();

    @NotNull
    @CmdLine(name = "num_partitions", help = "number of partitions")
    public static final Arg<Integer> numPartitions = Arg.create();

    @CmdLine(
        name = "sample_percentage",
        help =
            "index randomly-sampled percentage [1,100] of records; default=100 (index everything)")
    public static final Arg<Integer> samplePercentage = Arg.create(100);

    @CmdLine(name = "analyzer", help = "Lucene analyzer to use. WhitespaceAnalyzer is default")
    public static final Arg<String> analyzer = Arg.create(WhitespaceAnalyzer.class.getName());

    @CmdLine(name = "similarity", help = "Lucene similarity to use")
    public static final Arg<String> similarity = Arg.create(DefaultSimilarity.class.getName());

    @CmdLine(name = "wait_for_completion", help = "wait for job completion")
    public static final Arg<Boolean> waitForCompletion = Arg.create(true);
  }
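  /*
   * Extension sketch (illustrative, not from the original source): the class comment suggests
   * subclassing to add custom options; a hypothetical "shards" flag might look like this.
   */
  public static class ShardedIndexConfig extends IndexConfig {
    @CmdLine(name = "shards", help = "number of index shards to write")
    public static final Arg<Integer> shards = Arg.create(1);
  }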
/**
 * A configuration class for the root java.util.logging Logger.
 *
 * <p>Defines flags to control the behavior of the root logger similarly to Google's glog
 * library (see http://code.google.com/p/google-glog ).
 */
public class RootLogConfig {
  /** An enum reflecting log {@link Level} constants. */
  public enum LogLevel {
    FINEST(Level.FINEST),
    FINER(Level.FINER),
    FINE(Level.FINE),
    CONFIG(Level.CONFIG),
    INFO(Level.INFO),
    WARNING(Level.WARNING),
    SEVERE(Level.SEVERE);

    private final Level level;

    private LogLevel(Level level) {
      this.level = level;
    }

    private Level getLevel() {
      return level;
    }

    private int intValue() {
      return level.intValue();
    }
  }

  @CmdLine(name = "logtostderr", help = "Log messages to stderr instead of logfiles.")
  private static Arg<Boolean> LOGTOSTDERR = Arg.create(false);

  @CmdLine(
      name = "alsologtostderr",
      help = "Log messages to stderr, in addition to log files. Ignored when --logtostderr")
  private static Arg<Boolean> ALSOLOGTOSTDERR = Arg.create(false);

  @CmdLine(
      name = "vlog",
      help =
          "The value is one of the constants in java.util.logging.Level. "
              + "Shows all messages with level equal or higher "
              + "than the value of this flag.")
  private static Arg<LogLevel> VLOG = Arg.create(LogLevel.INFO);

  @CmdLine(
      name = "vmodule",
      help =
          "Per-class verbose level. The argument has to contain a comma-separated list "
              + "of <class_name>=<log_level>. <class_name> is the full-qualified name of a "
              + "class, <log_level> is one of the constants in java.util.logging.Level. "
              + "<log_level> overrides any value given by --vlog.")
  private static Arg<Map<Class<?>, LogLevel>> VMODULE =
      Arg.<Map<Class<?>, LogLevel>>create(new HashMap<Class<?>, LogLevel>());

  // TODO(franco): change this flag's default to true, then remove after enough forewarning.
  @CmdLine(name = "use_glog_formatter", help = "True to use the glog formatter exclusively.")
  private static Arg<Boolean> USE_GLOG_FORMATTER = Arg.create(false);

  /**
   * A builder-pattern class used to perform the configuration programmatically (i.e. not through
   * flags). Example: <code>
   *    RootLogConfig.builder().logToStderr(true).apply();
   * </code>
   */
  public static class Configuration {
    private boolean logToStderr = false;
    private boolean alsoLogToStderr = false;
    private boolean useGLogFormatter = false;
    private LogLevel vlog = null;
    private Map<Class<?>, LogLevel> vmodule = null;
    private String rootLoggerName = "";

    private Configuration() {}

    /**
     * Only log messages to stderr, instead of log files. Overrides alsologtostderr. Default: false.
     *
     * @param flag True to enable, false to disable.
     * @return this Configuration object.
     */
    public Configuration logToStderr(boolean flag) {
      this.logToStderr = flag;
      return this;
    }

    /**
     * Also log messages to stderr, in addition to log files. Overridden by logtostderr. Default:
     * false.
     *
     * @param flag True to enable, false to disable.
     * @return this Configuration object.
     */
    public Configuration alsoLogToStderr(boolean flag) {
      this.alsoLogToStderr = flag;
      return this;
    }

    /**
     * Format log messages in one-line with a header, similar to google-glog. Default: false.
     *
     * @param flag True to enable, false to disable.
     * @return this Configuration object.
     */
    public Configuration useGLogFormatter(boolean flag) {
      this.useGLogFormatter = flag;
      return this;
    }

    /**
     * Output log messages at least at the given verbosity level. Overridden by vmodule. Default:
     * INFO.
     *
     * @param level LogLevel enumerator for the minimum log message verbosity level that is output.
     * @return this Configuration object.
     */
    public Configuration vlog(LogLevel level) {
      Preconditions.checkNotNull(level);
      this.vlog = level;
      return this;
    }

    /**
     * Output log messages for a given set of classes at the associated verbosity levels. Overrides
     * vlog. Default: no classes are treated specially.
     *
     * @param pairs Map of classes and corresponding log levels.
     * @return this Configuration object.
     */
    public Configuration vmodule(Map<Class<?>, LogLevel> pairs) {
      Preconditions.checkNotNull(pairs);
      this.vmodule = pairs;
      return this;
    }

    /** Applies this configuration to the root log. */
    public void apply() {
      RootLogConfig.configure(this);
    }

    // Intercepts the root logger, for testing purposes only.
    @VisibleForTesting
    Configuration rootLoggerName(String name) {
      Preconditions.checkNotNull(name);
      Preconditions.checkArgument(!name.isEmpty());
      this.rootLoggerName = name;
      return this;
    }
  }

  /**
   * Creates a new Configuration builder object.
   *
   * @return A new Configuration builder.
   */
  public static Configuration builder() {
    return new Configuration();
  }

  /**
   * Configures the root log properties using flags. This is the entry point used by
   * AbstractApplication via LogModule.
   */
  public static void configureFromFlags() {
    builder()
        .logToStderr(LOGTOSTDERR.get())
        .alsoLogToStderr(ALSOLOGTOSTDERR.get())
        .useGLogFormatter(USE_GLOG_FORMATTER.get())
        .vlog(VLOG.get())
        .vmodule(VMODULE.get())
        .apply();
  }
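
  // Illustrative flag usage (an assumption about the exact launcher syntax, based on the flag
  // names declared above):
  //   --logtostderr --use_glog_formatter --vlog=FINE --vmodule=com.example.MyClass=FINEST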

  private static void configure(Configuration configuration) {
    // Edit the properties of the root logger.
    Logger rootLogger = Logger.getLogger(configuration.rootLoggerName);
    if (configuration.logToStderr) {
      setLoggerToStderr(rootLogger);
    } else if (configuration.alsoLogToStderr) {
      setLoggerToAlsoStderr(rootLogger);
    }
    if (configuration.useGLogFormatter) {
      setGLogFormatter(rootLogger);
    }
    if (configuration.vlog != null) {
      setVlog(rootLogger, configuration.vlog);
    }
    if (configuration.vmodule != null) {
      setVmodules(configuration.vmodule);
    }
  }

  private static void setLoggerToStderr(Logger logger) {
    LogManager.getLogManager().reset();
    setConsoleHandler(logger, true);
  }

  private static void setLoggerToAlsoStderr(Logger logger) {
    setConsoleHandler(logger, false);
  }

  private static void setConsoleHandler(Logger logger, boolean removeOtherHandlers) {
    Handler consoleHandler = null;
    for (Handler h : logger.getHandlers()) {
      if (h instanceof ConsoleHandler) {
        consoleHandler = h;
      } else if (removeOtherHandlers) {
        logger.removeHandler(h);
      }
    }
    if (consoleHandler == null) {
      consoleHandler = new ConsoleHandler();
      logger.addHandler(consoleHandler);
    }
    consoleHandler.setLevel(Level.ALL);
    consoleHandler.setFilter(null);
  }

  private static void setGLogFormatter(Logger logger) {
    for (Handler h : logger.getHandlers()) {
      h.setFormatter(new LogFormatter());
    }
  }

  private static void setVmodules(Map<Class<?>, LogLevel> vmodules) {
    for (Map.Entry<Class<?>, LogLevel> entry : vmodules.entrySet()) {
      String className = entry.getKey().getName();
      Logger logger = Logger.getLogger(className);
      setVlog(logger, entry.getValue());
    }
  }

  private static void setVlog(Logger logger, LogLevel logLevel) {
    final Level newLevel = logLevel.getLevel();
    logger.setLevel(newLevel);
    do {
      for (Handler handler : logger.getHandlers()) {
        Level handlerLevel = handler.getLevel();
        if (newLevel.intValue() < handlerLevel.intValue()) {
          handler.setLevel(newLevel);
        }
      }
    } while (logger.getUseParentHandlers() && (logger = logger.getParent()) != null);
  }

  // Utility class.
  private RootLogConfig() {}
}
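/*
 * Usage sketch (illustrative, not from the original source): programmatic configuration through
 * the builder, roughly equivalent to passing --alsologtostderr --use_glog_formatter --vlog=FINE
 * on the command line. The example class name is made up.
 */
class RootLogConfigExample {
  static void configureVerboseStderrLogging() {
    RootLogConfig.builder()
        .alsoLogToStderr(true)
        .useGLogFormatter(true)
        .vlog(RootLogConfig.LogLevel.FINE)
        .apply();
  }
}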
/** Binding module for async task management. */
public class AsyncModule extends AbstractModule {

  private static final Logger LOG = Logger.getLogger(AsyncModule.class.getName());

  @CmdLine(
      name = "async_worker_threads",
      help = "The number of worker threads to process async task operations with.")
  private static final Arg<Integer> ASYNC_WORKER_THREADS = Arg.create(1);

  @CmdLine(
      name = "transient_task_state_timeout",
      help = "The amount of time after which to treat a task stuck in a transient state as LOST.")
  private static final Arg<Amount<Long, Time>> TRANSIENT_TASK_STATE_TIMEOUT =
      Arg.create(Amount.of(5L, Time.MINUTES));

  @CmdLine(
      name = "initial_schedule_delay",
      help = "Initial amount of time to wait before attempting to schedule a PENDING task.")
  private static final Arg<Amount<Long, Time>> INITIAL_SCHEDULE_DELAY =
      Arg.create(Amount.of(1L, Time.SECONDS));

  @CmdLine(
      name = "max_schedule_delay",
      help = "Maximum delay between attempts to schedule a PENDING tasks.")
  private static final Arg<Amount<Long, Time>> MAX_SCHEDULE_DELAY =
      Arg.create(Amount.of(30L, Time.SECONDS));

  @CmdLine(
      name = "min_offer_hold_time",
      help = "Minimum amount of time to hold a resource offer before declining.")
  private static final Arg<Amount<Integer, Time>> MIN_OFFER_HOLD_TIME =
      Arg.create(Amount.of(5, Time.MINUTES));

  @CmdLine(
      name = "history_prune_threshold",
      help = "Time after which the scheduler will prune terminated task history.")
  private static final Arg<Amount<Long, Time>> HISTORY_PRUNE_THRESHOLD =
      Arg.create(Amount.of(2L, Time.DAYS));

  @CmdLine(
      name = "max_schedule_attempts_per_sec",
      help = "Maximum number of scheduling attempts to make per second.")
  private static final Arg<Double> MAX_SCHEDULE_ATTEMPTS_PER_SEC = Arg.create(10D);

  @CmdLine(
      name = "flapping_task_threshold",
      help = "A task that repeatedly runs for less than this time is considered to be flapping.")
  private static final Arg<Amount<Long, Time>> FLAPPING_THRESHOLD =
      Arg.create(Amount.of(5L, Time.MINUTES));

  @CmdLine(
      name = "initial_flapping_task_delay",
      help = "Initial amount of time to wait before attempting to schedule a flapping task.")
  private static final Arg<Amount<Long, Time>> INITIAL_FLAPPING_DELAY =
      Arg.create(Amount.of(30L, Time.SECONDS));

  @CmdLine(
      name = "max_flapping_task_delay",
      help = "Maximum delay between attempts to schedule a flapping task.")
  private static final Arg<Amount<Long, Time>> MAX_FLAPPING_DELAY =
      Arg.create(Amount.of(5L, Time.MINUTES));

  @CmdLine(
      name = "max_reschedule_task_delay_on_startup",
      help = "Upper bound of random delay for pending task rescheduling on scheduler startup.")
  private static final Arg<Amount<Integer, Time>> MAX_RESCHEDULING_DELAY =
      Arg.create(Amount.of(30, Time.SECONDS));

  @CmdLine(
      name = "preemption_delay",
      help = "Time interval after which a pending task becomes eligible to preempt other tasks")
  private static final Arg<Amount<Long, Time>> PREEMPTION_DELAY =
      Arg.create(Amount.of(10L, Time.MINUTES));

  @CmdLine(name = "enable_preemptor", help = "Enable the preemptor and preemption")
  private static final Arg<Boolean> ENABLE_PREEMPTOR = Arg.create(true);

  private static final Preemptor NULL_PREEMPTOR =
      new Preemptor() {
        @Override
        public Optional<String> findPreemptionSlotFor(String taskId) {
          return Optional.absent();
        }
      };

  @CmdLine(
      name = "offer_reservation_duration",
      help =
          "Time to reserve a slave's offers while "
              + "trying to satisfy a task preempting another.")
  private static final Arg<Amount<Long, Time>> RESERVATION_DURATION =
      Arg.create(Amount.of(3L, Time.MINUTES));

  @BindingAnnotation
  @Target({FIELD, PARAMETER, METHOD})
  @Retention(RUNTIME)
  private @interface PreemptionBinding {}

  @VisibleForTesting
  static final Key<Preemptor> PREEMPTOR_KEY = Key.get(Preemptor.class, PreemptionBinding.class);

  @Override
  protected void configure() {
    // Don't worry about clean shutdown, these can be daemon and cleanup-free.
    final ScheduledThreadPoolExecutor executor =
        new ScheduledThreadPoolExecutor(
            ASYNC_WORKER_THREADS.get(),
            new ThreadFactoryBuilder().setNameFormat("AsyncProcessor-%d").setDaemon(true).build());
    Stats.exportSize("timeout_queue_size", executor.getQueue());
    Stats.export(
        new StatImpl<Long>("async_tasks_completed") {
          @Override
          public Long read() {
            return executor.getCompletedTaskCount();
          }
        });

    // AsyncModule itself is not a subclass of PrivateModule because TaskEventModule internally uses
    // a MultiBinder, which cannot span multiple injectors.
    binder()
        .install(
            new PrivateModule() {
              @Override
              protected void configure() {
                bind(new TypeLiteral<Amount<Long, Time>>() {})
                    .toInstance(TRANSIENT_TASK_STATE_TIMEOUT.get());
                bind(ScheduledExecutorService.class).toInstance(executor);

                bind(TaskTimeout.class).in(Singleton.class);
                requireBinding(StatsProvider.class);
                expose(TaskTimeout.class);
              }
            });
    PubsubEventModule.bindSubscriber(binder(), TaskTimeout.class);

    binder()
        .install(
            new PrivateModule() {
              @Override
              protected void configure() {
                bind(TaskGroupsSettings.class)
                    .toInstance(
                        new TaskGroupsSettings(
                            new TruncatedBinaryBackoff(
                                INITIAL_SCHEDULE_DELAY.get(), MAX_SCHEDULE_DELAY.get()),
                            RateLimiter.create(MAX_SCHEDULE_ATTEMPTS_PER_SEC.get())));

                bind(RescheduleCalculatorImpl.RescheduleCalculatorSettings.class)
                    .toInstance(
                        new RescheduleCalculatorImpl.RescheduleCalculatorSettings(
                            new TruncatedBinaryBackoff(
                                INITIAL_FLAPPING_DELAY.get(), MAX_FLAPPING_DELAY.get()),
                            FLAPPING_THRESHOLD.get(),
                            MAX_RESCHEDULING_DELAY.get()));

                bind(RescheduleCalculator.class)
                    .to(RescheduleCalculatorImpl.class)
                    .in(Singleton.class);
                if (ENABLE_PREEMPTOR.get()) {
                  bind(PREEMPTOR_KEY).to(PreemptorImpl.class);
                  bind(PreemptorImpl.class).in(Singleton.class);
                  LOG.info("Preemptor Enabled.");
                } else {
                  bind(PREEMPTOR_KEY).toInstance(NULL_PREEMPTOR);
                  LOG.warning("Preemptor Disabled.");
                }
                expose(PREEMPTOR_KEY);
                bind(new TypeLiteral<Amount<Long, Time>>() {})
                    .annotatedWith(PreemptionDelay.class)
                    .toInstance(PREEMPTION_DELAY.get());
                bind(TaskGroups.class).in(Singleton.class);
                expose(TaskGroups.class);
              }
            });
    bindTaskScheduler(binder(), PREEMPTOR_KEY, RESERVATION_DURATION.get());
    PubsubEventModule.bindSubscriber(binder(), TaskGroups.class);

    binder()
        .install(
            new PrivateModule() {
              @Override
              protected void configure() {
                bind(OfferReturnDelay.class).to(RandomJitterReturnDelay.class);
                bind(ScheduledExecutorService.class).toInstance(executor);
                bind(OfferQueue.class).to(OfferQueueImpl.class);
                bind(OfferQueueImpl.class).in(Singleton.class);
                expose(OfferQueue.class);
              }
            });
    PubsubEventModule.bindSubscriber(binder(), OfferQueue.class);

    binder()
        .install(
            new PrivateModule() {
              @Override
              protected void configure() {
                // TODO(ksweeney): Create a configuration validator module so this can be injected.
                // TODO(William Farner): Revert this once large task counts are cheap, a la
                // hierarchical store.
                bind(Integer.class).annotatedWith(PruneThreshold.class).toInstance(100);
                bind(new TypeLiteral<Amount<Long, Time>>() {})
                    .annotatedWith(PruneThreshold.class)
                    .toInstance(HISTORY_PRUNE_THRESHOLD.get());
                bind(ScheduledExecutorService.class).toInstance(executor);

                bind(HistoryPruner.class).in(Singleton.class);
                expose(HistoryPruner.class);
              }
            });
    PubsubEventModule.bindSubscriber(binder(), HistoryPruner.class);
  }

  /**
   * This method exists because we want to test the wiring up of TaskSchedulerImpl class to the
   * PubSub system in the TaskSchedulerImplTest class. The method has a complex signature because
   * the binding of the TaskScheduler and friends occurs in a PrivateModule which does not interact
   * well with the MultiBinder that backs the PubSub system.
   */
  @VisibleForTesting
  static void bindTaskScheduler(
      Binder binder,
      final Key<Preemptor> preemptorKey,
      final Amount<Long, Time> reservationDuration) {
    binder.install(
        new PrivateModule() {
          @Override
          protected void configure() {
            bind(Preemptor.class).to(preemptorKey);
            bind(new TypeLiteral<Amount<Long, Time>>() {})
                .annotatedWith(ReservationDuration.class)
                .toInstance(reservationDuration);
            bind(TaskScheduler.class).to(TaskSchedulerImpl.class);
            bind(TaskSchedulerImpl.class).in(Singleton.class);
            expose(TaskScheduler.class);
          }
        });
    PubsubEventModule.bindSubscriber(binder, TaskScheduler.class);
  }
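
  // Test wiring sketch (illustrative): a test in this package, such as the TaskSchedulerImplTest
  // mentioned above, can install just the scheduler bindings, e.g.
  //   AsyncModule.bindTaskScheduler(binder(), PREEMPTOR_KEY, Amount.of(1L, Time.MINUTES));
  // provided its injector binds PREEMPTOR_KEY (a no-op Preemptor suffices) and satisfies
  // TaskSchedulerImpl's own dependencies.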

  /** Returns offers after a random duration within a fixed window. */
  private static class RandomJitterReturnDelay implements OfferReturnDelay {
    private static final int JITTER_WINDOW_MS = Amount.of(1, Time.MINUTES).as(Time.MILLISECONDS);
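    // Combined with the default --min_offer_hold_time of 5 minutes, each offer is held for a
    // uniformly random duration between 5 and 6 minutes before being declined.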

    private final int minHoldTimeMs = MIN_OFFER_HOLD_TIME.get().as(Time.MILLISECONDS);
    private final Random random = new Random.SystemRandom(new java.util.Random());

    @Override
    public Amount<Integer, Time> get() {
      return Amount.of(minHoldTimeMs + random.nextInt(JITTER_WINDOW_MS), Time.MILLISECONDS);
    }
  }
}
/** Implementation of the scheduler core. */
class SchedulerCoreImpl implements SchedulerCore {
  @Positive
  @CmdLine(name = "max_tasks_per_job", help = "Maximum number of allowed tasks in a single job.")
  public static final Arg<Integer> MAX_TASKS_PER_JOB = Arg.create(4000);

  private static final Logger LOG = Logger.getLogger(SchedulerCoreImpl.class.getName());

  private final Storage storage;

  private final CronJobManager cronScheduler;

  // Schedulers that are responsible for triggering execution of jobs.
  private final ImmutableList<JobManager> jobManagers;

  // State manager handles persistence of task modifications and state transitions.
  private final StateManager stateManager;

  private final TaskIdGenerator taskIdGenerator;
  private final QuotaManager quotaManager;

  /**
   * Creates a new core scheduler.
   *
   * @param storage Backing store implementation.
   * @param cronScheduler Cron scheduler.
   * @param immediateScheduler Immediate scheduler.
   * @param stateManager Persistent state manager.
   * @param taskIdGenerator Task ID generator.
   * @param quotaManager Quota manager.
   */
  @Inject
  public SchedulerCoreImpl(
      Storage storage,
      CronJobManager cronScheduler,
      ImmediateJobManager immediateScheduler,
      StateManager stateManager,
      TaskIdGenerator taskIdGenerator,
      QuotaManager quotaManager) {

    this.storage = checkNotNull(storage);

    // The immediate scheduler will accept any job, so it's important that other schedulers are
    // placed first.
    this.jobManagers = ImmutableList.of(cronScheduler, immediateScheduler);
    this.cronScheduler = cronScheduler;
    this.stateManager = checkNotNull(stateManager);
    this.taskIdGenerator = checkNotNull(taskIdGenerator);
    this.quotaManager = checkNotNull(quotaManager);
  }

  private boolean hasActiveJob(IJobConfiguration job) {
    return Iterables.any(jobManagers, managerHasJob(job));
  }

  @Override
  public synchronized void tasksDeleted(Set<String> taskIds) {
    for (String taskId : taskIds) {
      setTaskStatus(taskId, ScheduleStatus.UNKNOWN, Optional.<String>absent());
    }
  }

  @Override
  public synchronized void createJob(final SanitizedConfiguration sanitizedConfiguration)
      throws ScheduleException {

    storage.write(
        new MutateWork.NoResult<ScheduleException>() {
          @Override
          protected void execute(MutableStoreProvider storeProvider) throws ScheduleException {

            final IJobConfiguration job = sanitizedConfiguration.getJobConfig();
            if (hasActiveJob(job)) {
              throw new ScheduleException("Job already exists: " + JobKeys.toPath(job));
            }

            validateTaskLimits(job.getTaskConfig(), job.getInstanceCount());

            boolean accepted = false;
            // TODO(wfarner): Remove the JobManager abstraction, and directly invoke addInstances
            // here for non-cron jobs.
            for (final JobManager manager : jobManagers) {
              if (manager.receiveJob(sanitizedConfiguration)) {
                LOG.info("Job accepted by manager: " + manager.getUniqueKey());
                accepted = true;
                break;
              }
            }

            if (!accepted) {
              LOG.severe("Job was not accepted by any of the configured schedulers, discarding.");
              LOG.severe("Discarded job: " + job);
              throw new ScheduleException("Job not accepted, discarding.");
            }
          }
        });
  }

  // This number is derived from the maximum file name length limit on most UNIX systems, less
  // the number of characters we've observed being added by mesos for the executor ID, prefix, and
  // delimiters.
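  // With those numbers this leaves 165 characters for the generated task ID.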
  @VisibleForTesting static final int MAX_TASK_ID_LENGTH = 255 - 90;

  /**
   * Validates task-specific requirements, including name, count and quota checks. Must be
   * performed inside a write storage transaction, together with the associated state mutation,
   * to avoid data races.
   *
   * @param task Task configuration.
   * @param instances Number of task instances.
   * @throws ScheduleException If validation fails.
   */
  private void validateTaskLimits(ITaskConfig task, int instances) throws ScheduleException {

    // TODO(maximk): This is a short-term hack to stop the bleeding from
    //               https://issues.apache.org/jira/browse/MESOS-691
    if (taskIdGenerator.generate(task, instances).length() > MAX_TASK_ID_LENGTH) {
      throw new ScheduleException("Task ID is too long, please shorten your role or job name.");
    }

    if (instances > MAX_TASKS_PER_JOB.get()) {
      throw new ScheduleException("Job exceeds task limit of " + MAX_TASKS_PER_JOB.get());
    }

    QuotaCheckResult quotaCheck = quotaManager.checkQuota(task, instances);
    if (quotaCheck.getResult() == INSUFFICIENT_QUOTA) {
      throw new ScheduleException("Insufficient resource quota: " + quotaCheck.getDetails());
    }
  }

  @Override
  public void validateJobResources(SanitizedConfiguration sanitizedConfiguration)
      throws ScheduleException {

    IJobConfiguration job = sanitizedConfiguration.getJobConfig();
    validateTaskLimits(job.getTaskConfig(), job.getInstanceCount());
  }

  @Override
  public void addInstances(
      final IJobKey jobKey, final ImmutableSet<Integer> instanceIds, final ITaskConfig config)
      throws ScheduleException {

    storage.write(
        new MutateWork.NoResult<ScheduleException>() {
          @Override
          protected void execute(MutableStoreProvider storeProvider) throws ScheduleException {

            validateTaskLimits(config, instanceIds.size());

            ImmutableSet<IScheduledTask> tasks =
                storeProvider.getTaskStore().fetchTasks(Query.jobScoped(jobKey).active());

            Set<Integer> existingInstanceIds =
                FluentIterable.from(tasks).transform(Tasks.SCHEDULED_TO_INSTANCE_ID).toSet();
            if (!Sets.intersection(existingInstanceIds, instanceIds).isEmpty()) {
              throw new ScheduleException("Instance ID collision detected.");
            }

            stateManager.insertPendingTasks(Maps.asMap(instanceIds, Functions.constant(config)));
          }
        });
  }

  @Override
  public synchronized void startCronJob(IJobKey jobKey)
      throws ScheduleException, TaskDescriptionException {

    checkNotNull(jobKey);

    if (!cronScheduler.hasJob(jobKey)) {
      throw new ScheduleException("Cron job does not exist for " + JobKeys.toPath(jobKey));
    }

    cronScheduler.startJobNow(jobKey);
  }

  /**
   * Creates a predicate that will determine whether a job manager has a job matching a job key.
   *
   * @param job Job to match.
   * @return A new predicate matching the job owner and name given.
   */
  private static Predicate<JobManager> managerHasJob(final IJobConfiguration job) {
    return new Predicate<JobManager>() {
      @Override
      public boolean apply(JobManager manager) {
        return manager.hasJob(job.getKey());
      }
    };
  }

  @Override
  public synchronized void setTaskStatus(
      String taskId, final ScheduleStatus status, Optional<String> message) {

    checkNotNull(taskId);
    checkNotNull(status);

    stateManager.changeState(taskId, Optional.<ScheduleStatus>absent(), status, message);
  }

  @Override
  public synchronized void killTasks(Query.Builder query, final String user)
      throws ScheduleException {

    checkNotNull(query);
    LOG.info("Killing tasks matching " + query);

    boolean jobDeleted = false;

    if (Query.isOnlyJobScoped(query)) {
      // If this looks like a query for all tasks in a job, instruct the scheduler modules to
      // delete the job.
      IJobKey jobKey = JobKeys.from(query).get();
      for (JobManager manager : jobManagers) {
        if (manager.deleteJob(jobKey)) {
          jobDeleted = true;
        }
      }
    }

    // Unless statuses were specifically supplied, only attempt to kill active tasks.
    final Query.Builder taskQuery =
        query.get().isSetStatuses() ? query : query.byStatus(ACTIVE_STATES);

    int tasksAffected =
        storage.write(
            new MutateWork.Quiet<Integer>() {
              @Override
              public Integer apply(MutableStoreProvider storeProvider) {
                int total = 0;
                for (String taskId :
                    Tasks.ids(storeProvider.getTaskStore().fetchTasks(taskQuery))) {
                  boolean changed =
                      stateManager.changeState(
                          taskId,
                          Optional.<ScheduleStatus>absent(),
                          KILLING,
                          Optional.of("Killed by " + user));

                  if (changed) {
                    total++;
                  }
                }
                return total;
              }
            });

    if (!jobDeleted && (tasksAffected == 0)) {
      throw new ScheduleException("No jobs to kill");
    }
  }

  @Override
  public void restartShards(IJobKey jobKey, final Set<Integer> shards, final String requestingUser)
      throws ScheduleException {

    if (!JobKeys.isValid(jobKey)) {
      throw new ScheduleException("Invalid job key: " + jobKey);
    }

    if (shards.isEmpty()) {
      throw new ScheduleException("At least one shard must be specified.");
    }

    final Query.Builder query = Query.instanceScoped(jobKey, shards).active();
    storage.write(
        new MutateWork.NoResult<ScheduleException>() {
          @Override
          protected void execute(MutableStoreProvider storeProvider) throws ScheduleException {

            Set<IScheduledTask> matchingTasks = storeProvider.getTaskStore().fetchTasks(query);
            if (matchingTasks.size() != shards.size()) {
              throw new ScheduleException("Not all requested shards are active.");
            }
            LOG.info("Restarting shards matching " + query);
            for (String taskId : Tasks.ids(matchingTasks)) {
              stateManager.changeState(
                  taskId,
                  Optional.<ScheduleStatus>absent(),
                  RESTARTING,
                  Optional.of("Restarted by " + requestingUser));
            }
          }
        });
  }
}