Example #1
  /**
   * Checks the validity of, and populates defaults in, a job configuration. This returns a deep
   * copy of the provided job configuration with default configuration values applied and
   * configuration map values sanitized and applied to their respective struct fields.
   *
   * @param job Job to validate and populate.
   * @return A deep copy of {@code job} that has been populated.
   * @throws TaskDescriptionException If the job configuration is invalid.
   */
  public IJobConfiguration validateAndPopulate(IJobConfiguration job)
      throws TaskDescriptionException {

    requireNonNull(job);

    if (!job.isSetTaskConfig()) {
      throw new TaskDescriptionException("Job configuration must have taskConfig set.");
    }

    if (job.getInstanceCount() <= 0) {
      throw new TaskDescriptionException("Instance count must be positive.");
    }

    JobConfiguration builder = job.newBuilder();

    if (!JobKeys.isValid(job.getKey())) {
      throw new TaskDescriptionException("Job key " + job.getKey() + " is invalid.");
    }

    if (job.isSetOwner() && !UserProvidedStrings.isGoodIdentifier(job.getOwner().getUser())) {
      throw new TaskDescriptionException(
          "Job user contains illegal characters: " + job.getOwner().getUser());
    }

    builder.setTaskConfig(
        validateAndPopulate(ITaskConfig.build(builder.getTaskConfig())).newBuilder());

    // Only one of [service=true, cron_schedule] may be set.
    if (!Strings.isNullOrEmpty(job.getCronSchedule()) && builder.getTaskConfig().isIsService()) {
      throw new TaskDescriptionException(
          "A service task may not be run on a cron schedule: " + builder);
    }

    return IJobConfiguration.build(builder);
  }
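A minimal caller sketch of the contract described in the Javadoc above; the configurationManager, rawJob, store, and managerId names are hypothetical and not part of the example:

try {
  IJobConfiguration populated = configurationManager.validateAndPopulate(rawJob);
  // rawJob is left untouched; populated is a fully defaulted deep copy that is safe to persist.
  store.saveAcceptedJob(managerId, populated);
} catch (TaskDescriptionException e) {
  // The configuration was rejected; surface the reason to the caller.
  LOG.info("Rejected job configuration: " + e.getMessage());
}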
Example #2
  @Test
  public void testJobStore() {
    assertNull(store.fetchJob(MANAGER_1, JobKeys.from("nobody", "nowhere", "noname")).orNull());
    assertEquals(ImmutableSet.<IJobConfiguration>of(), store.fetchJobs(MANAGER_1));
    assertEquals(ImmutableSet.<String>of(), store.fetchManagerIds());

    store.saveAcceptedJob(MANAGER_1, JOB_A);
    assertEquals(JOB_A, store.fetchJob(MANAGER_1, KEY_A).orNull());
    assertEquals(ImmutableSet.of(JOB_A), store.fetchJobs(MANAGER_1));

    store.saveAcceptedJob(MANAGER_1, JOB_B);
    assertEquals(JOB_B, store.fetchJob(MANAGER_1, KEY_B).orNull());
    assertEquals(ImmutableSet.of(JOB_A, JOB_B), store.fetchJobs(MANAGER_1));
    assertEquals(ImmutableSet.of(MANAGER_1), store.fetchManagerIds());

    store.saveAcceptedJob(MANAGER_2, JOB_B);
    assertEquals(JOB_B, store.fetchJob(MANAGER_1, KEY_B).orNull());
    assertEquals(ImmutableSet.of(JOB_B), store.fetchJobs(MANAGER_2));
    assertEquals(ImmutableSet.of(MANAGER_1, MANAGER_2), store.fetchManagerIds());

    store.removeJob(KEY_B);
    assertEquals(ImmutableSet.of(JOB_A), store.fetchJobs(MANAGER_1));
    assertEquals(ImmutableSet.<IJobConfiguration>of(), store.fetchJobs(MANAGER_2));

    store.deleteJobs();
    assertEquals(ImmutableSet.<IJobConfiguration>of(), store.fetchJobs(MANAGER_1));
    assertEquals(ImmutableSet.<IJobConfiguration>of(), store.fetchJobs(MANAGER_2));
  }
 private void instanceChanged(final IInstanceKey instance, final Optional<IScheduledTask> state) {
   storage.write(
       (NoResult.Quiet)
           storeProvider -> {
             IJobKey job = instance.getJobKey();
             UpdateFactory.Update update = updates.get(job);
             if (update != null) {
               if (update.getUpdater().containsInstance(instance.getInstanceId())) {
                 LOG.info("Forwarding task change for " + InstanceKeys.toString(instance));
                 try {
                   evaluateUpdater(
                       storeProvider,
                       update,
                       getOnlyMatch(storeProvider.getJobUpdateStore(), queryActiveByJob(job)),
                       ImmutableMap.of(instance.getInstanceId(), state));
                 } catch (UpdateStateException e) {
                   throw Throwables.propagate(e);
                 }
               } else {
                 LOG.info(
                     "Instance "
                         + instance
                         + " is not part of active update for "
                         + JobKeys.canonicalString(job));
               }
             }
           });
 }
  @Override
  public void restartShards(IJobKey jobKey, final Set<Integer> shards, final String requestingUser)
      throws ScheduleException {

    if (!JobKeys.isValid(jobKey)) {
      throw new ScheduleException("Invalid job key: " + jobKey);
    }

    if (shards.isEmpty()) {
      throw new ScheduleException("At least one shard must be specified.");
    }

    final Query.Builder query = Query.instanceScoped(jobKey, shards).active();
    storage.write(
        new MutateWork.NoResult<ScheduleException>() {
          @Override
          protected void execute(MutableStoreProvider storeProvider) throws ScheduleException {

            Set<IScheduledTask> matchingTasks = storeProvider.getTaskStore().fetchTasks(query);
            if (matchingTasks.size() != shards.size()) {
              throw new ScheduleException("Not all requested shards are active.");
            }
            LOG.info("Restarting shards matching " + query);
            for (String taskId : Tasks.ids(matchingTasks)) {
              stateManager.changeState(
                  taskId,
                  Optional.<ScheduleStatus>absent(),
                  RESTARTING,
                  Optional.of("Restarted by " + requestingUser));
            }
          }
        });
  }
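For context, a hypothetical invocation of restartShards; the scheduler variable and the concrete job key are illustrative:

// Restart instances 0 and 1; throws ScheduleException if any requested shard is not active.
scheduler.restartShards(
    JobKeys.from("www-data", "prod", "hello_world"), ImmutableSet.of(0, 1), "ops-user");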
 private IJobConfiguration makeJob() {
   return IJobConfiguration.build(
       new JobConfiguration()
           .setOwner(new Identity(OWNER, OWNER))
           .setKey(JobKeys.from(OWNER, ENVIRONMENT, JOB_NAME).newBuilder())
           .setCronSchedule("1 1 1 1 1")
           .setTaskConfig(defaultTask())
           .setInstanceCount(1));
 }
  @Override
  public synchronized void startCronJob(IJobKey jobKey)
      throws ScheduleException, TaskDescriptionException {

    checkNotNull(jobKey);

    if (!cronScheduler.hasJob(jobKey)) {
      throw new ScheduleException("Cron job does not exist for " + JobKeys.toPath(jobKey));
    }

    cronScheduler.startJobNow(jobKey);
  }
Example #7
  /**
   * Dumps a cron job struct.
   *
   * @return HTTP response.
   */
  @GET
  @Path("/cron/{role}/{environment}/{job}")
  @Produces(MediaType.TEXT_HTML)
  public Response dump(
      @PathParam("role") final String role,
      @PathParam("environment") final String environment,
      @PathParam("job") final String job) {

    final IJobKey jobKey = JobKeys.from(role, environment, job);
    return dumpEntity(
        "Cron job " + JobKeys.canonicalString(jobKey),
        new Work.Quiet<Optional<? extends TBase<?, ?>>>() {
          @Override
          public Optional<JobConfiguration> apply(StoreProvider storeProvider) {
            return storeProvider
                .getCronJobStore()
                .fetchJob(jobKey)
                .transform(IJobConfiguration::newBuilder);
          }
        });
  }
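Since only a single method is overridden here, the anonymous Work.Quiet could likely be written as a cast lambda, mirroring the style of the storage.write call in the earlier example; this is a sketch only, assuming Work.Quiet is a functional interface:

return dumpEntity(
    "Cron job " + JobKeys.canonicalString(jobKey),
    (Work.Quiet<Optional<? extends TBase<?, ?>>>)
        storeProvider -> storeProvider
            .getCronJobStore()
            .fetchJob(jobKey)
            .transform(IJobConfiguration::newBuilder));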
  @Override
  public synchronized void killTasks(Query.Builder query, final String user)
      throws ScheduleException {

    checkNotNull(query);
    LOG.info("Killing tasks matching " + query);

    boolean jobDeleted = false;

    if (Query.isOnlyJobScoped(query)) {
      // If this looks like a query for all tasks in a job, instruct the scheduler modules to
      // delete the job.
      IJobKey jobKey = JobKeys.from(query).get();
      for (JobManager manager : jobManagers) {
        if (manager.deleteJob(jobKey)) {
          jobDeleted = true;
        }
      }
    }

    // Unless statuses were specifically supplied, only attempt to kill active tasks.
    final Query.Builder taskQuery =
        query.get().isSetStatuses() ? query : query.byStatus(ACTIVE_STATES);

    int tasksAffected =
        storage.write(
            new MutateWork.Quiet<Integer>() {
              @Override
              public Integer apply(MutableStoreProvider storeProvider) {
                int total = 0;
                for (String taskId :
                    Tasks.ids(storeProvider.getTaskStore().fetchTasks(taskQuery))) {
                  boolean changed =
                      stateManager.changeState(
                          taskId,
                          Optional.<ScheduleStatus>absent(),
                          KILLING,
                          Optional.of("Killed by " + user));

                  if (changed) {
                    total++;
                  }
                }
                return total;
              }
            });

    if (!jobDeleted && (tasksAffected == 0)) {
      throw new ScheduleException("No jobs to kill");
    }
  }
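A hedged usage sketch: killing every active task of one job. The scheduler variable and user string are illustrative; per the comment above, a purely job-scoped query also asks the job managers to delete the job:

scheduler.killTasks(
    Query.jobScoped(JobKeys.from("www-data", "prod", "hello_world")), "admin");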
  @Test
  public void testEmptyConfigSummary() throws Exception {
    IJobKey key = JobKeys.from("test", "test", "test");

    storageUtil.expectTaskFetch(Query.jobScoped(key).active(), ImmutableSet.of());

    ConfigSummary summary =
        new ConfigSummary().setKey(key.newBuilder()).setGroups(Sets.newHashSet());

    ConfigSummaryResult expected = new ConfigSummaryResult().setSummary(summary);

    control.replay();

    Response response = assertOkResponse(thrift.getConfigSummary(key.newBuilder()));
    assertEquals(expected, response.getResult().getConfigSummaryResult());
  }
  @Test
  public void testGetConfigSummary() throws Exception {
    IJobKey key = JobKeys.from("test", "test", "test");

    TaskConfig firstGroupTask = defaultTask(true);
    TaskConfig secondGroupTask = defaultTask(true).setNumCpus(2);

    IScheduledTask first1 =
        IScheduledTask.build(
            new ScheduledTask()
                .setAssignedTask(new AssignedTask().setTask(firstGroupTask).setInstanceId(0)));

    IScheduledTask first2 =
        IScheduledTask.build(
            new ScheduledTask()
                .setAssignedTask(new AssignedTask().setTask(firstGroupTask).setInstanceId(1)));

    IScheduledTask second =
        IScheduledTask.build(
            new ScheduledTask()
                .setAssignedTask(new AssignedTask().setTask(secondGroupTask).setInstanceId(2)));

    storageUtil.expectTaskFetch(Query.jobScoped(key).active(), first1, first2, second);

    ConfigGroup group1 =
        new ConfigGroup()
            .setConfig(firstGroupTask)
            .setInstances(IRange.toBuildersSet(convertRanges(toRanges(ImmutableSet.of(0, 1)))));
    ConfigGroup group2 =
        new ConfigGroup()
            .setConfig(secondGroupTask)
            .setInstances(IRange.toBuildersSet(convertRanges(toRanges(ImmutableSet.of(2)))));

    ConfigSummary summary =
        new ConfigSummary().setKey(key.newBuilder()).setGroups(Sets.newHashSet(group1, group2));

    ConfigSummaryResult expected = new ConfigSummaryResult().setSummary(summary);

    control.replay();

    Response response = assertOkResponse(thrift.getConfigSummary(key.newBuilder()));
    assertEquals(
        IConfigSummaryResult.build(expected),
        IConfigSummaryResult.build(response.getResult().getConfigSummaryResult()));
  }
Example #11
  /**
   * Checks the validity of, and populates defaults in, a job configuration. This returns a deep
   * copy of the provided job configuration with default configuration values applied and
   * configuration map values sanitized and applied to their respective struct fields.
   *
   * @param job Job to validate and populate.
   * @return A deep copy of {@code job} that has been populated.
   * @throws TaskDescriptionException If the job configuration is invalid.
   */
  public static IJobConfiguration validateAndPopulate(IJobConfiguration job)
      throws TaskDescriptionException {

    Objects.requireNonNull(job);

    if (!job.isSetTaskConfig()) {
      throw new TaskDescriptionException("Job configuration must have taskConfig set.");
    }

    if (job.getInstanceCount() <= 0) {
      throw new TaskDescriptionException("Instance count must be positive.");
    }

    JobConfiguration builder = job.newBuilder();

    if (!JobKeys.isValid(job.getKey())) {
      throw new TaskDescriptionException("Job key " + job.getKey() + " is invalid.");
    }

    if (job.isSetOwner()) {
      assertOwnerValidity(job.getOwner());

      if (!job.getKey().getRole().equals(job.getOwner().getRole())) {
        throw new TaskDescriptionException("Role in job key must match job owner.");
      }
    }

    builder.setTaskConfig(
        validateAndPopulate(ITaskConfig.build(builder.getTaskConfig())).newBuilder());

    // Only one of [service=true, cron_schedule] may be set.
    if (!Strings.isNullOrEmpty(job.getCronSchedule()) && builder.getTaskConfig().isIsService()) {
      throw new TaskDescriptionException(
          "A service task may not be run on a cron schedule: " + builder);
    }

    return IJobConfiguration.build(builder);
  }
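The cron/service exclusivity enforced at the end of this method can be exercised with a small JUnit sketch along these lines; it assumes the static method lives on a ConfigurationManager class and borrows the makeJob() helper shown in the fixtures example further below:

@Test(expected = TaskDescriptionException.class)
public void testServiceTaskMayNotBeCron() throws Exception {
  // A job that is both a long-running service and cron-scheduled must be rejected.
  JobConfiguration job = makeJob().setCronSchedule("* * * * *");
  job.getTaskConfig().setIsService(true);
  ConfigurationManager.validateAndPopulate(IJobConfiguration.build(job));
}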
  @Test
  public void testCreateAndRestoreNewSnapshot() {
    ImmutableSet<IScheduledTask> tasks =
        ImmutableSet.of(
            IScheduledTask.build(new ScheduledTask().setStatus(ScheduleStatus.PENDING)));
    Set<QuotaConfiguration> quotas =
        ImmutableSet.of(new QuotaConfiguration("steve", ResourceAggregates.none().newBuilder()));
    IHostAttributes attribute =
        IHostAttributes.build(
            new HostAttributes(
                "host", ImmutableSet.of(new Attribute("attr", ImmutableSet.of("value")))));
    StoredJob job =
        new StoredJob(
            "jobManager", new JobConfiguration().setKey(new JobKey("owner", "env", "name")));
    String frameworkId = "framework_id";
    ILock lock =
        ILock.build(
            new Lock()
                .setKey(LockKey.job(JobKeys.from("testRole", "testEnv", "testJob").newBuilder()))
                .setToken("lockId")
                .setUser("testUser")
                .setTimestampMs(12345L));
    SchedulerMetadata metadata =
        new SchedulerMetadata().setFrameworkId(frameworkId).setVersion(CURRENT_API_VERSION);

    storageUtil.expectOperations();
    expect(storageUtil.taskStore.fetchTasks(Query.unscoped())).andReturn(tasks);
    expect(storageUtil.quotaStore.fetchQuotas())
        .andReturn(ImmutableMap.of("steve", ResourceAggregates.none()));
    expect(storageUtil.attributeStore.getHostAttributes()).andReturn(ImmutableSet.of(attribute));
    expect(storageUtil.jobStore.fetchManagerIds()).andReturn(ImmutableSet.of("jobManager"));
    expect(storageUtil.jobStore.fetchJobs("jobManager"))
        .andReturn(ImmutableSet.of(IJobConfiguration.build(job.getJobConfiguration())));
    expect(storageUtil.schedulerStore.fetchFrameworkId()).andReturn(frameworkId);
    expect(storageUtil.lockStore.fetchLocks()).andReturn(ImmutableSet.of(lock));

    expectDataWipe();
    storageUtil.taskStore.saveTasks(tasks);
    storageUtil.quotaStore.saveQuota("steve", ResourceAggregates.none());
    storageUtil.attributeStore.saveHostAttributes(attribute);
    storageUtil.jobStore.saveAcceptedJob(
        job.getJobManagerId(), IJobConfiguration.build(job.getJobConfiguration()));
    storageUtil.schedulerStore.saveFrameworkId(frameworkId);
    storageUtil.lockStore.saveLock(lock);

    control.replay();

    Snapshot expected =
        new Snapshot()
            .setTimestamp(NOW)
            .setTasks(IScheduledTask.toBuildersSet(tasks))
            .setQuotaConfigurations(quotas)
            .setHostAttributes(ImmutableSet.of(attribute.newBuilder()))
            .setJobs(ImmutableSet.of(job))
            .setSchedulerMetadata(metadata)
            .setLocks(ILock.toBuildersSet(ImmutableSet.of(lock)));

    assertEquals(expected, snapshotStore.createSnapshot());

    snapshotStore.applySnapshot(expected);
  }
public class SchedulingFilterImplTest extends EasyMockTest {
  private static final String HOST_A = "hostA";
  private static final String HOST_B = "hostB";
  private static final String HOST_C = "hostC";

  private static final String RACK_A = "rackA";
  private static final String RACK_B = "rackB";

  private static final String RACK_ATTRIBUTE = "rack";
  private static final String HOST_ATTRIBUTE = "host";

  private static final IJobKey JOB_A = JobKeys.from("roleA", "env", "jobA");
  private static final IJobKey JOB_B = JobKeys.from("roleB", "env", "jobB");

  private static final int DEFAULT_CPUS = 4;
  private static final long DEFAULT_RAM = 1000;
  private static final long DEFAULT_DISK = 2000;
  private static final ResourceSlot DEFAULT_OFFER =
      Resources.from(Offers.createOffer(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK, Pair.of(80, 80)))
          .slot();

  private SchedulingFilter defaultFilter;

  @Before
  public void setUp() {
    defaultFilter = new SchedulingFilterImpl(TaskExecutors.NO_OVERHEAD_EXECUTOR);
  }

  @Test
  public void testMeetsOffer() {
    control.replay();

    IHostAttributes attributes = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));
    assertNoVetoes(makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK), attributes);
    assertNoVetoes(makeTask(DEFAULT_CPUS - 1, DEFAULT_RAM - 1, DEFAULT_DISK - 1), attributes);
  }

  @Test
  public void testSufficientPorts() {
    control.replay();

    ResourceSlot twoPorts =
        Resources.from(Offers.createOffer(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK, Pair.of(80, 81)))
            .slot();

    ITaskConfig noPortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of()));
    ITaskConfig onePortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of("one")));
    ITaskConfig twoPortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of("one", "two")));
    ITaskConfig threePortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of("one", "two", "three")));

    Set<Veto> none = ImmutableSet.of();
    IHostAttributes hostA = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));
    assertEquals(
        none,
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(noPortTask, EMPTY)));
    assertEquals(
        none,
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(onePortTask, EMPTY)));
    assertEquals(
        none,
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(twoPortTask, EMPTY)));
    assertEquals(
        ImmutableSet.of(PORTS.veto(1)),
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(threePortTask, EMPTY)));
  }

  @Test
  public void testInsufficientResources() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));
    assertVetoes(
        makeTask(DEFAULT_CPUS + 1, DEFAULT_RAM + 1, DEFAULT_DISK + 1),
        hostA,
        CPU.veto(1),
        DISK.veto(1),
        RAM.veto(1));
    assertVetoes(makeTask(DEFAULT_CPUS + 1, DEFAULT_RAM, DEFAULT_DISK), hostA, CPU.veto(1));
    assertVetoes(makeTask(DEFAULT_CPUS, DEFAULT_RAM + 1, DEFAULT_DISK), hostA, RAM.veto(1));
    assertVetoes(makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK + 1), hostA, DISK.veto(1));
  }

  @Test
  public void testDedicatedRole() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, dedicated(JOB_A.getRole()));
    checkConstraint(hostA, DEDICATED_ATTRIBUTE, true, JOB_A.getRole());
    assertVetoes(makeTask(JOB_B), hostA, Veto.dedicatedHostConstraintMismatch());
  }

  @Test
  public void testSharedDedicatedHost() {
    control.replay();

    String dedicated1 = dedicatedFor(JOB_A);
    String dedicated2 = dedicatedFor(JOB_B);
    IHostAttributes hostA = hostAttributes(HOST_A, dedicated(dedicated1, dedicated2));
    assertNoVetoes(checkConstraint(JOB_A, hostA, DEDICATED_ATTRIBUTE, true, dedicated1), hostA);
    assertNoVetoes(checkConstraint(JOB_B, hostA, DEDICATED_ATTRIBUTE, true, dedicated2), hostA);
  }

  @Test
  public void testMultiValuedAttributes() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, valueAttribute("jvm", "1.0", "2.0", "3.0"));
    checkConstraint(hostA, "jvm", true, "1.0");
    checkConstraint(hostA, "jvm", false, "4.0");

    checkConstraint(hostA, "jvm", true, "1.0", "2.0");
    IHostAttributes hostB = hostAttributes(HOST_A, valueAttribute("jvm", "1.0"));
    checkConstraint(hostB, "jvm", false, "2.0", "3.0");
  }

  @Test
  public void testHostScheduledForMaintenance() {
    control.replay();

    assertNoVetoes(
        makeTask(), hostAttributes(HOST_A, MaintenanceMode.SCHEDULED, host(HOST_A), rack(RACK_A)));
  }

  @Test
  public void testHostDrainingForMaintenance() {
    control.replay();

    assertVetoes(
        makeTask(),
        hostAttributes(HOST_A, MaintenanceMode.DRAINING, host(HOST_A), rack(RACK_A)),
        Veto.maintenance("draining"));
  }

  @Test
  public void testHostDrainedForMaintenance() {
    control.replay();

    assertVetoes(
        makeTask(),
        hostAttributes(HOST_A, MaintenanceMode.DRAINED, host(HOST_A), rack(RACK_A)),
        Veto.maintenance("drained"));
  }

  @Test
  public void testMultipleTaskConstraints() {
    control.replay();

    Constraint constraint1 = makeConstraint("host", HOST_A);
    Constraint constraint2 = makeConstraint(DEDICATED_ATTRIBUTE, "xxx");

    assertVetoes(
        makeTask(JOB_A, constraint1, constraint2),
        hostAttributes(HOST_A, dedicated(HOST_A), host(HOST_A)),
        Veto.constraintMismatch(DEDICATED_ATTRIBUTE));
    assertNoVetoes(
        makeTask(JOB_B, constraint1, constraint2),
        hostAttributes(HOST_B, dedicated("xxx"), host(HOST_A)));
  }

  @Test
  public void testDedicatedMismatchShortCircuits() {
    // Ensures that a dedicated mismatch short-circuits other filter operations, such as
    // evaluation of limit constraints.  Reduction of task queries is the desired outcome.

    control.replay();

    Constraint hostLimit = limitConstraint("host", 1);
    assertVetoes(
        makeTask(JOB_A, hostLimit, makeConstraint(DEDICATED_ATTRIBUTE, "xxx")),
        hostAttributes(HOST_A, host(HOST_A)),
        Veto.constraintMismatch(DEDICATED_ATTRIBUTE));
    assertVetoes(
        makeTask(JOB_A, hostLimit, makeConstraint(DEDICATED_ATTRIBUTE, "xxx")),
        hostAttributes(HOST_B, dedicated(dedicatedFor(JOB_B)), host(HOST_B)),
        Veto.constraintMismatch(DEDICATED_ATTRIBUTE));
  }

  @Test
  public void testUnderLimitNoTasks() {
    control.replay();

    assertNoVetoes(hostLimitTask(2), hostAttributes(HOST_A, host(HOST_A)));
  }

  private IAttribute host(String host) {
    return valueAttribute(HOST_ATTRIBUTE, host);
  }

  private IAttribute rack(String rack) {
    return valueAttribute(RACK_ATTRIBUTE, rack);
  }

  private IAttribute dedicated(String value, String... values) {
    return valueAttribute(DEDICATED_ATTRIBUTE, value, values);
  }

  private String dedicatedFor(IJobKey job) {
    return job.getRole() + "/" + job.getName();
  }

  @Test
  public void testLimitWithinJob() throws Exception {
    control.replay();

    AttributeAggregate stateA =
        AttributeAggregate.create(
            Suppliers.ofInstance(
                ImmutableList.of(
                    host(HOST_A),
                    rack(RACK_A),
                    host(HOST_B),
                    rack(RACK_A),
                    host(HOST_B),
                    rack(RACK_A),
                    host(HOST_C),
                    rack(RACK_B))));
    AttributeAggregate stateB =
        AttributeAggregate.create(
            Suppliers.ofInstance(
                ImmutableList.of(
                    host(HOST_A),
                    rack(RACK_A),
                    host(HOST_A),
                    rack(RACK_A),
                    host(HOST_B),
                    rack(RACK_A))));

    IHostAttributes hostA = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));
    IHostAttributes hostB = hostAttributes(HOST_B, host(HOST_B), rack(RACK_A));
    IHostAttributes hostC = hostAttributes(HOST_C, host(HOST_C), rack(RACK_B));
    assertNoVetoes(hostLimitTask(JOB_A, 2), hostA, stateA);
    assertVetoes(hostLimitTask(JOB_A, 1), hostB, stateA, Veto.unsatisfiedLimit(HOST_ATTRIBUTE));
    assertVetoes(hostLimitTask(JOB_A, 2), hostB, stateA, Veto.unsatisfiedLimit(HOST_ATTRIBUTE));
    assertNoVetoes(hostLimitTask(JOB_A, 3), hostB, stateA);

    assertVetoes(rackLimitTask(JOB_A, 2), hostB, stateB, Veto.unsatisfiedLimit(RACK_ATTRIBUTE));
    assertVetoes(rackLimitTask(JOB_A, 3), hostB, stateB, Veto.unsatisfiedLimit(RACK_ATTRIBUTE));
    assertNoVetoes(rackLimitTask(JOB_A, 4), hostB, stateB);

    assertNoVetoes(rackLimitTask(JOB_A, 1), hostC, stateB);

    assertVetoes(rackLimitTask(JOB_A, 1), hostC, stateA, Veto.unsatisfiedLimit(RACK_ATTRIBUTE));
    assertNoVetoes(rackLimitTask(JOB_A, 2), hostC, stateB);
  }

  @Test
  public void testAttribute() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, valueAttribute("jvm", "1.0"));

    // Matches attribute, matching value.
    checkConstraint(hostA, "jvm", true, "1.0");

    // Matches attribute, different value.
    checkConstraint(hostA, "jvm", false, "1.4");

    // Does not match attribute.
    checkConstraint(hostA, "xxx", false, "1.4");

    // Logical 'OR' matching attribute.
    checkConstraint(hostA, "jvm", false, "1.2", "1.4");

    // Logical 'OR' not matching attribute.
    checkConstraint(hostA, "xxx", false, "1.0", "1.4");
  }

  @Test
  public void testAttributes() {
    control.replay();

    IHostAttributes hostA =
        hostAttributes(
            HOST_A,
            valueAttribute("jvm", "1.4", "1.6", "1.7"),
            valueAttribute("zone", "a", "b", "c"));

    // Matches attribute, matching value.
    checkConstraint(hostA, "jvm", true, "1.4");

    // Matches attribute, different value.
    checkConstraint(hostA, "jvm", false, "1.0");

    // Does not match attribute.
    checkConstraint(hostA, "xxx", false, "1.4");

    // Logical 'OR' with attribute and value match.
    checkConstraint(hostA, "jvm", true, "1.2", "1.4");

    // Does not match attribute.
    checkConstraint(hostA, "xxx", false, "1.0", "1.4");

    // Check that logical AND works.
    Constraint jvmConstraint = makeConstraint("jvm", "1.6");
    Constraint zoneConstraint = makeConstraint("zone", "c");

    ITaskConfig task = makeTask(JOB_A, jvmConstraint, zoneConstraint);
    assertEquals(
        ImmutableSet.of(),
        defaultFilter.filter(
            new UnusedResource(DEFAULT_OFFER, hostA), new ResourceRequest(task, EMPTY)));

    Constraint jvmNegated = jvmConstraint.deepCopy();
    jvmNegated.getConstraint().getValue().setNegated(true);
    Constraint zoneNegated = jvmConstraint.deepCopy();
    zoneNegated.getConstraint().getValue().setNegated(true);
    assertVetoes(makeTask(JOB_A, jvmNegated, zoneNegated), hostA, Veto.constraintMismatch("jvm"));
  }

  @Test
  public void testVetoScaling() {
    control.replay();

    int maxScore = VetoType.INSUFFICIENT_RESOURCES.getScore();
    assertEquals((int) (maxScore * 1.0 / CPU.getRange()), CPU.veto(1).getScore());
    assertEquals(maxScore, CPU.veto(CPU.getRange() * 10).getScore());
    assertEquals((int) (maxScore * 2.0 / RAM.getRange()), RAM.veto(2).getScore());
    assertEquals((int) (maxScore * 200.0 / DISK.getRange()), DISK.veto(200).getScore());
  }

  @Test
  public void testDuplicatedAttribute() {
    control.replay();

    IHostAttributes hostA =
        hostAttributes(HOST_A, valueAttribute("jvm", "1.4"), valueAttribute("jvm", "1.6", "1.7"));

    // Matches attribute, matching value.
    checkConstraint(hostA, "jvm", true, "1.4");
    checkConstraint(hostA, "jvm", true, "1.6");
    checkConstraint(hostA, "jvm", true, "1.7");
    checkConstraint(hostA, "jvm", true, "1.6", "1.7");
  }

  @Test
  public void testVetoGroups() {
    control.replay();

    assertEquals(VetoGroup.EMPTY, Veto.identifyGroup(ImmutableSet.of()));

    assertEquals(
        VetoGroup.STATIC,
        Veto.identifyGroup(
            ImmutableSet.of(
                Veto.constraintMismatch("denied"),
                Veto.insufficientResources("ram", 100),
                Veto.maintenance("draining"))));

    assertEquals(
        VetoGroup.DYNAMIC, Veto.identifyGroup(ImmutableSet.of(Veto.unsatisfiedLimit("denied"))));

    assertEquals(
        VetoGroup.MIXED,
        Veto.identifyGroup(
            ImmutableSet.of(
                Veto.insufficientResources("ram", 100), Veto.unsatisfiedLimit("denied"))));
  }

  private ITaskConfig checkConstraint(
      IHostAttributes hostAttributes,
      String constraintName,
      boolean expected,
      String value,
      String... vs) {

    return checkConstraint(JOB_A, hostAttributes, constraintName, expected, value, vs);
  }

  private ITaskConfig checkConstraint(
      IJobKey job,
      IHostAttributes hostAttributes,
      String constraintName,
      boolean expected,
      String value,
      String... vs) {

    return checkConstraint(
        job,
        EMPTY,
        hostAttributes,
        constraintName,
        expected,
        new ValueConstraint(
            false, ImmutableSet.<String>builder().add(value).addAll(Arrays.asList(vs)).build()));
  }

  private ITaskConfig checkConstraint(
      IJobKey job,
      AttributeAggregate aggregate,
      IHostAttributes hostAttributes,
      String constraintName,
      boolean expected,
      ValueConstraint value) {

    Constraint constraint = new Constraint(constraintName, TaskConstraint.value(value));
    ITaskConfig task = makeTask(job, constraint);
    assertEquals(
        expected,
        defaultFilter
            .filter(
                new UnusedResource(DEFAULT_OFFER, hostAttributes),
                new ResourceRequest(task, aggregate))
            .isEmpty());

    Constraint negated = constraint.deepCopy();
    negated.getConstraint().getValue().setNegated(!value.isNegated());
    ITaskConfig negatedTask = makeTask(job, negated);
    assertEquals(
        !expected,
        defaultFilter
            .filter(
                new UnusedResource(DEFAULT_OFFER, hostAttributes),
                new ResourceRequest(negatedTask, aggregate))
            .isEmpty());
    return task;
  }

  private void assertNoVetoes(ITaskConfig task, IHostAttributes hostAttributes) {
    assertVetoes(task, hostAttributes, EMPTY);
  }

  private void assertNoVetoes(
      ITaskConfig task, IHostAttributes attributes, AttributeAggregate jobState) {

    assertVetoes(task, attributes, jobState);
  }

  private void assertVetoes(ITaskConfig task, IHostAttributes hostAttributes, Veto... vetoes) {
    assertVetoes(task, hostAttributes, EMPTY, vetoes);
  }

  private void assertVetoes(
      ITaskConfig task,
      IHostAttributes hostAttributes,
      AttributeAggregate jobState,
      Veto... vetoes) {

    assertEquals(
        ImmutableSet.copyOf(vetoes),
        defaultFilter.filter(
            new UnusedResource(DEFAULT_OFFER, hostAttributes),
            new ResourceRequest(task, jobState)));
  }

  private static IHostAttributes hostAttributes(
      String host, MaintenanceMode mode, IAttribute... attributes) {

    return IHostAttributes.build(
        new HostAttributes()
            .setHost(host)
            .setMode(mode)
            .setAttributes(IAttribute.toBuildersSet(ImmutableSet.copyOf(attributes))));
  }

  private static IHostAttributes hostAttributes(String host, IAttribute... attributes) {

    return hostAttributes(host, MaintenanceMode.NONE, attributes);
  }

  private IAttribute valueAttribute(String name, String string, String... strings) {
    return IAttribute.build(
        new Attribute(
            name,
            ImmutableSet.<String>builder().add(string).addAll(Arrays.asList(strings)).build()));
  }

  private static Constraint makeConstraint(String name, String... values) {
    return new Constraint(
        name, TaskConstraint.value(new ValueConstraint(false, ImmutableSet.copyOf(values))));
  }

  private Constraint limitConstraint(String name, int value) {
    return new Constraint(name, TaskConstraint.limit(new LimitConstraint(value)));
  }

  private ITaskConfig makeTask(IJobKey job, Constraint... constraint) {
    return ITaskConfig.build(
        makeTask(job, DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
            .newBuilder()
            .setConstraints(Sets.newHashSet(constraint)));
  }

  private ITaskConfig hostLimitTask(IJobKey job, int maxPerHost) {
    return makeTask(job, limitConstraint(HOST_ATTRIBUTE, maxPerHost));
  }

  private ITaskConfig hostLimitTask(int maxPerHost) {
    return hostLimitTask(JOB_A, maxPerHost);
  }

  private ITaskConfig rackLimitTask(IJobKey job, int maxPerRack) {
    return makeTask(job, limitConstraint(RACK_ATTRIBUTE, maxPerRack));
  }

  private ITaskConfig makeTask(IJobKey job, int cpus, long ramMb, long diskMb) {
    return ITaskConfig.build(
        new TaskConfig()
            .setJob(job.newBuilder())
            .setNumCpus(cpus)
            .setRamMb(ramMb)
            .setDiskMb(diskMb)
            .setExecutorConfig(new ExecutorConfig("aurora", "config")));
  }

  private ITaskConfig makeTask(int cpus, long ramMb, long diskMb) {
    return makeTask(JOB_A, cpus, ramMb, diskMb);
  }

  private ITaskConfig makeTask() {
    return makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK);
  }
}
Example #14
 private static String formatLockKey(ILockKey lockKey) {
   return lockKey.getSetField() == _Fields.JOB
       ? JobKeys.canonicalString(lockKey.getJob())
       : "Unknown lock key type: " + lockKey.getSetField();
 }
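For illustration, a hypothetical call; the canonical format is assumed to be role/env/name, matching JobKeys.canonicalString:

ILockKey key =
    ILockKey.build(LockKey.job(JobKeys.from("www-data", "prod", "hello_world").newBuilder()));
String formatted = formatLockKey(key);  // expected: "www-data/prod/hello_world"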
Example #15
final class Fixtures {
  static final String ROLE = "bar_role";
  static final String USER = "******";
  static final Identity ROLE_IDENTITY = new Identity(ROLE, USER);
  static final String JOB_NAME = "job_foo";
  static final IJobKey JOB_KEY = JobKeys.from(ROLE, "devel", JOB_NAME);
  static final ILockKey LOCK_KEY = ILockKey.build(LockKey.job(JOB_KEY.newBuilder()));
  static final ILock LOCK = ILock.build(new Lock().setKey(LOCK_KEY.newBuilder()).setToken("token"));
  static final JobConfiguration CRON_JOB = makeJob().setCronSchedule("* * * * *");
  static final String TASK_ID = "task_id";
  static final String UPDATE_ID = "82d6d790-3212-11e3-aa6e-0800200c9a74";
  static final IJobUpdateKey UPDATE_KEY =
      IJobUpdateKey.build(new JobUpdateKey(JOB_KEY.newBuilder(), UPDATE_ID));
  static final UUID UU_ID = UUID.fromString(UPDATE_ID);
  private static final Function<String, ResponseDetail> MESSAGE_TO_DETAIL =
      new Function<String, ResponseDetail>() {
        @Override
        public ResponseDetail apply(String message) {
          return new ResponseDetail().setMessage(message);
        }
      };
  static final String CRON_SCHEDULE = "0 * * * *";
  static final IResourceAggregate QUOTA =
      IResourceAggregate.build(new ResourceAggregate(10.0, 1024, 2048));
  static final QuotaCheckResult ENOUGH_QUOTA = new QuotaCheckResult(SUFFICIENT_QUOTA);
  static final QuotaCheckResult NOT_ENOUGH_QUOTA = new QuotaCheckResult(INSUFFICIENT_QUOTA);

  private Fixtures() {
    // Utility class.
  }

  static JobConfiguration makeJob() {
    return makeJob(nonProductionTask(), 1);
  }

  static JobConfiguration makeJob(TaskConfig task, int shardCount) {
    return new JobConfiguration()
        .setOwner(ROLE_IDENTITY)
        .setInstanceCount(shardCount)
        .setTaskConfig(task)
        .setKey(JOB_KEY.newBuilder());
  }

  static TaskConfig defaultTask(boolean production) {
    return new TaskConfig()
        .setJob(JOB_KEY.newBuilder())
        .setOwner(new Identity(ROLE, USER))
        .setEnvironment("devel")
        .setJobName(JOB_NAME)
        .setContactEmail("*****@*****.**")
        .setExecutorConfig(new ExecutorConfig("aurora", "data"))
        .setNumCpus(1)
        .setRamMb(1024)
        .setDiskMb(1024)
        .setProduction(production)
        .setRequestedPorts(ImmutableSet.of())
        .setTaskLinks(ImmutableMap.of())
        .setMaxTaskFailures(1)
        .setContainer(Container.mesos(new MesosContainer()));
  }

  static TaskConfig nonProductionTask() {
    return defaultTask(false);
  }

  static Response jobSummaryResponse(Set<JobSummary> jobSummaries) {
    return okResponse(Result.jobSummaryResult(new JobSummaryResult().setSummaries(jobSummaries)));
  }

  static Response response(ResponseCode code, Optional<Result> result, String... messages) {
    Response response = Responses.empty().setResponseCode(code).setResult(result.orNull());
    if (messages.length > 0) {
      response.setDetails(
          FluentIterable.from(Arrays.asList(messages)).transform(MESSAGE_TO_DETAIL).toList());
    }

    return response;
  }

  static Response okResponse(Result result) {
    return response(OK, Optional.of(IResult.build(result).newBuilder()));
  }

  static JobConfiguration makeProdJob() {
    return makeJob(productionTask(), 1);
  }

  static TaskConfig productionTask() {
    return defaultTask(true);
  }

  static JobConfiguration makeJob(TaskConfig task) {
    return makeJob(task, 1);
  }

  static Iterable<IScheduledTask> makeDefaultScheduledTasks(int n) {
    return makeDefaultScheduledTasks(n, defaultTask(true));
  }

  static Iterable<IScheduledTask> makeDefaultScheduledTasks(int n, TaskConfig config) {
    List<IScheduledTask> tasks = Lists.newArrayList();
    for (int i = 0; i < n; i++) {
      tasks.add(
          IScheduledTask.build(
              new ScheduledTask()
                  .setAssignedTask(new AssignedTask().setTask(config).setInstanceId(i))));
    }

    return tasks;
  }

  static Response assertOkResponse(Response response) {
    return assertResponse(OK, response);
  }

  static Response assertResponse(ResponseCode expected, Response response) {
    assertEquals(expected, response.getResponseCode());
    return response;
  }
}
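A tiny sketch of how a test might lean on these fixtures; assertEquals is assumed to be statically imported, as in the other test examples:

// Build a production cron job entirely from the fixture values above.
JobConfiguration cronJob = makeProdJob().setCronSchedule(CRON_SCHEDULE);
assertEquals(JOB_KEY.newBuilder(), cronJob.getKey());
assertEquals(1, cronJob.getInstanceCount());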
Example #16
 private static IJobConfiguration makeJob(String name) {
   return IJobConfiguration.build(
       new JobConfiguration()
           .setKey(JobKeys.from("role-" + name, "env-" + name, name).newBuilder()));
 }
Example #17
  /**
   * Checks the validity of, and populates defaults in, a task configuration. This returns a deep
   * copy of the provided task configuration with default configuration values applied and
   * configuration map values sanitized and applied to their respective struct fields.
   *
   * @param config Task config to validate and populate.
   * @return A new task configuration built from {@code config} with defaults populated.
   * @throws TaskDescriptionException If the task is invalid.
   */
  public ITaskConfig validateAndPopulate(ITaskConfig config) throws TaskDescriptionException {
    TaskConfig builder = config.newBuilder();

    if (!builder.isSetRequestedPorts()) {
      builder.setRequestedPorts(ImmutableSet.of());
    }

    if (config.isSetTier() && !UserProvidedStrings.isGoodIdentifier(config.getTier())) {
      throw new TaskDescriptionException("Tier contains illegal characters: " + config.getTier());
    }

    try {
      tierManager.getTier(config);
    } catch (IllegalArgumentException e) {
      throw new TaskDescriptionException(e.getMessage(), e);
    }

    if (!JobKeys.isValid(config.getJob())) {
      // Job key is set but invalid
      throw new TaskDescriptionException("Job key " + config.getJob() + " is invalid.");
    }

    // A task must either have an executor configuration or specify a Docker container.
    if (!builder.isSetExecutorConfig()
        && !(builder.isSetContainer() && builder.getContainer().isSetDocker())) {

      throw new TaskDescriptionException(NO_EXECUTOR_OR_CONTAINER);
    }

    // Docker containers don't require executors, validate the rest
    if (builder.isSetExecutorConfig()) {

      if (!builder.getExecutorConfig().isSetName()) {
        throw new TaskDescriptionException(INVALID_EXECUTOR_CONFIG);
      }

      executorSettings
          .getExecutorConfig(builder.getExecutorConfig().getName())
          .orElseThrow(
              () ->
                  new TaskDescriptionException(
                      "Configuration for executor '"
                          + builder.getExecutorConfig().getName()
                          + "' doesn't exist."));
    }

    // Maximize the usefulness of any thrown error message by checking required fields first.
    for (RequiredFieldValidator<?> validator : REQUIRED_FIELDS_VALIDATORS) {
      validator.validate(builder);
    }

    IConstraint constraint = getDedicatedConstraint(config);
    if (constraint != null) {
      if (!isValueConstraint(constraint.getConstraint())) {
        throw new TaskDescriptionException("A dedicated constraint must be of value type.");
      }

      IValueConstraint valueConstraint = constraint.getConstraint().getValue();

      if (valueConstraint.getValues().size() != 1) {
        throw new TaskDescriptionException("A dedicated constraint must have exactly one value");
      }

      String dedicatedRole = getRole(valueConstraint);
      if (!("*".equals(dedicatedRole) || config.getJob().getRole().equals(dedicatedRole))) {
        throw new TaskDescriptionException(
            "Only " + dedicatedRole + " may use hosts dedicated for that role.");
      }
    }

    Optional<Container._Fields> containerType;
    if (config.isSetContainer()) {
      IContainer containerConfig = config.getContainer();
      containerType = Optional.of(containerConfig.getSetField());
      if (containerConfig.isSetDocker()) {
        if (!containerConfig.getDocker().isSetImage()) {
          throw new TaskDescriptionException("A container must specify an image.");
        }
        if (containerConfig.getDocker().getParameters().isEmpty()) {
          for (Map.Entry<String, String> e : settings.defaultDockerParameters.entries()) {
            builder
                .getContainer()
                .getDocker()
                .addToParameters(new DockerParameter(e.getKey(), e.getValue()));
          }
        } else {
          if (!settings.allowDockerParameters) {
            throw new TaskDescriptionException(NO_DOCKER_PARAMETERS);
          }
        }

        if (settings.requireDockerUseExecutor && !config.isSetExecutorConfig()) {
          throw new TaskDescriptionException(EXECUTOR_REQUIRED_WITH_DOCKER);
        }
      }
    } else {
      // Default to mesos container type if unset.
      containerType = Optional.of(Container._Fields.MESOS);
    }

    if (!containerType.isPresent()) {
      throw new TaskDescriptionException("A job must have a container type.");
    }
    if (!settings.allowedContainerTypes.contains(containerType.get())) {
      throw new TaskDescriptionException(
          "This scheduler is not configured to allow the container type "
              + containerType.get().toString());
    }

    thriftBackfill.backfillTask(builder);

    String types =
        config
            .getResources()
            .stream()
            .collect(Collectors.groupingBy(e -> ResourceType.fromResource(e)))
            .entrySet()
            .stream()
            .filter(e -> !e.getKey().isMultipleAllowed() && e.getValue().size() > 1)
            .map(r -> r.getKey().getAuroraName())
            .sorted()
            .collect(Collectors.joining(", "));

    if (!Strings.isNullOrEmpty(types)) {
      throw new TaskDescriptionException("Multiple resource values are not supported for " + types);
    }

    if (!settings.allowGpuResource
        && config
            .getResources()
            .stream()
            .filter(r -> ResourceType.fromResource(r).equals(GPUS))
            .findAny()
            .isPresent()) {

      throw new TaskDescriptionException("GPU resource support is disabled in this cluster.");
    }

    if (!settings.enableMesosFetcher && !config.getMesosFetcherUris().isEmpty()) {
      throw new TaskDescriptionException(MESOS_FETCHER_DISABLED);
    }

    if (config.getContainer().isSetMesos()) {
      IMesosContainer container = config.getContainer().getMesos();
      if (!settings.allowContainerVolumes && !container.getVolumes().isEmpty()) {
        throw new TaskDescriptionException(NO_CONTAINER_VOLUMES);
      }
    }

    maybeFillLinks(builder);

    return ITaskConfig.build(builder);
  }
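A hypothetical illustration of the executor-or-container rule enforced above; the configurationManager instance is an assumption, and the exact rejection message depends on which check fires first:

// Rejected: neither an executor config nor a Docker container is specified.
TaskConfig bare = new TaskConfig()
    .setJob(JobKeys.from("www-data", "prod", "hello_world").newBuilder());
try {
  configurationManager.validateAndPopulate(ITaskConfig.build(bare));
} catch (TaskDescriptionException expected) {
  // e.g. NO_EXECUTOR_OR_CONTAINER
}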
Example #18
  /**
   * Checks the validity of, and populates defaults in, a task configuration. This returns a deep
   * copy of the provided task configuration with default configuration values applied and
   * configuration map values sanitized and applied to their respective struct fields.
   *
   * @param config Task config to validate and populate.
   * @return A new task configuration built from {@code config} with defaults populated.
   * @throws TaskDescriptionException If the task is invalid.
   */
  public static ITaskConfig validateAndPopulate(ITaskConfig config)
      throws TaskDescriptionException {

    TaskConfig builder = config.newBuilder();

    if (!builder.isSetRequestedPorts()) {
      builder.setRequestedPorts(ImmutableSet.of());
    }

    maybeFillLinks(builder);

    if (!isGoodIdentifier(config.getJobName())) {
      throw new TaskDescriptionException(
          "Job name contains illegal characters: " + config.getJobName());
    }

    if (!isGoodIdentifier(config.getEnvironment())) {
      throw new TaskDescriptionException(
          "Environment contains illegal characters: " + config.getEnvironment());
    }

    if (config.isSetTier() && !isGoodIdentifier(config.getTier())) {
      throw new TaskDescriptionException("Tier contains illegal characters: " + config.getTier());
    }

    if (config.isSetJob()) {
      if (!JobKeys.isValid(config.getJob())) {
        // Job key is set but invalid
        throw new TaskDescriptionException("Job key " + config.getJob() + " is invalid.");
      }

      if (!config.getJob().getRole().equals(config.getOwner().getRole())) {
        // Both owner and job key are set but don't match
        throw new TaskDescriptionException("Role must match job owner.");
      }
    } else {
      // TODO(maxim): Make sure both key and owner are populated to support older clients.
      // Remove in 0.7.0. (AURORA-749).
      // Job key is not set -> populate from owner, environment and name
      assertOwnerValidity(config.getOwner());
      builder.setJob(
          JobKeys.from(config.getOwner().getRole(), config.getEnvironment(), config.getJobName())
              .newBuilder());
    }

    if (!builder.isSetExecutorConfig()) {
      throw new TaskDescriptionException("Configuration may not be null");
    }

    // Maximize the usefulness of any thrown error message by checking required fields first.
    for (RequiredFieldValidator<?> validator : REQUIRED_FIELDS_VALIDATORS) {
      validator.validate(builder);
    }

    IConstraint constraint = getDedicatedConstraint(config);
    if (constraint != null) {
      if (!isValueConstraint(constraint.getConstraint())) {
        throw new TaskDescriptionException("A dedicated constraint must be of value type.");
      }

      IValueConstraint valueConstraint = constraint.getConstraint().getValue();

      if (valueConstraint.getValues().size() != 1) {
        throw new TaskDescriptionException("A dedicated constraint must have exactly one value");
      }

      String dedicatedRole = getRole(valueConstraint);
      if (!config.getOwner().getRole().equals(dedicatedRole)) {
        throw new TaskDescriptionException(
            "Only " + dedicatedRole + " may use hosts dedicated for that role.");
      }
    }

    Optional<Container._Fields> containerType;
    if (config.isSetContainer()) {
      IContainer containerConfig = config.getContainer();
      containerType = Optional.of(containerConfig.getSetField());
      if (containerConfig.isSetDocker()) {
        if (!containerConfig.getDocker().isSetImage()) {
          throw new TaskDescriptionException("A container must specify an image");
        }
        if (containerConfig.getDocker().isSetParameters()
            && !containerConfig.getDocker().getParameters().isEmpty()
            && !ENABLE_DOCKER_PARAMETERS.get()) {
          throw new TaskDescriptionException("Docker parameters not allowed.");
        }
      }
    } else {
      // Default to mesos container type if unset.
      containerType = Optional.of(Container._Fields.MESOS);
    }
    if (!containerType.isPresent()) {
      throw new TaskDescriptionException("A job must have a container type.");
    }
    if (!ALLOWED_CONTAINER_TYPES.get().contains(containerType.get())) {
      throw new TaskDescriptionException(
          "The container type " + containerType.get().toString() + " is not allowed");
    }

    return ITaskConfig.build(builder);
  }
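In this older variant, a task whose job key role disagrees with its owner role is rejected. A sketch calling the method above directly, with arbitrarily chosen identifiers:

TaskConfig mismatched = new TaskConfig()
    .setJob(new JobKey("roleA", "prod", "job"))
    .setOwner(new Identity("roleB", "someuser"))
    .setEnvironment("prod")
    .setJobName("job")
    .setExecutorConfig(new ExecutorConfig("aurora", "config"));
try {
  validateAndPopulate(ITaskConfig.build(mismatched));
} catch (TaskDescriptionException expected) {
  // "Role must match job owner."
}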