public class SchedulingFilterImplTest extends EasyMockTest {

  private static final String HOST_A = "hostA";
  private static final String HOST_B = "hostB";
  private static final String HOST_C = "hostC";

  private static final String RACK_A = "rackA";
  private static final String RACK_B = "rackB";

  private static final String RACK_ATTRIBUTE = "rack";
  private static final String HOST_ATTRIBUTE = "host";

  private static final IJobKey JOB_A = JobKeys.from("roleA", "env", "jobA");
  private static final IJobKey JOB_B = JobKeys.from("roleB", "env", "jobB");

  private static final int DEFAULT_CPUS = 4;
  private static final long DEFAULT_RAM = 1000;
  private static final long DEFAULT_DISK = 2000;
  private static final ResourceSlot DEFAULT_OFFER =
      Resources.from(Offers.createOffer(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK, Pair.of(80, 80)))
          .slot();

  private SchedulingFilter defaultFilter;

  @Before
  public void setUp() {
    defaultFilter = new SchedulingFilterImpl(TaskExecutors.NO_OVERHEAD_EXECUTOR);
  }

  @Test
  public void testMeetsOffer() {
    control.replay();

    IHostAttributes attributes = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));
    assertNoVetoes(makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK), attributes);
    assertNoVetoes(makeTask(DEFAULT_CPUS - 1, DEFAULT_RAM - 1, DEFAULT_DISK - 1), attributes);
  }

  @Test
  public void testSufficientPorts() {
    control.replay();

    ResourceSlot twoPorts =
        Resources.from(Offers.createOffer(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK, Pair.of(80, 81)))
            .slot();
    ITaskConfig noPortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of()));
    ITaskConfig onePortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of("one")));
    ITaskConfig twoPortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of("one", "two")));
    ITaskConfig threePortTask =
        ITaskConfig.build(
            makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
                .newBuilder()
                .setRequestedPorts(ImmutableSet.of("one", "two", "three")));

    Set<Veto> none = ImmutableSet.of();
    IHostAttributes hostA = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));

    assertEquals(
        none,
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(noPortTask, EMPTY)));
    assertEquals(
        none,
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(onePortTask, EMPTY)));
    assertEquals(
        none,
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(twoPortTask, EMPTY)));
    assertEquals(
        ImmutableSet.of(PORTS.veto(1)),
        defaultFilter.filter(
            new UnusedResource(twoPorts, hostA), new ResourceRequest(threePortTask, EMPTY)));
  }

  @Test
  public void testInsufficientResources() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));
    assertVetoes(
        makeTask(DEFAULT_CPUS + 1, DEFAULT_RAM + 1, DEFAULT_DISK + 1),
        hostA,
        CPU.veto(1), DISK.veto(1), RAM.veto(1));
    assertVetoes(makeTask(DEFAULT_CPUS + 1, DEFAULT_RAM, DEFAULT_DISK), hostA, CPU.veto(1));
    assertVetoes(makeTask(DEFAULT_CPUS, DEFAULT_RAM + 1, DEFAULT_DISK), hostA, RAM.veto(1));
    assertVetoes(makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK + 1), hostA, DISK.veto(1));
  }

  @Test
  public void testDedicatedRole() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, dedicated(JOB_A.getRole()));
    checkConstraint(hostA, DEDICATED_ATTRIBUTE, true, JOB_A.getRole());
    assertVetoes(makeTask(JOB_B), hostA, Veto.dedicatedHostConstraintMismatch());
  }

  @Test
  public void testSharedDedicatedHost() {
    control.replay();

    String dedicated1 = dedicatedFor(JOB_A);
    String dedicated2 = dedicatedFor(JOB_B);

    IHostAttributes hostA = hostAttributes(HOST_A, dedicated(dedicated1, dedicated2));
    assertNoVetoes(checkConstraint(JOB_A, hostA, DEDICATED_ATTRIBUTE, true, dedicated1), hostA);
    assertNoVetoes(checkConstraint(JOB_B, hostA, DEDICATED_ATTRIBUTE, true, dedicated2), hostA);
  }

  @Test
  public void testMultiValuedAttributes() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, valueAttribute("jvm", "1.0", "2.0", "3.0"));
    checkConstraint(hostA, "jvm", true, "1.0");
    checkConstraint(hostA, "jvm", false, "4.0");
    checkConstraint(hostA, "jvm", true, "1.0", "2.0");

    IHostAttributes hostB = hostAttributes(HOST_A, valueAttribute("jvm", "1.0"));
    checkConstraint(hostB, "jvm", false, "2.0", "3.0");
  }

  @Test
  public void testHostScheduledForMaintenance() {
    control.replay();

    assertNoVetoes(
        makeTask(),
        hostAttributes(HOST_A, MaintenanceMode.SCHEDULED, host(HOST_A), rack(RACK_A)));
  }

  @Test
  public void testHostDrainingForMaintenance() {
    control.replay();

    assertVetoes(
        makeTask(),
        hostAttributes(HOST_A, MaintenanceMode.DRAINING, host(HOST_A), rack(RACK_A)),
        Veto.maintenance("draining"));
  }

  @Test
  public void testHostDrainedForMaintenance() {
    control.replay();

    assertVetoes(
        makeTask(),
        hostAttributes(HOST_A, MaintenanceMode.DRAINED, host(HOST_A), rack(RACK_A)),
        Veto.maintenance("drained"));
  }

  @Test
  public void testMultipleTaskConstraints() {
    control.replay();

    Constraint constraint1 = makeConstraint("host", HOST_A);
    Constraint constraint2 = makeConstraint(DEDICATED_ATTRIBUTE, "xxx");

    assertVetoes(
        makeTask(JOB_A, constraint1, constraint2),
        hostAttributes(HOST_A, dedicated(HOST_A), host(HOST_A)),
        Veto.constraintMismatch(DEDICATED_ATTRIBUTE));
    assertNoVetoes(
        makeTask(JOB_B, constraint1, constraint2),
        hostAttributes(HOST_B, dedicated("xxx"), host(HOST_A)));
  }

  @Test
  public void testDedicatedMismatchShortCircuits() {
    // Ensures that a dedicated mismatch short-circuits other filter operations, such as
    // evaluation of limit constraints. Reduction of task queries is the desired outcome.
    control.replay();

    Constraint hostLimit = limitConstraint("host", 1);
    assertVetoes(
        makeTask(JOB_A, hostLimit, makeConstraint(DEDICATED_ATTRIBUTE, "xxx")),
        hostAttributes(HOST_A, host(HOST_A)),
        Veto.constraintMismatch(DEDICATED_ATTRIBUTE));
    assertVetoes(
        makeTask(JOB_A, hostLimit, makeConstraint(DEDICATED_ATTRIBUTE, "xxx")),
        hostAttributes(HOST_B, dedicated(dedicatedFor(JOB_B)), host(HOST_B)),
        Veto.constraintMismatch(DEDICATED_ATTRIBUTE));
  }

  @Test
  public void testUnderLimitNoTasks() {
    control.replay();

    assertNoVetoes(hostLimitTask(2), hostAttributes(HOST_A, host(HOST_A)));
  }

  private IAttribute host(String host) {
    return valueAttribute(HOST_ATTRIBUTE, host);
  }

  private IAttribute rack(String rack) {
    return valueAttribute(RACK_ATTRIBUTE, rack);
  }

  private IAttribute dedicated(String value, String... values) {
    return valueAttribute(DEDICATED_ATTRIBUTE, value, values);
  }

  private String dedicatedFor(IJobKey job) {
    return job.getRole() + "/" + job.getName();
  }

  @Test
  public void testLimitWithinJob() throws Exception {
    control.replay();

    AttributeAggregate stateA =
        AttributeAggregate.create(
            Suppliers.ofInstance(
                ImmutableList.of(
                    host(HOST_A), rack(RACK_A),
                    host(HOST_B), rack(RACK_A),
                    host(HOST_B), rack(RACK_A),
                    host(HOST_C), rack(RACK_B))));
    AttributeAggregate stateB =
        AttributeAggregate.create(
            Suppliers.ofInstance(
                ImmutableList.of(
                    host(HOST_A), rack(RACK_A),
                    host(HOST_A), rack(RACK_A),
                    host(HOST_B), rack(RACK_A))));

    IHostAttributes hostA = hostAttributes(HOST_A, host(HOST_A), rack(RACK_A));
    IHostAttributes hostB = hostAttributes(HOST_B, host(HOST_B), rack(RACK_A));
    IHostAttributes hostC = hostAttributes(HOST_C, host(HOST_C), rack(RACK_B));

    assertNoVetoes(hostLimitTask(JOB_A, 2), hostA, stateA);
    assertVetoes(hostLimitTask(JOB_A, 1), hostB, stateA, Veto.unsatisfiedLimit(HOST_ATTRIBUTE));
    assertVetoes(hostLimitTask(JOB_A, 2), hostB, stateA, Veto.unsatisfiedLimit(HOST_ATTRIBUTE));
    assertNoVetoes(hostLimitTask(JOB_A, 3), hostB, stateA);

    assertVetoes(rackLimitTask(JOB_A, 2), hostB, stateB, Veto.unsatisfiedLimit(RACK_ATTRIBUTE));
    assertVetoes(rackLimitTask(JOB_A, 3), hostB, stateB, Veto.unsatisfiedLimit(RACK_ATTRIBUTE));
    assertNoVetoes(rackLimitTask(JOB_A, 4), hostB, stateB);

    assertNoVetoes(rackLimitTask(JOB_A, 1), hostC, stateB);
    assertVetoes(rackLimitTask(JOB_A, 1), hostC, stateA, Veto.unsatisfiedLimit(RACK_ATTRIBUTE));
    assertNoVetoes(rackLimitTask(JOB_A, 2), hostC, stateB);
  }

  @Test
  public void testAttribute() {
    control.replay();

    IHostAttributes hostA = hostAttributes(HOST_A, valueAttribute("jvm", "1.0"));

    // Matches attribute, matching value.
    checkConstraint(hostA, "jvm", true, "1.0");
    // Matches attribute, different value.
    checkConstraint(hostA, "jvm", false, "1.4");
    // Does not match attribute.
    checkConstraint(hostA, "xxx", false, "1.4");
    // Logical 'OR' matching attribute.
    checkConstraint(hostA, "jvm", false, "1.2", "1.4");
    // Logical 'OR' not matching attribute.
    checkConstraint(hostA, "xxx", false, "1.0", "1.4");
  }

  @Test
  public void testAttributes() {
    control.replay();

    IHostAttributes hostA =
        hostAttributes(
            HOST_A,
            valueAttribute("jvm", "1.4", "1.6", "1.7"),
            valueAttribute("zone", "a", "b", "c"));

    // Matches attribute, matching value.
    checkConstraint(hostA, "jvm", true, "1.4");
    // Matches attribute, different value.
    checkConstraint(hostA, "jvm", false, "1.0");
    // Does not match attribute.
    checkConstraint(hostA, "xxx", false, "1.4");
    // Logical 'OR' with attribute and value match.
    checkConstraint(hostA, "jvm", true, "1.2", "1.4");
    // Does not match attribute.
    checkConstraint(hostA, "xxx", false, "1.0", "1.4");

    // Check that logical AND works.
    Constraint jvmConstraint = makeConstraint("jvm", "1.6");
    Constraint zoneConstraint = makeConstraint("zone", "c");

    ITaskConfig task = makeTask(JOB_A, jvmConstraint, zoneConstraint);
    assertEquals(
        ImmutableSet.of(),
        defaultFilter.filter(
            new UnusedResource(DEFAULT_OFFER, hostA), new ResourceRequest(task, EMPTY)));

    Constraint jvmNegated = jvmConstraint.deepCopy();
    jvmNegated.getConstraint().getValue().setNegated(true);
    Constraint zoneNegated = jvmConstraint.deepCopy();
    zoneNegated.getConstraint().getValue().setNegated(true);
    assertVetoes(makeTask(JOB_A, jvmNegated, zoneNegated), hostA, Veto.constraintMismatch("jvm"));
  }

  @Test
  public void testVetoScaling() {
    control.replay();

    int maxScore = VetoType.INSUFFICIENT_RESOURCES.getScore();
    assertEquals((int) (maxScore * 1.0 / CPU.getRange()), CPU.veto(1).getScore());
    assertEquals(maxScore, CPU.veto(CPU.getRange() * 10).getScore());
    assertEquals((int) (maxScore * 2.0 / RAM.getRange()), RAM.veto(2).getScore());
    assertEquals((int) (maxScore * 200.0 / DISK.getRange()), DISK.veto(200).getScore());
  }

  @Test
  public void testDuplicatedAttribute() {
    control.replay();

    IHostAttributes hostA =
        hostAttributes(HOST_A, valueAttribute("jvm", "1.4"), valueAttribute("jvm", "1.6", "1.7"));

    // Matches attribute, matching value.
    checkConstraint(hostA, "jvm", true, "1.4");
    checkConstraint(hostA, "jvm", true, "1.6");
    checkConstraint(hostA, "jvm", true, "1.7");
    checkConstraint(hostA, "jvm", true, "1.6", "1.7");
  }

  @Test
  public void testVetoGroups() {
    control.replay();

    assertEquals(VetoGroup.EMPTY, Veto.identifyGroup(ImmutableSet.of()));

    assertEquals(
        VetoGroup.STATIC,
        Veto.identifyGroup(
            ImmutableSet.of(
                Veto.constraintMismatch("denied"),
                Veto.insufficientResources("ram", 100),
                Veto.maintenance("draining"))));

    assertEquals(
        VetoGroup.DYNAMIC,
        Veto.identifyGroup(ImmutableSet.of(Veto.unsatisfiedLimit("denied"))));

    assertEquals(
        VetoGroup.MIXED,
        Veto.identifyGroup(
            ImmutableSet.of(
                Veto.insufficientResources("ram", 100),
                Veto.unsatisfiedLimit("denied"))));
  }

  private ITaskConfig checkConstraint(
      IHostAttributes hostAttributes,
      String constraintName,
      boolean expected,
      String value,
      String... vs) {

    return checkConstraint(JOB_A, hostAttributes, constraintName, expected, value, vs);
  }

  private ITaskConfig checkConstraint(
      IJobKey job,
      IHostAttributes hostAttributes,
      String constraintName,
      boolean expected,
      String value,
      String... vs) {

    return checkConstraint(
        job,
        EMPTY,
        hostAttributes,
        constraintName,
        expected,
        new ValueConstraint(
            false,
            ImmutableSet.<String>builder().add(value).addAll(Arrays.asList(vs)).build()));
  }

  private ITaskConfig checkConstraint(
      IJobKey job,
      AttributeAggregate aggregate,
      IHostAttributes hostAttributes,
      String constraintName,
      boolean expected,
      ValueConstraint value) {

    Constraint constraint = new Constraint(constraintName, TaskConstraint.value(value));
    ITaskConfig task = makeTask(job, constraint);
    assertEquals(
        expected,
        defaultFilter
            .filter(
                new UnusedResource(DEFAULT_OFFER, hostAttributes),
                new ResourceRequest(task, aggregate))
            .isEmpty());

    Constraint negated = constraint.deepCopy();
    negated.getConstraint().getValue().setNegated(!value.isNegated());
    ITaskConfig negatedTask = makeTask(job, negated);
    assertEquals(
        !expected,
        defaultFilter
            .filter(
                new UnusedResource(DEFAULT_OFFER, hostAttributes),
                new ResourceRequest(negatedTask, aggregate))
            .isEmpty());
    return task;
  }

  private void assertNoVetoes(ITaskConfig task, IHostAttributes hostAttributes) {
    assertVetoes(task, hostAttributes, EMPTY);
  }

  private void assertNoVetoes(
      ITaskConfig task, IHostAttributes attributes, AttributeAggregate jobState) {

    assertVetoes(task, attributes, jobState);
  }

  private void assertVetoes(ITaskConfig task, IHostAttributes hostAttributes, Veto... vetoes) {
    assertVetoes(task, hostAttributes, EMPTY, vetoes);
  }

  private void assertVetoes(
      ITaskConfig task,
      IHostAttributes hostAttributes,
      AttributeAggregate jobState,
      Veto... vetoes) {

    assertEquals(
        ImmutableSet.copyOf(vetoes),
        defaultFilter.filter(
            new UnusedResource(DEFAULT_OFFER, hostAttributes),
            new ResourceRequest(task, jobState)));
  }

  private static IHostAttributes hostAttributes(
      String host, MaintenanceMode mode, IAttribute... attributes) {

    return IHostAttributes.build(
        new HostAttributes()
            .setHost(host)
            .setMode(mode)
            .setAttributes(IAttribute.toBuildersSet(ImmutableSet.copyOf(attributes))));
  }

  private static IHostAttributes hostAttributes(String host, IAttribute... attributes) {
    return hostAttributes(host, MaintenanceMode.NONE, attributes);
  }

  private IAttribute valueAttribute(String name, String string, String... strings) {
    return IAttribute.build(
        new Attribute(
            name,
            ImmutableSet.<String>builder().add(string).addAll(Arrays.asList(strings)).build()));
  }

  private static Constraint makeConstraint(String name, String... values) {
    return new Constraint(
        name, TaskConstraint.value(new ValueConstraint(false, ImmutableSet.copyOf(values))));
  }

  private Constraint limitConstraint(String name, int value) {
    return new Constraint(name, TaskConstraint.limit(new LimitConstraint(value)));
  }

  private ITaskConfig makeTask(IJobKey job, Constraint... constraint) {
    return ITaskConfig.build(
        makeTask(job, DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK)
            .newBuilder()
            .setConstraints(Sets.newHashSet(constraint)));
  }

  private ITaskConfig hostLimitTask(IJobKey job, int maxPerHost) {
    return makeTask(job, limitConstraint(HOST_ATTRIBUTE, maxPerHost));
  }

  private ITaskConfig hostLimitTask(int maxPerHost) {
    return hostLimitTask(JOB_A, maxPerHost);
  }

  private ITaskConfig rackLimitTask(IJobKey job, int maxPerRack) {
    return makeTask(job, limitConstraint(RACK_ATTRIBUTE, maxPerRack));
  }

  private ITaskConfig makeTask(IJobKey job, int cpus, long ramMb, long diskMb) {
    return ITaskConfig.build(
        new TaskConfig()
            .setJob(job.newBuilder())
            .setNumCpus(cpus)
            .setRamMb(ramMb)
            .setDiskMb(diskMb)
            .setExecutorConfig(new ExecutorConfig("aurora", "config")));
  }

  private ITaskConfig makeTask(int cpus, long ramMb, long diskMb) {
    return makeTask(JOB_A, cpus, ramMb, diskMb);
  }

  private ITaskConfig makeTask() {
    return makeTask(DEFAULT_CPUS, DEFAULT_RAM, DEFAULT_DISK);
  }
}
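The veto-grouping behavior pinned down by testVetoGroups can be restated as a small standalone sketch. This is illustrative only: it assumes the grouping rule implied by the assertions above (constraint-mismatch, insufficient-resource, and maintenance vetoes are static; unsatisfied-limit vetoes are dynamic; a set containing both kinds is mixed), and the names below (VetoGroupSketch, VetoKind, classify) are hypothetical, not Aurora API.

// Illustrative sketch only (not part of the Aurora sources). It mirrors the grouping rule
// asserted in testVetoGroups, under the stated assumptions. All names are hypothetical.
import java.util.EnumSet;
import java.util.Set;

final class VetoGroupSketch {
  enum VetoKind { CONSTRAINT_MISMATCH, INSUFFICIENT_RESOURCES, MAINTENANCE, UNSATISFIED_LIMIT }
  enum Group { EMPTY, STATIC, DYNAMIC, MIXED }

  // Classifies a set of vetoes the same way the test expects Veto.identifyGroup to behave.
  static Group classify(Set<VetoKind> vetoes) {
    if (vetoes.isEmpty()) {
      return Group.EMPTY;
    }
    boolean hasDynamic = vetoes.contains(VetoKind.UNSATISFIED_LIMIT);
    boolean hasStatic = vetoes.stream().anyMatch(v -> v != VetoKind.UNSATISFIED_LIMIT);
    if (hasStatic && hasDynamic) {
      return Group.MIXED;
    }
    return hasDynamic ? Group.DYNAMIC : Group.STATIC;
  }

  public static void main(String[] args) {
    System.out.println(classify(EnumSet.noneOf(VetoKind.class)));            // EMPTY
    System.out.println(classify(EnumSet.of(VetoKind.CONSTRAINT_MISMATCH)));  // STATIC
    System.out.println(classify(EnumSet.of(VetoKind.UNSATISFIED_LIMIT)));    // DYNAMIC
    System.out.println(classify(
        EnumSet.of(VetoKind.INSUFFICIENT_RESOURCES, VetoKind.UNSATISFIED_LIMIT)));  // MIXED
  }
}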
class PreemptionVictimFilterImpl implements PreemptionVictimFilter {

  private final SchedulingFilter schedulingFilter;
  private final ExecutorSettings executorSettings;
  private final PreemptorMetrics metrics;
  private final TierManager tierManager;

  @Inject
  PreemptionVictimFilterImpl(
      SchedulingFilter schedulingFilter,
      ExecutorSettings executorSettings,
      PreemptorMetrics metrics,
      TierManager tierManager) {

    this.schedulingFilter = requireNonNull(schedulingFilter);
    this.executorSettings = requireNonNull(executorSettings);
    this.metrics = requireNonNull(metrics);
    this.tierManager = requireNonNull(tierManager);
  }

  private static final Function<HostOffer, ResourceSlot> OFFER_TO_RESOURCE_SLOT =
      offer -> Resources.from(offer.getOffer()).filter(Resources.NON_REVOCABLE).slot();

  private static final Function<HostOffer, String> OFFER_TO_HOST =
      offer -> offer.getOffer().getHostname();

  private static final Function<PreemptionVictim, String> VICTIM_TO_HOST =
      PreemptionVictim::getSlaveHost;

  private final Function<PreemptionVictim, ResourceSlot> victimToResources =
      new Function<PreemptionVictim, ResourceSlot>() {
        @Override
        public ResourceSlot apply(PreemptionVictim victim) {
          ResourceSlot slot = victim.getResourceSlot();
          if (tierManager.getTier(victim.getConfig()).isRevocable()) {
            // Revocable task CPU cannot be used for preemption purposes as it's a compressible
            // resource. We can still use RAM, DISK and PORTS as they are not compressible.
            slot = new ResourceSlot(0.0, slot.getRam(), slot.getDisk(), slot.getNumPorts());
          }
          return slot.add(executorSettings.getExecutorOverhead());
        }
      };

  // TODO(zmanji) Consider using Dominant Resource Fairness for ordering instead of the vector
  // ordering.
  private final Ordering<PreemptionVictim> resourceOrder =
      ResourceSlot.ORDER.onResultOf(victimToResources).reverse();

  @Override
  public Optional<ImmutableSet<PreemptionVictim>> filterPreemptionVictims(
      ITaskConfig pendingTask,
      Iterable<PreemptionVictim> possibleVictims,
      AttributeAggregate jobState,
      Optional<HostOffer> offer,
      StoreProvider storeProvider) {

    // This enforces the precondition that all of the resources are from the same host. We need to
    // get the host for the schedulingFilter.
    Set<String> hosts =
        ImmutableSet.<String>builder()
            .addAll(Iterables.transform(possibleVictims, VICTIM_TO_HOST))
            .addAll(Iterables.transform(offer.asSet(), OFFER_TO_HOST))
            .build();

    ResourceSlot slackResources = sum(Iterables.transform(offer.asSet(), OFFER_TO_RESOURCE_SLOT));

    FluentIterable<PreemptionVictim> preemptableTasks =
        FluentIterable.from(possibleVictims).filter(preemptionFilter(pendingTask));

    if (preemptableTasks.isEmpty()) {
      return Optional.absent();
    }

    Set<PreemptionVictim> toPreemptTasks = Sets.newHashSet();

    Iterable<PreemptionVictim> sortedVictims = resourceOrder.immutableSortedCopy(preemptableTasks);

    Optional<IHostAttributes> attributes =
        storeProvider.getAttributeStore().getHostAttributes(Iterables.getOnlyElement(hosts));

    if (!attributes.isPresent()) {
      metrics.recordMissingAttributes();
      return Optional.absent();
    }

    for (PreemptionVictim victim : sortedVictims) {
      toPreemptTasks.add(victim);

      ResourceSlot totalResource =
          sum(Iterables.transform(toPreemptTasks, victimToResources)).add(slackResources);

      Set<Veto> vetoes =
          schedulingFilter.filter(
              new UnusedResource(totalResource, attributes.get()),
              new ResourceRequest(pendingTask, jobState));

      if (vetoes.isEmpty()) {
        return Optional.of(ImmutableSet.copyOf(toPreemptTasks));
      }
    }
    return Optional.absent();
  }

  /**
   * Creates a filter that will find tasks that the provided {@code pendingTask} may preempt.
   *
   * @param pendingTask A pending (unscheduled) task that may preempt other tasks.
   * @return A filter that compares the priorities and resources required by other tasks with
   *     those of {@code pendingTask}.
   */
  private static Predicate<PreemptionVictim> preemptionFilter(final ITaskConfig pendingTask) {
    return possibleVictim -> {
      boolean pendingIsProduction = pendingTask.isProduction();
      boolean victimIsProduction = possibleVictim.isProduction();

      if (pendingIsProduction && !victimIsProduction) {
        return true;
      } else if (pendingIsProduction == victimIsProduction) {
        // If production flags are equal, preemption is based on priority within the same role.
        if (pendingTask.getJob().getRole().equals(possibleVictim.getRole())) {
          return pendingTask.getPriority() > possibleVictim.getPriority();
        } else {
          return false;
        }
      } else {
        return false;
      }
    };
  }
}
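A minimal sketch of the victim-accumulation strategy in filterPreemptionVictims above, written with plain types so it can run on its own: candidates are sorted by the resources they would free, largest first, and added one at a time until the pending task's requirement fits within the freed resources plus any slack from an outstanding offer. All names in the sketch (Victim, pickVictims, the cpus/ramMb fields) are hypothetical, and only CPU and RAM are modeled.

// Illustrative sketch only (not Aurora code); it restates the accumulation loop above under
// the assumptions described in the lead-in. All names are hypothetical.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

final class PreemptionSketch {
  record Victim(String taskId, double cpus, long ramMb) { }

  static Optional<List<Victim>> pickVictims(
      double neededCpus, long neededRamMb,
      double slackCpus, long slackRamMb,
      List<Victim> candidates) {

    List<Victim> sorted = new ArrayList<>(candidates);
    // Largest victims first, mirroring resourceOrder.reverse() above (CPU only, for brevity).
    sorted.sort(Comparator.comparingDouble(Victim::cpus).reversed());

    List<Victim> chosen = new ArrayList<>();
    double freedCpus = slackCpus;
    long freedRam = slackRamMb;
    for (Victim v : sorted) {
      chosen.add(v);
      freedCpus += v.cpus();
      freedRam += v.ramMb();
      // Stop as soon as the pending task would no longer be vetoed for resources.
      if (freedCpus >= neededCpus && freedRam >= neededRamMb) {
        return Optional.of(List.copyOf(chosen));
      }
    }
    return Optional.empty();
  }

  public static void main(String[] args) {
    List<Victim> victims = List.of(
        new Victim("a", 1.0, 512), new Victim("b", 4.0, 2048), new Victim("c", 2.0, 1024));
    // Needs 5 CPUs / 2560 MB with no slack: expect victims "b" and "c" to be chosen.
    System.out.println(pickVictims(5.0, 2560, 0.0, 0, victims));
  }
}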
public class TaskAssignerImplTest extends EasyMockTest {

  private static final int PORT = 5000;
  private static final String SLAVE_ID = "slaveId";
  private static final Offer MESOS_OFFER =
      Offer.newBuilder()
          .setId(OfferID.newBuilder().setValue("offerId"))
          .setFrameworkId(FrameworkID.newBuilder().setValue("frameworkId"))
          .setSlaveId(SlaveID.newBuilder().setValue(SLAVE_ID))
          .setHostname("hostName")
          .addResources(
              Resource.newBuilder()
                  .setName("ports")
                  .setType(Type.RANGES)
                  .setRanges(
                      Ranges.newBuilder()
                          .addRange(Range.newBuilder().setBegin(PORT).setEnd(PORT))))
          .build();
  private static final HostOffer OFFER =
      new HostOffer(MESOS_OFFER, IHostAttributes.build(new HostAttributes()));
  private static final String PORT_NAME = "http";
  private static final IScheduledTask TASK =
      IScheduledTask.build(
          new ScheduledTask()
              .setAssignedTask(
                  new AssignedTask()
                      .setTaskId("taskId")
                      .setTask(
                          new TaskConfig()
                              .setJob(new JobKey("r", "e", "n"))
                              .setExecutorConfig(new ExecutorConfig().setData("opaque data"))
                              .setRequestedPorts(ImmutableSet.of(PORT_NAME)))));
  private static final TaskGroupKey GROUP_KEY =
      TaskGroupKey.from(TASK.getAssignedTask().getTask());
  private static final TaskInfo TASK_INFO =
      TaskInfo.newBuilder()
          .setName("taskName")
          .setTaskId(TaskID.newBuilder().setValue(Tasks.id(TASK)))
          .setSlaveId(MESOS_OFFER.getSlaveId())
          .build();
  private static final Map<String, TaskGroupKey> NO_RESERVATION = ImmutableMap.of();
  private static final UnusedResource UNUSED =
      new UnusedResource(Resources.from(MESOS_OFFER).slot(), OFFER.getAttributes());
  private static final ResourceRequest RESOURCE_REQUEST =
      new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY);

  private MutableStoreProvider storeProvider;
  private StateManager stateManager;
  private SchedulingFilter filter;
  private MesosTaskFactory taskFactory;
  private OfferManager offerManager;
  private TaskAssigner assigner;
  private TierManager tierManager;

  @Before
  public void setUp() throws Exception {
    storeProvider = createMock(MutableStoreProvider.class);
    filter = createMock(SchedulingFilter.class);
    taskFactory = createMock(MesosTaskFactory.class);
    stateManager = createMock(StateManager.class);
    offerManager = createMock(OfferManager.class);
    tierManager = createMock(TierManager.class);
    assigner = new TaskAssignerImpl(stateManager, filter, taskFactory, offerManager, tierManager);
  }

  @Test
  public void testAssignNoVetoes() throws Exception {
    expect(offerManager.getOffers(GROUP_KEY)).andReturn(ImmutableSet.of(OFFER));
    offerManager.launchTask(MESOS_OFFER.getId(), TASK_INFO);
    expect(tierManager.getTier(TASK.getAssignedTask().getTask())).andReturn(DEFAULT);
    expect(filter.filter(UNUSED, RESOURCE_REQUEST)).andReturn(ImmutableSet.of());
    expect(
            stateManager.assignTask(
                storeProvider,
                Tasks.id(TASK),
                MESOS_OFFER.getHostname(),
                MESOS_OFFER.getSlaveId(),
                ImmutableMap.of(PORT_NAME, PORT)))
        .andReturn(TASK.getAssignedTask());
    expect(taskFactory.createFrom(TASK.getAssignedTask(), MESOS_OFFER.getSlaveId()))
        .andReturn(TASK_INFO);

    control.replay();

    assertTrue(
        assigner.maybeAssign(
            storeProvider,
            new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY),
            TaskGroupKey.from(TASK.getAssignedTask().getTask()),
            Tasks.id(TASK),
            ImmutableMap.of(SLAVE_ID, GROUP_KEY)));
  }

  @Test
  public void testAssignVetoesWithStaticBan() throws Exception {
    expect(offerManager.getOffers(GROUP_KEY)).andReturn(ImmutableSet.of(OFFER));
    offerManager.banOffer(MESOS_OFFER.getId(), GROUP_KEY);
    expect(tierManager.getTier(TASK.getAssignedTask().getTask())).andReturn(DEFAULT);
    expect(filter.filter(UNUSED, RESOURCE_REQUEST))
        .andReturn(ImmutableSet.of(Veto.constraintMismatch("denied")));

    control.replay();

    assertFalse(
        assigner.maybeAssign(
            storeProvider,
            new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY),
            TaskGroupKey.from(TASK.getAssignedTask().getTask()),
            Tasks.id(TASK),
            NO_RESERVATION));
  }

  @Test
  public void testAssignVetoesWithNoStaticBan() throws Exception {
    expect(offerManager.getOffers(GROUP_KEY)).andReturn(ImmutableSet.of(OFFER));
    expect(tierManager.getTier(TASK.getAssignedTask().getTask())).andReturn(DEFAULT);
    expect(filter.filter(UNUSED, RESOURCE_REQUEST))
        .andReturn(ImmutableSet.of(Veto.unsatisfiedLimit("limit")));

    control.replay();

    assertFalse(
        assigner.maybeAssign(
            storeProvider,
            new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY),
            TaskGroupKey.from(TASK.getAssignedTask().getTask()),
            Tasks.id(TASK),
            NO_RESERVATION));
  }

  @Test
  public void testAssignmentClearedOnError() throws Exception {
    expect(offerManager.getOffers(GROUP_KEY)).andReturn(ImmutableSet.of(OFFER));
    offerManager.launchTask(MESOS_OFFER.getId(), TASK_INFO);
    expectLastCall().andThrow(new OfferManager.LaunchException("expected"));
    expect(tierManager.getTier(TASK.getAssignedTask().getTask())).andReturn(DEFAULT);
    expect(filter.filter(UNUSED, RESOURCE_REQUEST)).andReturn(ImmutableSet.of());
    expect(
            stateManager.assignTask(
                storeProvider,
                Tasks.id(TASK),
                MESOS_OFFER.getHostname(),
                MESOS_OFFER.getSlaveId(),
                ImmutableMap.of(PORT_NAME, PORT)))
        .andReturn(TASK.getAssignedTask());
    expect(
            stateManager.changeState(
                storeProvider, Tasks.id(TASK), Optional.of(PENDING), LOST, LAUNCH_FAILED_MSG))
        .andReturn(StateChangeResult.SUCCESS);
    expect(taskFactory.createFrom(TASK.getAssignedTask(), MESOS_OFFER.getSlaveId()))
        .andReturn(TASK_INFO);

    control.replay();

    assertFalse(
        assigner.maybeAssign(
            storeProvider,
            new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY),
            TaskGroupKey.from(TASK.getAssignedTask().getTask()),
            Tasks.id(TASK),
            NO_RESERVATION));
  }

  @Test
  public void testAssignmentSkippedForReservedSlave() throws Exception {
    expect(offerManager.getOffers(GROUP_KEY)).andReturn(ImmutableSet.of(OFFER));

    control.replay();

    assertFalse(
        assigner.maybeAssign(
            storeProvider,
            new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY),
            TaskGroupKey.from(TASK.getAssignedTask().getTask()),
            Tasks.id(TASK),
            ImmutableMap.of(
                SLAVE_ID,
                TaskGroupKey.from(
                    ITaskConfig.build(new TaskConfig().setJob(new JobKey("other", "e", "n")))))));
  }

  @Test
  public void testTaskWithReservedSlaveLandsElsewhere() throws Exception {
    // Ensures the slave/task reservation relationship is only enforced in the slave->task
    // direction and is permissive in the task->slave direction. In other words, a task with a
    // slave reservation should still be tried against other unreserved slaves.
    HostOffer offer =
        new HostOffer(
            Offer.newBuilder()
                .setId(OfferID.newBuilder().setValue("offerId0"))
                .setFrameworkId(FrameworkID.newBuilder().setValue("frameworkId"))
                .setSlaveId(SlaveID.newBuilder().setValue("slaveId0"))
                .setHostname("hostName0")
                .addResources(
                    Resource.newBuilder()
                        .setName("ports")
                        .setType(Type.RANGES)
                        .setRanges(
                            Ranges.newBuilder()
                                .addRange(Range.newBuilder().setBegin(PORT).setEnd(PORT))))
                .build(),
            IHostAttributes.build(new HostAttributes()));
    expect(offerManager.getOffers(GROUP_KEY)).andReturn(ImmutableSet.of(offer, OFFER));
    expect(tierManager.getTier(TASK.getAssignedTask().getTask())).andReturn(DEFAULT);
    expect(filter.filter(UNUSED, RESOURCE_REQUEST)).andReturn(ImmutableSet.of());
    expect(
            stateManager.assignTask(
                storeProvider,
                Tasks.id(TASK),
                offer.getOffer().getHostname(),
                offer.getOffer().getSlaveId(),
                ImmutableMap.of(PORT_NAME, PORT)))
        .andReturn(TASK.getAssignedTask());
    expect(taskFactory.createFrom(TASK.getAssignedTask(), offer.getOffer().getSlaveId()))
        .andReturn(TASK_INFO);
    offerManager.launchTask(offer.getOffer().getId(), TASK_INFO);

    control.replay();

    assertTrue(
        assigner.maybeAssign(
            storeProvider,
            new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY),
            TaskGroupKey.from(TASK.getAssignedTask().getTask()),
            Tasks.id(TASK),
            ImmutableMap.of(SLAVE_ID, GROUP_KEY)));
  }

  @Test
  public void testAssignerDoesNotReturnOnFirstMismatch() throws Exception {
    // Ensures the scheduling loop does not terminate prematurely when the first mismatch is
    // identified.
    HostOffer mismatched =
        new HostOffer(
            Offer.newBuilder()
                .setId(OfferID.newBuilder().setValue("offerId0"))
                .setFrameworkId(FrameworkID.newBuilder().setValue("frameworkId"))
                .setSlaveId(SlaveID.newBuilder().setValue("slaveId0"))
                .setHostname("hostName0")
                .addResources(
                    Resource.newBuilder()
                        .setName("ports")
                        .setType(Type.RANGES)
                        .setRanges(
                            Ranges.newBuilder()
                                .addRange(Range.newBuilder().setBegin(PORT).setEnd(PORT))))
                .build(),
            IHostAttributes.build(new HostAttributes()));
    expect(offerManager.getOffers(GROUP_KEY)).andReturn(ImmutableSet.of(mismatched, OFFER));
    expect(tierManager.getTier(TASK.getAssignedTask().getTask())).andReturn(DEFAULT).times(2);
    expect(
            filter.filter(
                new UnusedResource(
                    Resources.from(mismatched.getOffer()).slot(), mismatched.getAttributes()),
                new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY)))
        .andReturn(ImmutableSet.of(Veto.constraintMismatch("constraint mismatch")));
    offerManager.banOffer(mismatched.getOffer().getId(), GROUP_KEY);
    expect(
            filter.filter(
                new UnusedResource(Resources.from(OFFER.getOffer()).slot(), OFFER.getAttributes()),
                new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY)))
        .andReturn(ImmutableSet.of());
    expect(
            stateManager.assignTask(
                storeProvider,
                Tasks.id(TASK),
                OFFER.getOffer().getHostname(),
                OFFER.getOffer().getSlaveId(),
                ImmutableMap.of(PORT_NAME, PORT)))
        .andReturn(TASK.getAssignedTask());
    expect(taskFactory.createFrom(TASK.getAssignedTask(), OFFER.getOffer().getSlaveId()))
        .andReturn(TASK_INFO);
    offerManager.launchTask(OFFER.getOffer().getId(), TASK_INFO);

    control.replay();

    assertTrue(
        assigner.maybeAssign(
            storeProvider,
            new ResourceRequest(TASK.getAssignedTask().getTask(), EMPTY),
            TaskGroupKey.from(TASK.getAssignedTask().getTask()),
            Tasks.id(TASK),
            ImmutableMap.of(SLAVE_ID, GROUP_KEY)));
  }
}
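A rough sketch of the offer loop these tests exercise, assuming (from the assertions above, not from TaskAssignerImpl itself) that offers are scanned in order, slaves reserved for a different task group are skipped, a constraint-mismatch veto statically bans the offer while other vetoes do not, and scanning continues until an offer passes the filter. Every type and name in the sketch is hypothetical.

// Illustrative sketch only (not the TaskAssignerImpl implementation); see the assumptions in
// the lead-in. All names are hypothetical.
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Function;

final class AssignerSketch {
  record OfferView(String offerId, String slaveId) { }
  enum Veto { CONSTRAINT_MISMATCH, UNSATISFIED_LIMIT }

  static boolean maybeAssign(
      List<OfferView> offers,
      String taskGroup,
      Map<String, String> slaveReservations,          // slaveId -> reserved task group
      Function<OfferView, Set<Veto>> filter,          // stands in for SchedulingFilter.filter
      BiConsumer<OfferView, String> launch,           // stands in for assign + launchTask
      BiConsumer<OfferView, String> banOffer) {       // stands in for OfferManager.banOffer

    for (OfferView offer : offers) {
      String reservedFor = slaveReservations.get(offer.slaveId());
      if (reservedFor != null && !reservedFor.equals(taskGroup)) {
        continue;  // Slave is reserved for another task group; leave the offer alone.
      }
      Set<Veto> vetoes = filter.apply(offer);
      if (vetoes.isEmpty()) {
        launch.accept(offer, taskGroup);
        return true;
      }
      if (vetoes.contains(Veto.CONSTRAINT_MISMATCH)) {
        banOffer.accept(offer, taskGroup);  // Static mismatch: this offer can never match.
      }
      // In either veto case, keep scanning the remaining offers
      // (cf. testAssignerDoesNotReturnOnFirstMismatch).
    }
    return false;
  }

  public static void main(String[] args) {
    List<OfferView> offers =
        List.of(new OfferView("offer0", "slave0"), new OfferView("offer1", "slave1"));
    boolean assigned = maybeAssign(
        offers,
        "groupA",
        Map.of("slave1", "groupB"),                               // slave1 reserved elsewhere
        offer -> offer.slaveId().equals("slave0")
            ? Set.of(Veto.CONSTRAINT_MISMATCH) : Set.<Veto>of(),  // slave0 mismatches
        (offer, group) -> System.out.println("launch on " + offer.offerId()),
        (offer, group) -> System.out.println("ban " + offer.offerId()));
    System.out.println(assigned);  // false: offer0 banned, offer1's slave reserved elsewhere
  }
}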