/**
 * Note, this test can only work as long as we have a single thread executor executing the state
 * update tasks!
 */
public void testPrioritizedTasks() throws Exception {
    BlockingTask block = new BlockingTask(Priority.IMMEDIATE);
    clusterService.submitStateUpdateTask("test", block);
    int taskCount = randomIntBetween(5, 20);

    // will hold all the tasks in the order in which they were executed
    List<PrioritizedTask> tasks = new ArrayList<>(taskCount);
    CountDownLatch latch = new CountDownLatch(taskCount);
    for (int i = 0; i < taskCount; i++) {
        Priority priority = randomFrom(Priority.values());
        clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks));
    }

    block.close();
    latch.await();

    Priority prevPriority = null;
    for (PrioritizedTask task : tasks) {
        if (prevPriority == null) {
            prevPriority = task.priority();
        } else {
            assertThat(task.priority().sameOrAfter(prevPriority), is(true));
        }
    }
}
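// For reference, a minimal sketch of the two helper tasks used above; they are defined
// elsewhere in this class, so the field names here are illustrative. Assumes
// ClusterStateUpdateTask offers a Priority-taking constructor and a priority() accessor,
// which the assertions above rely on.
private static class BlockingTask extends ClusterStateUpdateTask implements Releasable {
    private final CountDownLatch release = new CountDownLatch(1);

    BlockingTask(Priority priority) {
        super(priority);
    }

    @Override
    public ClusterState execute(ClusterState currentState) throws Exception {
        release.await(); // hold the single state-update thread so later tasks queue up
        return currentState;
    }

    @Override
    public void onFailure(String source, Exception e) {
    }

    @Override
    public void close() {
        release.countDown(); // unblock the executor; queued tasks then run by priority
    }
}

private static class PrioritizedTask extends ClusterStateUpdateTask {
    private final CountDownLatch latch;
    private final List<PrioritizedTask> tasks;

    PrioritizedTask(Priority priority, CountDownLatch latch, List<PrioritizedTask> tasks) {
        super(priority);
        this.latch = latch;
        this.tasks = tasks;
    }

    @Override
    public ClusterState execute(ClusterState currentState) throws Exception {
        tasks.add(this); // record execution order for the priority assertion
        latch.countDown();
        return currentState;
    }

    @Override
    public void onFailure(String source, Exception e) {
        latch.countDown();
    }
}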
public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
    AtomicInteger counter = new AtomicInteger();
    class Task {
        private AtomicBoolean state = new AtomicBoolean();
        private final int id;

        Task(int id) {
            this.id = id;
        }

        // each task instance must be executed exactly once
        public void execute() {
            if (!state.compareAndSet(false, true)) {
                throw new IllegalStateException();
            } else {
                counter.incrementAndGet();
            }
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            Task task = (Task) o;
            return id == task.id;
        }

        @Override
        public int hashCode() {
            return id;
        }

        @Override
        public String toString() {
            return Integer.toString(id);
        }
    }

    int numberOfThreads = randomIntBetween(2, 8);
    int taskSubmissionsPerThread = randomIntBetween(1, 64);
    int numberOfExecutors = Math.max(1, numberOfThreads / 4);
    // one permit per executor: execute() acquires a permit whenever it produces a new
    // cluster state, and clusterStatePublished() releases it again
    final Semaphore semaphore = new Semaphore(numberOfExecutors);

    class TaskExecutor implements ClusterStateTaskExecutor<Task> {
        private final List<Set<Task>> taskGroups;
        private AtomicInteger counter = new AtomicInteger();
        private AtomicInteger batches = new AtomicInteger();
        private AtomicInteger published = new AtomicInteger();

        public TaskExecutor(List<Set<Task>> taskGroups) {
            this.taskGroups = taskGroups;
        }

        @Override
        public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
            for (Set<Task> expectedSet : taskGroups) {
                long count = tasks.stream().filter(expectedSet::contains).count();
                assertThat(
                    "batched set should be executed together or not at all. Expected " + expectedSet
                        + "s. Executing " + tasks,
                    count,
                    anyOf(equalTo(0L), equalTo((long) expectedSet.size())));
            }
            tasks.forEach(Task::execute);
            counter.addAndGet(tasks.size());
            ClusterState maybeUpdatedClusterState = currentState;
            if (randomBoolean()) {
                maybeUpdatedClusterState = ClusterState.builder(currentState).build();
                batches.incrementAndGet();
                semaphore.acquire();
            }
            return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
        }

        @Override
        public boolean runOnlyOnMaster() {
            return false;
        }

        @Override
        public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
            published.incrementAndGet();
            semaphore.release();
        }
    }

    ConcurrentMap<String, AtomicInteger> processedStates = new ConcurrentHashMap<>();

    List<Set<Task>> taskGroups = new ArrayList<>();
    List<TaskExecutor> executors = new ArrayList<>();
    for (int i = 0; i < numberOfExecutors; i++) {
        executors.add(new TaskExecutor(taskGroups));
    }

    // randomly assign tasks to executors
    List<Tuple<TaskExecutor, Set<Task>>> assignments = new ArrayList<>();
    int taskId = 0;
    for (int i = 0; i < numberOfThreads; i++) {
        for (int j = 0; j < taskSubmissionsPerThread; j++) {
            TaskExecutor executor = randomFrom(executors);
            Set<Task> tasks = new HashSet<>();
            for (int t = randomInt(3); t >= 0; t--) {
                tasks.add(new Task(taskId++));
            }
            taskGroups.add(tasks);
            assignments.add(Tuple.tuple(executor, tasks));
        }
    }

    Map<TaskExecutor, Integer> counts = new HashMap<>();
    int totalTaskCount = 0;
    for (Tuple<TaskExecutor, Set<Task>> assignment : assignments) {
        final int taskCount = assignment.v2().size();
        counts.merge(assignment.v1(), taskCount, (previous, count) -> previous + count);
        totalTaskCount += taskCount;
    }

    final CountDownLatch updateLatch = new CountDownLatch(totalTaskCount);
    final ClusterStateTaskListener listener = new ClusterStateTaskListener() {
        @Override
        public void onFailure(String source, Exception e) {
            fail(ExceptionsHelper.detailedMessage(e));
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            processedStates.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
            updateLatch.countDown();
        }
    };

    final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
    CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
        final int index = i;
        Thread thread = new Thread(() -> {
            final String threadName = Thread.currentThread().getName();
            try {
                barrier.await();
                for (int j = 0; j < taskSubmissionsPerThread; j++) {
                    Tuple<TaskExecutor, Set<Task>> assignment =
                        assignments.get(index * taskSubmissionsPerThread + j);
                    final Set<Task> tasks = assignment.v2();
                    submittedTasksPerThread
                        .computeIfAbsent(threadName, key -> new AtomicInteger())
                        .addAndGet(tasks.size());
                    final TaskExecutor executor = assignment.v1();
                    if (tasks.size() == 1) {
                        clusterService.submitStateUpdateTask(
                            threadName,
                            tasks.stream().findFirst().get(),
                            ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                            executor,
                            listener);
                    } else {
                        Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
                        tasks.forEach(t -> taskListeners.put(t, listener));
                        clusterService.submitStateUpdateTasks(
                            threadName,
                            taskListeners,
                            ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                            executor);
                    }
                }
                barrier.await();
            } catch (BrokenBarrierException | InterruptedException e) {
                throw new AssertionError(e);
            }
        });
        thread.start();
    }

    // wait for all threads to be ready
    barrier.await();
    // wait for all threads to finish
    barrier.await();

    // wait until all the cluster state updates have been processed
    updateLatch.await();
    // and until all of the publication callbacks have completed (acquiring every permit
    // proves each acquire() in execute() was matched by a release() in clusterStatePublished())
    semaphore.acquire(numberOfExecutors);

    // assert the number of executed tasks is correct
    assertEquals(totalTaskCount, counter.get());

    // assert each executor executed the correct number of tasks
    for (TaskExecutor executor : executors) {
        if (counts.containsKey(executor)) {
            assertEquals((int) counts.get(executor), executor.counter.get());
            assertEquals(executor.batches.get(), executor.published.get());
        }
    }

    // assert the correct number of clusterStateProcessed events were triggered
    for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
        assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
        assertEquals(
            "not all tasks submitted by " + entry.getKey() + " received a processed event",
            entry.getValue().get(),
            submittedTasksPerThread.get(entry.getKey()).get());
    }
}
// test that for a single thread, tasks are executed in the order
// that they are submitted
public void testClusterStateUpdateTasksAreExecutedInOrder()
        throws BrokenBarrierException, InterruptedException {
    class TaskExecutor implements ClusterStateTaskExecutor<Integer> {
        List<Integer> tasks = new ArrayList<>();

        @Override
        public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks)
                throws Exception {
            this.tasks.addAll(tasks);
            return BatchResult.<Integer>builder()
                .successes(tasks)
                .build(ClusterState.builder(currentState).build());
        }

        @Override
        public boolean runOnlyOnMaster() {
            return false;
        }
    }

    int numberOfThreads = randomIntBetween(2, 8);
    TaskExecutor[] executors = new TaskExecutor[numberOfThreads];
    for (int i = 0; i < numberOfThreads; i++) {
        executors[i] = new TaskExecutor();
    }

    int tasksSubmittedPerThread = randomIntBetween(2, 1024);

    CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>();
    CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);

    ClusterStateTaskListener listener = new ClusterStateTaskListener() {
        @Override
        public void onFailure(String source, Exception e) {
            logger.error(
                (Supplier<?>) () -> new ParameterizedMessage("unexpected failure: [{}]", source), e);
            failures.add(new Tuple<>(source, e));
            updateLatch.countDown();
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            updateLatch.countDown();
        }
    };

    CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);

    for (int i = 0; i < numberOfThreads; i++) {
        final int index = i;
        Thread thread = new Thread(() -> {
            try {
                barrier.await();
                for (int j = 0; j < tasksSubmittedPerThread; j++) {
                    clusterService.submitStateUpdateTask(
                        "[" + index + "][" + j + "]",
                        j,
                        ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                        executors[index],
                        listener);
                }
                barrier.await();
            } catch (InterruptedException | BrokenBarrierException e) {
                throw new AssertionError(e);
            }
        });
        thread.start();
    }

    // wait for all threads to be ready
    barrier.await();
    // wait for all threads to finish
    barrier.await();

    updateLatch.await();

    assertThat(failures, empty());

    for (int i = 0; i < numberOfThreads; i++) {
        assertEquals(tasksSubmittedPerThread, executors[i].tasks.size());
        for (int j = 0; j < tasksSubmittedPerThread; j++) {
            assertNotNull(executors[i].tasks.get(j));
            assertEquals(
                "cluster state update task executed out of order", j, (int) executors[i].tasks.get(j));
        }
    }
}
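// A minimal sketch of the fixture these tests assume: the clusterService field used
// throughout is a real ClusterService started against a test thread pool. TestThreadPool
// and ClusterServiceUtils.createClusterService come from the Elasticsearch test framework,
// but the exact wiring in this class may differ, so treat this as illustrative.
private TestThreadPool threadPool;
private ClusterService clusterService;

@Override
public void setUp() throws Exception {
    super.setUp();
    threadPool = new TestThreadPool(getTestName());
    clusterService = ClusterServiceUtils.createClusterService(threadPool);
}

@Override
public void tearDown() throws Exception {
    clusterService.close();
    terminate(threadPool); // ESTestCase helper that shuts the thread pool down
    super.tearDown();
}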