Example No. 1
 private static <D extends DynamicObject<D>> D createIndyProxy(Map map, Class<D> type) {
   ensureInitialized(type);
   try {
     // Build the per-type proxy support object at most once, then invoke its cached
     // constructor with the backing map to produce the actual instance.
     Object proxy =
         proxyCache.computeIfAbsent(type, Instances::createProxy).constructor().invoke(map, type);
     return type.cast(proxy);
   } catch (Throwable t) {
     // The invoked constructor can throw anything; rethrow as an unchecked exception.
     throw new RuntimeException(t);
   }
 }
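The computeIfAbsent call memoizes the expensive per-type proxy setup so it runs at most once per class. As a rough, self-contained sketch of the same per-class memoization pattern (the class and method names below are hypothetical, not part of the library shown above):

import java.lang.reflect.Method;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical illustration: cache an expensive reflective lookup per class.
final class MethodCache {
  private static final Map<Class<?>, Method[]> CACHE = new ConcurrentHashMap<>();

  // getDeclaredMethods() runs at most once per class; later calls reuse the cached array.
  static Method[] methodsOf(Class<?> type) {
    return CACHE.computeIfAbsent(type, Class::getDeclaredMethods);
  }
}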
Example No. 2
 /**
  * Returns the cached instance, if one exists, in place of the freshly deserialized instance,
  * so that each sort name maps to a single canonical object.
  */
 Object readResolve() throws ObjectStreamException {
   if (cache.containsKey(name) && cache.get(name).ordinal != this.ordinal) {
     KEMException.criticalError(
         "The ordinal for sort: "
             + name
             + " is "
             + cache.get(name).ordinal
             + " in the cache and "
             + this.ordinal
             + " serialized.");
   }
   // TODO: fix bug: ordinals from deserialized objects may overlap with those of newly created
   // objects
   return cache.computeIfAbsent(name, x -> this);
 }
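This readResolve implements the usual intern-on-deserialization idiom: whatever instance the stream produced is folded into the cache, and the canonical instance for that name is returned. A stripped-down, hypothetical sketch of the same idiom (the Token class below is illustrative, not taken from the project above):

import java.io.Serializable;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical: at most one Token per name is ever observable, even across deserialization.
final class Token implements Serializable {
  private static final long serialVersionUID = 1L;
  private static final Map<String, Token> CACHE = new ConcurrentHashMap<>();

  private final String name;

  private Token(String name) {
    this.name = name;
  }

  static Token of(String name) {
    return CACHE.computeIfAbsent(name, Token::new);
  }

  // Called by the serialization machinery after a Token is read from a stream;
  // returns the cached instance if one already exists, otherwise caches this one.
  Object readResolve() {
    return CACHE.computeIfAbsent(name, n -> this);
  }
}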
Example No. 3
  private ClusterCacheStatus initCacheStatusIfAbsent(String cacheName) {
   return cacheStatusMap.computeIfAbsent(
       cacheName,
       (name) -> {
          // We assume that any cache with partition handling configured is already defined on
          // all the nodes (including the coordinator) before it starts on any node.
         Configuration cacheConfiguration = cacheManager.getCacheConfiguration(cacheName);
         AvailabilityStrategy availabilityStrategy;
         if (cacheConfiguration != null
             && cacheConfiguration.clustering().partitionHandling().enabled()) {
           availabilityStrategy = new PreferConsistencyStrategy();
         } else {
           availabilityStrategy = new PreferAvailabilityStrategy();
         }
         return new ClusterCacheStatus(cacheName, availabilityStrategy, this, transport);
       });
 }
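The notable point here is that the computeIfAbsent factory inspects the cache configuration and picks an availability strategy before building the cached status, so the choice is made exactly once per cache name. A minimal, hypothetical sketch of that shape (all names below are invented for illustration):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical registry that decides a strategy once, inside the computeIfAbsent factory.
final class StatusRegistry {
  interface Strategy {}
  static final class ConsistencyFirst implements Strategy {}
  static final class AvailabilityFirst implements Strategy {}

  static final class Status {
    final String cacheName;
    final Strategy strategy;
    Status(String cacheName, Strategy strategy) {
      this.cacheName = cacheName;
      this.strategy = strategy;
    }
  }

  private final Map<String, Status> statuses = new ConcurrentHashMap<>();

  // The strategy choice runs only on the first request for a given cache name.
  Status statusFor(String cacheName, boolean partitionHandlingEnabled) {
    return statuses.computeIfAbsent(cacheName, name ->
        new Status(name, partitionHandlingEnabled ? new ConsistencyFirst() : new AvailabilityFirst()));
  }
}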
Example No. 4
 /**
  * Gets the corresponding {@code Sort} from its {@code String} representation.
  *
  * @param name the name of the sort
  * @return the sort
  */
 public static Sort of(String name) {
   return cache.computeIfAbsent(name, s -> new Sort(s, maxOrdinal.getAndIncrement()));
 }
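Because the factory interns sorts by name, repeated calls with the same name return the identical instance, and the ordinal is allocated only on the first call. A quick usage sketch, assuming the Sort class above is available:

Sort intSort = Sort.of("Int");
Sort sameSort = Sort.of("Int");
assert intSort == sameSort; // same cached instance, hence the same ordinal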
Example No. 5
 /**
  * Publish a message on a specific topic.
  *
  * @param topic The topic name to publish on. Note that this is the short name, not the fully
  *     qualified name including project. The project to publish on is configured using the {@link
  *     Builder}.
  * @param message The message to publish.
  * @return A future that is fulfilled with the resulting Google Pub/Sub message ID when the
  *     message has been successfully published.
  */
 public CompletableFuture<String> publish(final String topic, final Message message) {
   final TopicQueue queue = topics.computeIfAbsent(topic, TopicQueue::new);
   final CompletableFuture<String> future = queue.send(message);
   listener.publishingMessage(this, topic, message, future);
   return future;
 }
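Here computeIfAbsent lazily creates one TopicQueue per topic, so all publishers of the same topic share a single queue. A self-contained, hypothetical sketch of that per-key lazy initialization (the class below is illustrative, not the library's actual TopicQueue):

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

// Hypothetical: one queue per topic, created on first use and shared by all callers.
final class TopicQueues {
  private final Map<String, Queue<String>> queues = new ConcurrentHashMap<>();

  void enqueue(String topic, String payload) {
    // With ConcurrentHashMap, the mapping function runs at most once per topic,
    // so every caller sees the same queue instance.
    queues.computeIfAbsent(topic, t -> new ConcurrentLinkedQueue<>()).add(payload);
  }
}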
Example No. 6
  public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
    AtomicInteger counter = new AtomicInteger();
    // A task that is executed more than once fails; each successful execution bumps the shared counter.
    class Task {
      private AtomicBoolean state = new AtomicBoolean();
      private final int id;

      Task(int id) {
        this.id = id;
      }

      public void execute() {
        if (!state.compareAndSet(false, true)) {
          throw new IllegalStateException();
        } else {
          counter.incrementAndGet();
        }
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) {
          return true;
        }
        if (o == null || getClass() != o.getClass()) {
          return false;
        }
        Task task = (Task) o;
        return id == task.id;
      }

      @Override
      public int hashCode() {
        return id;
      }

      @Override
      public String toString() {
        return Integer.toString(id);
      }
    }

    int numberOfThreads = randomIntBetween(2, 8);
    int taskSubmissionsPerThread = randomIntBetween(1, 64);
    int numberOfExecutors = Math.max(1, numberOfThreads / 4);
    final Semaphore semaphore = new Semaphore(numberOfExecutors);

    class TaskExecutor implements ClusterStateTaskExecutor<Task> {
      private final List<Set<Task>> taskGroups;
      private AtomicInteger counter = new AtomicInteger();
      private AtomicInteger batches = new AtomicInteger();
      private AtomicInteger published = new AtomicInteger();

      public TaskExecutor(List<Set<Task>> taskGroups) {
        this.taskGroups = taskGroups;
      }

      @Override
      public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks)
          throws Exception {
        for (Set<Task> expectedSet : taskGroups) {
          long count = tasks.stream().filter(expectedSet::contains).count();
          assertThat(
              "batched set should be executed together or not at all. Expected "
                  + expectedSet
                  + "s. Executing "
                  + tasks,
              count,
              anyOf(equalTo(0L), equalTo((long) expectedSet.size())));
        }
        tasks.forEach(Task::execute);
        counter.addAndGet(tasks.size());
        ClusterState maybeUpdatedClusterState = currentState;
        // Sometimes return a changed cluster state so that a publication is triggered;
        // the permit acquired here is released in clusterStatePublished below.
        if (randomBoolean()) {
          maybeUpdatedClusterState = ClusterState.builder(currentState).build();
          batches.incrementAndGet();
          semaphore.acquire();
        }
        return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
      }

      @Override
      public boolean runOnlyOnMaster() {
        return false;
      }

      @Override
      public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
        published.incrementAndGet();
        semaphore.release();
      }
    }

    ConcurrentMap<String, AtomicInteger> processedStates = new ConcurrentHashMap<>();

    List<Set<Task>> taskGroups = new ArrayList<>();
    List<TaskExecutor> executors = new ArrayList<>();
    for (int i = 0; i < numberOfExecutors; i++) {
      executors.add(new TaskExecutor(taskGroups));
    }

    // randomly assign tasks to executors
    List<Tuple<TaskExecutor, Set<Task>>> assignments = new ArrayList<>();
    int taskId = 0;
    for (int i = 0; i < numberOfThreads; i++) {
      for (int j = 0; j < taskSubmissionsPerThread; j++) {
        TaskExecutor executor = randomFrom(executors);
        Set<Task> tasks = new HashSet<>();
        for (int t = randomInt(3); t >= 0; t--) {
          tasks.add(new Task(taskId++));
        }
        taskGroups.add(tasks);
        assignments.add(Tuple.tuple(executor, tasks));
      }
    }

    Map<TaskExecutor, Integer> counts = new HashMap<>();
    int totalTaskCount = 0;
    for (Tuple<TaskExecutor, Set<Task>> assignment : assignments) {
      final int taskCount = assignment.v2().size();
      counts.merge(assignment.v1(), taskCount, (previous, count) -> previous + count);
      totalTaskCount += taskCount;
    }
    final CountDownLatch updateLatch = new CountDownLatch(totalTaskCount);
    final ClusterStateTaskListener listener =
        new ClusterStateTaskListener() {
          @Override
          public void onFailure(String source, Exception e) {
            fail(ExceptionsHelper.detailedMessage(e));
          }

          @Override
          public void clusterStateProcessed(
              String source, ClusterState oldState, ClusterState newState) {
            processedStates.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
            updateLatch.countDown();
          }
        };

    final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
    CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
      final int index = i;
      Thread thread =
          new Thread(
              () -> {
                final String threadName = Thread.currentThread().getName();
                try {
                  barrier.await();
                  for (int j = 0; j < taskSubmissionsPerThread; j++) {
                    Tuple<TaskExecutor, Set<Task>> assignment =
                        assignments.get(index * taskSubmissionsPerThread + j);
                    final Set<Task> tasks = assignment.v2();
                    submittedTasksPerThread
                        .computeIfAbsent(threadName, key -> new AtomicInteger())
                        .addAndGet(tasks.size());
                    final TaskExecutor executor = assignment.v1();
                    if (tasks.size() == 1) {
                      clusterService.submitStateUpdateTask(
                          threadName,
                          tasks.stream().findFirst().get(),
                          ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                          executor,
                          listener);
                    } else {
                      Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
                      tasks.forEach(t -> taskListeners.put(t, listener));
                      clusterService.submitStateUpdateTasks(
                          threadName,
                          taskListeners,
                          ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                          executor);
                    }
                  }
                  barrier.await();
                } catch (BrokenBarrierException | InterruptedException e) {
                  throw new AssertionError(e);
                }
              });
      thread.start();
    }

    // wait for all threads to be ready
    barrier.await();
    // wait for all threads to finish
    barrier.await();

    // wait until all the cluster state updates have been processed
    updateLatch.await();
    // and until all of the publication callbacks have completed
    semaphore.acquire(numberOfExecutors);

    // assert the number of executed tasks is correct
    assertEquals(totalTaskCount, counter.get());

    // assert each executor executed the correct number of tasks
    for (TaskExecutor executor : executors) {
      if (counts.containsKey(executor)) {
        assertEquals((int) counts.get(executor), executor.counter.get());
        assertEquals(executor.batches.get(), executor.published.get());
      }
    }

    // assert the correct number of clusterStateProcessed events were triggered
    for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
      assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
      assertEquals(
          "not all tasks submitted by " + entry.getKey() + " received a processed event",
          entry.getValue().get(),
          submittedTasksPerThread.get(entry.getKey()).get());
    }
  }
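Both processedStates and submittedTasksPerThread in the test above rely on the same per-key counter idiom: computeIfAbsent installs an AtomicInteger the first time a key is seen, and the increment itself is atomic. Isolated as a small, hypothetical helper, the pattern looks like this:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical helper: thread-safe event counting per key.
final class EventCounters {
  private final ConcurrentMap<String, AtomicInteger> counts = new ConcurrentHashMap<>();

  // The AtomicInteger is created at most once per key; the increment itself is atomic.
  int record(String key) {
    return counts.computeIfAbsent(key, k -> new AtomicInteger()).incrementAndGet();
  }
}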