@Test
  public void functionTest() {
    Map<Integer, String> map = Maps.newHashMap();
    for (int i = 0; i < 10; i++) {
      map.putIfAbsent(i, "val" + i);
    }

    log.info("key 3 value:'{}'", map.get(3));

    log.info("map computeIfPresent:'{}'", map.computeIfPresent(9, (num, val) -> null));

    log.info("key 9 containsKey:'{}'", map.containsKey(9));

    log.info("computeIfAbsent 23 :'{}'", map.computeIfAbsent(23, num -> "val" + num));

    log.info("map.containsKey(23):'{}'", map.containsKey(23));

    log.info("computeIfAbsent 3:'{}'", map.computeIfAbsent(3, num -> "bam"));
    log.info("get 3:'{}'", map.get(3));

    map.remove(3, "val");

    log.info("remove get 3:'{}'", map.get(3));

    map.remove(3, "val3");

    log.info("remove get 3:'{}'", map.get(3));

    log.info("getDefault:'{}'", map.getOrDefault(43, "not found"));

    log.info("merge:'{}'", map.merge(9, "val9", (value, newValue) -> value.concat(newValue)));

    log.info("merge:'{}'", map.merge(9, "count", (value, newValue) -> value.concat(newValue)));
  }
Example #2
  public static void main(String[] args) {
    Map<Integer, String> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
      map.putIfAbsent(i, "val" + i);
    }
    map.forEach((id, val) -> System.out.println(val));

    map.computeIfPresent(3, (num, val) -> val + num);
    System.out.println(map.get(3)); // val33

    map.computeIfPresent(9, (num, val) -> null);
    System.out.println(map.containsKey(9)); // false

    map.computeIfAbsent(23, num -> "val" + num);
    System.out.println(map.containsKey(23)); // true

    map.computeIfAbsent(3, num -> "bam");
    System.out.println(map.get(3)); // val33

    map.remove(3, "val3");
    System.out.println(map.get(3)); // val33

    map.remove(3, "val33");
    System.out.println(map.get(3)); // null

    System.out.println(map.getOrDefault(42, "not found")); // not found

    map.merge(9, "val9", (value, newValue) -> value.concat(newValue));
    System.out.println(map.get(9)); // val9

    map.merge(9, "concat", (value, newValue) -> value.concat(newValue));
    System.out.println(map.get(9)); // val9concat
  }
Example #3
 /**
  * Try to reserve the given number of bytes. Return value indicates whether the caller may use the
  * requested memory.
  */
 public synchronized boolean tryReserve(QueryId queryId, long bytes) {
   checkArgument(bytes >= 0, "bytes is negative");
   if (freeBytes - bytes < 0) {
     return false;
   }
   freeBytes -= bytes;
   if (bytes != 0) {
     queryMemoryReservations.merge(queryId, bytes, Long::sum);
   }
   return true;
 }
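The Javadoc above states the contract: the boolean return value tells the caller whether the requested memory may be used. A minimal, self-contained sketch of that contract with plain JDK types (SimplePool, the 1 MiB budget, and String query ids are assumptions for this sketch, not the original class):

import java.util.HashMap;
import java.util.Map;

// Hypothetical simplified pool illustrating the tryReserve contract shown above.
class SimplePool {
  private long freeBytes = 1 << 20; // assumed 1 MiB budget for the sketch
  private final Map<String, Long> reservations = new HashMap<>();

  synchronized boolean tryReserve(String queryId, long bytes) {
    if (freeBytes - bytes < 0) {
      return false; // reservation denied: the caller must not use the memory
    }
    freeBytes -= bytes;
    if (bytes != 0) {
      reservations.merge(queryId, bytes, Long::sum); // accumulate per-query reservations
    }
    return true;
  }

  public static void main(String[] args) {
    SimplePool pool = new SimplePool();
    System.out.println(pool.tryReserve("q1", 512 * 1024));      // true
    System.out.println(pool.tryReserve("q1", 2 * 1024 * 1024)); // false: would exceed the budget
  }
}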
Example #4
 /**
  * Reserves the given number of bytes. Caller should wait on the returned future, before
  * allocating more memory.
  */
 public synchronized ListenableFuture<?> reserve(QueryId queryId, long bytes) {
   checkArgument(bytes >= 0, "bytes is negative");
   if (bytes != 0) {
     queryMemoryReservations.merge(queryId, bytes, Long::sum);
   }
   freeBytes -= bytes;
   if (freeBytes <= 0) {
     if (future == null) {
       future = SettableFuture.create();
     }
     checkState(!future.isDone(), "future is already completed");
     return future;
   }
   return NOT_BLOCKED;
 }
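The Javadoc for reserve(...) says the caller should wait on the returned future before allocating more memory; when the pool is not exhausted it returns NOT_BLOCKED, presumably an already-completed future. A minimal caller-side sketch under those assumptions (pool, queryId, bytes and allocateMore are placeholder names, not part of the original class):

    // Hypothetical caller: wait on the returned future before requesting more memory.
    ListenableFuture<?> blocked = pool.reserve(queryId, bytes);
    blocked.get();   // completes immediately when reserve(...) returned NOT_BLOCKED
    allocateMore();  // only now ask the pool for further memory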
Example #5
  public static void run(Folder f) {
    System.out.println("+" + f.path);

    for (Extension x : f.allowedExtensions) {
      System.out.println(x.toString());
    }

    f.getFilePaths();

    for (String path : f.files) {
      File file = FileFactory.Create(path);
      System.out.println(file.getPath());
      file.read();

      file.getWords().forEach((k, v) -> words.merge(k, v, (v1, v2) -> v1 + v2));
    }

    // Ask the user for a threshold (prompt in Portuguese: "What is the minimum number of repetitions to display?")
    int times =
        Integer.parseInt(
            JOptionPane.showInputDialog("Qual o mínimo de repetições a serem mostradas"));
    File.ListWords(words, times);
  }
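The forEach/merge line above folds each file's word counts into the shared words map. A self-contained sketch of that idiom with plain JDK maps (the Integer value type is an assumption about what getWords() returns):

import java.util.HashMap;
import java.util.Map;

public class WordCountDemo {
  public static void main(String[] args) {
    Map<String, Integer> words = new HashMap<>();
    Map<String, Integer> fileWords = Map.of("map", 3, "merge", 1); // one "file's" counts

    // Fold the per-file counts into the global map, summing on key collisions.
    fileWords.forEach((k, v) -> words.merge(k, v, (v1, v2) -> v1 + v2));
    fileWords.forEach((k, v) -> words.merge(k, v, (v1, v2) -> v1 + v2)); // a second, identical file

    System.out.println(words); // {merge=2, map=6} (iteration order is unspecified)
  }
}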
Example #6
 public final void addPoints(L2PcInstance player, int bossId, int points) {
   final Map<Integer, Integer> tmpPoint =
       _list.computeIfAbsent(player.getObjectId(), k -> new HashMap<>());
   updatePointsInDB(player, bossId, tmpPoint.merge(bossId, points, Integer::sum));
 }
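addPoints combines computeIfAbsent and merge to keep one points map per player, created on demand, with per-boss points accumulated. A self-contained sketch of the same nesting with plain JDK types (the ids are made up for the sketch):

import java.util.HashMap;
import java.util.Map;

public class PointsDemo {
  public static void main(String[] args) {
    // Outer map: player id -> (boss id -> points); inner maps created on demand.
    Map<Integer, Map<Integer, Integer>> list = new HashMap<>();
    int playerId = 1, bossId = 29001;

    Map<Integer, Integer> perPlayer = list.computeIfAbsent(playerId, k -> new HashMap<>());
    System.out.println(perPlayer.merge(bossId, 10, Integer::sum)); // 10: key was absent
    System.out.println(perPlayer.merge(bossId, 5, Integer::sum));  // 15: values summed
  }
}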
Example #7
  public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
    AtomicInteger counter = new AtomicInteger();
    class Task {
      private AtomicBoolean state = new AtomicBoolean();
      private final int id;

      Task(int id) {
        this.id = id;
      }

      public void execute() {
        if (!state.compareAndSet(false, true)) {
          throw new IllegalStateException();
        } else {
          counter.incrementAndGet();
        }
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) {
          return true;
        }
        if (o == null || getClass() != o.getClass()) {
          return false;
        }
        Task task = (Task) o;
        return id == task.id;
      }

      @Override
      public int hashCode() {
        return id;
      }

      @Override
      public String toString() {
        return Integer.toString(id);
      }
    }

    int numberOfThreads = randomIntBetween(2, 8);
    int taskSubmissionsPerThread = randomIntBetween(1, 64);
    int numberOfExecutors = Math.max(1, numberOfThreads / 4);
    final Semaphore semaphore = new Semaphore(numberOfExecutors);

    class TaskExecutor implements ClusterStateTaskExecutor<Task> {
      private final List<Set<Task>> taskGroups;
      private AtomicInteger counter = new AtomicInteger();
      private AtomicInteger batches = new AtomicInteger();
      private AtomicInteger published = new AtomicInteger();

      public TaskExecutor(List<Set<Task>> taskGroups) {
        this.taskGroups = taskGroups;
      }

      @Override
      public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks)
          throws Exception {
        for (Set<Task> expectedSet : taskGroups) {
          long count = tasks.stream().filter(expectedSet::contains).count();
          assertThat(
              "batched set should be executed together or not at all. Expected "
                  + expectedSet
                  + "s. Executing "
                  + tasks,
              count,
              anyOf(equalTo(0L), equalTo((long) expectedSet.size())));
        }
        tasks.forEach(Task::execute);
        counter.addAndGet(tasks.size());
        ClusterState maybeUpdatedClusterState = currentState;
        if (randomBoolean()) {
          maybeUpdatedClusterState = ClusterState.builder(currentState).build();
          batches.incrementAndGet();
          semaphore.acquire();
        }
        return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
      }

      @Override
      public boolean runOnlyOnMaster() {
        return false;
      }

      @Override
      public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
        published.incrementAndGet();
        semaphore.release();
      }
    }

    ConcurrentMap<String, AtomicInteger> processedStates = new ConcurrentHashMap<>();

    List<Set<Task>> taskGroups = new ArrayList<>();
    List<TaskExecutor> executors = new ArrayList<>();
    for (int i = 0; i < numberOfExecutors; i++) {
      executors.add(new TaskExecutor(taskGroups));
    }

    // randomly assign tasks to executors
    List<Tuple<TaskExecutor, Set<Task>>> assignments = new ArrayList<>();
    int taskId = 0;
    for (int i = 0; i < numberOfThreads; i++) {
      for (int j = 0; j < taskSubmissionsPerThread; j++) {
        TaskExecutor executor = randomFrom(executors);
        Set<Task> tasks = new HashSet<>();
        for (int t = randomInt(3); t >= 0; t--) {
          tasks.add(new Task(taskId++));
        }
        taskGroups.add(tasks);
        assignments.add(Tuple.tuple(executor, tasks));
      }
    }

    Map<TaskExecutor, Integer> counts = new HashMap<>();
    int totalTaskCount = 0;
    for (Tuple<TaskExecutor, Set<Task>> assignment : assignments) {
      final int taskCount = assignment.v2().size();
      counts.merge(assignment.v1(), taskCount, (previous, count) -> previous + count);
      totalTaskCount += taskCount;
    }
    final CountDownLatch updateLatch = new CountDownLatch(totalTaskCount);
    final ClusterStateTaskListener listener =
        new ClusterStateTaskListener() {
          @Override
          public void onFailure(String source, Exception e) {
            fail(ExceptionsHelper.detailedMessage(e));
          }

          @Override
          public void clusterStateProcessed(
              String source, ClusterState oldState, ClusterState newState) {
            processedStates.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
            updateLatch.countDown();
          }
        };

    final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
    CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
      final int index = i;
      Thread thread =
          new Thread(
              () -> {
                final String threadName = Thread.currentThread().getName();
                try {
                  barrier.await();
                  for (int j = 0; j < taskSubmissionsPerThread; j++) {
                    Tuple<TaskExecutor, Set<Task>> assignment =
                        assignments.get(index * taskSubmissionsPerThread + j);
                    final Set<Task> tasks = assignment.v2();
                    submittedTasksPerThread
                        .computeIfAbsent(threadName, key -> new AtomicInteger())
                        .addAndGet(tasks.size());
                    final TaskExecutor executor = assignment.v1();
                    if (tasks.size() == 1) {
                      clusterService.submitStateUpdateTask(
                          threadName,
                          tasks.stream().findFirst().get(),
                          ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                          executor,
                          listener);
                    } else {
                      Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
                      tasks.stream().forEach(t -> taskListeners.put(t, listener));
                      clusterService.submitStateUpdateTasks(
                          threadName,
                          taskListeners,
                          ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                          executor);
                    }
                  }
                  barrier.await();
                } catch (BrokenBarrierException | InterruptedException e) {
                  throw new AssertionError(e);
                }
              });
      thread.start();
    }

    // wait for all threads to be ready
    barrier.await();
    // wait for all threads to finish
    barrier.await();

    // wait until all the cluster state updates have been processed
    updateLatch.await();
    // and until all of the publication callbacks have completed
    semaphore.acquire(numberOfExecutors);

    // assert the number of executed tasks is correct
    assertEquals(totalTaskCount, counter.get());

    // assert each executor executed the correct number of tasks
    for (TaskExecutor executor : executors) {
      if (counts.containsKey(executor)) {
        assertEquals((int) counts.get(executor), executor.counter.get());
        assertEquals(executor.batches.get(), executor.published.get());
      }
    }

    // assert the correct number of clusterStateProcessed events were triggered
    for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
      assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
      assertEquals(
          "not all tasks submitted by " + entry.getKey() + " received a processed event",
          entry.getValue().get(),
          submittedTasksPerThread.get(entry.getKey()).get());
    }
  }
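processedStates and submittedTasksPerThread both rely on the computeIfAbsent-then-increment idiom to count events per key from multiple threads. A self-contained sketch of that pattern (the key name and thread count are made up):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class PerKeyCounterDemo {
  public static void main(String[] args) throws InterruptedException {
    // One AtomicInteger per key, created on demand; safe to bump from many threads.
    ConcurrentMap<String, AtomicInteger> counts = new ConcurrentHashMap<>();
    Runnable work =
        () -> counts.computeIfAbsent("source-1", key -> new AtomicInteger()).incrementAndGet();

    Thread t1 = new Thread(work);
    Thread t2 = new Thread(work);
    t1.start();
    t2.start();
    t1.join();
    t2.join();

    System.out.println(counts.get("source-1").get()); // 2
  }
}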