Example #1
    public NonBlockingIdentityHashMap<Long, TestKey> getMapMultithreaded()
        throws InterruptedException, ExecutionException {
      final int threadCount = _items.keySet().size();
      final NonBlockingIdentityHashMap<Long, TestKey> map =
          new NonBlockingIdentityHashMap<Long, TestKey>();

      // use a barrier to release all threads at once, avoiding a rolling start
      // that would give no actual concurrency
      final CyclicBarrier barrier = new CyclicBarrier(threadCount);
      final ExecutorService ex = Executors.newFixedThreadPool(threadCount);
      final CompletionService<Integer> co = new ExecutorCompletionService<Integer>(ex);
      for (Integer type : _items.keySet()) {
        // A linked-list of things to insert
        List<TestKey> items = _items.get(type);
        TestKeyFeederThread feeder = new TestKeyFeederThread(type, items, map, barrier);
        co.submit(feeder);
      }

      // wait for all threads to return
      int itemCount = 0;
      for (int retCount = 0; retCount < threadCount; retCount++) {
        final Future<Integer> result = co.take();
        itemCount += result.get();
      }
      ex.shutdown();
      return map;
    }
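The TestKeyFeederThread task is not included in this example. Given that it receives the type, its item list, the shared map and the barrier, and that it is submitted to a CompletionService<Integer>, it is presumably a Callable<Integer> that waits on the barrier and then inserts its items, returning how many it added. A minimal sketch under that assumption (TestKey#getId() is assumed here to supply the Long key):

    // Hypothetical sketch of the feeder task: every thread blocks on the shared
    // barrier, so all of them start inserting at the same instant.
    private static class TestKeyFeederThread implements Callable<Integer> {
      private final int _type;
      private final List<TestKey> _items;
      private final NonBlockingIdentityHashMap<Long, TestKey> _map;
      private final CyclicBarrier _barrier;

      TestKeyFeederThread(int type, List<TestKey> items,
          NonBlockingIdentityHashMap<Long, TestKey> map, CyclicBarrier barrier) {
        _type = type;
        _items = items;
        _map = map;
        _barrier = barrier;
      }

      @Override
      public Integer call() throws Exception {
        _barrier.await(); // wait until every feeder thread is ready
        for (TestKey item : _items) {
          _map.put(item.getId(), item); // getId() assumed to return the Long key
        }
        return _items.size();
      }
    }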
Example #2
  /**
   * Test concurrent reader and writer (GH-458).
   *
   * <p><b>Test case:</b>
   *
   * <ol>
   *   <li>start a long running reader;
   *   <li>try to start a writer: it should time out;
   *   <li>stop the reader;
   *   <li>start the writer again: it should succeed.
   * </ol>
   *
   * @throws Exception error during request execution
   */
  @Test
  @Ignore("There is no way to stop a query on the server!")
  public void testReaderWriter() throws Exception {
    final String readerQuery = "?query=(1%20to%20100000000000000)%5b.=1%5d";
    final String writerQuery = "/test.xml";
    final byte[] content = Token.token("<a/>");

    final Get readerAction = new Get(readerQuery);
    final Put writerAction = new Put(writerQuery, content);

    final ExecutorService exec = Executors.newFixedThreadPool(2);

    // start reader
    exec.submit(readerAction);
    Performance.sleep(TIMEOUT); // delay in order to be sure that the reader has started
    // start writer
    Future<HTTPResponse> writer = exec.submit(writerAction);

    try {
      final HTTPResponse result = writer.get(TIMEOUT, TimeUnit.MILLISECONDS);

      if (result.status.isSuccess()) fail("Database modified while a reader is running");
      throw new Exception(result.toString());
    } catch (final TimeoutException e) {
      // writer is blocked by the reader: stop it
      writerAction.stop = true;
    }

    // stop reader
    readerAction.stop = true;

    // start the writer again
    writer = exec.submit(writerAction);
    assertEquals(HTTPCode.CREATED, writer.get().status);
  }
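The Get and Put actions and their stop flag are BaseX-specific test helpers that are not shown here. The mechanism the test relies on, though, is generic: Future.get with a timeout acts as a probe, and a TimeoutException means the submitted task is still blocked by the reader. A self-contained illustration of that probe (the helper name is illustrative, not part of the BaseX API):

import java.util.concurrent.*;

final class BlockedProbe {
  /** Returns true if the task has not completed within the given timeout. */
  static boolean stillBlocked(Future<?> task, long millis)
      throws InterruptedException, ExecutionException {
    try {
      task.get(millis, TimeUnit.MILLISECONDS); // returned: the task went through
      return false;
    } catch (TimeoutException expected) {
      return true; // still blocked, which is what this reader/writer test expects
    }
  }
}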
Example #3
  @Test
  public void isDone_shouldReturnTrue_whenTheThreadIsFinished_givenAFutureReturnedByExecute()
      throws Exception {
    Future<?> result = asyncExecutor.execute(() -> {});

    Thread.sleep(50L); // give other thread a chance to finish...

    assertTrue(result.isDone());
  }
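The fixed 50 ms sleep makes this check timing-dependent. A less flaky variant, sketched below (not part of the original suite), is to join the task via get() first, assuming the returned futures block on get() as exercised in Example #4:

  // Sketch of a sleep-free variant: get() joins the worker thread, so isDone()
  // no longer depends on 50 ms being "long enough".
  @Test
  public void isDone_shouldReturnTrue_afterGetHasJoinedTheThread() throws Exception {
    Future<?> result = asyncExecutor.execute(() -> {});
    result.get(5, TimeUnit.SECONDS); // fails the test if the task never finishes
    assertTrue(result.isDone());
  }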
Example #4
  @Test
  public void get_shouldJoinTheCreatedThread_givenAFutureReturnedByExecute() throws Exception {
    long startTime = System.nanoTime();
    Future<?> result = asyncExecutor.execute(suppressCheckedExceptions(() -> Thread.sleep(250L)));
    result.get();
    long endTime = System.nanoTime();

    assertThat(endTime - startTime, isGreaterThan(100 * 1000000L));
  }
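suppressCheckedExceptions is a helper from the same test suite and is not shown in these examples. A plausible sketch, assuming it merely adapts a block that throws checked exceptions into a plain Runnable (the CheckedRunnable type is introduced here only for the sketch):

  // Hypothetical helper: lets a checked-exception-throwing block (such as
  // Thread.sleep) be passed where a Runnable is expected.
  interface CheckedRunnable {
    void run() throws Exception;
  }

  static Runnable suppressCheckedExceptions(CheckedRunnable block) {
    return () -> {
      try {
        block.run();
      } catch (RuntimeException e) {
        throw e; // unchecked exceptions pass through unchanged
      } catch (Exception e) {
        throw new RuntimeException(e); // checked exceptions are wrapped
      }
    };
  }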
Example #5
 @Test
 public void get_shouldSucceed_whenTheThreadThrowsAnException_givenAFutureReturnedByExecute()
     throws Exception {
   Future<?> result =
       asyncExecutor.execute(
           () -> {
             throw new RuntimeException("blah");
           });
   result.get(100, TimeUnit.MILLISECONDS);
 }
Example #6
 @Test
 public void testGetAsync() throws Exception {
   HazelcastClient hClient = getHazelcastClient();
   String key = "key";
   String value1 = "value1";
   IMap<String, String> map = hClient.getMap("map:test:getAsync");
   map.put(key, value1);
   Future<String> f1 = map.getAsync(key);
   assertEquals(value1, f1.get());
 }
Example #7
  @Test
  public void cancel_shouldThrowAnUnsupportedOperationException_givenAFutureReturnedByExecute()
      throws Exception {
    Future<?> result = asyncExecutor.execute(() -> {});

    try {
      result.cancel(true);
      fail("Expected an exception");
    } catch (UnsupportedOperationException ignore) {
    }
  }
Example #8
  @Test
  public void
      getWithATimeout_shouldThrowATimeoutException_whenTheThreadDoesNotEndQuickly_givenAFutureReturnedByExecute()
          throws Exception {
    Future<?> result = asyncExecutor.execute(suppressCheckedExceptions(() -> Thread.sleep(250L)));

    try {
      result.get(50, TimeUnit.MILLISECONDS);
      fail("Expected an exception");
    } catch (TimeoutException ignore) {
    }
  }
Example #9
  @Test
  public void isDone_shouldReturnFalse_whenTheThreadIsAlive_givenAFutureReturnedByExecute()
      throws Exception {
    CountDownLatch latch = new CountDownLatch(1);

    Future<?> result = asyncExecutor.execute(suppressCheckedExceptions(latch::await));

    try {
      assertFalse(result.isDone());
    } finally {
      latch.countDown(); // don't want a thread waiting forever...
    }
  }
Example #10
  /**
   * Test 2 concurrent readers (GH-458).
   *
   * <p><b>Test case:</b>
   *
   * <ol>
   *   <li>start a long running reader;
   *   <li>start a fast reader: it should succeed.
   * </ol>
   *
   * @throws Exception error during request execution
   */
  @Test
  public void testMultipleReaders() throws Exception {
    final String number = "63177";
    final String slowQuery = "?query=(1%20to%20100000000000000)%5b.=1%5d";
    final String fastQuery = "?query=" + number;

    final Get slowAction = new Get(slowQuery);
    final Get fastAction = new Get(fastQuery);

    final ExecutorService exec = Executors.newFixedThreadPool(2);

    exec.submit(slowAction);
    Performance.sleep(TIMEOUT); // delay in order to be sure that the reader has started
    final Future<HTTPResponse> fast = exec.submit(fastAction);

    try {
      final HTTPResponse result = fast.get(TIMEOUT, TimeUnit.MILLISECONDS);
      assertEquals(HTTPCode.OK, result.status);
      assertEquals(number, result.data);
    } finally {
      slowAction.stop = true;
    }
  }
Example #11
  /**
   * Test concurrent writers (GH-458).
   *
   * <p><b>Test case:</b>
   *
   * <ol>
   *   <li>start several writers one after another;
   *   <li>all writers should succeed.
   * </ol>
   *
   * @throws Exception error during request execution
   */
  @Test
  public void testMultipleWriters() throws Exception {
    final int count = 10;
    final String template =
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
            + "<command xmlns=\"http://basex.org/rest\"><text><![CDATA["
            + "ADD TO %1$d <node id=\"%1$d\"/>"
            + "]]></text></command>";

    @SuppressWarnings("unchecked")
    final Future<HTTPResponse>[] tasks = new Future[count];
    final ExecutorService exec = Executors.newFixedThreadPool(count);

    // start all writers (not at the same time, but still in parallel)
    for (int i = 0; i < count; i++) {
      final String command = String.format(template, i);
      tasks[i] = exec.submit(new Post("", Token.token(command)));
    }

    // check if all have finished successfully
    for (final Future<HTTPResponse> task : tasks) {
      assertEquals(HTTPCode.OK, task.get(TIMEOUT, TimeUnit.MILLISECONDS).status);
    }
  }
Example #12
 @Test
 public void getWithATimeout_shouldSucceed_whenTheThreadEndsQuickly_givenAFutureReturnedByExecute()
     throws Exception {
   Future<?> result = asyncExecutor.execute(suppressCheckedExceptions(() -> Thread.sleep(50L)));
   result.get(100, TimeUnit.MILLISECONDS);
 }
Example #13
  @Test
  public void isCancelled_shouldReturnFalse_givenAFutureReturnedByExecute() throws Exception {
    Future<?> result = asyncExecutor.execute(() -> {});

    assertFalse(result.isCancelled());
  }
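asyncExecutor itself never appears in these examples. Taken together, the tests expect execute to return a Future whose get() joins the worker (Example #4), whose get() does not propagate exceptions thrown by the task (Example #5), and whose cancel() is rejected (Example #7). A minimal executor satisfying those expectations might look like the sketch below; it is an assumption for illustration, not the implementation under test:

import java.util.concurrent.*;

// Hypothetical executor matching the behaviour exercised by the examples above.
class AsyncExecutor {
  private final ExecutorService delegate = Executors.newCachedThreadPool();

  Future<?> execute(Runnable task) {
    // Swallow task exceptions so that get() still succeeds (see Example #5).
    final Future<?> inner = delegate.submit(() -> {
      try {
        task.run();
      } catch (Throwable ignored) {
        // deliberately ignored in this sketch
      }
    });
    // Wrap the future so that cancel() is rejected (see Example #7).
    return new Future<Object>() {
      @Override public boolean cancel(boolean mayInterruptIfRunning) {
        throw new UnsupportedOperationException("cancel is not supported");
      }
      @Override public boolean isCancelled() { return false; }
      @Override public boolean isDone() { return inner.isDone(); }
      @Override public Object get() throws InterruptedException, ExecutionException {
        inner.get(); // joins the worker thread (see Example #4)
        return null;
      }
      @Override public Object get(long timeout, TimeUnit unit)
          throws InterruptedException, ExecutionException, TimeoutException {
        inner.get(timeout, unit);
        return null;
      }
    };
  }
}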
Example #14
  public void testGet() throws Exception {
    cache(0).put("myKey", "myValue");

    // add an interceptor on node B that will block state transfer until we are ready
    final CountDownLatch applyStateProceedLatch = new CountDownLatch(1);
    final CountDownLatch applyStateStartedLatch = new CountDownLatch(1);
    cacheConfigBuilder
        .customInterceptors()
        .addInterceptor()
        .before(InvocationContextInterceptor.class)
        .interceptor(
            new CommandInterceptor() {
              @Override
              protected Object handleDefault(InvocationContext ctx, VisitableCommand cmd)
                  throws Throwable {
                // if this 'put' command is caused by state transfer we block until GET begins
                if (cmd instanceof PutKeyValueCommand
                    && ((PutKeyValueCommand) cmd).hasFlag(Flag.PUT_FOR_STATE_TRANSFER)) {
                  // signal we encounter a state transfer PUT
                  applyStateStartedLatch.countDown();
                  // wait until it is ok to apply state
                  if (!applyStateProceedLatch.await(15, TimeUnit.SECONDS)) {
                    throw new TimeoutException();
                  }
                }
                return super.handleDefault(ctx, cmd);
              }
            });

    // add an interceptor on node B that will block GET commands until we are ready
    final CountDownLatch getKeyStartedLatch = new CountDownLatch(1);
    final CountDownLatch getKeyProceedLatch = new CountDownLatch(1);
    cacheConfigBuilder
        .customInterceptors()
        .addInterceptor()
        .before(CallInterceptor.class)
        .interceptor(
            new CommandInterceptor() {
              @Override
              protected Object handleDefault(InvocationContext ctx, VisitableCommand cmd)
                  throws Throwable {
                if (cmd instanceof GetKeyValueCommand) {
                  // signal we encounter a GET
                  getKeyStartedLatch.countDown();
                  // wait until it is ok to continue with GET
                  if (!getKeyProceedLatch.await(15, TimeUnit.SECONDS)) {
                    throw new TimeoutException();
                  }
                }
                return super.handleDefault(ctx, cmd);
              }
            });

    log.info("Adding a new node ..");
    addClusterEnabledCacheManager(cacheConfigBuilder);
    log.info("Added a new node");

    // state transfer is blocked, no keys should be present on node B yet
    assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

    // wait for state transfer on node B to progress to the point where data segments are about to
    // be applied
    if (!applyStateStartedLatch.await(15, TimeUnit.SECONDS)) {
      throw new TimeoutException();
    }

    // state transfer is blocked, no keys should be present on node B yet
    assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

    // initiate a GET
    Future<Object> getFuture =
        fork(
            new Callable<Object>() {
              @Override
              public Object call() {
                return cache(1).get("myKey");
              }
            });

    // wait for GET command on node B to reach beyond *DistributionInterceptor, where it will block.
    // the value seen so far is null
    if (!getKeyStartedLatch.await(15, TimeUnit.SECONDS)) {
      throw new TimeoutException();
    }

    // allow state transfer to apply state
    applyStateProceedLatch.countDown();

    // wait for state transfer to end
    TestingUtil.waitForRehashToComplete(cache(0), cache(1));

    assertEquals(1, cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().size());

    // allow GET to continue
    getKeyProceedLatch.countDown();

    Object value = getFuture.get(15, TimeUnit.SECONDS);
    assertEquals("myValue", value);
  }
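fork(...) is a helper from Infinispan's test harness that runs the callable on a background thread and hands back its Future; presumably the real helper also takes care of naming and cleaning up the spawned thread. Functionally it is roughly equivalent to this sketch:

  // Rough functional equivalent of the fork(...) test helper used above:
  // run the callable on its own thread and expose the result as a Future.
  static <T> Future<T> fork(Callable<T> task) {
    FutureTask<T> future = new FutureTask<>(task);
    new Thread(future, "fork-thread").start();
    return future;
  }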
Example #15
  public void testReplace() throws Exception {
    cache(0).put("myKey", "myValue");

    // add an interceptor on second node that will block REPLACE commands right after
    // EntryWrappingInterceptor until we are ready
    final CountDownLatch replaceStartedLatch = new CountDownLatch(1);
    final CountDownLatch replaceProceedLatch = new CountDownLatch(1);
    boolean isVersioningEnabled = cache(0).getCacheConfiguration().versioning().enabled();
    cacheConfigBuilder
        .customInterceptors()
        .addInterceptor()
        .after(
            isVersioningEnabled
                ? VersionedEntryWrappingInterceptor.class
                : EntryWrappingInterceptor.class)
        .interceptor(
            new CommandInterceptor() {
              @Override
              protected Object handleDefault(InvocationContext ctx, VisitableCommand cmd)
                  throws Throwable {
                if (cmd instanceof ReplaceCommand) {
                  // signal we encounter a REPLACE
                  replaceStartedLatch.countDown();
                  // wait until it is ok to continue with REPLACE
                  if (!replaceProceedLatch.await(15, TimeUnit.SECONDS)) {
                    throw new TimeoutException();
                  }
                }
                return super.handleDefault(ctx, cmd);
              }
            });

    // do not allow coordinator to send topology updates to node B
    final ClusterTopologyManager ctm0 =
        TestingUtil.extractGlobalComponent(manager(0), ClusterTopologyManager.class);
    ctm0.setRebalancingEnabled(false);

    log.info("Adding a new node ..");
    addClusterEnabledCacheManager(cacheConfigBuilder);
    log.info("Added a new node");

    // node B is not a member yet and rebalance has not started yet
    CacheTopology cacheTopology =
        advancedCache(1).getComponentRegistry().getStateTransferManager().getCacheTopology();
    assertNull(cacheTopology.getPendingCH());
    assertTrue(cacheTopology.getMembers().contains(address(0)));
    assertFalse(cacheTopology.getMembers().contains(address(1)));
    assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));

    // no keys should be present on node B yet because state transfer is blocked
    assertTrue(cache(1).keySet().isEmpty());

    // initiate a REPLACE
    Future<Object> replaceFuture =
        fork(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                try {
                  return cache(1).replace("myKey", "newValue");
                } catch (Exception e) {
                  log.errorf(e, "REPLACE failed: %s", e.getMessage());
                  throw e;
                }
              }
            });

    // wait for REPLACE command on node B to reach beyond *EntryWrappingInterceptor, where it will
    // block.
    // the value seen so far is null
    if (!replaceStartedLatch.await(15, TimeUnit.SECONDS)) {
      throw new TimeoutException();
    }

    // paranoia, yes the value is still missing from data container
    assertTrue(cache(1).keySet().isEmpty());

    // allow rebalance to start
    ctm0.setRebalancingEnabled(true);

    // wait for state transfer to end
    TestingUtil.waitForRehashToComplete(cache(0), cache(1));

    // the state should be already transferred now
    assertEquals(1, cache(1).keySet().size());

    // allow REPLACE to continue
    replaceProceedLatch.countDown();

    Object oldVal = replaceFuture.get(15, TimeUnit.SECONDS);
    assertNotNull(oldVal);
    assertEquals("myValue", oldVal);

    assertEquals("newValue", cache(0).get("myKey"));
    assertEquals("newValue", cache(1).get("myKey"));
  }