Example #1
  @Test
  public void testFutureUpdateExpiration() throws Exception {
    CyclicBarrier loadBarrier = new CyclicBarrier(2);
    CountDownLatch flushLatch = new CountDownLatch(2);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Future<Boolean> first = updateFlushWait(itemId, loadBarrier, null, flushLatch, commitLatch);
    Future<Boolean> second = updateFlushWait(itemId, loadBarrier, null, flushLatch, commitLatch);
    awaitOrThrow(flushLatch);

    Map contents = Caches.entrySet(entityCache).toMap();
    assertEquals(1, contents.size());
    assertEquals(FutureUpdate.class, contents.get(itemId).getClass());
    commitLatch.countDown();
    first.get(WAIT_TIMEOUT, TimeUnit.SECONDS);
    second.get(WAIT_TIMEOUT, TimeUnit.SECONDS);

    // since we had two concurrent updates, the result should be invalid
    contents = Caches.entrySet(entityCache).toMap();
    assertEquals(1, contents.size());
    Object value = contents.get(itemId);
    if (value instanceof FutureUpdate) {
      // DB did not block the two concurrent updates
      TIME_SERVICE.advance(timeout + 1);
      assertNull(entityCache.get(itemId));
      contents = Caches.entrySet(entityCache).toMap();
      assertEquals(Collections.EMPTY_MAP, contents);
    } else {
      // DB allowed only one update to proceed, so the entry should not be expired
      assertNotNull(value);
      assertEquals(StandardCacheEntryImpl.class, value.getClass());
      TIME_SERVICE.advance(timeout + 1);
      assertEquals(value, entityCache.get(itemId));
    }
  }
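
The awaitOrThrow(...) helper used above is not part of the snippet. A minimal sketch of what it presumably does, with the class name and the timeout value being assumptions, is:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class LatchSupport {
  // Assumed value; the tests only show that a WAIT_TIMEOUT constant exists.
  static final long WAIT_TIMEOUT = 10;

  // Fail fast instead of hanging the test if a forked task never reaches the expected point.
  static void awaitOrThrow(CountDownLatch latch) throws InterruptedException, TimeoutException {
    if (!latch.await(WAIT_TIMEOUT, TimeUnit.SECONDS)) {
      throw new TimeoutException("Latch was not counted down within " + WAIT_TIMEOUT + " seconds");
    }
  }
}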
Example #2
  @Test
  public void testRemoveUpdateExpiration() throws Exception {
    CyclicBarrier loadBarrier = new CyclicBarrier(2);
    CountDownLatch preFlushLatch = new CountDownLatch(1);
    CountDownLatch flushLatch = new CountDownLatch(1);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Future<Boolean> first = removeFlushWait(itemId, loadBarrier, null, flushLatch, commitLatch);
    Future<Boolean> second = updateFlushWait(itemId, loadBarrier, preFlushLatch, null, commitLatch);
    awaitOrThrow(flushLatch);

    Map contents = Caches.entrySet(entityCache).toMap();
    assertEquals(1, contents.size());
    assertEquals(Tombstone.class, contents.get(itemId).getClass());

    preFlushLatch.countDown();
    commitLatch.countDown();
    first.get(WAIT_TIMEOUT, TimeUnit.SECONDS);
    second.get(WAIT_TIMEOUT, TimeUnit.SECONDS);

    contents = Caches.entrySet(entityCache).toMap();
    assertEquals(1, contents.size());
    assertEquals(Tombstone.class, contents.get(itemId).getClass());

    TIME_SERVICE.advance(timeout + 1);
    assertNull(entityCache.get(itemId)); // force expiration
    contents = Caches.entrySet(entityCache).toMap();
    assertEquals(Collections.EMPTY_MAP, contents);
  }
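
TIME_SERVICE.advance(timeout + 1) is what makes expiration deterministic in these tests: a virtual clock is moved past the tombstone timeout instead of sleeping. A rough sketch of such a controllable time service (an illustration only, not the actual test utility) could look like this:

final class ControllableTimeService {
  private long currentMillis = System.currentTimeMillis();

  // Value returned to expiration checks instead of the real wall clock.
  long wallClockTime() {
    return currentMillis;
  }

  // Jump the virtual clock forward, e.g. advance(timeout + 1) to force entries to expire.
  void advance(long millis) {
    currentMillis += millis;
  }
}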
Example #3
  @Test
  public void testEvictUpdateExpiration() throws Exception {
    CyclicBarrier loadBarrier = new CyclicBarrier(2);
    CountDownLatch preFlushLatch = new CountDownLatch(1);
    CountDownLatch postEvictLatch = new CountDownLatch(1);
    CountDownLatch flushLatch = new CountDownLatch(1);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Future<Boolean> first = evictWait(itemId, loadBarrier, null, postEvictLatch);
    Future<Boolean> second =
        updateFlushWait(itemId, loadBarrier, preFlushLatch, flushLatch, commitLatch);
    awaitOrThrow(postEvictLatch);

    Map contents = Caches.entrySet(entityCache).toMap();
    assertEquals(Collections.EMPTY_MAP, contents);
    assertNull(contents.get(itemId));

    preFlushLatch.countDown();
    awaitOrThrow(flushLatch);
    contents = Caches.entrySet(entityCache).toMap();
    assertEquals(1, contents.size());
    assertEquals(FutureUpdate.class, contents.get(itemId).getClass());

    commitLatch.countDown();
    first.get(WAIT_TIMEOUT, TimeUnit.SECONDS);
    second.get(WAIT_TIMEOUT, TimeUnit.SECONDS);

    contents = Caches.entrySet(entityCache).toMap();
    assertEquals(1, contents.size());
    Object value = contents.get(itemId);
    assertNotNull(value);
    assertEquals(StandardCacheEntryImpl.class, value.getClass());
    TIME_SERVICE.advance(timeout + 1);
    assertEquals(value, entityCache.get(itemId));
  }
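
The updateFlushWait, removeFlushWait and evictWait helpers are not shown either. A sketch of the synchronization pattern they presumably share, with the entity load, flush and commit steps abstracted into Runnables and all names assumed: both writers meet on the load barrier, flush, report back through flushLatch, and then block on commitLatch so the test can inspect the cache between flush and commit.

import java.util.concurrent.*;

final class FlushWaitSupport {
  static final long WAIT_TIMEOUT = 10; // seconds, assumed

  static Future<Boolean> flushWait(ExecutorService executor,
                                   CyclicBarrier loadBarrier,
                                   CountDownLatch preFlushLatch,
                                   CountDownLatch flushLatch,
                                   CountDownLatch commitLatch,
                                   Runnable loadAndModify,  // e.g. load the entity and change a field
                                   Runnable flush,          // e.g. session.flush()
                                   Runnable commit) {       // e.g. transaction commit
    return executor.submit(() -> {
      loadAndModify.run();
      loadBarrier.await(WAIT_TIMEOUT, TimeUnit.SECONDS);     // both writers have loaded the entity
      if (preFlushLatch != null) {
        preFlushLatch.await(WAIT_TIMEOUT, TimeUnit.SECONDS); // the test decides when this writer may flush
      }
      flush.run();
      if (flushLatch != null) {
        flushLatch.countDown();                              // the flush is now visible to the test
      }
      commitLatch.await(WAIT_TIMEOUT, TimeUnit.SECONDS);     // keep the transaction open until released
      commit.run();
      return true;
    });
  }
}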
Example #4
  public void testWriteDuringPartition() throws Exception {
    DISCARD[] discards = new DISCARD[NUM_NODES];
    for (int i = 0; i < NUM_NODES; i++) {
      discards[i] = TestingUtil.getDiscardForCache(cache(i));
    }

    final List<Future<Object>> futures = new ArrayList<>(NUM_NODES);
    final ConcurrentMap<String, Integer> insertedKeys = CollectionFactory.makeConcurrentMap();
    final AtomicBoolean stop = new AtomicBoolean(false);
    for (int i = 0; i < NUM_NODES; i++) {
      final int cacheIndex = i;
      Future<Object> future =
          fork(
              new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                  Cache<String, Integer> cache = cache(cacheIndex);
                  int count = 0;
                  while (!stop.get()) {
                    String key = "key" + cacheIndex + "_" + count;
                    try {
                      cache.put(key, count);
                      insertedKeys.put(key, count);
                    } catch (AvailabilityException e) {
                      // expected, ignore
                    } catch (CacheException e) {
                      if (e.getCause() instanceof XAException
                          && e.getCause().getCause() instanceof AvailabilityException) {
                        // expected, ignore
                      } else {
                        throw e;
                      }
                    }
                    count++;
                    Thread.sleep(0);
                  }
                  return count;
                }
              });
      futures.add(future);
    }

    long startTime = TIME_SERVICE.time();
    int splitIndex = 0;
    while (splitIndex < NUM_NODES) {
      List<Address> partitionOne = new ArrayList<Address>(NUM_NODES);
      List<Address> partitionTwo = new ArrayList<Address>(NUM_NODES);
      List<EmbeddedCacheManager> partitionOneManagers = new ArrayList<>();
      List<EmbeddedCacheManager> partitionTwoManagers = new ArrayList<>();
      for (int i = 0; i < NUM_NODES; i++) {
        if ((i + splitIndex) % NUM_NODES < NUM_NODES / 2) {
          partitionTwo.add(address(i));
          partitionTwoManagers.add(manager(i));
        } else {
          partitionOne.add(address(i));
          partitionOneManagers.add(manager(i));
        }
      }
      assertEquals(NUM_NODES / 2, partitionTwo.size());
      log.infof(
          "Cache is available, splitting cluster at index %d. First partition is %s, second partition is %s",
          splitIndex, partitionOne, partitionTwo);

      for (int i = 0; i < NUM_NODES; i++) {
        if (partitionOne.contains(address(i))) {
          for (Address a : partitionTwo) {
            discards[i].addIgnoreMember(((JGroupsAddress) a).getJGroupsAddress());
          }
        } else {
          for (Address a : partitionOne) {
            discards[i].addIgnoreMember(((JGroupsAddress) a).getJGroupsAddress());
          }
        }
      }
      TestingUtil.blockForMemberToFail(30000, partitionOneManagers.toArray(new CacheContainer[0]));
      TestingUtil.blockForMemberToFail(30000, partitionTwoManagers.toArray(new CacheContainer[0]));

      log.infof("Nodes split, waiting for the caches to become degraded");
      eventually(
          new Condition() {
            @Override
            public boolean isSatisfied() throws Exception {
              return TestingUtil.extractComponent(cache(0), PartitionHandlingManager.class)
                      .getAvailabilityMode()
                  == AvailabilityMode.DEGRADED_MODE;
            }
          });

      assertFuturesRunning(futures);

      log.infof("Cache is degraded, merging partitions %s and %s", partitionOne, partitionTwo);
      for (int i = 0; i < NUM_NODES; i++) {
        discards[i].resetIgnoredMembers();
      }
      TestingUtil.blockUntilViewsReceived(
          60000, true, cacheManagers.toArray(new CacheContainer[0]));

      log.infof("Partitions merged, waiting for the caches to become available");
      eventually(
          new Condition() {
            @Override
            public boolean isSatisfied() throws Exception {
              return TestingUtil.extractComponent(cache(0), PartitionHandlingManager.class)
                      .getAvailabilityMode()
                  == AvailabilityMode.AVAILABLE;
            }
          });
      TestingUtil.waitForRehashToComplete(caches());

      assertFuturesRunning(futures);
      splitIndex++;
    }

    stop.set(true);
    for (Future<Object> future : futures) {
      future.get(10, TimeUnit.SECONDS);
    }

    for (String key : insertedKeys.keySet()) {
      for (int i = 0; i < NUM_NODES; i++) {
        assertEquals(
            "Failure for key " + key + " on " + cache(i), insertedKeys.get(key), cache(i).get(key));
      }
    }

    long duration = TIME_SERVICE.timeDuration(startTime, TimeUnit.SECONDS);
    log.infof("Test finished in %d seconds", duration);
  }
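
assertFuturesRunning(...) is another helper the snippet does not show. Presumably it verifies that none of the forked writer tasks has died during the split, along these lines (class and method names assumed):

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

final class FutureChecks {
  static void assertFuturesRunning(List<Future<Object>> futures)
      throws ExecutionException, InterruptedException {
    for (Future<Object> future : futures) {
      // The writer loops only return after stop is set, so a completed future means the
      // task died; get() rethrows the exception that killed it.
      if (future.isDone()) {
        future.get();
        throw new AssertionError("Writer task finished prematurely: " + future);
      }
    }
  }
}
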
  private void testLockMigration(int nodeThatPuts, boolean commit) throws Exception {
    Map<Object, Transaction> key2Tx = new HashMap<Object, Transaction>();
    for (int i = 0; i < NUM_KEYS; i++) {
      Object key = getKeyForCache(0);
      if (key2Tx.containsKey(key)) continue;

      // put a key to have some data in cache
      cache(nodeThatPuts).put(key, key);

      // start a TX that locks the key and then we suspend it
      tm(nodeThatPuts).begin();
      Transaction tx = tm(nodeThatPuts).getTransaction();
      advancedCache(nodeThatPuts).lock(key);
      tm(nodeThatPuts).suspend();
      key2Tx.put(key, tx);

      assertLocked(0, key);
    }

    log.trace("Lock transfer happens here");

    // add a third node hoping that some of the previously created keys will be migrated to it
    addClusterEnabledCacheManager(dccc);
    waitForClusterToForm();

    // search for a key that was migrated to third node and the suspended TX that locked it
    Object migratedKey = null;
    Transaction migratedTransaction = null;
    ConsistentHash consistentHash = advancedCache(2).getDistributionManager().getConsistentHash();
    for (Object key : key2Tx.keySet()) {
      if (consistentHash.locatePrimaryOwner(key).equals(address(2))) {
        migratedKey = key;
        migratedTransaction = key2Tx.get(key);
        log.trace("Migrated key = " + migratedKey);
        log.trace(
            "Migrated transaction = "
                + ((DummyTransaction) migratedTransaction).getEnlistedResources());
        break;
      }
    }

    // we do not focus on the other transactions so we commit them now
    log.trace("Committing all transactions except the migrated one.");
    for (Object key : key2Tx.keySet()) {
      if (!key.equals(migratedKey)) {
        Transaction tx = key2Tx.get(key);
        tm(nodeThatPuts).resume(tx);
        tm(nodeThatPuts).commit();
      }
    }

    if (migratedKey == null) {
      // this could happen in extreme cases
      log.trace("No key migrated to new owner - test cannot be performed!");
    } else {
      // the migrated TX is resumed and committed or rolled back. we expect the migrated key to be
      // unlocked now
      tm(nodeThatPuts).resume(migratedTransaction);
      if (commit) {
        tm(nodeThatPuts).commit();
      } else {
        tm(nodeThatPuts).rollback();
      }

      // there should not be any locks
      assertNotLocked(cache(0), migratedKey);
      assertNotLocked(cache(1), migratedKey);
      assertNotLocked(cache(2), migratedKey);

      // if a new TX tries to write to the migrated key, it should not fail because the key
      // should no longer be locked
      tm(nodeThatPuts).begin();
      cache(nodeThatPuts)
          .put(
              migratedKey,
              "someValue"); // this should not result in TimeoutException due to key still locked
      tm(nodeThatPuts).commit();
    }

    log.trace("Checking the values from caches...");
    for (Object key : key2Tx.keySet()) {
      log.tracef("Checking key: %s", key);
      Object expectedValue = key;
      if (key.equals(migratedKey)) {
        expectedValue = "someValue";
      }
      // check them directly in data container
      InternalCacheEntry d0 = advancedCache(0).getDataContainer().get(key);
      InternalCacheEntry d1 = advancedCache(1).getDataContainer().get(key);
      InternalCacheEntry d2 = advancedCache(2).getDataContainer().get(key);
      int c = 0;
      if (d0 != null && !d0.isExpired(TIME_SERVICE.wallClockTime())) {
        assertEquals(expectedValue, d0.getValue());
        c++;
      }
      if (d1 != null && !d1.isExpired(TIME_SERVICE.wallClockTime())) {
        assertEquals(expectedValue, d1.getValue());
        c++;
      }
      if (d2 != null && !d2.isExpired(TIME_SERVICE.wallClockTime())) {
        assertEquals(expectedValue, d2.getValue());
        c++;
      }
      assertEquals(1, c);

      // look at them also via cache API
      assertEquals(expectedValue, cache(0).get(key));
      assertEquals(expectedValue, cache(1).get(key));
      assertEquals(expectedValue, cache(2).get(key));
    }
  }
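
The test above calls advancedCache(nodeThatPuts).lock(key), which only works on a transactional cache configured for pessimistic locking, so the dccc builder passed to addClusterEnabledCacheManager presumably carries a configuration along these lines (a sketch with assumed values, not the actual test setup):

import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;

final class LockMigrationConfig {
  static ConfigurationBuilder distributedPessimistic() {
    ConfigurationBuilder dccc = new ConfigurationBuilder();
    // Distributed, transactional cache; explicit lock() requires pessimistic locking.
    dccc.clustering().cacheMode(CacheMode.DIST_SYNC);
    dccc.transaction()
        .transactionMode(TransactionMode.TRANSACTIONAL)
        .lockingMode(LockingMode.PESSIMISTIC);
    return dccc;
  }
}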