Code Example #1
  public void testAtomicMapPutDuringJoin() throws ExecutionException, InterruptedException {
    Cache cache = cache(0, "atomic");
    ControlledRpcManager crm = new ControlledRpcManager(cache.getAdvancedCache().getRpcManager());
    TestingUtil.replaceComponent(cache, RpcManager.class, crm, true);

    MagicKey atomicMapKey = new MagicKey("atomicMapKey", cache);
    AtomicMap atomicMap = AtomicMapLookup.getAtomicMap(cache, atomicMapKey);
    atomicMap.put("key1", "value1");

    crm.blockBefore(StateResponseCommand.class);

    ConfigurationBuilder c = getConfigurationBuilder();
    final EmbeddedCacheManager joiner = addClusterEnabledCacheManager(c);
    Future<Cache> future =
        fork(
            new Callable<Cache>() {
              @Override
              public Cache call() throws Exception {
                return joiner.getCache("atomic");
              }
            });

    crm.waitForCommandToBlock();

    // Now we know state transfer will try to create an AtomicMap(key1=value1) on cache2
    // Insert another key in the atomic map, and check that cache2 has both keys after the state
    // transfer
    atomicMap.put("key2", "value2");

    crm.stopBlocking();
    Cache cache2 = future.get();

    AtomicMap atomicMap2 = AtomicMapLookup.getAtomicMap(cache2, atomicMapKey);
    assertEquals(new HashSet<String>(Arrays.asList("key1", "key2")), atomicMap2.keySet());
  }
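
The test relies on getConfigurationBuilder() and addClusterEnabledCacheManager(), which are defined outside this excerpt. As a rough sketch (an assumption, not the test's actual configuration), the builder would need a transactional distributed cache, since AtomicMap requires transactions, with state transfer enabled so the joiner receives the existing entries:

  // Hypothetical sketch only: the real builder is defined elsewhere in the test class
  protected ConfigurationBuilder getConfigurationBuilder() {
    // Transactional DIST_SYNC cache; fetchInMemoryState lets the joiner pull the existing entries
    ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
    builder.clustering().stateTransfer().fetchInMemoryState(true);
    return builder;
  }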
Code Example #2
 private ControlledRpcManager blockStateResponseCommand(final Cache cache)
     throws InterruptedException {
   RpcManager rpcManager = TestingUtil.extractComponent(cache, RpcManager.class);
   ControlledRpcManager controlledRpcManager = new ControlledRpcManager(rpcManager);
   controlledRpcManager.blockBefore(StateResponseCommand.class);
   TestingUtil.replaceComponent(cache, RpcManager.class, controlledRpcManager, true);
   return controlledRpcManager;
 }
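
Taken on its own the helper only swaps in the wrapper; a typical call sequence in a state-transfer test (sketched from the usage in the other examples here, with illustrative cache and manager names) looks like this:

  // Sketch: block node 0's outgoing state responses, start a joiner, then release the transfer
  ControlledRpcManager crm = blockStateResponseCommand(cache(0, "dist"));
  final EmbeddedCacheManager joiner = addClusterEnabledCacheManager();
  Future<Cache> joinerCache =
      fork(
          new Callable<Cache>() {
            @Override
            public Cache call() throws Exception {
              return joiner.getCache("dist");
            }
          });
  crm.waitForCommandToBlock();   // the StateResponseCommand is now held back
  // ... exercise the cluster while the transfer is suspended ...
  crm.stopBlocking();            // release the response so the join can finish
  joinerCache.get();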
Code Example #3
  public void testPrimaryOwnerGoesDownBeforeBackupRaisesEvent()
      throws InterruptedException, TimeoutException, ExecutionException, BrokenBarrierException {
    final Cache<Object, String> cache0 = cache(0, CACHE_NAME);
    Cache<Object, String> cache1 = cache(1, CACHE_NAME);
    Cache<Object, String> cache2 = cache(2, CACHE_NAME);

    ClusterListener clusterListener = new ClusterListener();
    cache0.addListener(clusterListener);

    // Now we want to block the outgoing put to the backup owner
    RpcManager rpcManager = TestingUtil.extractComponent(cache1, RpcManager.class);
    ControlledRpcManager controlledRpcManager = new ControlledRpcManager(rpcManager);
    controlledRpcManager.blockBefore(PutKeyValueCommand.class);
    TestingUtil.replaceComponent(cache1, RpcManager.class, controlledRpcManager, true);

    final MagicKey key = new MagicKey(cache1, cache2);
    Future<String> future =
        fork(
            new Callable<String>() {
              @Override
              public String call() throws Exception {
                return cache0.put(key, FIRST_VALUE);
              }
            });

    // Wait until the primary owner is about to forward the put command to the backup (it is now blocked)
    controlledRpcManager.waitForCommandToBlock(10, TimeUnit.SECONDS);

    // Kill the cache now
    TestingUtil.killCacheManagers(cache1.getCacheManager());

    // This should return null normally, but since it was retried it returns its own value :(
    // Maybe some day this can work properly
    assertEquals(future.get(10, TimeUnit.SECONDS), FIRST_VALUE);

    // We should have received an event that was marked as retried
    assertEquals(clusterListener.events.size(), 1);
    CacheEntryEvent<Object, String> event = clusterListener.events.get(0);
    // Since it was a retry but the backup got the write, the event isn't a CREATE!!
    assertEquals(event.getType(), Event.Type.CACHE_ENTRY_MODIFIED);
    CacheEntryModifiedEvent<Object, String> modEvent =
        (CacheEntryModifiedEvent<Object, String>) event;
    assertTrue(modEvent.isCommandRetried());
    assertEquals(modEvent.getKey(), key);
    assertEquals(modEvent.getValue(), FIRST_VALUE);
  }
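
The ClusterListener class itself is not part of this excerpt. A minimal sketch of the kind of clustered listener the test assumes, collecting every entry event in a list so the assertions can inspect it (the concrete class in the test suite may differ):

  @Listener(clustered = true)
  public static class ClusterListener {
    // Clustered listeners receive events from the primary owner of each key,
    // regardless of which node the entry was written on
    final List<CacheEntryEvent<Object, String>> events =
        Collections.synchronizedList(new ArrayList<CacheEntryEvent<Object, String>>());

    @CacheEntryCreated
    @CacheEntryModified
    @CacheEntryRemoved
    public void onCacheEntryEvent(CacheEntryEvent<Object, String> event) {
      events.add(event);
    }
  }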
Code Example #4
  /**
   * When L1 is enabled this test should not be run with a previous value present, as it will
   * cause timeouts. Due to how locking works with L1, that scenario cannot occur when the
   * previous value exists.
   *
   * @param op the write operation (put/replace/remove) to perform during the state transfer
   * @throws Exception on test failure
   */
  protected void doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(
      final TestWriteOperation op) throws Exception {
    if (l1Enabled() && op.getPreviousValue() != null) {
      fail("This test cannot be ran with L1 when a previous value is set");
    }
    // Test scenario:
    // cache0,1,2 are in the cluster, an owner leaves
    // Key k is in the cache, and is transferred to the non owner
    // A user operation also modifies key k causing an invalidation
    // on the non owner which is getting the state transfer
    final AdvancedCache<Object, Object> primaryOwnerCache = cache(0, cacheName).getAdvancedCache();
    final AdvancedCache<Object, Object> backupOwnerCache = cache(1, cacheName).getAdvancedCache();
    final AdvancedCache<Object, Object> nonOwnerCache = cache(2, cacheName).getAdvancedCache();

    final MagicKey key = new MagicKey(primaryOwnerCache, backupOwnerCache);

    // Prepare for replace/remove: put a previous value in cache0
    final Object previousValue = op.getPreviousValue();
    if (previousValue != null) {
      primaryOwnerCache.put(key, previousValue);
      assertEquals(previousValue, primaryOwnerCache.get(key));
      log.tracef("Previous value inserted: %s = %s", key, previousValue);

      assertEquals(previousValue, nonOwnerCache.get(key));

      if (l1Enabled()) {
        assertIsInL1(nonOwnerCache, key);
      }
    }

    int preJoinTopologyId =
        primaryOwnerCache
            .getComponentRegistry()
            .getStateTransferManager()
            .getCacheTopology()
            .getTopologyId();

    // Block any state response commands on cache0
    CheckPoint checkPoint = new CheckPoint();
    ControlledRpcManager blockingRpcManager0 = blockStateResponseCommand(primaryOwnerCache);

    // Block the rebalance confirmation on cache0
    blockRebalanceConfirmation(primaryOwnerCache.getCacheManager(), checkPoint);

    assertEquals(
        primaryOwnerCache.getCacheManager().getCoordinator(),
        primaryOwnerCache.getCacheManager().getAddress());

    // Remove the leaver
    log.trace("Stopping the cache");
    backupOwnerCache.getCacheManager().stop();

    int rebalanceTopologyId = preJoinTopologyId + 2;

    // Wait for the leaver to be removed from the members list on both remaining nodes
    eventually(
        new Condition() {
          @Override
          public boolean isSatisfied() throws Exception {
            return primaryOwnerCache.getRpcManager().getMembers().size() == 2
                && nonOwnerCache.getRpcManager().getMembers().size() == 2;
          }
        });

    assertEquals(
        primaryOwnerCache.getCacheManager().getCoordinator(),
        primaryOwnerCache.getCacheManager().getAddress());

    // Wait for the primary owner (cache0) to collect the state to send to the non-owner (including our previous value).
    blockingRpcManager0.waitForCommandToBlock();

    // Every command of the tested write type will be blocked before committing the entry on the non-owner cache
    CyclicBarrier beforeCommitCache1Barrier = new CyclicBarrier(2);
    BlockingInterceptor blockingInterceptor1 =
        new BlockingInterceptor(beforeCommitCache1Barrier, op.getCommandClass(), true);
    nonOwnerCache.addInterceptorAfter(blockingInterceptor1, EntryWrappingInterceptor.class);

    // Put/Replace/Remove from cache0 with cache0 as primary owner; the non-owner (cache2) becomes
    // a backup owner for the retry
    // The write command will be blocked on the non-owner just before committing the entry.
    Future<Object> future =
        fork(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                return op.perform(primaryOwnerCache, key);
              }
            });

    // Wait for the entry to be wrapped on the non-owner cache
    beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

    // Remove the interceptor so we don't mess up any other state transfer puts
    removeAllBlockingInterceptorsFromCache(nonOwnerCache);

    // Allow the state to be applied on the non-owner cache (writing the old value for our entry)
    blockingRpcManager0.stopBlocking();

    // Wait until the primary owner is ready to confirm the rebalance, but don't allow the
    // confirmation to be processed.
    // (It would change the topology and it would trigger a retry for the command.)
    checkPoint.awaitStrict(
        "pre_rebalance_confirmation_"
            + rebalanceTopologyId
            + "_from_"
            + primaryOwnerCache.getCacheManager().getAddress(),
        10,
        SECONDS);

    // Now allow the command to commit on the non-owner cache
    beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

    // Wait for the command to finish and check that it didn't fail
    Object result = future.get(10, TimeUnit.SECONDS);
    assertEquals(op.getReturnValue(), result);
    log.tracef("%s operation is done", op);

    // Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
    checkPoint.trigger(
        "resume_rebalance_confirmation_"
            + rebalanceTopologyId
            + "_from_"
            + primaryOwnerCache.getCacheManager().getAddress());
    checkPoint.trigger(
        "resume_rebalance_confirmation_"
            + rebalanceTopologyId
            + "_from_"
            + nonOwnerCache.getCacheManager().getAddress());
    TestingUtil.waitForRehashToComplete(primaryOwnerCache, nonOwnerCache);

    switch (op) {
      case REMOVE:
      case REMOVE_EXACT:
        break;
      default:
        assertIsInContainerImmortal(primaryOwnerCache, key);
        assertIsInContainerImmortal(nonOwnerCache, key);
        break;
    }

    // Check the value to make sure data container contains correct value
    assertEquals(op.getValue(), primaryOwnerCache.get(key));
    assertEquals(op.getValue(), nonOwnerCache.get(key));
  }
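
Two pieces of test infrastructure referenced above sit outside this excerpt: blockRebalanceConfirmation, which presumably triggers the "pre_rebalance_confirmation_*" checkpoint and holds each node's confirmation until the matching "resume_rebalance_confirmation_*" key is triggered, and the TestWriteOperation enum driving the scenario. A rough sketch of the latter, inferred only from the calls made above (the constants, values and command classes are illustrative):

  // Illustrative sketch; the real enum in the test suite may use different constants and values
  enum TestWriteOperation {
    PUT_OVERWRITE(PutKeyValueCommand.class, "v0", "v1", "v0"),
    REMOVE(RemoveCommand.class, "v0", null, "v0"),
    REMOVE_EXACT(RemoveCommand.class, "v0", null, Boolean.TRUE);

    private final Class<? extends VisitableCommand> commandClass;
    private final Object previousValue; // value inserted before the test, null to start empty
    private final Object value;         // value expected in the data container afterwards
    private final Object returnValue;   // value the operation is expected to return

    TestWriteOperation(Class<? extends VisitableCommand> commandClass, Object previousValue,
                       Object value, Object returnValue) {
      this.commandClass = commandClass;
      this.previousValue = previousValue;
      this.value = value;
      this.returnValue = returnValue;
    }

    Class<? extends VisitableCommand> getCommandClass() { return commandClass; }
    Object getPreviousValue() { return previousValue; }
    Object getValue() { return value; }
    Object getReturnValue() { return returnValue; }

    Object perform(AdvancedCache<Object, Object> cache, Object key) {
      switch (this) {
        case PUT_OVERWRITE: return cache.put(key, value);
        case REMOVE:        return cache.remove(key);
        case REMOVE_EXACT:  return cache.remove(key, previousValue);
        default: throw new IllegalStateException("Unknown operation " + this);
      }
    }
  }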