@Test(groups = "unstable")
  public void testInvalidationDuringStateTransfer() throws Exception {
    cache(0).put("key1", "value1");

    CheckPoint checkPoint = new CheckPoint();
    blockJoinResponse(manager(0), checkPoint);

    addClusterEnabledCacheManager(dccc);
    Future<Object> joinFuture =
        fork(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                // The cache only joins here
                return cache(2);
              }
            });

    checkPoint.awaitStrict("sending_join_response", 10, SECONDS);

    // This will invoke an invalidation on the joiner
    NotifyingFuture<Object> putFuture = cache(0).putAsync("key2", "value2");
    try {
      putFuture.get(1, SECONDS);
      fail("Put operation should have been blocked, but it finished successfully");
    } catch (java.util.concurrent.TimeoutException e) {
      // expected
    }

    checkPoint.trigger("resume_join_response");
    putFuture.get(10, SECONDS);
  }
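The tests in these snippets coordinate threads through CheckPoint events (trigger / triggerForever / awaitStrict). As a rough illustration of the semantics relied on above, here is a minimal sketch of such a rendezvous built only on standard java.util.concurrent types; it is an illustrative assumption, not Infinispan's actual CheckPoint class.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Minimal sketch of a CheckPoint-style rendezvous, assuming only the semantics implied by the
// tests above: trigger(event) publishes one permit for an event name, triggerForever(event)
// publishes an unbounded number of permits, and awaitStrict(event, timeout, unit) blocks until a
// permit is available or fails with TimeoutException.
class SimpleCheckPoint {
  private final Map<String, Integer> permits = new HashMap<>();

  public synchronized void trigger(String event) {
    permits.merge(event, 1, (old, one) -> old == Integer.MAX_VALUE ? old : old + one);
    notifyAll();
  }

  public synchronized void triggerForever(String event) {
    permits.put(event, Integer.MAX_VALUE);
    notifyAll();
  }

  public synchronized void awaitStrict(String event, long timeout, TimeUnit unit)
      throws InterruptedException, TimeoutException {
    long deadline = System.nanoTime() + unit.toNanos(timeout);
    while (permitCount(event) == 0) {
      long remainingMs = TimeUnit.NANOSECONDS.toMillis(deadline - System.nanoTime());
      if (remainingMs <= 0) {
        throw new TimeoutException("Timed out waiting for event " + event);
      }
      wait(remainingMs);
    }
    consumePermit(event);
  }

  private int permitCount(String event) {
    Integer count = permits.get(event);
    return count == null ? 0 : count;
  }

  private void consumePermit(String event) {
    int count = permitCount(event);
    if (count != Integer.MAX_VALUE) {
      permits.put(event, count - 1);
    }
  }
}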
 private void doBlock(InvocationContext ctx, ReplicableCommand command)
     throws InterruptedException, TimeoutException {
   log.tracef("Delaying command %s originating from %s", command, ctx.getOrigin());
   int myCount = counter.incrementAndGet();
   checkPoint.trigger("blocked_" + myCount + "_on_" + cache);
   checkPoint.awaitStrict("resume_" + myCount + "_on_" + cache, 15, SECONDS);
   log.tracef("Command unblocked: %s", command);
 }
 @Override
 public Object visitGetKeysInGroupCommand(InvocationContext ctx, GetKeysInGroupCommand command)
     throws Throwable {
   if (!open) {
     checkPoint.trigger("before");
     checkPoint.awaitStrict("after", 30, TimeUnit.SECONDS);
   }
   return invokeNextInterceptor(ctx, command);
 }
  @Test
  public void testPrimaryOwnerGoesDownAfterSendingEvent()
      throws InterruptedException, ExecutionException, TimeoutException {
    final Cache<Object, String> cache0 = cache(0, CACHE_NAME);
    Cache<Object, String> cache1 = cache(1, CACHE_NAME);
    Cache<Object, String> cache2 = cache(2, CACHE_NAME);

    ClusterListener clusterListener = listener();
    cache0.addListener(clusterListener);

    CheckPoint checkPoint = new CheckPoint();
    waitUntilNotificationRaised(cache1, checkPoint);
    checkPoint.triggerForever("pre_raise_notification_release");

    final MagicKey key = new MagicKey(cache1, cache2);
    Future<String> future =
        fork(
            new Callable<String>() {
              @Override
              public String call() throws Exception {
                return cache0.put(key, FIRST_VALUE);
              }
            });

    checkPoint.awaitStrict("post_raise_notification_invoked", 10, TimeUnit.SECONDS);

    // Kill the cache now - note this will automatically unblock the fork thread
    TestingUtil.killCacheManagers(cache1.getCacheManager());

    future.get(10, TimeUnit.SECONDS);

    // We should have received 2 events
    assertEquals(clusterListener.events.size(), 2);
    CacheEntryEvent<Object, String> event = clusterListener.events.get(0);
    assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
    CacheEntryCreatedEvent<Object, String> createEvent =
        (CacheEntryCreatedEvent<Object, String>) event;
    assertFalse(createEvent.isCommandRetried());
    assertEquals(createEvent.getKey(), key);
    assertEquals(createEvent.getValue(), FIRST_VALUE);

    event = clusterListener.events.get(1);
    // Since this was a retry and the backup had already applied the write, the event isn't a CREATE
    assertEquals(event.getType(), Event.Type.CACHE_ENTRY_MODIFIED);
    CacheEntryModifiedEvent<Object, String> modEvent =
        (CacheEntryModifiedEvent<Object, String>) event;
    assertTrue(modEvent.isCommandRetried());
    assertEquals(modEvent.getKey(), key);
    assertEquals(modEvent.getValue(), FIRST_VALUE);
  }
  /**
   * When L1 is enabled this test should not be run with a previous value present, as it would
   * cause timeouts. Due to how locking works with L1, this scenario cannot occur when the
   * previous value exists.
   *
   * @param op the write operation to test
   * @throws Exception on any error
   */
  protected void doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(
      final TestWriteOperation op) throws Exception {
    if (l1Enabled() && op.getPreviousValue() != null) {
      fail("This test cannot be ran with L1 when a previous value is set");
    }
    // Test scenario:
    // cache0, cache1 and cache2 are in the cluster, and an owner leaves.
    // Key k is in the cache and is transferred to the non-owner.
    // A user operation also modifies key k, causing an invalidation
    // on the non-owner, which is receiving the state transfer.
    final AdvancedCache<Object, Object> primaryOwnerCache = cache(0, cacheName).getAdvancedCache();
    final AdvancedCache<Object, Object> backupOwnerCache = cache(1, cacheName).getAdvancedCache();
    final AdvancedCache<Object, Object> nonOwnerCache = cache(2, cacheName).getAdvancedCache();

    final MagicKey key = new MagicKey(primaryOwnerCache, backupOwnerCache);

    // Prepare for replace/remove: put a previous value in cache0
    final Object previousValue = op.getPreviousValue();
    if (previousValue != null) {
      primaryOwnerCache.put(key, previousValue);
      assertEquals(previousValue, primaryOwnerCache.get(key));
      log.tracef("Previous value inserted: %s = %s", key, previousValue);

      assertEquals(previousValue, nonOwnerCache.get(key));

      if (l1Enabled()) {
        assertIsInL1(nonOwnerCache, key);
      }
    }

    int preJoinTopologyId =
        primaryOwnerCache
            .getComponentRegistry()
            .getStateTransferManager()
            .getCacheTopology()
            .getTopologyId();

    // Block any state response commands on cache0
    CheckPoint checkPoint = new CheckPoint();
    ControlledRpcManager blockingRpcManager0 = blockStateResponseCommand(primaryOwnerCache);

    // Block the rebalance confirmation on cache0
    blockRebalanceConfirmation(primaryOwnerCache.getCacheManager(), checkPoint);

    assertEquals(
        primaryOwnerCache.getCacheManager().getCoordinator(),
        primaryOwnerCache.getCacheManager().getAddress());

    // Remove the leaver
    log.trace("Stopping the cache");
    backupOwnerCache.getCacheManager().stop();

    int rebalanceTopologyId = preJoinTopologyId + 2;

    // Wait for the write CH to contain the joiner everywhere
    eventually(
        new Condition() {
          @Override
          public boolean isSatisfied() throws Exception {
            return primaryOwnerCache.getRpcManager().getMembers().size() == 2
                && nonOwnerCache.getRpcManager().getMembers().size() == 2;
          }
        });

    assertEquals(
        primaryOwnerCache.getCacheManager().getCoordinator(),
        primaryOwnerCache.getCacheManager().getAddress());

    // Wait for cache0 to collect the state to send to the non-owner (including our previous value).
    blockingRpcManager0.waitForCommandToBlock();

    // Every write command of the tested type will be blocked before committing the entry on the non-owner
    CyclicBarrier beforeCommitCache1Barrier = new CyclicBarrier(2);
    BlockingInterceptor blockingInterceptor1 =
        new BlockingInterceptor(beforeCommitCache1Barrier, op.getCommandClass(), true);
    nonOwnerCache.addInterceptorAfter(blockingInterceptor1, EntryWrappingInterceptor.class);

    // Put/Replace/Remove from cache0 with cache0 as primary owner; the non-owner will become a
    // backup owner for the retry.
    // The write command will be blocked on the non-owner just before committing the entry.
    Future<Object> future =
        fork(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                return op.perform(primaryOwnerCache, key);
              }
            });

    // Wait for the entry to be wrapped on the non-owner
    beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

    // Remove the interceptor so we don't mess up any other state transfer puts
    removeAllBlockingInterceptorsFromCache(nonOwnerCache);

    // Allow the state to be applied on the non-owner (writing the old value for our entry)
    blockingRpcManager0.stopBlocking();

    // Wait for second in line to finish applying the state, but don't allow the rebalance
    // confirmation to be processed.
    // (It would change the topology and it would trigger a retry for the command.)
    checkPoint.awaitStrict(
        "pre_rebalance_confirmation_"
            + rebalanceTopologyId
            + "_from_"
            + primaryOwnerCache.getCacheManager().getAddress(),
        10,
        SECONDS);

    // Now allow the command to commit on the non-owner
    beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

    // Wait for the command to finish and check that it didn't fail
    Object result = future.get(10, TimeUnit.SECONDS);
    assertEquals(op.getReturnValue(), result);
    log.tracef("%s operation is done", op);

    // Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
    checkPoint.trigger(
        "resume_rebalance_confirmation_"
            + rebalanceTopologyId
            + "_from_"
            + primaryOwnerCache.getCacheManager().getAddress());
    checkPoint.trigger(
        "resume_rebalance_confirmation_"
            + rebalanceTopologyId
            + "_from_"
            + nonOwnerCache.getCacheManager().getAddress());
    TestingUtil.waitForRehashToComplete(primaryOwnerCache, nonOwnerCache);

    switch (op) {
      case REMOVE:
      case REMOVE_EXACT:
        break;
      default:
        assertIsInContainerImmortal(primaryOwnerCache, key);
        assertIsInContainerImmortal(nonOwnerCache, key);
        break;
    }

    // Check the value to make sure data container contains correct value
    assertEquals(op.getValue(), primaryOwnerCache.get(key));
    assertEquals(op.getValue(), nonOwnerCache.get(key));
  }
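The blocking interceptor used above couples the test thread and the intercepted command through a CyclicBarrier. Below is a conceptual sketch of such a barrier-based interceptor; the class name, constructor shape and double-await protocol are assumptions for illustration (it is not Infinispan's actual BlockingInterceptor test class), and it relies on the legacy CommandInterceptor API already seen in these snippets.

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.base.CommandInterceptor;

// Conceptual sketch only: block a matching command either before or after it is passed down the chain.
class BarrierBlockingInterceptor extends CommandInterceptor {
  private final CyclicBarrier barrier;
  private final Class<? extends VisitableCommand> commandClass;
  private final boolean blockAfter;

  BarrierBlockingInterceptor(CyclicBarrier barrier,
                             Class<? extends VisitableCommand> commandClass,
                             boolean blockAfter) {
    this.barrier = barrier;
    this.commandClass = commandClass;
    this.blockAfter = blockAfter;
  }

  @Override
  protected Object handleDefault(InvocationContext ctx, VisitableCommand command) throws Throwable {
    if (!blockAfter && commandClass.isInstance(command)) {
      block();
    }
    Object result = invokeNextInterceptor(ctx, command);
    if (blockAfter && commandClass.isInstance(command)) {
      block();
    }
    return result;
  }

  private void block() throws Exception {
    // First await: tells the test thread the command has reached the blocking point.
    barrier.await(10, TimeUnit.SECONDS);
    // Second await: parks the command until the test thread releases it.
    barrier.await(10, TimeUnit.SECONDS);
  }
}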
  protected void doStateTransferInBetweenPrepareCommit(
      final TestWriteOperation op, final boolean additionalValueOnNonOwner) throws Exception {
    final String key = getClass().getName() + "-key";
    // Test scenario:
    // cache0, cache1 and cache2 are in the cluster, and an owner leaves.
    // Key k is in the cache and is transferred to the non-owner.
    // A user operation also modifies key k, causing an invalidation
    // on the non-owner, which is receiving the state transfer.
    final AdvancedCache<Object, Object> primaryOwnerCache = getFirstOwner(key).getAdvancedCache();
    final AdvancedCache<Object, Object> backupOwnerCache = getOwners(key)[1].getAdvancedCache();
    final AdvancedCache<Object, Object> nonOwnerCache = getFirstNonOwner(key).getAdvancedCache();

    // Prepare for replace/remove: put a previous value in cache0
    final Object previousValue = op.getPreviousValue();
    if (previousValue != null) {
      primaryOwnerCache.put(key, previousValue);
      assertEquals(previousValue, primaryOwnerCache.get(key));
      log.tracef("Previous value inserted: %s = %s", key, previousValue);

      assertEquals(previousValue, nonOwnerCache.get(key));

      if (l1Enabled()) {
        assertIsInL1(nonOwnerCache, key);
      }
    }

    // Block after the Prepare command has been sent, i.e. once it has passed the StateTransferInterceptor
    final CyclicBarrier cyclicBarrier = new CyclicBarrier(2);

    try {
      TransactionManager tm = primaryOwnerCache.getTransactionManager();
      Future<Object> future =
          fork(
              runWithTx(
                  tm,
                  new Callable<Object>() {

                    @Override
                    public Object call() throws Exception {
                      if (additionalValueOnNonOwner) {
                        MagicKey mk = new MagicKey("placeholder", nonOwnerCache);
                        String value = "somevalue";
                        primaryOwnerCache.put(mk, value);
                        log.tracef(
                            "Adding additional value on nonOwner value inserted: %s = %s",
                            mk, value);
                      }
                      primaryOwnerCache
                          .getAdvancedCache()
                          .addInterceptorBefore(
                              new BlockingInterceptor(cyclicBarrier, getVisitableCommand(op), true),
                              StateTransferInterceptor.class);
                      return op.perform(primaryOwnerCache, key);
                    }
                  }));

      cyclicBarrier.await(10, SECONDS);

      // Block the rebalance confirmation on nonOwnerCache
      CheckPoint checkPoint = new CheckPoint();
      log.trace("Adding proxy to state transfer");
      waitUntilStateBeingTransferred(nonOwnerCache, checkPoint);

      backupOwnerCache.getCacheManager().stop();

      // Wait until the non-owner is just about to apply the incoming state
      checkPoint.awaitStrict("pre_state_apply_invoked_for_" + nonOwnerCache, 10, SECONDS);

      // Let the prepare complete, so the commit command invalidates the entry on the non-owner
      cyclicBarrier.await(10, SECONDS);

      assertEquals(op.getReturnValue(), future.get(10, SECONDS));

      // let state transfer go
      checkPoint.trigger("pre_state_apply_release_for_" + nonOwnerCache);

      TestingUtil.waitForRehashToComplete(primaryOwnerCache, nonOwnerCache);

      switch (op) {
        case REMOVE:
        case REMOVE_EXACT:
          break;
        default:
          assertIsInContainerImmortal(primaryOwnerCache, key);
          assertIsInContainerImmortal(nonOwnerCache, key);
          break;
      }

      // Check the value to make sure data container contains correct value
      assertEquals(op.getValue(), primaryOwnerCache.get(key));
      assertEquals(op.getValue(), nonOwnerCache.get(key));
    } finally {
      removeAllBlockingInterceptorsFromCache(primaryOwnerCache);
    }
  }
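doStateTransferInBetweenPrepareCommit wraps the forked work in runWithTx(tm, callable). A plausible shape for such a helper, assuming it simply decorates the callable with begin/commit/rollback on the supplied TransactionManager, is shown below.

import java.util.concurrent.Callable;
import javax.transaction.TransactionManager;

// Hedged sketch of a runWithTx-style helper: run the callable inside a transaction,
// committing on success and rolling back on failure. Assumed semantics, not the actual test utility.
<T> Callable<T> runWithTx(final TransactionManager tm, final Callable<T> callable) {
  return new Callable<T>() {
    @Override
    public T call() throws Exception {
      tm.begin();
      try {
        T result = callable.call();
        tm.commit();
        return result;
      } catch (Exception e) {
        tm.rollback();
        throw e;
      }
    }
  };
}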
 public void unblock(int count)
     throws InterruptedException, TimeoutException, BrokenBarrierException {
   log.tracef("Unblocking command on cache %s", cache);
   checkPoint.awaitStrict("blocked_" + count + "_on_" + cache, 5, SECONDS);
   checkPoint.trigger("resume_" + count + "_on_" + cache);
 }
 public void waitUntilBlocked(int count) throws TimeoutException, InterruptedException {
   String event = checkPoint.peek(5, SECONDS, "blocked_" + count + "_on_" + cache);
   assertEquals("blocked_" + count + "_on_" + cache, event);
 }
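The unblock and waitUntilBlocked helpers are the test-side counterparts of doBlock above. A hedged usage sketch follows; `delayer` is an assumed field that installs doBlock() on the target cache and exposes these helpers, while fork() and cache() come from the test base class as in the other snippets.

// Hypothetical usage of the delaying helpers above.
public void testCommandIsDelayed() throws Exception {
  Future<Object> put = fork(new Callable<Object>() {
    @Override
    public Object call() throws Exception {
      // This command will be parked inside doBlock() until the test releases it
      return cache(0).put("key", "value");
    }
  });

  delayer.waitUntilBlocked(1); // peek "blocked_1_on_<cache>" without consuming it
  // ... assert on intermediate cluster state while the command is held ...
  delayer.unblock(1);          // consume "blocked_1_on_<cache>", then trigger "resume_1_on_<cache>"

  put.get(10, TimeUnit.SECONDS);
}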
  protected void testIterationBeganAndSegmentNotComplete(
      final StateListener<String, String> listener,
      Operation operation,
      boolean shouldBePrimaryOwner)
      throws TimeoutException, InterruptedException, ExecutionException {
    final Map<String, String> expectedValues = new HashMap<String, String>(10);
    final Cache<String, String> cache = cache(0, CACHE_NAME);
    for (int i = 0; i < 10; i++) {
      String key = "key-" + i;
      String value = "value-" + i;
      expectedValues.put(key, value);
      cache.put(key, value);
    }

    String value;
    String keyToChange =
        findKeyBasedOnOwnership(
            expectedValues.keySet(),
            cache.getAdvancedCache().getDistributionManager().getConsistentHash(),
            shouldBePrimaryOwner,
            cache.getCacheManager().getAddress());

    switch (operation) {
      case CREATE:
        keyToChange = "new-key";
        value = "new-value";
        break;
      case PUT:
        value = cache.get(keyToChange) + "-changed";
        break;
      case REMOVE:
        value = null;
        break;
      default:
        throw new IllegalArgumentException("Unsupported Operation provided " + operation);
    }

    CheckPoint checkPoint = new CheckPoint();
    int segmentToUse =
        cache
            .getAdvancedCache()
            .getDistributionManager()
            .getConsistentHash()
            .getSegment(keyToChange);

    // Install a hook that blocks just before the iteration completes our key's segment, so the
    // modification we do later ends up queued behind the iteration.
    ClusterCacheNotifier notifier = waitUntilClosingSegment(cache, segmentToUse, checkPoint);

    Future<Void> future =
        fork(
            new Callable<Void>() {

              @Override
              public Void call() throws Exception {
                cache.addListener(listener);
                return null;
              }
            });

    try {
      checkPoint.awaitStrict("pre_complete_segment_invoked", 10, TimeUnit.SECONDS);
      Object oldValue = operation.perform(cache, keyToChange, value);

      // Now let the iteration complete
      checkPoint.triggerForever("pre_complete_segment_released");

      future.get(10, TimeUnit.SECONDS);

      boolean isClustered = isClustered(listener);

      // We should have one extra event (two pre/post events when local) for the modification,
      // since it came after we had already iterated over that entry. Note the value isn't
      // raised until the iteration is done.
      assertEquals(
          listener.events.size(),
          isClustered ? expectedValues.size() + 1 : (expectedValues.size() + 1) * 2);

      // If clustered, the modification event can occur in the middle. In non-clustered mode we
      // have to hold all events until the end (e.g. tx event listeners would have to wrap all
      // events in tx start/end), so we can't release them early. Cluster listeners aren't
      // affected by transactions, since those are not currently supported.
      if (isClustered) {

        CacheEntryEvent event = null;
        boolean foundEarlierCreate = false;
        // We iterate backwards so we only have to do it once
        for (int i = listener.events.size() - 1; i >= 0; --i) {
          CacheEntryEvent currentEvent = listener.events.get(i);
          if (currentEvent.getKey().equals(keyToChange)
              && operation.getType() == currentEvent.getType()) {
            if (event == null) {
              event = currentEvent;
              // We can remove safely since we are iterating backwards
              listener.events.remove(i);

              // If it is a create there is no previous create
              if (operation.getType() == Event.Type.CACHE_ENTRY_CREATED) {
                foundEarlierCreate = true;
                break;
              }
            } else {
              fail("There should only be a single event in the event queue!");
            }
          } else if (event != null
              && (foundEarlierCreate = event.getKey().equals(currentEvent.getKey()))) {
            break;
          }
        }
        // This should have been set
        assertTrue(
            foundEarlierCreate, "There was no matching create event for key " + event.getKey());

        assertEquals(event.getType(), operation.getType());
        assertEquals(event.isPre(), false);
        assertEquals(event.getValue(), value);
      }

      // Assert the first 10 (or 20 when local) events, since they should all come from the
      // iteration - this may not hold if segments complete earlier.
      boolean isPost = true;
      int position = 0;
      for (;
          position < (isClustered ? expectedValues.size() : expectedValues.size() * 2);
          ++position) {
        // Even-numbered checks are post events and carry a value - note we force every check to
        // be post for clustered listeners, since those should always be post
        if (!isClustered) {
          isPost = !isPost;
        }

        CacheEntryEvent event = listener.events.get(position);

        assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
        assertTrue(expectedValues.containsKey(event.getKey()));
        assertEquals(event.isPre(), !isPost);
        if (isPost) {
          assertEquals(event.getValue(), expectedValues.get(event.getKey()));
        } else {
          assertNull(event.getValue());
        }
      }

      // We should have 2 extra events at the end which are our modifications
      if (!isClustered) {
        CacheEntryEvent<String, String> event = listener.events.get(position);
        assertEquals(event.getType(), operation.getType());
        assertEquals(event.isPre(), true);
        assertEquals(event.getKey(), keyToChange);
        assertEquals(event.getValue(), oldValue);

        event = listener.events.get(position + 1);
        assertEquals(event.getType(), operation.getType());
        assertEquals(event.isPre(), false);
        assertEquals(event.getKey(), keyToChange);
        assertEquals(event.getValue(), value);
      }
    } finally {
      TestingUtil.replaceComponent(cache, CacheNotifier.class, notifier, true);
      TestingUtil.replaceComponent(cache, ClusterCacheNotifier.class, notifier, true);
      cache.removeListener(listener);
    }
  }
  /** This test is to verify that the modification event is sent after the creation event is done */
  private void testModificationAfterIterationBeganAndCompletedSegmentValueOwner(
      final StateListener<String, String> listener,
      Operation operation,
      boolean shouldBePrimaryOwner)
      throws IOException, InterruptedException, TimeoutException, BrokenBarrierException,
          ExecutionException {
    final Map<String, String> expectedValues = new HashMap<String, String>(10);
    final Cache<String, String> cache = cache(0, CACHE_NAME);
    for (int i = 0; i < 10; i++) {
      String key = "key-" + i;
      String value = "value-" + i;
      expectedValues.put(key, value);
      cache.put(key, value);
    }

    CheckPoint checkPoint = new CheckPoint();

    InterceptorChain chain =
        mockStream(
            cache,
            (mock, real, additional) -> {
              doAnswer(
                      i -> {
                        // Wait for main thread to sync up
                        checkPoint.trigger("pre_close_iter_invoked");
                        // Now wait until main thread lets us through
                        checkPoint.awaitStrict("pre_close_iter_released", 10, TimeUnit.SECONDS);
                        return i.getMethod().invoke(real, i.getArguments());
                      })
                  .when(mock)
                  .close();

              doAnswer(i -> i.getMethod().invoke(real, i.getArguments())).when(mock).iterator();
            });

    try {
      Future<Void> future =
          fork(
              () -> {
                cache.addListener(listener);
                return null;
              });

      checkPoint.awaitStrict("pre_close_iter_invoked", 10, TimeUnit.SECONDS);

      String value;
      String keyToChange =
          findKeyBasedOnOwnership(
              expectedValues.keySet(),
              cache.getAdvancedCache().getDistributionManager().getConsistentHash(),
              shouldBePrimaryOwner,
              cache.getCacheManager().getAddress());

      switch (operation) {
        case CREATE:
          keyToChange = "new-key";
          value = "new-value";
          break;
        case PUT:
          value = cache.get(keyToChange) + "-changed";
          break;
        case REMOVE:
          value = null;
          break;
        default:
          throw new IllegalArgumentException("Unsupported Operation provided " + operation);
      }

      Object oldValue = operation.perform(cache, keyToChange, value);

      // Now let the iteration complete
      checkPoint.triggerForever("pre_close_iter_released");

      future.get(10, TimeUnit.SECONDS);

      boolean isClustered = isClustered(listener);

      // We should have one extra event (two pre/post events when local) for the modification,
      // since it came after we had already iterated over that entry. Note the value isn't
      // raised until the iteration is done.
      assertEquals(
          listener.events.size(),
          isClustered ? expectedValues.size() + 1 : (expectedValues.size() + 1) * 2);

      // Assert the first 10 (or 20 when local) events, since they should all come from the
      // iteration - this may not hold if segments complete earlier.
      boolean isPost = true;
      int position = 0;
      for (;
          position < (isClustered ? expectedValues.size() : expectedValues.size() * 2);
          ++position) {
        // Even-numbered checks are post events and carry a value - note we force every check to
        // be post for clustered listeners, since those should always be post
        if (!isClustered) {
          isPost = !isPost;
        }

        CacheEntryEvent event = listener.events.get(position);

        assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
        assertTrue(expectedValues.containsKey(event.getKey()));
        assertEquals(event.isPre(), !isPost);
        if (isPost) {
          assertEquals(event.getValue(), expectedValues.get(event.getKey()));
        } else {
          assertNull(event.getValue());
        }
      }

      // The extra events at the end are our modification: a single post event when clustered,
      // or a pre/post pair when local
      if (isClustered) {
        CacheEntryEvent<String, String> event = listener.events.get(position);
        assertEquals(event.getType(), operation.getType());
        assertEquals(event.isPre(), false);
        assertEquals(event.getKey(), keyToChange);
        assertEquals(event.getValue(), value);
      } else {
        CacheEntryEvent<String, String> event = listener.events.get(position);
        assertEquals(event.getType(), operation.getType());
        assertEquals(event.isPre(), true);
        assertEquals(event.getKey(), keyToChange);
        assertEquals(event.getValue(), oldValue);

        event = listener.events.get(position + 1);
        assertEquals(event.getType(), operation.getType());
        assertEquals(event.isPre(), false);
        assertEquals(event.getKey(), keyToChange);
        assertEquals(event.getValue(), value);
      }
    } finally {
      TestingUtil.replaceComponent(cache, InterceptorChain.class, chain, true);
      cache.removeListener(listener);
    }
  }
  /** This test is to verify that the modification event replaces the current value for the key */
  private void testModificationAfterIterationBeganButNotIteratedValueYet(
      final StateListener<String, String> listener,
      Operation operation,
      boolean shouldBePrimaryOwner)
      throws InterruptedException, TimeoutException, BrokenBarrierException, ExecutionException {
    final Map<String, String> expectedValues = new HashMap<>(10);
    final Cache<String, String> cache = cache(0, CACHE_NAME);
    for (int i = 0; i < 10; i++) {
      String key = "key-" + i;
      String value = "value-" + i;
      expectedValues.put(key, value);
      cache.put(key, value);
    }

    final CheckPoint checkPoint = new CheckPoint();

    InterceptorChain chain =
        mockStream(
            cache,
            (mock, real, additional) ->
                doAnswer(
                        i -> {
                          // Wait for main thread to sync up
                          checkPoint.trigger("pre_retrieve_entry_invoked");
                          // Now wait until main thread lets us through
                          checkPoint.awaitStrict(
                              "pre_retrieve_entry_released", 10, TimeUnit.SECONDS);
                          return i.getMethod().invoke(real, i.getArguments());
                        })
                    .when(mock)
                    .iterator());
    try {
      Future<Void> future =
          fork(
              () -> {
                cache.addListener(listener);
                return null;
              });

      checkPoint.awaitStrict("pre_retrieve_entry_invoked", 10000, TimeUnit.SECONDS);

      String value;
      String keyToChange =
          findKeyBasedOnOwnership(
              expectedValues.keySet(),
              cache.getAdvancedCache().getDistributionManager().getConsistentHash(),
              shouldBePrimaryOwner,
              cache.getCacheManager().getAddress());

      switch (operation) {
        case CREATE:
          keyToChange = "new-key";
          value = "new-value";
          expectedValues.put(keyToChange, value);
          break;
        case PUT:
          value = cache.get(keyToChange) + "-changed";
          // Now remove the old value and put in the new one
          expectedValues.put(keyToChange, value);
          break;
        case REMOVE:
          value = null;
          expectedValues.remove(keyToChange);
          break;
        default:
          throw new IllegalArgumentException("Unsupported Operation provided " + operation);
      }

      operation.perform(cache, keyToChange, value);

      // Now let the iteration complete
      checkPoint.triggerForever("pre_retrieve_entry_released");

      future.get(10, TimeUnit.SECONDS);

      verifyEvents(isClustered(listener), listener, expectedValues);
    } finally {
      TestingUtil.replaceComponent(cache, InterceptorChain.class, chain, true);
      cache.removeListener(listener);
    }
  }
 public final void unblockCommandAndOpen() {
   open = true;
   checkPoint.trigger("after");
 }
 public final void unblockCommand() {
   checkPoint.trigger("after");
 }
 public final void awaitCommandBlock() throws TimeoutException, InterruptedException {
   checkPoint.awaitStrict("before", 30, TimeUnit.SECONDS);
 }