private void verifyEvents(boolean isClustered, StateListener<String, String> listener,
      Map<String, String> expected) {
   assertEquals(listener.events.size(), isClustered ? expected.size() : expected.size() * 2);
   boolean isPost = true;
   for (CacheEntryEvent<String, String> event : listener.events) {
      // Even checks means it will be post and have a value - note we force every check to be
      // even for clustered since those should always be post
      if (!isClustered) {
         isPost = !isPost;
      }

      assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
      assertTrue(expected.containsKey(event.getKey()));
      assertEquals(event.isPre(), !isPost);
      if (isPost) {
         assertEquals(event.getValue(), expected.get(event.getKey()));
      } else {
         assertNull(event.getValue());
      }
   }
}
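// For reference, a minimal sketch of the kind of StateListener the assertions above assume: a
// listener that records every CacheEntryEvent it receives, in arrival order, in a public
// "events" list. The name SketchStateListener and the exact annotation set are illustrative
// assumptions (the real StateListener hierarchy used by this test class may differ); it assumes
// the standard org.infinispan.notifications listener annotations are imported. A clustered
// variant would additionally set clustered = true on @Listener, which is why clustered
// listeners only see post events.
@Listener(includeCurrentState = true)
protected static class SketchStateListener<K, V> {
   final List<CacheEntryEvent<K, V>> events = Collections.synchronizedList(new ArrayList<>());

   @CacheEntryCreated
   @CacheEntryModified
   @CacheEntryRemoved
   public void handleEvent(CacheEntryEvent<K, V> event) {
      // Record pre and post events alike; the assertions inspect isPre()/getValue() afterwards.
      events.add(event);
   }
}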
protected void testIterationBeganAndSegmentNotComplete(final StateListener<String, String> listener,
      Operation operation, boolean shouldBePrimaryOwner)
      throws TimeoutException, InterruptedException, ExecutionException {
   final Map<String, String> expectedValues = new HashMap<String, String>(10);
   final Cache<String, String> cache = cache(0, CACHE_NAME);
   for (int i = 0; i < 10; i++) {
      String key = "key-" + i;
      String value = "value-" + i;
      expectedValues.put(key, value);
      cache.put(key, value);
   }

   String value;
   String keyToChange = findKeyBasedOnOwnership(expectedValues.keySet(),
         cache.getAdvancedCache().getDistributionManager().getConsistentHash(),
         shouldBePrimaryOwner, cache.getCacheManager().getAddress());

   switch (operation) {
      case CREATE:
         keyToChange = "new-key";
         value = "new-value";
         break;
      case PUT:
         value = cache.get(keyToChange) + "-changed";
         break;
      case REMOVE:
         value = null;
         break;
      default:
         throw new IllegalArgumentException("Unsupported Operation provided " + operation);
   }

   CheckPoint checkPoint = new CheckPoint();
   int segmentToUse = cache.getAdvancedCache().getDistributionManager().getConsistentHash()
         .getSegment(keyToChange);

   // Do the operation, which should put it in the queue.
   ClusterCacheNotifier notifier = waitUntilClosingSegment(cache, segmentToUse, checkPoint);

   Future<Void> future = fork(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
         cache.addListener(listener);
         return null;
      }
   });

   try {
      checkPoint.awaitStrict("pre_complete_segment_invoked", 10, TimeUnit.SECONDS);
      Object oldValue = operation.perform(cache, keyToChange, value);

      // Now let the iteration complete
      checkPoint.triggerForever("pre_complete_segment_released");

      future.get(10, TimeUnit.SECONDS);

      boolean isClustered = isClustered(listener);

      // We should have 1 or 2 (local) extra events due to the modification coming after we
      // iterated on it. Note the value isn't brought up until the iteration is done.
      assertEquals(listener.events.size(),
            isClustered ? expectedValues.size() + 1 : (expectedValues.size() + 1) * 2);

      // If it is clustered, then the modification can occur in the middle. In non-clustered mode
      // we have to block all events just in case of tx event listeners (i.e. tx start/tx end
      // would have to wrap all events), so we can't release them early. Cluster listeners aren't
      // affected by transactions since those are not currently supported.
      if (isClustered) {
         CacheEntryEvent event = null;
         boolean foundEarlierCreate = false;
         // We iterate backwards so we only have to do it once
         for (int i = listener.events.size() - 1; i >= 0; --i) {
            CacheEntryEvent currentEvent = listener.events.get(i);
            if (currentEvent.getKey().equals(keyToChange)
                  && operation.getType() == currentEvent.getType()) {
               if (event == null) {
                  event = currentEvent;
                  // We can remove safely since we are iterating backwards as well
                  listener.events.remove(i);
                  // If it is a create there is no previous create
                  if (operation.getType() == Event.Type.CACHE_ENTRY_CREATED) {
                     foundEarlierCreate = true;
                     break;
                  }
               } else {
                  fail("There should only be a single event in the event queue!");
               }
            } else if (event != null
                  && (foundEarlierCreate = event.getKey().equals(currentEvent.getKey()))) {
               break;
            }
         }
         // This should have been set
         assertTrue(foundEarlierCreate,
               "There was no matching create event for key " + event.getKey());
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getValue(), value);
      }

      // Assert the first 10/20 since they should all be from iteration - this may not work since
      // segments complete earlier.
      boolean isPost = true;
      int position = 0;
      for (; position < (isClustered ? expectedValues.size() : expectedValues.size() * 2); ++position) {
         // Even checks means it will be post and have a value - note we force every check to be
         // even for clustered since those should always be post
         if (!isClustered) {
            isPost = !isPost;
         }

         CacheEntryEvent event = listener.events.get(position);
         assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
         assertTrue(expectedValues.containsKey(event.getKey()));
         assertEquals(event.isPre(), !isPost);
         if (isPost) {
            assertEquals(event.getValue(), expectedValues.get(event.getKey()));
         } else {
            assertNull(event.getValue());
         }
      }

      // We should have 2 extra events at the end which are our modifications
      if (!isClustered) {
         CacheEntryEvent<String, String> event = listener.events.get(position);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), true);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), oldValue);

         event = listener.events.get(position + 1);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), value);
      }
   } finally {
      TestingUtil.replaceComponent(cache, CacheNotifier.class, notifier, true);
      TestingUtil.replaceComponent(cache, ClusterCacheNotifier.class, notifier, true);
      cache.removeListener(listener);
   }
}
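// Both tests in this section rely on findKeyBasedOnOwnership(...) to pick a key that the local
// node either is or is not the primary owner of. A minimal sketch of what such a helper could
// look like is below; it assumes ConsistentHash exposes locatePrimaryOwner(Object) and that
// org.infinispan.distribution.ch.ConsistentHash and org.infinispan.remoting.transport.Address
// are imported. The name findKeyBasedOnOwnershipSketch is hypothetical - the helper actually
// used by this test class may be implemented differently.
private String findKeyBasedOnOwnershipSketch(Iterable<String> keys, ConsistentHash hash,
      boolean shouldBePrimaryOwner, Address localAddress) {
   for (String key : keys) {
      boolean isPrimaryOwner = localAddress.equals(hash.locatePrimaryOwner(key));
      if (isPrimaryOwner == shouldBePrimaryOwner) {
         return key;
      }
   }
   throw new IllegalStateException("No key matched the requested ownership (primary owner = "
         + shouldBePrimaryOwner + ")");
}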
/** This test is to verify that the modification event is sent after the creation event is done */
private void testModificationAfterIterationBeganAndCompletedSegmentValueOwner(
      final StateListener<String, String> listener, Operation operation, boolean shouldBePrimaryOwner)
      throws IOException, InterruptedException, TimeoutException, BrokenBarrierException,
      ExecutionException {
   final Map<String, String> expectedValues = new HashMap<String, String>(10);
   final Cache<String, String> cache = cache(0, CACHE_NAME);
   for (int i = 0; i < 10; i++) {
      String key = "key-" + i;
      String value = "value-" + i;
      expectedValues.put(key, value);
      cache.put(key, value);
   }

   CheckPoint checkPoint = new CheckPoint();

   InterceptorChain chain = mockStream(cache, (mock, real, additional) -> {
      doAnswer(i -> {
         // Wait for main thread to sync up
         checkPoint.trigger("pre_close_iter_invoked");
         // Now wait until main thread lets us through
         checkPoint.awaitStrict("pre_close_iter_released", 10, TimeUnit.SECONDS);
         return i.getMethod().invoke(real, i.getArguments());
      }).when(mock).close();
      doAnswer(i -> i.getMethod().invoke(real, i.getArguments())).when(mock).iterator();
   });

   try {
      Future<Void> future = fork(() -> {
         cache.addListener(listener);
         return null;
      });

      checkPoint.awaitStrict("pre_close_iter_invoked", 10, TimeUnit.SECONDS);

      String value;
      String keyToChange = findKeyBasedOnOwnership(expectedValues.keySet(),
            cache.getAdvancedCache().getDistributionManager().getConsistentHash(),
            shouldBePrimaryOwner, cache.getCacheManager().getAddress());

      switch (operation) {
         case CREATE:
            keyToChange = "new-key";
            value = "new-value";
            break;
         case PUT:
            value = cache.get(keyToChange) + "-changed";
            break;
         case REMOVE:
            value = null;
            break;
         default:
            throw new IllegalArgumentException("Unsupported Operation provided " + operation);
      }

      Object oldValue = operation.perform(cache, keyToChange, value);

      // Now let the iteration complete
      checkPoint.triggerForever("pre_close_iter_released");

      future.get(10, TimeUnit.SECONDS);

      boolean isClustered = isClustered(listener);

      // We should have 1 or 2 (local) events due to the modification coming after we iterated on
      // it. Note the value isn't brought up until the iteration is done.
      assertEquals(listener.events.size(),
            isClustered ? expectedValues.size() + 1 : (expectedValues.size() + 1) * 2);

      // Assert the first 10/20 since they should all be from iteration - this may not work since
      // segments complete earlier.
      boolean isPost = true;
      int position = 0;
      for (; position < (isClustered ? expectedValues.size() : expectedValues.size() * 2); ++position) {
         // Even checks means it will be post and have a value - note we force every check to be
         // even for clustered since those should always be post
         if (!isClustered) {
            isPost = !isPost;
         }

         CacheEntryEvent event = listener.events.get(position);
         assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
         assertTrue(expectedValues.containsKey(event.getKey()));
         assertEquals(event.isPre(), !isPost);
         if (isPost) {
            assertEquals(event.getValue(), expectedValues.get(event.getKey()));
         } else {
            assertNull(event.getValue());
         }
      }

      // We should have 2 extra events at the end which are our modifications
      if (isClustered) {
         CacheEntryEvent<String, String> event = listener.events.get(position);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), value);
      } else {
         CacheEntryEvent<String, String> event = listener.events.get(position);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), true);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), oldValue);

         event = listener.events.get(position + 1);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), value);
      }
   } finally {
      TestingUtil.replaceComponent(cache, InterceptorChain.class, chain, true);
      cache.removeListener(listener);
   }
}
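// The Operation argument used throughout these tests is not defined in this section. From its
// usage (getType() compared against Event.Type, and perform(cache, key, value) returning the
// previous value) it can be reconstructed roughly as the enum below. OperationSketch is an
// illustrative reconstruction under those assumptions, not the enum this test class actually
// defines.
protected enum OperationSketch {
   CREATE(Event.Type.CACHE_ENTRY_CREATED),
   PUT(Event.Type.CACHE_ENTRY_MODIFIED),
   REMOVE(Event.Type.CACHE_ENTRY_REMOVED);

   private final Event.Type type;

   OperationSketch(Event.Type type) {
      this.type = type;
   }

   Event.Type getType() {
      return type;
   }

   Object perform(Cache<String, String> cache, String key, String value) {
      // Return the previous value so callers can assert the pre-event payload (oldValue).
      return this == REMOVE ? cache.remove(key) : cache.put(key, value);
   }
}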