@Override
public void notifyClusterListeners(Collection<? extends CacheEntryEvent<K, V>> events, UUID uuid) {
   for (CacheEntryEvent<K, V> event : events) {
      if (event.isPre()) {
         throw new IllegalArgumentException("Events for cluster listener should never be pre change");
      }
      switch (event.getType()) {
         case CACHE_ENTRY_MODIFIED:
            for (CacheEntryListenerInvocation<K, V> listener : cacheEntryModifiedListeners) {
               if (listener.isClustered() && uuid.equals(listener.getIdentifier())) {
                  // Force invocation: the owning node already applied the filter and converter,
                  // so skip both here
                  listener.invokeNoChecks(event, false, true);
               }
            }
            break;
         case CACHE_ENTRY_CREATED:
            for (CacheEntryListenerInvocation<K, V> listener : cacheEntryCreatedListeners) {
               if (listener.isClustered() && uuid.equals(listener.getIdentifier())) {
                  // Force invocation: the owning node already applied the filter and converter,
                  // so skip both here
                  listener.invokeNoChecks(event, false, true);
               }
            }
            break;
         case CACHE_ENTRY_REMOVED:
            for (CacheEntryListenerInvocation<K, V> listener : cacheEntryRemovedListeners) {
               if (listener.isClustered() && uuid.equals(listener.getIdentifier())) {
                  // Force invocation: the owning node already applied the filter and converter,
                  // so skip both here
                  listener.invokeNoChecks(event, false, true);
               }
            }
            break;
         default:
            throw new IllegalArgumentException("Unexpected event type encountered!");
      }
   }
}
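// Illustrative sketch, not part of the original source: a minimal clustered listener whose
// events would be delivered through notifyClusterListeners above on the node that registered it.
// @Listener(clustered = true), @CacheEntryCreated and Cache.addListener are the standard
// Infinispan notification API; the class name and method bodies are made up for the example.
import org.infinispan.Cache;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
import org.infinispan.notifications.cachelistener.event.CacheEntryEvent;

@Listener(clustered = true)
public class ClusteredCreateListener {

   @CacheEntryCreated
   public void onCreated(CacheEntryEvent<String, String> event) {
      // Clustered listeners only ever receive post events, matching the isPre() guard above
      System.out.println("Created " + event.getKey() + " = " + event.getValue());
   }

   // Registration on any node; create events originating on other nodes are forwarded to
   // this node and dispatched through notifyClusterListeners
   public static void register(Cache<String, String> cache) {
      cache.addListener(new ClusteredCreateListener());
   }
}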
private void verifyEvents(boolean isClustered, StateListener<String, String> listener,
      Map<String, String> expected) {
   assertEquals(listener.events.size(), isClustered ? expected.size() : expected.size() * 2);
   boolean isPost = true;
   for (CacheEntryEvent<String, String> event : listener.events) {
      // Even checks means it will be post and have a value - note we force every check to be
      // even for clustered since those should always be post
      if (!isClustered) {
         isPost = !isPost;
      }

      assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
      assertTrue(expected.containsKey(event.getKey()));
      assertEquals(event.isPre(), !isPost);
      if (isPost) {
         assertEquals(event.getValue(), expected.get(event.getKey()));
      } else {
         assertNull(event.getValue());
      }
   }
}
@Override
public void onFilterResult(Object userContext, Object eventType, Object instance, Object[] projection,
      Comparable[] sortProjection) {
   CacheEntryEvent<K, V> event = (CacheEntryEvent<K, V>) userContext;
   if (event.isPre() && isClustered
         || isPrimaryOnly && !clusteringDependentLogic.localNodeIsPrimaryOwner(event.getKey())) {
      return;
   }

   DelegatingCacheEntryListenerInvocation<K, V>[] invocations;
   switch (event.getType()) {
      case CACHE_ENTRY_ACTIVATED:
         invocations = activated_invocations;
         break;
      case CACHE_ENTRY_CREATED:
         invocations = created_invocations;
         break;
      case CACHE_ENTRY_INVALIDATED:
         invocations = invalidated_invocations;
         break;
      case CACHE_ENTRY_LOADED:
         invocations = loaded_invocations;
         break;
      case CACHE_ENTRY_MODIFIED:
         invocations = modified_invocations;
         break;
      case CACHE_ENTRY_PASSIVATED:
         invocations = passivated_invocations;
         break;
      case CACHE_ENTRY_REMOVED:
         invocations = removed_invocations;
         break;
      case CACHE_ENTRY_VISITED:
         invocations = visited_invocations;
         break;
      case CACHE_ENTRY_EVICTED:
         invocations = evicted_invocations;
         break;
      case CACHE_ENTRY_EXPIRED:
         invocations = expired_invocations;
         break;
      default:
         return;
   }

   boolean conversionDone = false;
   for (DelegatingCacheEntryListenerInvocation<K, V> invocation : invocations) {
      if (invocation.getObservation().shouldInvoke(event.isPre())) {
         if (!conversionDone) {
            if (filterAndConvert && event instanceof EventImpl) {
               // todo [anistor] can it not be an EventImpl? can it not be filterAndConvert?
               EventImpl<K, V> eventImpl = (EventImpl<K, V>) event;
               EventImpl<K, V> clone = eventImpl.clone();
               clone.setValue((V) makeFilterResult(userContext, eventType, event.getKey(),
                     projection == null ? instance : null, projection, sortProjection));
               event = clone;
            }
            conversionDone = true;
         }

         invocation.invokeNoChecks(new EventWrapper<>(event.getKey(), event), false, filterAndConvert);
      }
   }
}
private void injectFailure(CacheEntryEvent event) {
   if (injectFailure) {
      if (isInjectInPre && event.isPre())
         throwSuspectException();
      else if (!isInjectInPre && !event.isPre())
         throwSuspectException();
   }
}
protected void testIterationBeganAndSegmentNotComplete(final StateListener<String, String> listener,
      Operation operation, boolean shouldBePrimaryOwner)
      throws TimeoutException, InterruptedException, ExecutionException {
   final Map<String, String> expectedValues = new HashMap<String, String>(10);
   final Cache<String, String> cache = cache(0, CACHE_NAME);
   for (int i = 0; i < 10; i++) {
      String key = "key-" + i;
      String value = "value-" + i;
      expectedValues.put(key, value);
      cache.put(key, value);
   }

   String value;
   String keyToChange = findKeyBasedOnOwnership(expectedValues.keySet(),
         cache.getAdvancedCache().getDistributionManager().getConsistentHash(),
         shouldBePrimaryOwner, cache.getCacheManager().getAddress());

   switch (operation) {
      case CREATE:
         keyToChange = "new-key";
         value = "new-value";
         break;
      case PUT:
         value = cache.get(keyToChange) + "-changed";
         break;
      case REMOVE:
         value = null;
         break;
      default:
         throw new IllegalArgumentException("Unsupported Operation provided " + operation);
   }

   CheckPoint checkPoint = new CheckPoint();
   int segmentToUse = cache.getAdvancedCache().getDistributionManager().getConsistentHash()
         .getSegment(keyToChange);

   // do the operation, which should put it in the queue.
   ClusterCacheNotifier notifier = waitUntilClosingSegment(cache, segmentToUse, checkPoint);

   Future<Void> future = fork(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
         cache.addListener(listener);
         return null;
      }
   });

   try {
      checkPoint.awaitStrict("pre_complete_segment_invoked", 10, TimeUnit.SECONDS);
      Object oldValue = operation.perform(cache, keyToChange, value);

      // Now let the iteration complete
      checkPoint.triggerForever("pre_complete_segment_released");

      future.get(10, TimeUnit.SECONDS);

      boolean isClustered = isClustered(listener);

      // We should have 1 or 2 (local) events due to the modification coming after we iterated on it.
      // Note the value isn't brought up until the iteration is done.
      assertEquals(listener.events.size(),
            isClustered ? expectedValues.size() + 1 : (expectedValues.size() + 1) * 2);

      // If it is clustered, the modification can occur in the middle. In non-clustered mode we have
      // to block all events, just in case of tx event listeners (i.e. tx start/tx end would have to
      // wrap all events), so we can't release them early. Cluster listeners aren't affected by
      // transactions since those are not currently supported.
      if (isClustered) {
         CacheEntryEvent event = null;
         boolean foundEarlierCreate = false;
         // We iterate backwards so we only have to do it once
         for (int i = listener.events.size() - 1; i >= 0; --i) {
            CacheEntryEvent currentEvent = listener.events.get(i);
            if (currentEvent.getKey().equals(keyToChange)
                  && operation.getType() == currentEvent.getType()) {
               if (event == null) {
                  event = currentEvent;
                  // We can remove safely since we are iterating backwards as well
                  listener.events.remove(i);
                  // If it is a create there is no previous create
                  if (operation.getType() == Event.Type.CACHE_ENTRY_CREATED) {
                     foundEarlierCreate = true;
                     break;
                  }
               } else {
                  fail("There should only be a single event in the event queue!");
               }
            } else if (event != null
                  && (foundEarlierCreate = event.getKey().equals(currentEvent.getKey()))) {
               break;
            }
         }
         // This should have been set
         assertTrue(foundEarlierCreate, "There was no matching create event for key " + event.getKey());

         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getValue(), value);
      }

      // Assert the first 10/20 since they should all be from iteration - this may not work since
      // segments complete earlier..
      boolean isPost = true;
      int position = 0;
      for (; position < (isClustered ? expectedValues.size() : expectedValues.size() * 2); ++position) {
         // Even checks means it will be post and have a value - note we force every check to be
         // even for clustered since those should always be post
         if (!isClustered) {
            isPost = !isPost;
         }

         CacheEntryEvent event = listener.events.get(position);
         assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
         assertTrue(expectedValues.containsKey(event.getKey()));
         assertEquals(event.isPre(), !isPost);
         if (isPost) {
            assertEquals(event.getValue(), expectedValues.get(event.getKey()));
         } else {
            assertNull(event.getValue());
         }
      }

      // We should have 2 extra events at the end which are our modifications
      if (!isClustered) {
         CacheEntryEvent<String, String> event = listener.events.get(position);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), true);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), oldValue);

         event = listener.events.get(position + 1);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), value);
      }
   } finally {
      TestingUtil.replaceComponent(cache, CacheNotifier.class, notifier, true);
      TestingUtil.replaceComponent(cache, ClusterCacheNotifier.class, notifier, true);
      cache.removeListener(listener);
   }
}
/** This test is to verify that the modification event is sent after the creation event is done */
private void testModificationAfterIterationBeganAndCompletedSegmentValueOwner(
      final StateListener<String, String> listener, Operation operation, boolean shouldBePrimaryOwner)
      throws IOException, InterruptedException, TimeoutException, BrokenBarrierException,
      ExecutionException {
   final Map<String, String> expectedValues = new HashMap<String, String>(10);
   final Cache<String, String> cache = cache(0, CACHE_NAME);
   for (int i = 0; i < 10; i++) {
      String key = "key-" + i;
      String value = "value-" + i;
      expectedValues.put(key, value);
      cache.put(key, value);
   }

   CheckPoint checkPoint = new CheckPoint();

   InterceptorChain chain = mockStream(cache, (mock, real, additional) -> {
      doAnswer(i -> {
         // Wait for main thread to sync up
         checkPoint.trigger("pre_close_iter_invoked");
         // Now wait until main thread lets us through
         checkPoint.awaitStrict("pre_close_iter_released", 10, TimeUnit.SECONDS);
         return i.getMethod().invoke(real, i.getArguments());
      }).when(mock).close();
      doAnswer(i -> i.getMethod().invoke(real, i.getArguments())).when(mock).iterator();
   });

   try {
      Future<Void> future = fork(() -> {
         cache.addListener(listener);
         return null;
      });

      checkPoint.awaitStrict("pre_close_iter_invoked", 10, TimeUnit.SECONDS);

      String value;
      String keyToChange = findKeyBasedOnOwnership(expectedValues.keySet(),
            cache.getAdvancedCache().getDistributionManager().getConsistentHash(),
            shouldBePrimaryOwner, cache.getCacheManager().getAddress());

      switch (operation) {
         case CREATE:
            keyToChange = "new-key";
            value = "new-value";
            break;
         case PUT:
            value = cache.get(keyToChange) + "-changed";
            break;
         case REMOVE:
            value = null;
            break;
         default:
            throw new IllegalArgumentException("Unsupported Operation provided " + operation);
      }

      Object oldValue = operation.perform(cache, keyToChange, value);

      // Now let the iteration complete
      checkPoint.triggerForever("pre_close_iter_released");

      future.get(10, TimeUnit.SECONDS);

      boolean isClustered = isClustered(listener);

      // We should have 1 or 2 (local) events due to the modification coming after we iterated on it.
      // Note the value isn't brought up until the iteration is done.
      assertEquals(listener.events.size(),
            isClustered ? expectedValues.size() + 1 : (expectedValues.size() + 1) * 2);

      // Assert the first 10/20 since they should all be from iteration - this may not work since
      // segments complete earlier..
      boolean isPost = true;
      int position = 0;
      for (; position < (isClustered ? expectedValues.size() : expectedValues.size() * 2); ++position) {
         // Even checks means it will be post and have a value - note we force every check to be
         // even for clustered since those should always be post
         if (!isClustered) {
            isPost = !isPost;
         }

         CacheEntryEvent event = listener.events.get(position);
         assertEquals(event.getType(), Event.Type.CACHE_ENTRY_CREATED);
         assertTrue(expectedValues.containsKey(event.getKey()));
         assertEquals(event.isPre(), !isPost);
         if (isPost) {
            assertEquals(event.getValue(), expectedValues.get(event.getKey()));
         } else {
            assertNull(event.getValue());
         }
      }

      // We should have 2 extra events at the end which are our modifications
      if (isClustered) {
         CacheEntryEvent<String, String> event = listener.events.get(position);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), value);
      } else {
         CacheEntryEvent<String, String> event = listener.events.get(position);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), true);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), oldValue);

         event = listener.events.get(position + 1);
         assertEquals(event.getType(), operation.getType());
         assertEquals(event.isPre(), false);
         assertEquals(event.getKey(), keyToChange);
         assertEquals(event.getValue(), value);
      }
   } finally {
      TestingUtil.replaceComponent(cache, InterceptorChain.class, chain, true);
      cache.removeListener(listener);
   }
}
@CacheEntryActivated
public void handleActivated(CacheEntryEvent e) {
   if (e.isPre())
      activated.add(e.getKey());
}
@CacheEntryLoaded
public void handleLoaded(CacheEntryEvent e) {
   if (e.isPre())
      loaded.add(e.getKey());
}