public void testPreloadOnStart() throws CacheLoaderException {
   for (int i = 0; i < NUM_KEYS; i++) {
      c1.put("k" + i, "v" + i);
   }
   DataContainer dc1 = c1.getAdvancedCache().getDataContainer();
   assert dc1.size(null) == NUM_KEYS;

   DummyInMemoryCacheStore cs = (DummyInMemoryCacheStore) TestingUtil.extractComponent(c1, CacheLoaderManager.class).getCacheStore();
   assert cs.loadAllKeys(Collections.emptySet()).size() == NUM_KEYS;

   addClusterEnabledCacheManager();
   EmbeddedCacheManager cm2 = cacheManagers.get(1);
   cm2.defineConfiguration(cacheName, buildConfiguration());
   c2 = cache(1, cacheName);
   waitForClusterToForm();

   DataContainer dc2 = c2.getAdvancedCache().getDataContainer();
   assert dc2.size(null) == NUM_KEYS : "Expected all the cache store entries to be preloaded on the second cache";
   for (int i = 0; i < NUM_KEYS; i++) {
      assertOwnershipAndNonOwnership("k" + i, true);
   }
}
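// The assertOwnershipAndNonOwnership(key, allowL1) helper called above is defined in the test base
// class and not shown here. A minimal sketch of what such a check could look like follows; it
// assumes the base-class helpers caches(cacheName) and address(cache) are available and that
// DistributionManager.getLocality(key) reports ownership. The real helper may differ in detail.
protected void assertOwnershipAndNonOwnershipSketch(Object key, boolean allowL1) {
   for (Cache<Object, Object> c : caches(cacheName)) {
      DataContainer dc = c.getAdvancedCache().getDataContainer();
      boolean isOwner = c.getAdvancedCache().getDistributionManager().getLocality(key).isLocal();
      if (isOwner) {
         // owners must hold the key in memory after preloading
         assert dc.containsKey(key) : "Owner " + address(c) + " does not hold key " + key;
      } else if (!allowL1) {
         // non-owners must not hold the key unless it was pulled into L1
         assert !dc.containsKey(key) : "Non-owner " + address(c) + " unexpectedly holds key " + key;
      }
   }
}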
public void applyState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
   if (trace) {
      log.tracef("Before applying the received state the data container of cache %s has %d keys", cacheName, dataContainer.size());
   }

   for (StateChunk stateChunk : stateChunks) {
      // it's possible to receive a late message so we must be prepared to ignore segments we no longer own
      // todo [anistor] this check should be based on topologyId
      if (!cacheTopology.getWriteConsistentHash().getSegmentsForOwner(rpcManager.getAddress()).contains(stateChunk.getSegmentId())) {
         log.warnf("Discarding received cache entries for segment %d of cache %s because they do not belong to this node.", stateChunk.getSegmentId(), cacheName);
         continue;
      }

      // notify the inbound task that a chunk of cache entries was received
      InboundTransferTask inboundTransfer;
      synchronized (this) {
         inboundTransfer = transfersBySegment.get(stateChunk.getSegmentId());
      }
      if (inboundTransfer != null) {
         if (stateChunk.getCacheEntries() != null) {
            doApplyState(sender, stateChunk.getSegmentId(), stateChunk.getCacheEntries());
         }
         inboundTransfer.onStateReceived(stateChunk.getSegmentId(), stateChunk.isLastChunk());
      } else {
         log.warnf("Received unsolicited state from node %s for segment %d of cache %s", sender, stateChunk.getSegmentId(), cacheName);
      }
   }

   if (trace) {
      log.tracef("After applying the received state the data container of cache %s has %d keys", cacheName, dataContainer.size());
      synchronized (this) {
         log.tracef("Segments not received yet for cache %s: %s", cacheName, transfersBySource);
      }
   }
}
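// For reference, applyState() above relies on only three properties of a StateChunk. A minimal
// sketch of such a value object, inferred purely from that usage (the real class also implements
// marshalling and is named StateChunk), could look like this:
public final class StateChunkSketch {
   private final int segmentId;                               // segment the entries belong to
   private final Collection<InternalCacheEntry> cacheEntries; // entries of that segment; may be null
   private final boolean isLastChunk;                         // true when no more chunks follow for this segment

   public StateChunkSketch(int segmentId, Collection<InternalCacheEntry> cacheEntries, boolean isLastChunk) {
      this.segmentId = segmentId;
      this.cacheEntries = cacheEntries;
      this.isLastChunk = isLastChunk;
   }

   public int getSegmentId() { return segmentId; }
   public Collection<InternalCacheEntry> getCacheEntries() { return cacheEntries; }
   public boolean isLastChunk() { return isLastChunk; }
}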
public void testKeySetWithEvictedEntriesAndFlags() {
   final int numKeys = 300;
   for (int i = 0; i < numKeys; i++) {
      cache.put(i, i);
   }

   AdvancedCache<Object, Object> flagCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
   DataContainer dc = flagCache.getDataContainer();
   assertFalse("Data Container should not have all keys", numKeys == dc.size());
   Set<Object> keySet = flagCache.keySet();
   assertEquals(dc.size(), keySet.size());
   for (Object key : dc.keySet()) {
      assertTrue("Key: " + key + " was not found!", keySet.contains(key));
   }
}
@Override
public void markRehashCompleted(int viewId) throws InterruptedException {
   waitForJoinToStart();

   if (viewId < lastViewId) {
      if (trace)
         log.tracef("Ignoring old rehash completed confirmation for view %d, last view is %d", viewId, lastViewId);
      return;
   }

   if (viewId > lastViewId) {
      throw new IllegalStateException("Received rehash completed confirmation before confirming it ourselves");
   }

   if (trace)
      log.tracef("Rehash completed on node %s, data container has %d keys", getSelf(), dataContainer.size());
   receivedRehashCompletedNotification = true;
   synchronized (rehashInProgressMonitor) {
      // we know for sure the rehash task is waiting for this confirmation, so the CH hasn't been replaced
      // log the CH we are about to install, not the stale lastSuccessfulCH
      if (trace) log.tracef("Updating last rehashed CH to %s", this.consistentHash);
      lastSuccessfulCH = this.consistentHash;
      rehashInProgressMonitor.notifyAll();
   }
   joinCompletedLatch.countDown();
}
@Override
public void applyState(ConsistentHash consistentHash, Map<Object, InternalCacheValue> state, Address sender, int viewId) throws InterruptedException {
   waitForJoinToStart();

   if (viewId < lastViewId) {
      log.debugf("Rejecting state pushed by node %s for old rehash %d (last view id is %d)", sender, viewId, lastViewId);
      return;
   }

   log.debugf("Applying new state from %s: received %d keys", sender, state.size());
   if (trace) log.tracef("Received keys: %s", state.keySet());

   int retryCount = 3; // in case we have issues applying state.
   Map<Object, InternalCacheValue> pendingApplications = state;
   for (int i = 0; i < retryCount; i++) {
      pendingApplications = applyStateMap(pendingApplications, true);
      if (pendingApplications.isEmpty()) break;
   }
   // one last go
   if (!pendingApplications.isEmpty()) applyStateMap(pendingApplications, false);

   if (trace) log.tracef("After applying state data container has %d keys", dataContainer.size());
}
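// applyStateMap() is not shown. From its usage above it takes the pending entries plus a retry
// flag and returns the subset that failed to apply, so the caller can retry them. A hypothetical
// sketch, assuming a local cache reference is available and each value is written through the
// cache with flags that keep the write local (the real method may use a different write path):
private Map<Object, InternalCacheValue> applyStateMapSketch(Map<Object, InternalCacheValue> state, boolean retry) {
   Map<Object, InternalCacheValue> retries = new HashMap<Object, InternalCacheValue>();
   for (Map.Entry<Object, InternalCacheValue> e : state.entrySet()) {
      InternalCacheValue v = e.getValue();
      try {
         cache.getAdvancedCache()
              .withFlags(Flag.CACHE_MODE_LOCAL, Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_CACHE_STORE)
              .put(e.getKey(), v.getValue(), v.getLifespan(), TimeUnit.MILLISECONDS, v.getMaxIdle(), TimeUnit.MILLISECONDS);
      } catch (Exception ex) {
         if (retry) {
            retries.put(e.getKey(), v); // keep it for the next attempt
         } else {
            log.warnf(ex, "Unable to apply state for key %s", e.getKey());
         }
      }
   }
   return retries;
}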
public void testValuesWithEvictedEntriesAndFlags() {
   final int numKeys = 300;
   for (int i = 0; i < numKeys; i++) {
      cache.put(i, i);
   }

   AdvancedCache<Object, Object> flagCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
   DataContainer dc = flagCache.getDataContainer();
   assertFalse("Data Container should not have all keys", numKeys == dc.size());
   Collection<Object> values = flagCache.values();
   assertEquals(dc.size(), values.size());

   Collection<Object> dcValues = dc.values();
   for (Object dcValue : dcValues) {
      assertTrue("Value: " + dcValue + " was not found!", values.contains(dcValue));
   }
}
protected void assertNumberOfEntries(int cacheIndex) throws Exception {
   CacheStore cacheStore = TestingUtil.extractComponent(cache(cacheIndex), CacheLoaderManager.class).getCacheStore();
   assertEquals(2, cacheStore.loadAllKeys(null).size()); // two entries in store

   DataContainer dataContainer = cache(cacheIndex).getAdvancedCache().getDataContainer();
   assertEquals(1, dataContainer.size(null)); // only one entry in memory (the other one was evicted)
}
public void testPreloading() throws CacheLoaderException {
   Configuration preloadingCfg = cfg.clone();
   preloadingCfg.getCacheLoaderManagerConfig().setPreload(true);
   ((DummyInMemoryCacheStore.Cfg) preloadingCfg.getCacheLoaderManagerConfig().getFirstCacheLoaderConfig()).setStoreName("preloadingCache");
   cm.defineConfiguration("preloadingCache", preloadingCfg);
   Cache preloadingCache = cm.getCache("preloadingCache");
   CacheStore preloadingStore = TestingUtil.extractComponent(preloadingCache, CacheLoaderManager.class).getCacheStore();

   assert preloadingCache.getConfiguration().getCacheLoaderManagerConfig().isPreload();

   assertNotInCacheAndStore(preloadingCache, preloadingStore, "k1", "k2", "k3", "k4");

   preloadingCache.put("k1", "v1");
   preloadingCache.put("k2", "v2", lifespan, MILLISECONDS);
   preloadingCache.put("k3", "v3");
   preloadingCache.put("k4", "v4", lifespan, MILLISECONDS);

   for (int i = 1; i < 5; i++) {
      if (i % 2 == 1)
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i);
      else
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i, lifespan);
   }

   DataContainer c = preloadingCache.getAdvancedCache().getDataContainer();
   assert c.size(null) == 4;
   preloadingCache.stop();
   assert c.size(null) == 0;

   preloadingCache.start();
   assert preloadingCache.getConfiguration().getCacheLoaderManagerConfig().isPreload();

   c = preloadingCache.getAdvancedCache().getDataContainer();
   assert c.size(null) == 4;

   for (int i = 1; i < 5; i++) {
      if (i % 2 == 1)
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i);
      else
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i, lifespan);
   }
}
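// assertInCacheAndStore() and assertNotInCacheAndStore() are defined elsewhere in this test class.
// A minimal sketch of the mortal-entry variant, assuming CacheStore.load(key) returns an
// InternalCacheEntry exposing getValue() and getLifespan() (the overload without a lifespan
// parameter would assert getLifespan() == -1, i.e. an immortal entry):
private void assertInCacheAndStoreSketch(Cache cache, CacheStore store, Object key, Object value, long lifespanMillis) throws CacheLoaderException {
   // the entry must be readable through the cache...
   assert value.equals(cache.get(key)) : "Cache does not contain " + key;
   // ...and must have been written through to the store with the same lifespan
   InternalCacheEntry stored = store.load(key);
   assert stored != null : "Store does not contain " + key;
   assert value.equals(stored.getValue());
   assert stored.getLifespan() == lifespanMillis;
}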
public void testEntrySetWithEvictedEntriesAndFlags() {
   final int numKeys = 300;
   for (int i = 0; i < numKeys; i++) {
      cache.put(i, i);
   }

   AdvancedCache<Object, Object> flagCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
   DataContainer dc = flagCache.getDataContainer();
   assertFalse("Data Container should not have all keys", numKeys == dc.size());
   Set<Map.Entry<Object, Object>> entrySet = flagCache.entrySet();
   assertEquals(dc.size(), entrySet.size());

   Set<InternalCacheEntry> entries = dc.entrySet();
   Map<Object, Object> map = new HashMap<Object, Object>(entrySet.size());
   for (Map.Entry<Object, Object> entry : entrySet) {
      map.put(entry.getKey(), entry.getValue());
   }

   for (InternalCacheEntry entry : entries) {
      assertEquals("Key/Value mismatch!", entry.getValue(), map.get(entry.getKey()));
   }
}
private void invalidateSegments(Set<Integer> newSegments, Set<Integer> segmentsToL1) {
   // The actual owners keep track of the nodes that hold a key in L1 ("requestors") and
   // they invalidate the key on every requestor after a change.
   // But this information is only present on the owners where the ClusteredGetKeyValueCommand
   // got executed - if the requestor only contacted one owner, and that node is no longer an owner
   // (perhaps because it left the cluster), the other owners will not know to invalidate the key
   // on that requestor. Furthermore, the requestors list is not copied to the new owners during
   // state transfers.
   // To compensate for this, we delete all L1 entries in segments that changed ownership during
   // this topology update. We can't actually differentiate between L1 entries and regular entries,
   // so we delete all entries that don't belong to this node in the current OR previous topology.
   Set<Object> keysToL1 = new HashSet<Object>();
   Set<Object> keysToRemove = new HashSet<Object>();

   // gather all keys from data container that belong to the segments that are being removed/moved to L1
   for (InternalCacheEntry ice : dataContainer) {
      Object key = ice.getKey();
      int keySegment = getSegment(key);
      if (segmentsToL1.contains(keySegment)) {
         keysToL1.add(key);
      } else if (!newSegments.contains(keySegment)) {
         keysToRemove.add(key);
      }
   }

   // gather all keys from cache store that belong to the segments that are being removed/moved to L1
   CacheStore cacheStore = getCacheStore();
   if (cacheStore != null) {
      // todo [anistor] extend CacheStore interface to be able to specify a filter when loading keys (ie. keys should belong to desired segments)
      try {
         Set<Object> storedKeys = cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
         for (Object key : storedKeys) {
            int keySegment = getSegment(key);
            if (segmentsToL1.contains(keySegment)) {
               keysToL1.add(key);
            } else if (!newSegments.contains(keySegment)) {
               keysToRemove.add(key);
            }
         }
      } catch (CacheLoaderException e) {
         log.failedLoadingKeysFromCacheStore(e);
      }
   }

   if (configuration.clustering().l1().onRehash()) {
      log.debugf("Moving to L1 state for segments %s of cache %s", segmentsToL1, cacheName);
   } else {
      log.debugf("Removing state for segments %s of cache %s", segmentsToL1, cacheName);
   }
   if (!keysToL1.isEmpty()) {
      try {
         InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(true, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToL1);
         InvocationContext ctx = icc.createNonTxInvocationContext();
         interceptorChain.invoke(ctx, invalidateCmd);

         log.debugf("Invalidated %d keys, data container now has %d keys", keysToL1.size(), dataContainer.size());
         if (trace) log.tracef("Invalidated keys: %s", keysToL1);
      } catch (CacheException e) {
         log.failedToInvalidateKeys(e);
      }
   }

   log.debugf("Removing L1 state for segments not in %s or %s for cache %s", newSegments, segmentsToL1, cacheName);
   if (!keysToRemove.isEmpty()) {
      try {
         InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(false, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToRemove);
         InvocationContext ctx = icc.createNonTxInvocationContext();
         interceptorChain.invoke(ctx, invalidateCmd);

         log.debugf("Invalidated %d keys, data container of cache %s now has %d keys", keysToRemove.size(), cacheName, dataContainer.size());
         if (trace) log.tracef("Invalidated keys: %s", keysToRemove);
      } catch (CacheException e) {
         log.failedToInvalidateKeys(e);
      }
   }

   // todo [anistor] call CacheNotifier.notifyDataRehashed
}
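// getSegment(key) above maps a key to its consistent-hash segment. A plausible one-liner, assuming
// the read consistent hash of the current cache topology is the authority for these ownership
// checks (the real implementation may consult the write CH instead):
private int getSegmentSketch(Object key) {
   return cacheTopology.getReadConsistentHash().getSegment(key);
}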