public void testPreloadOnStart() throws CacheLoaderException {
   for (int i = 0; i < NUM_KEYS; i++) {
      c1.put("k" + i, "v" + i);
   }
   DataContainer dc1 = c1.getAdvancedCache().getDataContainer();
   assert dc1.size(null) == NUM_KEYS;
   DummyInMemoryCacheStore cs = (DummyInMemoryCacheStore)
         TestingUtil.extractComponent(c1, CacheLoaderManager.class).getCacheStore();
   assert cs.loadAllKeys(Collections.emptySet()).size() == NUM_KEYS;

   addClusterEnabledCacheManager();
   EmbeddedCacheManager cm2 = cacheManagers.get(1);
   cm2.defineConfiguration(cacheName, buildConfiguration());
   c2 = cache(1, cacheName);
   waitForClusterToForm();

   DataContainer dc2 = c2.getAdvancedCache().getDataContainer();
   assert dc2.size(null) == NUM_KEYS
         : "Expected all the cache store entries to be preloaded on the second cache";
   for (int i = 0; i < NUM_KEYS; i++) {
      assertOwnershipAndNonOwnership("k" + i, true);
   }
}
protected void assertNumberOfEntries(int cacheIndex) throws Exception {
   CacheStore cacheStore =
         TestingUtil.extractComponent(cache(cacheIndex), CacheLoaderManager.class).getCacheStore();
   assertEquals(2, cacheStore.loadAllKeys(null).size()); // two entries in store

   DataContainer dataContainer = cache(cacheIndex).getAdvancedCache().getDataContainer();
   assertEquals(1, dataContainer.size(null)); // only one entry in memory (the other one was evicted)
}
public static String printCache(Cache cache) {
   DataContainer dataContainer = TestingUtil.extractComponent(cache, DataContainer.class);
   Iterator it = dataContainer.iterator();
   StringBuilder builder = new StringBuilder(cache.getName() + "[");
   while (it.hasNext()) {
      CacheEntry ce = (CacheEntry) it.next();
      builder.append(ce.getKey()).append("=").append(ce.getValue())
            .append(",l=").append(ce.getLifespan()).append("; ");
   }
   builder.append("]");
   return builder.toString();
}
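/*
 * Usage sketch for printCache() above (illustrative only; assumes a started Cache
 * instance named "cache" in scope). Handy for dumping in-memory contents while
 * debugging a test; the output shape is cacheName[key=value,l=lifespan; ...]:
 *
 *    cache.put("k1", "v1");
 *    System.out.println(printCache(cache));   // e.g. myCache[k1=v1,l=-1; ]
 */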
/**
 * In case of remotely originating transactions we don't have a chance to visit the single
 * commands but receive this "batch". We then need the before-apply snapshot of some types to
 * route the cleanup commands to the correct indexes. Note we don't need to visit the
 * CommitCommand as the indexing context is registered as a transaction sync.
 */
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
   final WriteCommand[] writeCommands = command.getModifications();
   final Object[] stateBeforePrepare = new Object[writeCommands.length];

   // Snapshot the previous value of every affected key before the prepare is applied.
   for (int i = 0; i < writeCommands.length; i++) {
      final WriteCommand writeCommand = writeCommands[i];
      if (writeCommand instanceof PutKeyValueCommand) {
         InternalCacheEntry internalCacheEntry =
               dataContainer.get(((PutKeyValueCommand) writeCommand).getKey());
         stateBeforePrepare[i] = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
      } else if (writeCommand instanceof PutMapCommand) {
         stateBeforePrepare[i] = getPreviousValues(((PutMapCommand) writeCommand).getMap().keySet());
      } else if (writeCommand instanceof RemoveCommand) {
         InternalCacheEntry internalCacheEntry =
               dataContainer.get(((RemoveCommand) writeCommand).getKey());
         stateBeforePrepare[i] = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
      } else if (writeCommand instanceof ReplaceCommand) {
         InternalCacheEntry internalCacheEntry =
               dataContainer.get(((ReplaceCommand) writeCommand).getKey());
         stateBeforePrepare[i] = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
      }
   }

   final Object toReturn = super.visitPrepareCommand(ctx, command);

   // Only update the indexing context if the transaction is still valid after the prepare.
   if (ctx.isTransactionValid()) {
      final TransactionContext transactionContext = makeTransactionalEventContext();
      for (int i = 0; i < writeCommands.length; i++) {
         final WriteCommand writeCommand = writeCommands[i];
         if (writeCommand instanceof PutKeyValueCommand) {
            processPutKeyValueCommand((PutKeyValueCommand) writeCommand, ctx, stateBeforePrepare[i], transactionContext);
         } else if (writeCommand instanceof PutMapCommand) {
            processPutMapCommand((PutMapCommand) writeCommand, ctx, (Map<Object, Object>) stateBeforePrepare[i], transactionContext);
         } else if (writeCommand instanceof RemoveCommand) {
            processRemoveCommand((RemoveCommand) writeCommand, ctx, stateBeforePrepare[i], transactionContext);
         } else if (writeCommand instanceof ReplaceCommand) {
            processReplaceCommand((ReplaceCommand) writeCommand, ctx, stateBeforePrepare[i], transactionContext);
         } else if (writeCommand instanceof ClearCommand) {
            processClearCommand((ClearCommand) writeCommand, ctx, transactionContext);
         }
      }
   }
   return toReturn;
}
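/*
 * The three single-key branches in visitPrepareCommand() repeat the same "previous
 * value" lookup. A hypothetical helper capturing that pattern (not part of the
 * original class, shown only to make the snapshot step explicit):
 */
private Object previousValueSketch(Object key) {
   InternalCacheEntry internalCacheEntry = dataContainer.get(key);
   return internalCacheEntry != null ? internalCacheEntry.getValue() : null;
}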
private static void removeInMemoryData(Cache cache) {
   EmbeddedCacheManager mgr = cache.getCacheManager();
   Address a = mgr.getAddress();
   String str;
   if (a == null)
      str = "a non-clustered cache manager";
   else
      str = "a cache manager at address " + a;
   log.debugf("Cleaning data for cache '%s' on %s", cache.getName(), str);

   DataContainer dataContainer = TestingUtil.extractComponent(cache, DataContainer.class);
   log.debugf("removeInMemoryData(): dataContainerBefore == %s", dataContainer.entrySet());
   dataContainer.clear();
   log.debugf("removeInMemoryData(): dataContainerAfter == %s", dataContainer.entrySet());
}
// We need to intercept PrepareCommand, not InvalidateCommand, since the interception takes
// place before EntryWrappingInterceptor and the PrepareCommand is multiplexed into
// InvalidateCommands as part of EntryWrappingInterceptor.
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
   if (ctx.isOriginLocal()) {
      // We can't wait until commit phase to remove the entry locally (invalidations are
      // processed in 1pc on remote nodes, so only the local case matters here). The problem
      // is that while the entry is locked, reads can still take place and we can read an
      // outdated collection after reading the updated entity owning this collection from the
      // DB; when this happens, the version lock on the entity cannot protect us against
      // concurrent modification of the collection. Therefore, we need to remove the entry
      // here (even without the lock!) and let a possible update happen in commit phase.
      for (WriteCommand wc : command.getModifications()) {
         if (wc instanceof InvalidateCommand) {
            // ISPN-5605 InvalidateCommand does not correctly implement getAffectedKeys()
            for (Object key : ((InvalidateCommand) wc).getKeys()) {
               dataContainer.remove(key);
            }
         } else {
            for (Object key : wc.getAffectedKeys()) {
               dataContainer.remove(key);
            }
         }
      }
   } else {
      for (WriteCommand wc : command.getModifications()) {
         if (wc instanceof InvalidateCommand) {
            // ISPN-5605 InvalidateCommand does not correctly implement getAffectedKeys()
            for (Object key : ((InvalidateCommand) wc).getKeys()) {
               if (log.isTraceEnabled()) {
                  log.tracef("Invalidating key %s with lock owner %s", key, ctx.getLockOwner());
               }
               putFromLoadValidator.beginInvalidatingKey(ctx.getLockOwner(), key);
            }
         } else {
            Set<Object> keys = wc.getAffectedKeys();
            if (log.isTraceEnabled()) {
               log.tracef("Invalidating keys %s with lock owner %s", keys, ctx.getLockOwner());
            }
            for (Object key : keys) {
               putFromLoadValidator.beginInvalidatingKey(ctx.getLockOwner(), key);
            }
         }
      }
   }
   return invokeNextInterceptor(ctx, command);
}
public void applyState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
   if (trace) {
      log.tracef("Before applying the received state the data container of cache %s has %d keys",
            cacheName, dataContainer.size());
   }
   for (StateChunk stateChunk : stateChunks) {
      // It's possible to receive a late message, so we must be prepared to ignore segments
      // we no longer own.
      // todo [anistor] this check should be based on topologyId
      if (!cacheTopology.getWriteConsistentHash().getSegmentsForOwner(rpcManager.getAddress())
            .contains(stateChunk.getSegmentId())) {
         log.warnf("Discarding received cache entries for segment %d of cache %s because they do not belong to this node.",
               stateChunk.getSegmentId(), cacheName);
         continue;
      }

      // Notify the inbound task that a chunk of cache entries was received.
      InboundTransferTask inboundTransfer;
      synchronized (this) {
         inboundTransfer = transfersBySegment.get(stateChunk.getSegmentId());
      }
      if (inboundTransfer != null) {
         if (stateChunk.getCacheEntries() != null) {
            doApplyState(sender, stateChunk.getSegmentId(), stateChunk.getCacheEntries());
         }
         inboundTransfer.onStateReceived(stateChunk.getSegmentId(), stateChunk.isLastChunk());
      } else {
         log.warnf("Received unsolicited state from node %s for segment %d of cache %s",
               sender, stateChunk.getSegmentId(), cacheName);
      }
   }
   if (trace) {
      log.tracef("After applying the received state the data container of cache %s has %d keys",
            cacheName, dataContainer.size());
      synchronized (this) {
         log.tracef("Segments not received yet for cache %s: %s", cacheName, transfersBySource);
      }
   }
}
@Override
public void applyState(ConsistentHash consistentHash, Map<Object, InternalCacheValue> state,
                       Address sender, int viewId) throws InterruptedException {
   waitForJoinToStart();

   if (viewId < lastViewId) {
      log.debugf("Rejecting state pushed by node %s for old rehash %d (last view id is %d)",
            sender, viewId, lastViewId);
      return;
   }

   log.debugf("Applying new state from %s: received %d keys", sender, state.size());
   if (trace) log.tracef("Received keys: %s", state.keySet());

   int retryCount = 3; // in case we have issues applying state
   Map<Object, InternalCacheValue> pendingApplications = state;
   for (int i = 0; i < retryCount; i++) {
      pendingApplications = applyStateMap(pendingApplications, true);
      if (pendingApplications.isEmpty()) break;
   }
   // one last go
   if (!pendingApplications.isEmpty()) applyStateMap(pendingApplications, false);

   if (trace) log.tracef("After applying state data container has %d keys", dataContainer.size());
}
public void testKeySetWithEvictedEntriesAndFlags() {
   final int numKeys = 300;
   for (int i = 0; i < numKeys; i++) {
      cache.put(i, i);
   }

   AdvancedCache<Object, Object> flagCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
   DataContainer dc = flagCache.getDataContainer();
   assertFalse("Data Container should not have all keys", numKeys == dc.size());
   Set<Object> keySet = flagCache.keySet();
   assertEquals(dc.size(), keySet.size());
   for (Object key : dc.keySet()) {
      assertTrue("Key: " + key + " was not found!", keySet.contains(key));
   }
}
@Override
public void copyForUpdate(DataContainer container, boolean writeSkewCheck) {
   if (isChanged()) return; // already copied

   // Mark entry as changed.
   setChanged();

   if (writeSkewCheck) {
      // Check for write skew.
      InternalCacheEntry ice = container.get(key);
      Object actualValue = ice == null ? null : ice.getValue();

      // Note that this identity check is intentional. We don't *want* to call
      // actualValue.equals() since that defeats the purpose. The implicit "versioning" we
      // have in REPEATABLE_READ creates a new wrapper "value" instance for every update.
      if (actualValue != null && actualValue != value) {
         String errorMessage = new StringBuilder()
               .append("Detected write skew on key [")
               .append(getKey())
               .append("]. Another process has changed the entry since we last read it!")
               .toString();
         if (log.isWarnEnabled()) log.warn(errorMessage + ". Unable to copy entry for update.");
         throw new CacheException(errorMessage);
      }
   }
   // Make a backup copy.
   oldValue = value;
}
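/*
 * A minimal, self-contained sketch of the identity-based write-skew check above. It
 * assumes, as the comment in copyForUpdate() notes, that every committed update wraps
 * the stored value in a fresh instance, so a reference comparison (!=) detects that
 * another transaction wrote the entry between our read and our write, even when the
 * new value is equals() to the old one. All names here are illustrative.
 */
static void assertNoWriteSkew(Object valueReadAtTxStart, Object valueNowInContainer) {
   if (valueNowInContainer != null && valueNowInContainer != valueReadAtTxStart) {
      throw new IllegalStateException("Write skew: entry changed since it was first read");
   }
}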
@Override
public CompletableFuture<Void> visitGetKeysInGroupCommand(final InvocationContext ctx,
                                                          GetKeysInGroupCommand command) throws Throwable {
   final String groupName = command.getGroupName();
   if (!command.isGroupOwner()) {
      return ctx.continueInvocation();
   }
   final KeyFilter<Object> keyFilter = new CompositeKeyFilter<>(
         new GroupFilter<>(groupName, groupManager),
         new CollectionKeyFilter<>(ctx.getLookedUpEntries().keySet()));
   dataContainer.executeTask(keyFilter, (o, internalCacheEntry) -> {
      synchronized (ctx) {
         // The task can be executed by multiple threads, so we need to synchronize on the context.
         entryFactory.wrapExternalEntry(ctx, internalCacheEntry.getKey(), internalCacheEntry,
               EntryFactory.Wrap.STORE, false);
      }
   });
   return ctx.continueInvocation();
}
@Override
public void markRehashCompleted(int viewId) throws InterruptedException {
   waitForJoinToStart();

   if (viewId < lastViewId) {
      if (trace)
         log.tracef("Ignoring old rehash completed confirmation for view %d, last view is %d",
               viewId, lastViewId);
      return;
   }
   if (viewId > lastViewId) {
      throw new IllegalStateException("Received rehash completed confirmation before confirming it ourselves");
   }

   if (trace)
      log.tracef("Rehash completed on node %s, data container has %d keys",
            getSelf(), dataContainer.size());
   receivedRehashCompletedNotification = true;

   synchronized (rehashInProgressMonitor) {
      // We know for sure the rehash task is waiting for this confirmation, so the CH hasn't
      // been replaced.
      if (trace) log.tracef("Updating last rehashed CH to %s", this.lastSuccessfulCH);
      lastSuccessfulCH = this.consistentHash;
      rehashInProgressMonitor.notifyAll();
   }
   joinCompletedLatch.countDown();
}
public void testIdleExpiryInPut() throws InterruptedException {
   Cache<String, String> cache = cm.getCache();
   long idleTime = IDLE_TIMEOUT;
   cache.put("k", "v", -1, MILLISECONDS, idleTime, MILLISECONDS);

   DataContainer dc = cache.getAdvancedCache().getDataContainer();

   InternalCacheEntry se = dc.get("k", null);
   assert se.getKey().equals("k");
   assert se.getValue().equals("v");
   assert se.getLifespan() == -1;
   assert se.getMaxIdle() == idleTime;
   assert !se.isExpired();
   assert cache.get("k").equals("v");

   Thread.sleep(idleTime + 100);
   assert se.isExpired();
   assert cache.get("k") == null;
}
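/*
 * Timing note for testIdleExpiryInPut(): with lifespan == -1 the entry never expires by
 * age, only by idleness. A sketch of the rule the assertions rely on (illustrative
 * only, not the actual InternalCacheEntry implementation):
 */
static boolean isIdleExpiredSketch(long lastUsedMillis, long maxIdleMillis, long nowMillis) {
   // maxIdle == -1 means "never idle-expire"; otherwise the entry expires once it has
   // gone unused for at least maxIdle milliseconds.
   return maxIdleMillis > -1 && nowMillis - lastUsedMillis >= maxIdleMillis;
}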
private Map<Object, Object> getPreviousValues(Set<Object> keySet) {
   HashMap<Object, Object> previousValues = new HashMap<Object, Object>();
   for (Object key : keySet) {
      InternalCacheEntry internalCacheEntry = dataContainer.get(key);
      Object previousValue = internalCacheEntry != null ? internalCacheEntry.getValue() : null;
      previousValues.put(key, previousValue);
   }
   return previousValues;
}
public void testValuesWithEvictedEntriesAndFlags() {
   final int numKeys = 300;
   for (int i = 0; i < numKeys; i++) {
      cache.put(i, i);
   }

   AdvancedCache<Object, Object> flagCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
   DataContainer dc = flagCache.getDataContainer();
   assertFalse("Data Container should not have all keys", numKeys == dc.size());
   Collection<Object> values = flagCache.values();
   assertEquals(dc.size(), values.size());
   Collection<Object> dcValues = dc.values();
   for (Object dcValue : dcValues) {
      assertTrue("Value: " + dcValue + " was not found!", values.contains(dcValue));
   }
}
public void testPreloading() throws CacheLoaderException {
   Configuration preloadingCfg = cfg.clone();
   preloadingCfg.getCacheLoaderManagerConfig().setPreload(true);
   ((DummyInMemoryCacheStore.Cfg) preloadingCfg.getCacheLoaderManagerConfig().getFirstCacheLoaderConfig())
         .setStoreName("preloadingCache");

   cm.defineConfiguration("preloadingCache", preloadingCfg);
   Cache preloadingCache = cm.getCache("preloadingCache");
   CacheStore preloadingStore =
         TestingUtil.extractComponent(preloadingCache, CacheLoaderManager.class).getCacheStore();

   assert preloadingCache.getConfiguration().getCacheLoaderManagerConfig().isPreload();

   assertNotInCacheAndStore(preloadingCache, preloadingStore, "k1", "k2", "k3", "k4");

   preloadingCache.put("k1", "v1");
   preloadingCache.put("k2", "v2", lifespan, MILLISECONDS);
   preloadingCache.put("k3", "v3");
   preloadingCache.put("k4", "v4", lifespan, MILLISECONDS);

   for (int i = 1; i < 5; i++) {
      if (i % 2 == 1)
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i);
      else
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i, lifespan);
   }

   DataContainer c = preloadingCache.getAdvancedCache().getDataContainer();
   assert c.size(null) == 4;
   preloadingCache.stop();
   assert c.size(null) == 0;

   preloadingCache.start();
   assert preloadingCache.getConfiguration().getCacheLoaderManagerConfig().isPreload();

   c = preloadingCache.getAdvancedCache().getDataContainer();
   assert c.size(null) == 4;
   for (int i = 1; i < 5; i++) {
      if (i % 2 == 1)
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i);
      else
         assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i, lifespan);
   }
}
private void commitClearCommand(DataContainer<Object, Object> dataContainer,
                                ClearCacheEntry<Object, Object> cacheEntry,
                                InvocationContext context, FlagAffectedCommand command) {
   List<InternalCacheEntry<Object, Object>> copyEntries = new ArrayList<>(dataContainer.entrySet());
   cacheEntry.commit(dataContainer, null);
   for (InternalCacheEntry entry : copyEntries) {
      notifier.notifyCacheEntryRemoved(entry.getKey(), entry.getValue(), entry.getMetadata(),
            false, context, command);
   }
}
public void testEntrySetWithEvictedEntriesAndFlags() {
   final int numKeys = 300;
   for (int i = 0; i < numKeys; i++) {
      cache.put(i, i);
   }

   AdvancedCache<Object, Object> flagCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
   DataContainer dc = flagCache.getDataContainer();
   assertFalse("Data Container should not have all keys", numKeys == dc.size());
   Set<Map.Entry<Object, Object>> entrySet = flagCache.entrySet();
   assertEquals(dc.size(), entrySet.size());

   Set<InternalCacheEntry> entries = dc.entrySet();
   Map<Object, Object> map = new HashMap<Object, Object>(entrySet.size());
   for (Map.Entry<Object, Object> entry : entrySet) {
      map.put(entry.getKey(), entry.getValue());
   }

   for (InternalCacheEntry entry : entries) {
      assertEquals("Key/Value mismatch!", entry.getValue(), map.get(entry.getKey()));
   }
}
/**
 * Attempts to perform the L1 update and set the value. If the L1 update was marked as being
 * skipped this will instead just set the value to release blockers. A null value can be
 * provided, which will not run the L1 update but will just alert other waiters that a null
 * was given.
 */
public void runL1UpdateIfPossible(InternalCacheEntry ice) {
   Object value = null;
   try {
      if (ice != null) {
         value = ice.getValue();
         Object key;
         if (sync.attemptUpdateToRunning() && !dc.containsKey((key = ice.getKey()))) {
            // Acquire the transfer lock to ensure that we don't have a rehash and become an
            // owner in the middle of the update; note we check the ownership in the following if.
            stateTransferLock.acquireSharedTopologyLock();
            try {
               // Now we can update the L1 if there isn't a value already there and we haven't
               // now become a write owner.
               if (!dc.containsKey(key) && !cdl.localNodeIsOwner(key)) {
                  log.tracef("Caching remotely retrieved entry for key %s in L1", key);
                  long lifespan = ice.getLifespan() < 0
                        ? l1Lifespan
                        : Math.min(ice.getLifespan(), l1Lifespan);
                  // Make a copy of the metadata stored internally, adjust lifespan/maxIdle
                  // settings and send them a modification.
                  Metadata newMetadata = ice.getMetadata().builder()
                        .lifespan(lifespan).maxIdle(-1).build();
                  dc.put(key, ice.getValue(), newMetadata);
               } else {
                  log.tracef("Data container contained value after rehash for key %s", key);
               }
            } finally {
               stateTransferLock.releaseSharedTopologyLock();
            }
         }
      }
   } finally {
      sync.innerSet(value);
   }
}
/**
 * Tests whether a key is in the L1 cache if L1 is enabled.
 *
 * @param key key to check for
 * @return true if the key is not in L1 or L1 caching is not enabled; false if the key is in L1
 */
private boolean isNotInL1(Object key) {
   return !isL1CacheEnabled || !dataContainer.containsKey(key);
}
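/*
 * Sketch of a typical call site for isNotInL1() (hypothetical; the surrounding names
 * isOwner() and remoteGet() are assumptions, not the actual interceptor code): a read
 * only goes remote when the key is neither owned locally nor already cached in L1.
 *
 *    if (!isOwner(key) && isNotInL1(key)) {
 *       value = remoteGet(ctx, key);
 *    }
 */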
private void invalidateSegments(Set<Integer> newSegments, Set<Integer> segmentsToL1) {
   // The actual owners keep track of the nodes that hold a key in L1 ("requestors") and
   // they invalidate the key on every requestor after a change.
   // But this information is only present on the owners where the ClusteredGetKeyValueCommand
   // got executed - if the requestor only contacted one owner, and that node is no longer an
   // owner (perhaps because it left the cluster), the other owners will not know to invalidate
   // the key on that requestor. Furthermore, the requestors list is not copied to the new
   // owners during state transfers.
   // To compensate for this, we delete all L1 entries in segments that changed ownership during
   // this topology update. We can't actually differentiate between L1 entries and regular
   // entries, so we delete all entries that don't belong to this node in the current OR
   // previous topology.
   Set<Object> keysToL1 = new HashSet<Object>();
   Set<Object> keysToRemove = new HashSet<Object>();

   // Gather all keys from the data container that belong to the segments that are being
   // removed/moved to L1.
   for (InternalCacheEntry ice : dataContainer) {
      Object key = ice.getKey();
      int keySegment = getSegment(key);
      if (segmentsToL1.contains(keySegment)) {
         keysToL1.add(key);
      } else if (!newSegments.contains(keySegment)) {
         keysToRemove.add(key);
      }
   }

   // Gather all keys from the cache store that belong to the segments that are being
   // removed/moved to L1.
   CacheStore cacheStore = getCacheStore();
   if (cacheStore != null) {
      // todo [anistor] extend CacheStore interface to be able to specify a filter when loading
      // keys (ie. keys should belong to desired segments)
      try {
         Set<Object> storedKeys =
               cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
         for (Object key : storedKeys) {
            int keySegment = getSegment(key);
            if (segmentsToL1.contains(keySegment)) {
               keysToL1.add(key);
            } else if (!newSegments.contains(keySegment)) {
               keysToRemove.add(key);
            }
         }
      } catch (CacheLoaderException e) {
         log.failedLoadingKeysFromCacheStore(e);
      }
   }

   if (configuration.clustering().l1().onRehash()) {
      log.debugf("Moving to L1 state for segments %s of cache %s", segmentsToL1, cacheName);
   } else {
      log.debugf("Removing state for segments %s of cache %s", segmentsToL1, cacheName);
   }

   if (!keysToL1.isEmpty()) {
      try {
         InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(
               true, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToL1);
         InvocationContext ctx = icc.createNonTxInvocationContext();
         interceptorChain.invoke(ctx, invalidateCmd);
         log.debugf("Invalidated %d keys, data container now has %d keys",
               keysToL1.size(), dataContainer.size());
         if (trace) log.tracef("Invalidated keys: %s", keysToL1);
      } catch (CacheException e) {
         log.failedToInvalidateKeys(e);
      }
   }

   log.debugf("Removing L1 state for segments not in %s or %s for cache %s",
         newSegments, segmentsToL1, cacheName);
   if (!keysToRemove.isEmpty()) {
      try {
         InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(
               false, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToRemove);
         InvocationContext ctx = icc.createNonTxInvocationContext();
         interceptorChain.invoke(ctx, invalidateCmd);
         log.debugf("Invalidated %d keys, data container of cache %s now has %d keys",
               keysToRemove.size(), cacheName, dataContainer.size());
         if (trace) log.tracef("Invalidated keys: %s", keysToRemove);
      } catch (CacheException e) {
         log.failedToInvalidateKeys(e);
      }
   }

   // todo [anistor] call CacheNotifier.notifyDataRehashed
}
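/*
 * The key-classification rule used twice in invalidateSegments() (once for the data
 * container, once for the store), factored into a hypothetical helper for clarity.
 * getSegment() is the same segment mapping used above; the enum and the helper are
 * illustrative only, not part of the original class.
 */
enum KeyDisposition { MOVE_TO_L1, REMOVE, KEEP }

private KeyDisposition classify(Object key, Set<Integer> newSegments, Set<Integer> segmentsToL1) {
   int keySegment = getSegment(key);
   if (segmentsToL1.contains(keySegment)) return KeyDisposition.MOVE_TO_L1;
   if (!newSegments.contains(keySegment)) return KeyDisposition.REMOVE;
   return KeyDisposition.KEEP;
}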
@SuppressWarnings("UnusedParameters") protected EntryVersion getEntryVersion(InvocationContext ctx, Object key) { CacheEntry cacheEntry = dataContainer.get(key); return (cacheEntry != null) ? cacheEntry.getVersion() : null; }