@Override
public final void store(InternalCacheEntry ed) throws CacheLoaderException {
  if (trace) {
    log.tracef("store(%s)", ed);
  }
  if (ed == null) {
    return;
  }
  if (ed.canExpire() && ed.isExpired(System.currentTimeMillis())) {
    if (containsKey(ed.getKey())) {
      if (trace) {
        log.tracef("Entry %s is expired! Removing!", ed);
      }
      remove(ed.getKey());
    } else {
      if (trace) {
        log.tracef("Entry %s is expired! Not doing anything.", ed);
      }
    }
    return;
  }

  L keyHashCode = getLockFromKey(ed.getKey());
  lockForWriting(keyHashCode);
  try {
    storeLockSafe(ed, keyHashCode);
  } finally {
    unlock(keyHashCode);
  }
  if (trace) {
    log.tracef("exit store(%s)", ed);
  }
}
public void compact() {
  for (InternalCacheEntry e : dataContainer) {
    if (e.getKey() instanceof MarshalledValue) {
      ((MarshalledValue) e.getKey()).compact(true, true);
    }
    if (e.getValue() instanceof MarshalledValue) {
      ((MarshalledValue) e.getValue()).compact(true, true);
    }
  }
}
@Override
public void notifyCacheEntriesEvicted(Collection<InternalCacheEntry<? extends K, ? extends V>> entries,
    InvocationContext ctx, FlagAffectedCommand command) {
  if (!entries.isEmpty()) {
    if (isNotificationAllowed(command, cacheEntriesEvictedListeners)) {
      EventImpl<K, V> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
      Map<K, V> evictedKeysAndValues = transformCollectionToMap(entries,
          new InfinispanCollections.MapMakerFunction<K, V, InternalCacheEntry<? extends K, ? extends V>>() {
            @Override
            public Map.Entry<K, V> transform(final InternalCacheEntry<? extends K, ? extends V> input) {
              return new Map.Entry<K, V>() {
                @Override
                public K getKey() {
                  return input.getKey();
                }

                @Override
                public V getValue() {
                  return input.getValue();
                }

                @Override
                public V setValue(V value) {
                  throw new UnsupportedOperationException();
                }
              };
            }
          });

      e.setEntries(evictedKeysAndValues);
      for (CacheEntryListenerInvocation<K, V> listener : cacheEntriesEvictedListeners) listener.invoke(e);
    }

    // For backward compat
    if (isNotificationAllowed(command, cacheEntryEvictedListeners)) {
      for (InternalCacheEntry<? extends K, ? extends V> ice : entries) {
        EventImpl<K, V> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
        e.setKey(ice.getKey());
        e.setValue(ice.getValue());
        boolean isLocalNodePrimaryOwner = clusteringDependentLogic.localNodeIsPrimaryOwner(ice.getKey());
        for (CacheEntryListenerInvocation<K, V> listener : cacheEntryEvictedListeners)
          listener.invoke(e, isLocalNodePrimaryOwner);
      }
    }
  }
}
@Override
public void put(K k, V v, Metadata metadata) {
  boolean l1Entry = false;
  if (metadata instanceof L1Metadata) {
    metadata = ((L1Metadata) metadata).metadata();
    l1Entry = true;
  }
  InternalCacheEntry<K, V> e = entries.get(k);

  if (trace) {
    log.tracef("Creating new ICE for writing. Existing=%s, metadata=%s, new value=%s", e, metadata, toStr(v));
  }
  final InternalCacheEntry<K, V> copy;
  if (l1Entry) {
    copy = entryFactory.createL1(k, v, metadata);
  } else if (e != null) {
    copy = entryFactory.update(e, v, metadata);
  } else {
    // this is a brand-new entry
    copy = entryFactory.create(k, v, metadata);
  }

  if (trace) log.tracef("Store %s in container", copy);

  entries.compute(copy.getKey(), (key, entry) -> {
    activator.onUpdate(key, entry == null);
    return copy;
  });
}
@Override
protected String toString(InternalCacheEntry ice) {
  if (ice == null) return null;
  StringBuilder sb = new StringBuilder(256);
  sb.append(ice.getClass().getSimpleName());
  sb.append("[key=").append(ice.getKey()).append(", value=").append(ice.getValue());
  sb.append(", created=").append(ice.getCreated()).append(", isCreated=").append(ice.isCreated());
  sb.append(", lastUsed=").append(ice.getLastUsed()).append(", isChanged=").append(ice.isChanged());
  sb.append(", expires=").append(ice.getExpiryTime()).append(", isExpired=").append(ice.isExpired(System.currentTimeMillis()));
  sb.append(", canExpire=").append(ice.canExpire()).append(", isEvicted=").append(ice.isEvicted());
  sb.append(", isRemoved=").append(ice.isRemoved()).append(", isValid=").append(ice.isValid());
  sb.append(", lifespan=").append(ice.getLifespan()).append(", maxIdle=").append(ice.getMaxIdle());
  return sb.append(']').toString();
}
private void testStoredEntry(InternalCacheEntry entry, Object expectedValue, long expectedLifespan,
    String src, Object key) {
  assert entry != null : src + " entry for key " + key + " should NOT be null";
  assert entry.getValue().equals(expectedValue) : src + " should contain value " + expectedValue
      + " under key " + entry.getKey() + " but was " + entry.getValue() + ". Entry is " + entry;
  assert entry.getLifespan() == expectedLifespan : src + " expected lifespan for key " + key
      + " to be " + expectedLifespan + " but was " + entry.getLifespan() + ". Entry is " + entry;
}
private Object realRemoteGet(InvocationContext ctx, Object key, boolean storeInL1, boolean isWrite)
    throws Throwable {
  if (trace) log.tracef("Doing a remote get for key %s", key);

  boolean acquireRemoteLock = false;
  if (ctx.isInTxScope()) {
    TxInvocationContext txContext = (TxInvocationContext) ctx;
    acquireRemoteLock = isWrite && isPessimisticCache && !txContext.getAffectedKeys().contains(key);
  }
  // attempt a remote lookup
  InternalCacheEntry ice = dm.retrieveFromRemoteSource(key, ctx, acquireRemoteLock);

  if (acquireRemoteLock) {
    ((TxInvocationContext) ctx).addAffectedKey(key);
  }

  if (ice != null) {
    if (storeInL1) {
      if (isL1CacheEnabled) {
        if (trace) log.tracef("Caching remotely retrieved entry for key %s in L1", key);
        // This should be fail-safe
        try {
          long lifespan = ice.getLifespan() < 0 ? configuration.getL1Lifespan()
              : Math.min(ice.getLifespan(), configuration.getL1Lifespan());
          PutKeyValueCommand put =
              cf.buildPutKeyValueCommand(ice.getKey(), ice.getValue(), lifespan, -1, ctx.getFlags());
          lockAndWrap(ctx, key, ice);
          invokeNextInterceptor(ctx, put);
        } catch (Exception e) {
          // Couldn't store in L1 for some reason. But don't fail the transaction!
          log.infof("Unable to store entry %s in L1 cache", key);
          log.debug("Inability to store in L1 caused by", e);
        }
      } else {
        CacheEntry ce = ctx.lookupEntry(key);
        if (ce == null || ce.isNull() || ce.isLockPlaceholder() || ce.getValue() == null) {
          if (ce != null && ce.isChanged()) {
            ce.setValue(ice.getValue());
          } else {
            if (isWrite)
              lockAndWrap(ctx, key, ice);
            else
              ctx.putLookedUpEntry(key, ice);
          }
        }
      }
    } else {
      if (trace) log.tracef("Not caching remotely retrieved entry for key %s in L1", key);
    }
    return ice.getValue();
  }
  return null;
}
private void commitClearCommand(DataContainer<Object, Object> dataContainer,
    ClearCacheEntry<Object, Object> cacheEntry, InvocationContext context, FlagAffectedCommand command) {
  List<InternalCacheEntry<Object, Object>> copyEntries = new ArrayList<>(dataContainer.entrySet());
  cacheEntry.commit(dataContainer, null);
  for (InternalCacheEntry entry : copyEntries) {
    notifier.notifyCacheEntryRemoved(entry.getKey(), entry.getValue(), entry.getMetadata(), false, context, command);
  }
}
private MVCCEntry wrapEntry(InvocationContext ctx, Object key) {
  // look in the invocation context first, then fall back to the data container
  CacheEntry cacheEntry = getFromContext(ctx, key);
  MVCCEntry mvccEntry = null;
  if (cacheEntry != null) {
    mvccEntry = wrapMvccEntryForPut(ctx, key, cacheEntry);
  } else {
    InternalCacheEntry ice = getFromContainer(key);
    if (ice != null) {
      mvccEntry = wrapInternalCacheEntryForPut(ctx, ice.getKey(), ice);
    }
  }
  if (mvccEntry != null) mvccEntry.copyForUpdate(container, localModeWriteSkewCheck);
  return mvccEntry;
}
public Object getValueAt(int rowIndex, int columnIndex) {
  if (data.size() > rowIndex) {
    InternalCacheEntry e = data.get(rowIndex);
    switch (columnIndex) {
      case 0:
        return e.getKey();
      case 1:
        return e.getValue();
      case 2:
        return e.getLifespan();
      case 3:
        return e.getMaxIdle();
    }
  }
  return "NULL!";
}
public void testIdleExpiryInPut() throws InterruptedException {
  Cache<String, String> cache = cm.getCache();
  long idleTime = IDLE_TIMEOUT;
  cache.put("k", "v", -1, MILLISECONDS, idleTime, MILLISECONDS);

  DataContainer dc = cache.getAdvancedCache().getDataContainer();

  InternalCacheEntry se = dc.get("k", null);
  assert se.getKey().equals("k");
  assert se.getValue().equals("v");
  assert se.getLifespan() == -1;
  assert se.getMaxIdle() == idleTime;
  assert !se.isExpired();
  assert cache.get("k").equals("v");

  Thread.sleep(idleTime + 100);
  assert se.isExpired();
  assert cache.get("k") == null;
}
@Override
public void notifyCacheEntriesEvicted(Collection<InternalCacheEntry> entries, InvocationContext ctx,
    FlagAffectedCommand command) {
  if (!entries.isEmpty()) {
    if (isNotificationAllowed(command, cacheEntriesEvictedListeners)) {
      EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
      Map<Object, Object> evictedKeysAndValues = transformCollectionToMap(entries,
          new InfinispanCollections.MapMakerFunction<Object, Object, InternalCacheEntry>() {
            @Override
            public Map.Entry<Object, Object> transform(final InternalCacheEntry input) {
              return new Map.Entry<Object, Object>() {
                @Override
                public Object getKey() {
                  return input.getKey();
                }

                @Override
                public Object getValue() {
                  return input.getValue();
                }

                @Override
                public Object setValue(Object value) {
                  throw new UnsupportedOperationException();
                }
              };
            }
          });

      e.setEntries(evictedKeysAndValues);
      for (ListenerInvocation listener : cacheEntriesEvictedListeners) listener.invoke(e);
    }

    // For backward compat
    if (isNotificationAllowed(command, cacheEntryEvictedListeners)) {
      for (InternalCacheEntry ice : entries) {
        EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED);
        e.setKey(ice.getKey());
        e.setValue(ice.getValue());
        for (ListenerInvocation listener : cacheEntryEvictedListeners) listener.invoke(e);
      }
    }
  }
}
public void testContainerIteration() {
  dc.put("k1", "v", 6000000, -1);
  dc.put("k2", "v", -1, -1);
  dc.put("k3", "v", -1, 6000000);
  dc.put("k4", "v", 6000000, 6000000);

  Set expected = new HashSet();
  expected.add("k1");
  expected.add("k2");
  expected.add("k3");
  expected.add("k4");

  for (InternalCacheEntry ice : dc) {
    assert expected.remove(ice.getKey());
  }

  assert expected.isEmpty() : "Did not see keys " + expected + " in iterator!";
}
public void testGetsFromMultipleSrcs() throws Exception {
  assert cs.load("k1") == null;
  assert cs.load("k2") == null;
  assert cs.load("k3") == null;
  assert cs.load("k4") == null;

  // k1 is on store1
  store1.store(InternalEntryFactory.create("k1", "v1"));
  assertEquals(cs.loadAll().size(), 1);

  // k2 is on store2
  store2.store(InternalEntryFactory.create("k2", "v2"));
  assertEquals(cs.loadAll().size(), 2);

  // k3 is on both
  store1.store(InternalEntryFactory.create("k3", "v3"));
  assertEquals(cs.loadAll().size(), 3);
  store2.store(InternalEntryFactory.create("k3", "v3"));
  assertEquals(cs.loadAll().size(), 3);

  // k4 is on neither
  assert cs.load("k1").getValue().equals("v1");
  assert cs.load("k2").getValue().equals("v2");
  assert cs.load("k3").getValue().equals("v3");
  assert cs.load("k4") == null;

  Set<InternalCacheEntry> all = cs.loadAll();
  assertEquals(all.size(), 3);

  Set<Object> expectedKeys = new HashSet<Object>();
  expectedKeys.add("k1");
  expectedKeys.add("k2");
  expectedKeys.add("k3");
  for (InternalCacheEntry a : all) assert expectedKeys.remove(a.getKey());
  assert expectedKeys.isEmpty();

  cs.remove("k3");
  assert !store1.containsKey("k3");
  assert !store2.containsKey("k3");
}
private void store0(InternalCacheEntry entry, Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap)
    throws IOException, UnsupportedKeyTypeException {
  Object key = entry.getKey();
  if (trace) log.tracef("store(\"%s\") ", key);
  String cassandraKey = hashKey(key);
  try {
    addMutation(mutationMap, ByteBufferUtil.bytes(cassandraKey), config.entryColumnFamily,
        ByteBuffer.wrap(entryColumnPath.getColumn()), ByteBuffer.wrap(marshall(entry)));
    if (entry.canExpire()) {
      addExpiryEntry(cassandraKey, entry.getExpiryTime(), mutationMap);
    }
  } catch (InterruptedException ie) {
    if (trace) log.trace("Interrupted while trying to marshall entry");
    Thread.currentThread().interrupt();
  }
}
public void testEntrySetWithEvictedEntriesAndFlags() {
  final int numKeys = 300;
  for (int i = 0; i < numKeys; i++) {
    cache.put(i, i);
  }

  AdvancedCache<Object, Object> flagCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
  DataContainer dc = flagCache.getDataContainer();
  assertFalse("Data Container should not have all keys", numKeys == dc.size());
  Set<Map.Entry<Object, Object>> entrySet = flagCache.entrySet();
  assertEquals(dc.size(), entrySet.size());

  Set<InternalCacheEntry> entries = dc.entrySet();
  Map<Object, Object> map = new HashMap<Object, Object>(entrySet.size());
  for (Map.Entry<Object, Object> entry : entrySet) {
    map.put(entry.getKey(), entry.getValue());
  }

  for (InternalCacheEntry entry : entries) {
    assertEquals("Key/Value mismatch!", entry.getValue(), map.get(entry.getKey()));
  }
}
/**
 * Attempts to perform the L1 update and set the value. If the L1 update was marked as being
 * skipped this will instead just set the value to release blockers. A null value can be provided
 * which will not run the L1 update but will just alert other waiters that a null was given.
 */
public void runL1UpdateIfPossible(InternalCacheEntry ice) {
  Object value = null;
  try {
    if (ice != null) {
      value = ice.getValue();
      Object key;
      if (sync.attemptUpdateToRunning() && !dc.containsKey((key = ice.getKey()))) {
        // Acquire the transfer lock to ensure that we don't have a rehash and change to become
        // an owner; note we check the ownership in the following if
        stateTransferLock.acquireSharedTopologyLock();
        try {
          // Now we can update the L1 if there isn't a value already there and we haven't now
          // become a write owner
          if (!dc.containsKey(key) && !cdl.localNodeIsOwner(key)) {
            log.tracef("Caching remotely retrieved entry for key %s in L1", key);
            long lifespan = ice.getLifespan() < 0 ? l1Lifespan : Math.min(ice.getLifespan(), l1Lifespan);

            // Make a copy of the metadata stored internally, adjust
            // lifespan/maxIdle settings and send them a modification
            Metadata newMetadata = ice.getMetadata().builder().lifespan(lifespan).maxIdle(-1).build();
            dc.put(key, ice.getValue(), newMetadata);
          } else {
            log.tracef("Data container contained value after rehash for key %s", key);
          }
        } finally {
          stateTransferLock.releaseSharedTopologyLock();
        }
      }
    }
  } finally {
    sync.innerSet(value);
  }
}
private void doApplyState(Address sender, int segmentId, Collection<InternalCacheEntry> cacheEntries) {
  log.debugf("Applying new state for segment %d of cache %s from node %s: received %d cache entries",
      segmentId, cacheName, sender, cacheEntries.size());
  if (trace) {
    List<Object> keys = new ArrayList<Object>(cacheEntries.size());
    for (InternalCacheEntry e : cacheEntries) {
      keys.add(e.getKey());
    }
    log.tracef("Received keys %s for segment %d of cache %s from node %s", keys, segmentId, cacheName, sender);
  }

  // CACHE_MODE_LOCAL avoids handling by StateTransferInterceptor and any potential locks in StateTransferLock
  EnumSet<Flag> flags = EnumSet.of(PUT_FOR_STATE_TRANSFER, CACHE_MODE_LOCAL, IGNORE_RETURN_VALUES,
      SKIP_REMOTE_LOOKUP, SKIP_SHARED_CACHE_STORE, SKIP_OWNERSHIP_CHECK, SKIP_XSITE_BACKUP);
  for (InternalCacheEntry e : cacheEntries) {
    try {
      InvocationContext ctx;
      if (transactionManager != null) {
        // cache is transactional
        transactionManager.begin();
        Transaction transaction = transactionManager.getTransaction();
        ctx = icc.createInvocationContext(transaction);
        ((TxInvocationContext) ctx).setImplicitTransaction(true);
      } else {
        // non-tx cache
        ctx = icc.createSingleKeyNonTxInvocationContext();
      }

      PutKeyValueCommand put = useVersionedPut
          ? commandsFactory.buildVersionedPutKeyValueCommand(e.getKey(), e.getValue(), e.getLifespan(),
              e.getMaxIdle(), e.getVersion(), flags)
          : commandsFactory.buildPutKeyValueCommand(e.getKey(), e.getValue(), e.getLifespan(),
              e.getMaxIdle(), flags);

      boolean success = false;
      try {
        interceptorChain.invoke(ctx, put);
        success = true;
      } finally {
        if (ctx.isInTxScope()) {
          if (success) {
            ((LocalTransaction) ((TxInvocationContext) ctx).getCacheTransaction()).setFromStateTransfer(true);
            try {
              transactionManager.commit();
            } catch (Throwable ex) {
              log.errorf(ex, "Could not commit transaction created by state transfer of key %s", e.getKey());
              if (transactionManager.getTransaction() != null) {
                transactionManager.rollback();
              }
            }
          } else {
            transactionManager.rollback();
          }
        }
      }
    } catch (Exception ex) {
      log.problemApplyingStateForKey(ex.getMessage(), e.getKey(), ex);
    }
  }
  log.debugf("Finished applying state for segment %d of cache %s", segmentId, cacheName);
}
private void invalidateSegments(Set<Integer> newSegments, Set<Integer> segmentsToL1) {
  // The actual owners keep track of the nodes that hold a key in L1 ("requestors") and
  // they invalidate the key on every requestor after a change.
  // But this information is only present on the owners where the ClusteredGetKeyValueCommand
  // got executed - if the requestor only contacted one owner, and that node is no longer an owner
  // (perhaps because it left the cluster), the other owners will not know to invalidate the key
  // on that requestor. Furthermore, the requestors list is not copied to the new owners during
  // state transfers.
  // To compensate for this, we delete all L1 entries in segments that changed ownership during
  // this topology update. We can't actually differentiate between L1 entries and regular entries,
  // so we delete all entries that don't belong to this node in the current OR previous topology.
  Set<Object> keysToL1 = new HashSet<Object>();
  Set<Object> keysToRemove = new HashSet<Object>();

  // gather all keys from data container that belong to the segments that are being removed/moved to L1
  for (InternalCacheEntry ice : dataContainer) {
    Object key = ice.getKey();
    int keySegment = getSegment(key);
    if (segmentsToL1.contains(keySegment)) {
      keysToL1.add(key);
    } else if (!newSegments.contains(keySegment)) {
      keysToRemove.add(key);
    }
  }

  // gather all keys from cache store that belong to the segments that are being removed/moved to L1
  CacheStore cacheStore = getCacheStore();
  if (cacheStore != null) {
    // todo [anistor] extend CacheStore interface to be able to specify a filter when loading keys
    // (ie. keys should belong to desired segments)
    try {
      Set<Object> storedKeys = cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
      for (Object key : storedKeys) {
        int keySegment = getSegment(key);
        if (segmentsToL1.contains(keySegment)) {
          keysToL1.add(key);
        } else if (!newSegments.contains(keySegment)) {
          keysToRemove.add(key);
        }
      }
    } catch (CacheLoaderException e) {
      log.failedLoadingKeysFromCacheStore(e);
    }
  }

  if (configuration.clustering().l1().onRehash()) {
    log.debugf("Moving to L1 state for segments %s of cache %s", segmentsToL1, cacheName);
  } else {
    log.debugf("Removing state for segments %s of cache %s", segmentsToL1, cacheName);
  }
  if (!keysToL1.isEmpty()) {
    try {
      InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(true,
          EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToL1);
      InvocationContext ctx = icc.createNonTxInvocationContext();
      interceptorChain.invoke(ctx, invalidateCmd);

      log.debugf("Invalidated %d keys, data container now has %d keys", keysToL1.size(), dataContainer.size());
      if (trace) log.tracef("Invalidated keys: %s", keysToL1);
    } catch (CacheException e) {
      log.failedToInvalidateKeys(e);
    }
  }

  log.debugf("Removing L1 state for segments not in %s or %s for cache %s", newSegments, segmentsToL1, cacheName);
  if (!keysToRemove.isEmpty()) {
    try {
      InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(false,
          EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToRemove);
      InvocationContext ctx = icc.createNonTxInvocationContext();
      interceptorChain.invoke(ctx, invalidateCmd);

      log.debugf("Invalidated %d keys, data container of cache %s now has %d keys",
          keysToRemove.size(), cacheName, dataContainer.size());
      if (trace) log.tracef("Invalidated keys: %s", keysToRemove);
    } catch (CacheException e) {
      log.failedToInvalidateKeys(e);
    }
  }

  // todo [anistor] call CacheNotifier.notifyDataRehashed
}