   private void invalidateSegments(Set<Integer> newSegments, Set<Integer> segmentsToL1) {
      // The actual owners keep track of the nodes that hold a key in L1 ("requestors") and
      // they invalidate the key on every requestor after a change.
      // But this information is only present on the owners where the ClusteredGetKeyValueCommand
      // got executed - if the requestor only contacted one owner, and that node is no longer an owner
      // (perhaps because it left the cluster), the other owners will not know to invalidate the key
      // on that requestor. Furthermore, the requestors list is not copied to the new owners during
      // state transfers.
      // To compensate for this, we delete all L1 entries in segments that changed ownership during
      // this topology update. We can't actually differentiate between L1 entries and regular entries,
      // so we delete all entries that don't belong to this node in the current OR previous topology.
      Set<Object> keysToL1 = new HashSet<Object>();
      Set<Object> keysToRemove = new HashSet<Object>();

      // gather all keys from data container that belong to the segments that are being removed/moved to L1
      for (InternalCacheEntry ice : dataContainer) {
         Object key = ice.getKey();
         int keySegment = getSegment(key);
         if (segmentsToL1.contains(keySegment)) {
            keysToL1.add(key);
         } else if (!newSegments.contains(keySegment)) {
            keysToRemove.add(key);
         }
      }

      // gather all keys from cache store that belong to the segments that are being removed/moved to L1
      CacheStore cacheStore = getCacheStore();
      if (cacheStore != null) {
         //todo [anistor] extend CacheStore interface to be able to specify a filter when loading keys (ie. keys should belong to desired segments)
         try {
            Set<Object> storedKeys = cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
            for (Object key : storedKeys) {
               int keySegment = getSegment(key);
               if (segmentsToL1.contains(keySegment)) {
                  keysToL1.add(key);
               } else if (!newSegments.contains(keySegment)) {
                  keysToRemove.add(key);
               }
            }
         } catch (CacheLoaderException e) {
            log.failedLoadingKeysFromCacheStore(e);
         }
      }

      if (configuration.clustering().l1().onRehash()) {
         log.debugf("Moving to L1 state for segments %s of cache %s", segmentsToL1, cacheName);
      } else {
         log.debugf("Removing state for segments %s of cache %s", segmentsToL1, cacheName);
      }

      if (!keysToL1.isEmpty()) {
         try {
            InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(true, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToL1);
            InvocationContext ctx = icc.createNonTxInvocationContext();
            interceptorChain.invoke(ctx, invalidateCmd);

            log.debugf("Invalidated %d keys, data container now has %d keys", keysToL1.size(), dataContainer.size());
            if (trace) log.tracef("Invalidated keys: %s", keysToL1);
         } catch (CacheException e) {
            log.failedToInvalidateKeys(e);
         }
      }

      log.debugf("Removing L1 state for segments not in %s or %s for cache %s", newSegments, segmentsToL1, cacheName);

      if (!keysToRemove.isEmpty()) {
         try {
            InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateFromL1Command(false, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToRemove);
            InvocationContext ctx = icc.createNonTxInvocationContext();
            interceptorChain.invoke(ctx, invalidateCmd);

            log.debugf("Invalidated %d keys, data container of cache %s now has %d keys", keysToRemove.size(), cacheName, dataContainer.size());
            if (trace) log.tracef("Invalidated keys: %s", keysToRemove);
         } catch (CacheException e) {
            log.failedToInvalidateKeys(e);
         }
      }

      //todo [anistor] call CacheNotifier.notifyDataRehashed
   }
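   // NOTE: hedged sketch, not part of the original source. The loops above rely on a getSegment(key)
   // helper to decide whether a key stays, moves to L1, or is removed. Assuming the surrounding class
   // keeps the current CacheTopology in a 'cacheTopology' field (an assumption, not shown here), the
   // helper would look roughly like this:
   //
   //    private int getSegment(Object key) {
   //       // the read consistent hash maps each key to exactly one segment id, so ownership
   //       // decisions above can be made per segment rather than per key
   //       return cacheTopology.getReadConsistentHash().getSegment(key);
   //    }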
   private void doApplyState(Address sender, int segmentId, Collection<InternalCacheEntry> cacheEntries) {
      log.debugf("Applying new state for segment %d of cache %s from node %s: received %d cache entries", segmentId, cacheName, sender, cacheEntries.size());
      if (trace) {
         List<Object> keys = new ArrayList<Object>(cacheEntries.size());
         for (InternalCacheEntry e : cacheEntries) {
            keys.add(e.getKey());
         }
         log.tracef("Received keys %s for segment %d of cache %s from node %s", keys, segmentId, cacheName, sender);
      }

      // CACHE_MODE_LOCAL avoids handling by StateTransferInterceptor and any potential locks in StateTransferLock
      EnumSet<Flag> flags = EnumSet.of(PUT_FOR_STATE_TRANSFER, CACHE_MODE_LOCAL, IGNORE_RETURN_VALUES, SKIP_REMOTE_LOOKUP, SKIP_SHARED_CACHE_STORE, SKIP_OWNERSHIP_CHECK, SKIP_XSITE_BACKUP);
      for (InternalCacheEntry e : cacheEntries) {
         try {
            InvocationContext ctx;
            if (transactionManager != null) {
               // cache is transactional
               transactionManager.begin();
               Transaction transaction = transactionManager.getTransaction();
               ctx = icc.createInvocationContext(transaction);
               ((TxInvocationContext) ctx).setImplicitTransaction(true);
            } else {
               // non-tx cache
               ctx = icc.createSingleKeyNonTxInvocationContext();
            }

            PutKeyValueCommand put = useVersionedPut ?
                  commandsFactory.buildVersionedPutKeyValueCommand(e.getKey(), e.getValue(), e.getLifespan(), e.getMaxIdle(), e.getVersion(), flags) :
                  commandsFactory.buildPutKeyValueCommand(e.getKey(), e.getValue(), e.getLifespan(), e.getMaxIdle(), flags);

            boolean success = false;
            try {
               interceptorChain.invoke(ctx, put);
               success = true;
            } finally {
               if (ctx.isInTxScope()) {
                  if (success) {
                     ((LocalTransaction) ((TxInvocationContext) ctx).getCacheTransaction()).setFromStateTransfer(true);
                     try {
                        transactionManager.commit();
                     } catch (Throwable ex) {
                        log.errorf(ex, "Could not commit transaction created by state transfer of key %s", e.getKey());
                        if (transactionManager.getTransaction() != null) {
                           transactionManager.rollback();
                        }
                     }
                  } else {
                     transactionManager.rollback();
                  }
               }
            }
         } catch (Exception ex) {
            log.problemApplyingStateForKey(ex.getMessage(), e.getKey(), ex);
         }
      }
      log.debugf("Finished applying state for segment %d of cache %s", segmentId, cacheName);
   }
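   // NOTE: hedged sketch, not part of the original source. doApplyState is expected to be invoked once
   // per received segment. Assuming the incoming state arrives as StateChunk objects exposing
   // getSegmentId() and getCacheEntries() (an assumption about the surrounding code, not shown here),
   // a caller would look roughly like this:
   //
   //    public void applyState(Address sender, Collection<StateChunk> stateChunks) {
   //       for (StateChunk chunk : stateChunks) {
   //          // a chunk with no entries carries no data to apply for that segment
   //          if (chunk.getCacheEntries() != null) {
   //             doApplyState(sender, chunk.getSegmentId(), chunk.getCacheEntries());
   //          }
   //       }
   //    }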