/** Clears swap entries for evicted partition. */
private void clearSwap() {
    assert state() == EVICTED;
    assert !GridQueryProcessor.isEnabled(cctx.config()) : "Indexing needs to have unswapped values.";

    try {
        GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> it = cctx.swap().iterator(id);

        boolean isLocStore = cctx.store().isLocal();

        if (it != null) {
            // We can safely remove these values because no entries will be created for evicted partition.
            while (it.hasNext()) {
                Map.Entry<byte[], GridCacheSwapEntry> entry = it.next();

                byte[] keyBytes = entry.getKey();

                KeyCacheObject key = cctx.toCacheKeyObject(keyBytes);

                cctx.swap().remove(key);

                if (isLocStore)
                    cctx.store().remove(null, key.value(cctx.cacheObjectContext(), false));
            }
        }
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to clear swap for evicted partition: " + this, e);
    }
}
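/*
 * Illustration (not from the source): a JDK-only analogue of the drain pattern in
 * clearSwap() above. Each key is removed through the owning container rather than the
 * iterator, mirroring how the swap manager and the optional local store are both cleared
 * per key. All names here are hypothetical.
 */
class SwapDrainSketch {
    static void drain(java.util.Map<String, byte[]> swap, java.util.Set<String> locStore, boolean isLocStore) {
        // Iterate over a snapshot of the keys so removal cannot invalidate the iterator.
        for (String key : new java.util.ArrayList<>(swap.keySet())) {
            swap.remove(key);

            if (isLocStore)
                locStore.remove(key); // A local store must be purged along with swap.
        }
    }
}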
/**
 * @param key Key.
 * @param val Value.
 * @param ver Cache version.
 * @param p Optional predicate.
 * @param topVer Topology version.
 * @param replicate Replication flag.
 * @param plc Expiry policy.
 */
private void loadEntry(KeyCacheObject key,
    Object val,
    GridCacheVersion ver,
    @Nullable IgniteBiPredicate<K, V> p,
    AffinityTopologyVersion topVer,
    boolean replicate,
    @Nullable ExpiryPolicy plc) {
    if (p != null && !p.apply(key.<K>value(ctx.cacheObjectContext(), false), (V)val))
        return;

    try {
        GridDhtLocalPartition part = top.localPartition(ctx.affinity().partition(key),
            AffinityTopologyVersion.NONE, true);

        // Reserve to make sure that partition does not get unloaded.
        if (part.reserve()) {
            GridCacheEntryEx entry = null;

            try {
                long ttl = CU.ttlForLoad(plc);

                if (ttl == CU.TTL_ZERO)
                    return;

                CacheObject cacheVal = ctx.toCacheObject(val);

                entry = entryEx(key, false);

                entry.initialValue(cacheVal,
                    ver,
                    ttl,
                    CU.EXPIRE_TIME_CALCULATE,
                    false,
                    topVer,
                    replicate ? DR_LOAD : DR_NONE,
                    false);
            }
            catch (IgniteCheckedException e) {
                throw new IgniteException("Failed to put cache value: " + entry, e);
            }
            catch (GridCacheEntryRemovedException ignore) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry during loadCache (will ignore): " + entry);
            }
            finally {
                if (entry != null)
                    entry.context().evicts().touch(entry, topVer);

                part.release();
            }
        }
        else if (log.isDebugEnabled())
            log.debug("Will not load entry into cache (partition is invalid): " + part);
    }
    catch (GridDhtInvalidPartitionException e) {
        if (log.isDebugEnabled())
            log.debug("Ignoring entry for partition that does not belong [key=" + key + ", val=" + val +
                ", err=" + e + ']');
    }
}
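/*
 * A minimal sketch of the reserve/release discipline used by loadEntry() above, written
 * against a hypothetical Reservable interface so it stands alone; GridDhtLocalPartition
 * exposes the same reserve()/release() pair. Assumption: reserve() returning false means
 * the partition is being unloaded and the work must be skipped.
 */
class ReservationSketch {
    interface Reservable {
        boolean reserve();

        void release();
    }

    static boolean withReservation(Reservable part, Runnable work) {
        if (!part.reserve())
            return false; // Partition is going away; the caller should skip this entry.

        try {
            work.run();

            return true;
        }
        finally {
            part.release(); // Always release, or eviction of the partition can never proceed.
        }
    }
}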
/**
 * @param key Key.
 * @param topVer Topology version.
 * @param create Create flag.
 * @return Local partition.
 */
@Nullable private GridDhtLocalPartition localPartition(KeyCacheObject key,
    AffinityTopologyVersion topVer,
    boolean create) {
    int p = key.partition();

    if (p == -1)
        p = ctx.affinity().partition(key);

    return ctx.topology().localPartition(p, topVer, create);
}
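/*
 * Illustration of the fallback above: -1 acts as an "unassigned" sentinel on the key, so
 * the affinity computation runs only when the key does not already carry its partition id.
 * Names below are hypothetical.
 */
class PartitionLookupSketch {
    static final int UNASSIGNED = -1;

    static int resolvePartition(int cachedPart, java.util.function.IntSupplier affinity) {
        return cachedPart == UNASSIGNED ? affinity.getAsInt() : cachedPart;
    }
}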
/**
 * @param cctx Cache context.
 * @param ldr Class loader.
 * @throws IgniteCheckedException In case of error.
 */
void unmarshal(GridCacheContext cctx, @Nullable ClassLoader ldr) throws IgniteCheckedException {
    if (!isFiltered()) {
        key.finishUnmarshal(cctx.cacheObjectContext(), ldr);

        if (newVal != null)
            newVal.finishUnmarshal(cctx.cacheObjectContext(), ldr);

        if (oldVal != null)
            oldVal.finishUnmarshal(cctx.cacheObjectContext(), ldr);
    }
}
/**
 * @param cctx Cache context.
 * @throws IgniteCheckedException In case of error.
 */
void prepareMarshal(GridCacheContext cctx) throws IgniteCheckedException {
    assert key != null;

    key.prepareMarshal(cctx.cacheObjectContext());

    if (newVal != null)
        newVal.prepareMarshal(cctx.cacheObjectContext());

    if (oldVal != null)
        oldVal.prepareMarshal(cctx.cacheObjectContext());
}
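/*
 * JDK-only analogue (not Ignite's actual wire format) of the symmetry between
 * prepareMarshal() and finishUnmarshal() above: values are converted to bytes before a
 * message is sent and restored, possibly with a deployment class loader, on receive.
 */
class MarshalPairSketch {
    /** Sender side: convert the value to its wire form, as prepareMarshal() does per field. */
    static byte[] prepare(java.io.Serializable val) throws java.io.IOException {
        java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();

        try (java.io.ObjectOutputStream out = new java.io.ObjectOutputStream(bos)) {
            out.writeObject(val);
        }

        return bos.toByteArray();
    }

    /** Receiver side: restore the value, mirroring finishUnmarshal(). */
    static Object finish(byte[] bytes) throws java.io.IOException, ClassNotFoundException {
        try (java.io.ObjectInputStream in =
            new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(bytes))) {
            return in.readObject();
        }
    }
}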
/** {@inheritDoc} */
@Override public boolean onDone(@Nullable Void res, @Nullable Throwable err) {
    if (super.onDone(res, err)) {
        cctx.mvcc().removeAtomicFuture(version());

        if (err != null) {
            // Update failed: report each distinct key as failed and tell continuous
            // queries to skip the corresponding update events.
            if (!mappings.isEmpty()) {
                Collection<KeyCacheObject> hndKeys = new ArrayList<>(keys.size());

                exit: for (GridDhtAtomicUpdateRequest req : mappings.values()) {
                    for (int i = 0; i < req.size(); i++) {
                        KeyCacheObject key = req.key(i);

                        if (!hndKeys.contains(key)) {
                            updateRes.addFailedKey(key, err);

                            cctx.continuousQueries().skipUpdateEvent(key, req.partitionId(i),
                                req.updateCounter(i), updateReq.topologyVersion());

                            hndKeys.add(key);

                            if (hndKeys.size() == keys.size())
                                break exit;
                        }
                    }
                }
            }
            else
                // No mappings were built, so every key failed.
                for (KeyCacheObject key : keys)
                    updateRes.addFailedKey(key, err);
        }
        else {
            // Update succeeded: fire a continuous query notification once per distinct key.
            Collection<KeyCacheObject> hndKeys = new ArrayList<>(keys.size());

            exit: for (GridDhtAtomicUpdateRequest req : mappings.values()) {
                for (int i = 0; i < req.size(); i++) {
                    KeyCacheObject key = req.key(i);

                    if (!hndKeys.contains(key)) {
                        try {
                            cctx.continuousQueries().onEntryUpdated(key,
                                req.value(i),
                                req.localPreviousValue(i),
                                key.internal() || !cctx.userCache(),
                                req.partitionId(i),
                                true,
                                false,
                                req.updateCounter(i),
                                updateReq.topologyVersion());
                        }
                        catch (IgniteCheckedException e) {
                            U.warn(log, "Failed to send continuous query message. [key=" + key +
                                ", newVal=" + req.value(i) + ", err=" + e + "]");
                        }

                        hndKeys.add(key);

                        if (hndKeys.size() == keys.size())
                            break exit;
                    }
                }
            }
        }

        if (updateReq.writeSynchronizationMode() == FULL_SYNC)
            completionCb.apply(updateReq, updateRes);

        return true;
    }

    return false;
}
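/*
 * Self-contained illustration (JDK collections only) of the dedup-and-early-exit pattern
 * used twice in onDone() above: a labeled break abandons the scan over all mappings as
 * soon as every distinct key has been handled once. Names are illustrative.
 */
class DedupScanSketch {
    static java.util.List<String> handleOnce(java.util.List<java.util.List<String>> mappings, int totalKeys) {
        java.util.List<String> hndKeys = new java.util.ArrayList<>(totalKeys);

        exit: for (java.util.List<String> req : mappings) {
            for (String key : req) {
                if (!hndKeys.contains(key)) {
                    hndKeys.add(key); // Handle each distinct key exactly once.

                    if (hndKeys.size() == totalKeys)
                        break exit; // Everything seen; skip the remaining mappings.
                }
            }
        }

        return hndKeys;
    }

    // Note: contains() on an ArrayList is O(n) per check; a HashSet would make it O(1),
    // presumably a list suffices in the original because per-update key counts are small.
}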
/**
 * @param key Key for which entry should be returned.
 * @return Cache entry.
 */
protected GridDistributedCacheEntry createEntry(KeyCacheObject key) {
    return new GridDhtDetachedCacheEntry(ctx, key, key.hashCode(), null, null, 0);
}