/**
 * Starts multi-update lock. Will wait until the topology future is ready.
 *
 * @return Topology version.
 * @throws IgniteCheckedException If failed.
 */
public AffinityTopologyVersion beginMultiUpdate() throws IgniteCheckedException {
    IgniteBiTuple<IgniteUuid, GridDhtTopologyFuture> tup = multiTxHolder.get();

    if (tup != null)
        throw new IgniteCheckedException("Nested multi-update locks are not supported");

    top.readLock();

    GridDhtTopologyFuture topFut;

    AffinityTopologyVersion topVer;

    try {
        // While we are holding read lock, register lock future for partition release future.
        IgniteUuid lockId = IgniteUuid.fromUuid(ctx.localNodeId());

        topVer = top.topologyVersion();

        MultiUpdateFuture fut = new MultiUpdateFuture(topVer);

        MultiUpdateFuture old = multiTxFuts.putIfAbsent(lockId, fut);

        assert old == null;

        topFut = top.topologyVersionFuture();

        multiTxHolder.set(F.t(lockId, topFut));
    }
    finally {
        top.readUnlock();
    }

    topFut.get();

    return topVer;
}
/**
 * @param key Key.
 * @param val Value.
 * @param ver Cache version.
 * @param p Optional predicate.
 * @param topVer Topology version.
 * @param replicate Replication flag.
 * @param plc Expiry policy.
 */
private void loadEntry(
    KeyCacheObject key,
    Object val,
    GridCacheVersion ver,
    @Nullable IgniteBiPredicate<K, V> p,
    AffinityTopologyVersion topVer,
    boolean replicate,
    @Nullable ExpiryPolicy plc) {
    if (p != null && !p.apply(key.<K>value(ctx.cacheObjectContext(), false), (V)val))
        return;

    try {
        GridDhtLocalPartition part = top.localPartition(ctx.affinity().partition(key),
            AffinityTopologyVersion.NONE, true);

        // Reserve to make sure that partition does not get unloaded.
        if (part.reserve()) {
            GridCacheEntryEx entry = null;

            try {
                long ttl = CU.ttlForLoad(plc);

                if (ttl == CU.TTL_ZERO)
                    return;

                CacheObject cacheVal = ctx.toCacheObject(val);

                entry = entryEx(key, false);

                entry.initialValue(cacheVal,
                    ver,
                    ttl,
                    CU.EXPIRE_TIME_CALCULATE,
                    false,
                    topVer,
                    replicate ? DR_LOAD : DR_NONE,
                    false);
            }
            catch (IgniteCheckedException e) {
                throw new IgniteException("Failed to put cache value: " + entry, e);
            }
            catch (GridCacheEntryRemovedException ignore) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry during loadCache (will ignore): " + entry);
            }
            finally {
                if (entry != null)
                    entry.context().evicts().touch(entry, topVer);

                part.release();
            }
        }
        else if (log.isDebugEnabled())
            log.debug("Will not load entry into cache (partition is invalid): " + part);
    }
    catch (GridDhtInvalidPartitionException e) {
        if (log.isDebugEnabled())
            log.debug("Ignoring entry for partition that does not belong [key=" + key + ", val=" + val +
                ", err=" + e + ']');
    }
}
/** {@inheritDoc} */
@Override public void onReconnected() {
    super.onReconnected();

    ctx.affinity().onReconnected();

    top.onReconnected();

    if (preldr != null)
        preldr.onReconnected();
}
/**
 * Ends multi-update lock.
 *
 * @throws IgniteCheckedException If failed.
 */
public void endMultiUpdate() throws IgniteCheckedException {
    IgniteBiTuple<IgniteUuid, GridDhtTopologyFuture> tup = multiTxHolder.get();

    if (tup == null)
        throw new IgniteCheckedException("Multi-update was not started or released twice.");

    top.readLock();

    try {
        IgniteUuid lockId = tup.get1();

        MultiUpdateFuture multiFut = multiTxFuts.remove(lockId);

        multiTxHolder.set(null);

        // Finish future.
        multiFut.onDone(lockId);
    }
    finally {
        top.readUnlock();
    }
}
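/*
 * Usage sketch (illustrative only): beginMultiUpdate() and endMultiUpdate() are meant to be
 * called as a pair around a batch of updates pinned to a single topology version. The 'dht'
 * reference below is an assumed handle to this cache adapter, and the update body is
 * hypothetical; this is a minimal sketch, not a prescribed pattern from the codebase.
 *
 *     AffinityTopologyVersion topVer = dht.beginMultiUpdate();
 *
 *     try {
 *         // Perform batched updates pinned to 'topVer' here.
 *     }
 *     finally {
 *         // Always release the lock so the registered MultiUpdateFuture completes
 *         // and the partition release future is not blocked.
 *         dht.endMultiUpdate();
 *     }
 */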
/** {@inheritDoc} */
@Override public void printMemoryStats() {
    super.printMemoryStats();

    top.printMemoryStats(1024);
}