/** * @param idx Index. * @return Conflict version at the given index, or {@code null} if conflict versions are not set. */ @Nullable public GridCacheVersion conflictVersion(int idx) { if (conflictVers != null) { assert idx >= 0 && idx < conflictVers.size(); return conflictVers.get(idx); } return null; }
private void spawnRareDeposit( final List<AbstractDepositEffectArea> depositAreas, final short casterLevel) { if (depositAreas.isEmpty()) { return; } AbstractDepositEffectArea rareDeposit = depositAreas.get(0); for (int i = 1, n = depositAreas.size(); i < n; ++i) { final AbstractDepositEffectArea area = depositAreas.get(i); if (area.getDepositLevel() > rareDeposit.getDepositLevel()) { rareDeposit = area; } } this.spawnDeposit((int) rareDeposit.getBaseId()); }
private void spawnDeposit( final List<AbstractDepositEffectArea> depositAreas, final short casterLevel) { if (depositAreas.isEmpty()) { return; } final int roll = MathHelper.random(100); AbstractDepositEffectArea deposit = depositAreas.get(0); for (int i = 1, n = depositAreas.size(); i < n; ++i) { final AbstractDepositEffectArea area = depositAreas.get(i); if ((roll < deposit.getDepositLevel() && roll > area.getDepositLevel()) || (roll > area.getDepositLevel() && area.getDepositLevel() > deposit.getDepositLevel())) { deposit = area; } } this.spawnDeposit((int) deposit.getBaseId()); }
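// A minimal standalone sketch of the rare-deposit pick above: the area with the highest deposit level
// wins. DepositArea and the sample values are hypothetical stand-ins for AbstractDepositEffectArea,
// not the game's actual types.
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class DepositPickSketch {
    // Hypothetical stand-in for AbstractDepositEffectArea.
    interface DepositArea {
        int getDepositLevel();
        long getBaseId();
    }

    static DepositArea area(final int level, final long baseId) {
        return new DepositArea() {
            @Override public int getDepositLevel() { return level; }
            @Override public long getBaseId() { return baseId; }
        };
    }

    public static void main(String[] args) {
        List<DepositArea> areas = Arrays.asList(area(10, 100L), area(75, 200L), area(40, 300L));

        // Same result as the manual scan in spawnRareDeposit: the highest deposit level wins.
        DepositArea rare = areas.stream()
            .max(Comparator.comparingInt(DepositArea::getDepositLevel))
            .orElseThrow(IllegalStateException::new);

        System.out.println("Rare deposit base id: " + rare.getBaseId()); // prints 200
    }
}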
/** * Starts activity. * * @throws IgniteInterruptedCheckedException If interrupted. */ public void init() throws IgniteInterruptedCheckedException { if (isDone()) return; if (init.compareAndSet(false, true)) { if (isDone()) return; try { // Wait for event to occur to make sure that discovery // will return corresponding nodes. U.await(evtLatch); assert discoEvt != null : this; assert !dummy && !forcePreload : this; ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion()); oldestNode.set(oldest); startCaches(); // True if client node joined or failed. boolean clientNodeEvt; if (F.isEmpty(reqs)) { int type = discoEvt.type(); assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt; clientNodeEvt = CU.clientNode(discoEvt.eventNode()); } else { assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt; boolean clientOnlyStart = true; for (DynamicCacheChangeRequest req : reqs) { if (!req.clientStartOnly()) { clientOnlyStart = false; break; } } clientNodeEvt = clientOnlyStart; } if (clientNodeEvt) { ClusterNode node = discoEvt.eventNode(); // Client need to initialize affinity for local join event or for stated client caches. if (!node.isLocal()) { for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) { initTopology(cacheCtx); top.beforeExchange(this); } else cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion()); } if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); onDone(exchId.topologyVersion()); skipPreload = cctx.kernalContext().clientNode(); return; } } if (cctx.kernalContext().clientNode()) { skipPreload = true; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; initTopology(cacheCtx); } if (oldestNode.get() != null) { rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); sendPartitions(); } else onDone(exchId.topologyVersion()); return; } assert oldestNode.get() != null; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) { if (cacheCtx .discovery() .cacheAffinityNodes(cacheCtx.name(), topologyVersion()) .isEmpty()) U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex()); } cacheCtx.preloader().onExchangeFutureAdded(); } List<String> cachesWithoutNodes = null; if (exchId.isLeft()) { for (String name : cctx.cache().cacheNames()) { if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) { if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>(); cachesWithoutNodes.add(name); // Fire event even if there is no client cache started. 
if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) { Event evt = new CacheEvent( name, cctx.localNode(), cctx.localNode(), "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false, null, null, null, null, false, null, false, null, null, null); cctx.gridEvents().record(evt); } } } } if (cachesWithoutNodes != null) { StringBuilder sb = new StringBuilder( "All server nodes for the following caches have left the cluster: "); for (int i = 0; i < cachesWithoutNodes.size(); i++) { String cache = cachesWithoutNodes.get(i); sb.append('\'').append(cache).append('\''); if (i != cachesWithoutNodes.size() - 1) sb.append(", "); } U.quietAndWarn(log, sb.toString()); U.quietAndWarn(log, "Must have server nodes for caches to operate."); } assert discoEvt != null; assert exchId.nodeId().equals(discoEvt.eventNode().id()); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId()); long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence(); // Update before waiting for locks. if (!cacheCtx.isLocal()) cacheCtx .topology() .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId())); } // Grab all alive remote nodes with order of equal or less than last joined node. rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); AffinityTopologyVersion topVer = exchId.topologyVersion(); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Must initialize topology after we get discovery event. initTopology(cacheCtx); cacheCtx.preloader().updateLastExchangeFuture(this); } IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer); // Assign to class variable so it will be included into toString() method. this.partReleaseFut = partReleaseFut; if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this); while (true) { try { partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { // Print pending transactions and locks that might have led to hang. dumpPendingObjects(); } } if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this); if (!F.isEmpty(reqs)) blockGateways(); if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion()); while (true) { try { locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { U.warn( log, "Failed to wait for locks release future. 
" + "Dumping pending objects that might be the cause: " + cctx.localNodeId()); U.warn(log, "Locked entries:"); Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks = cctx.mvcc().unfinishedLocks(exchId.topologyVersion()); for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet()) U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']'); } } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Notify replication manager. GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx; if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft()); // Partition release future is done so we can flush the write-behind store. cacheCtx.store().forceFlush(); // Process queued undeploys prior to sending/spreading map. cacheCtx.preloader().unwindUndeploys(); GridDhtPartitionTopology top = cacheCtx.topology(); assert topVer.equals(top.topologyVersion()) : "Topology version is updated only in this class instances inside single ExchangeWorker thread."; top.beforeExchange(this); } for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) { top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId())); top.beforeExchange(this); } } catch (IgniteInterruptedCheckedException e) { onDone(e); throw e; } catch (Throwable e) { U.error( log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e); onDone(e); if (e instanceof Error) throw (Error) e; return; } if (F.isEmpty(rmtIds)) { onDone(exchId.topologyVersion()); return; } ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); // If this node is not oldest. if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions(); else { boolean allReceived = allReceived(); if (allReceived && replied.compareAndSet(false, true)) { if (spreadPartitions()) onDone(exchId.topologyVersion()); } } scheduleRecheck(); } else assert false : "Skipped init future: " + this; }
/** * @param nodeId Primary node ID. * @param req Request. * @return Remote transaction. * @throws GridException If failed. * @throws GridDistributedLockCancelledException If lock has been cancelled. */ @SuppressWarnings({"RedundantTypeArguments"}) @Nullable public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req) throws GridException, GridDistributedLockCancelledException { List<byte[]> nearKeyBytes = req.nearKeyBytes(); GridNearTxRemote<K, V> tx = null; ClassLoader ldr = ctx.deploy().globalLoader(); if (ldr != null) { for (int i = 0; i < nearKeyBytes.size(); i++) { byte[] bytes = nearKeyBytes.get(i); if (bytes == null) continue; K key = req.nearKeys().get(i); Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i); if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key); GridNearCacheEntry<K, V> entry = null; while (true) { try { entry = peekExx(key); if (entry != null) { entry.keyBytes(bytes); // Handle implicit locks for pessimistic transactions. if (req.inTx()) { tx = ctx.tm().tx(req.version()); if (tx != null) tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/); else { tx = new GridNearTxRemote<K, V>( nodeId, req.nearNodeId(), req.threadId(), req.version(), null, PESSIMISTIC, req.isolation(), req.isInvalidate(), req.timeout(), key, bytes, null, // Value. null, // Value bytes. ctx); if (tx.empty()) return tx; tx = ctx.tm().onCreated(tx); if (tx == null || !ctx.tm().onStarted(tx)) throw new GridCacheTxRollbackException( "Failed to acquire lock " + "(transaction has been completed): " + req.version()); } } // Add remote candidate before reordering. entry.addRemote( req.nodeId(), nodeId, req.threadId(), req.version(), req.timeout(), tx != null && tx.ec(), tx != null, tx != null && tx.implicitSingle()); // Remote candidates for ordered lock queuing. entry.addRemoteCandidates( cands, req.version(), req.committedVersions(), req.rolledbackVersions()); entry.orderOwned(req.version(), req.owned(entry.key())); } // Double-check in case if sender node left the grid. if (ctx.discovery().node(req.nodeId()) == null) { if (log.isDebugEnabled()) log.debug("Node requesting lock left grid (lock request will be ignored): " + req); if (tx != null) tx.rollback(); return null; } // Entry is legit. break; } catch (GridCacheEntryRemovedException ignored) { assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry; if (log.isDebugEnabled()) log.debug("Received entry removed exception (will retry on renewed entry): " + entry); if (tx != null) { tx.clearEntry(entry.key()); if (log.isDebugEnabled()) log.debug( "Cleared removed entry from remote transaction (will retry) [entry=" + entry + ", tx=" + tx + ']'); } } } } } else { String err = "Failed to acquire deployment class loader for message: " + req; U.warn(log, err); throw new GridException(err); } return tx; }
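// startRemoteTx above (and processLockRequest below) use the same retry idiom for concurrently removed
// entries: peek the entry, apply the lock, and on GridCacheEntryRemovedException loop back to peek a
// fresh entry. A stripped-down sketch of that idiom; RemovedException, Entry and EntryStore are
// illustrative stand-ins, not the Ignite classes.
final class RetryOnRemovedSketch {
    /** Thrown when a cached entry was concurrently removed and must be re-looked-up. */
    static class RemovedException extends Exception {}

    interface Entry {
        void lock(long lockVersion) throws RemovedException;
    }

    interface EntryStore {
        /** Returns a live entry for the key, or {@code null} if none exists. */
        Entry peek(String key);
    }

    private RetryOnRemovedSketch() {}

    static void lockWithRetry(EntryStore store, String key, long lockVersion) {
        while (true) {
            Entry entry = store.peek(key);

            try {
                if (entry != null)
                    entry.lock(lockVersion);

                // Entry is legit (or absent) - done with this key.
                break;
            }
            catch (RemovedException ignored) {
                // The entry was removed underneath us; loop to peek a renewed entry.
            }
        }
    }
}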
/** * Performs flush. * * @throws GridException If failed. */ private void doFlush() throws GridException { lastFlushTime = U.currentTimeMillis(); List<GridFuture> activeFuts0 = null; int doneCnt = 0; for (GridFuture<?> f : activeFuts) { if (!f.isDone()) { if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2)); activeFuts0.add(f); } else { f.get(); doneCnt++; } } if (activeFuts0 == null || activeFuts0.isEmpty()) return; while (true) { Queue<GridFuture<?>> q = null; for (Buffer buf : bufMappings.values()) { GridFuture<?> flushFut = buf.flush(); if (flushFut != null) { if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2); q.add(flushFut); } } if (q != null) { assert !q.isEmpty(); boolean err = false; for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) { try { fut.get(); } catch (GridException e) { if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e); err = true; } } if (err) // Remaps needed - flush buffers. continue; } doneCnt = 0; for (int i = 0; i < activeFuts0.size(); i++) { GridFuture f = activeFuts0.get(i); if (f == null) doneCnt++; else if (f.isDone()) { f.get(); doneCnt++; activeFuts0.set(i, null); } else break; } if (doneCnt == activeFuts0.size()) return; } }
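// doFlush() above drains the loader in passes: snapshot the still-active futures, flush every buffer,
// wait for those flushes (a failed flush only forces another pass, because entries are remapped and
// re-sent), then re-check the snapshot until everything has completed. A compact sketch of that drain
// loop with CompletableFuture; the flushAllBuffers supplier is a hypothetical stand-in for iterating
// bufMappings.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Supplier;

final class DrainLoopSketch {
    private DrainLoopSketch() {}

    /** Repeatedly flushes until every future that was pending at the start has completed. */
    static void drain(List<CompletableFuture<Void>> activeFutures,
        Supplier<List<CompletableFuture<Void>>> flushAllBuffers) {
        // Snapshot only the futures that are still pending.
        List<CompletableFuture<Void>> pending = new ArrayList<>();

        for (CompletableFuture<Void> f : activeFutures) {
            if (!f.isDone())
                pending.add(f);
        }

        while (!pending.isEmpty()) {
            // Kick off a flush of every buffer and wait for those flushes to finish.
            List<CompletableFuture<Void>> flushes = flushAllBuffers.get();

            try {
                CompletableFuture.allOf(flushes.toArray(new CompletableFuture[0])).join();
            }
            catch (CompletionException ignored) {
                // A failed buffer flush is not fatal: the next pass simply flushes again.
            }

            // Keep only the original futures that are still incomplete; another pass handles them.
            pending.removeIf(CompletableFuture::isDone);
        }
    }
}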
/** @throws Exception If failed. */ public void testCreateFileFragmented() throws Exception { GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs"); GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer(); GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false); GridGgfsPath path = new GridGgfsPath("/file"); try { GridGgfs fs0 = grid(0).ggfs("ggfs"); GridGgfs fs1 = grid(1).ggfs("ggfs"); GridGgfs fs2 = grid(2).ggfs("ggfs"); try (GridGgfsOutputStream out = fs0.create( path, 128, false, 1, CFG_GRP_SIZE, F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) { // 1.5 blocks. byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2]; Arrays.fill(data, (byte) 1); out.write(data); } try (GridGgfsOutputStream out = fs1.append(path, false)) { // 1.5 blocks. byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2]; Arrays.fill(data, (byte) 2); out.write(data); } // After this we should have first two blocks colocated with grid 0 and last block colocated // with grid 1. GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path); GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME); GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId()); GridGgfsFileMap map = fileInfo.fileMap(); List<GridGgfsFileAffinityRange> ranges = map.ranges(); assertEquals(2, ranges.size()); assertTrue(ranges.get(0).startOffset() == 0); assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1); assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE); assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1); // Validate data read after colocated writes. try (GridGgfsInputStream in = fs2.open(path)) { // Validate first part of file. for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read()); // Validate second part of file. for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read()); assertEquals(-1, in.read()); } } finally { GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true); boolean hasData = false; for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty(); assertTrue(hasData); fs.delete(path, true); } GridTestUtils.retryAssert( log, ASSERT_RETRIES, ASSERT_RETRY_INTERVAL, new CAX() { @Override public void applyx() { for (int i = 0; i < NODES_CNT; i++) assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty()); } }); }
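// The file-map assertions above follow directly from the write sizes: 1.5 blocks written on grid 0 plus
// 1.5 blocks appended from grid 1 make 3 full blocks, and the test expects the first affinity range to
// cover blocks 0-1 (offsets 0 .. 2 * CFG_BLOCK_SIZE - 1) and the second to cover block 2 (offsets
// 2 * CFG_BLOCK_SIZE .. 3 * CFG_BLOCK_SIZE - 1). A tiny sketch of that arithmetic; the 512-byte block
// size is an arbitrary example, not the test's CFG_BLOCK_SIZE.
final class FileRangeSketch {
    private FileRangeSketch() {}

    public static void main(String[] args) {
        int blockSize = 512;                        // stand-in for CFG_BLOCK_SIZE
        int totalWritten = 2 * (blockSize * 3 / 2); // two writes of 1.5 blocks each = 3 blocks

        // First affinity range: blocks [0, 1].
        long firstStart = 0;
        long firstEnd = 2L * blockSize - 1;

        // Second affinity range: block [2].
        long secondStart = 2L * blockSize;
        long secondEnd = 3L * blockSize - 1;

        System.out.printf("total=%d, first=[%d..%d], second=[%d..%d]%n",
            totalWritten, firstStart, firstEnd, secondStart, secondEnd);
    }
}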
/** * Processes lock request. * * @param nodeId Sender node ID. * @param msg Lock request. */ @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"}) private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) { assert !nodeId.equals(locNodeId); List<byte[]> keys = msg.keyBytes(); int cnt = keys.size(); GridReplicatedTxRemote<K, V> tx = null; GridDistributedLockResponse res; ClassLoader ldr = null; try { ldr = ctx.deploy().globalLoader(); if (ldr != null) { res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt); for (int i = 0; i < keys.size(); i++) { byte[] bytes = keys.get(i); K key = msg.keys().get(i); Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i); if (bytes == null) continue; if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key); GridDistributedCacheEntry<K, V> entry = null; while (true) { try { entry = entryexx(key); // Handle implicit locks for pessimistic transactions. if (msg.inTx()) { tx = ctx.tm().tx(msg.version()); if (tx != null) { if (msg.txRead()) tx.addRead(key, bytes); else tx.addWrite(key, bytes); } else { tx = new GridReplicatedTxRemote<K, V>( nodeId, msg.threadId(), msg.version(), null, PESSIMISTIC, msg.isolation(), msg.isInvalidate(), msg.timeout(), key, bytes, msg.txRead(), ctx); tx = ctx.tm().onCreated(tx); if (tx == null || !ctx.tm().onStarted(tx)) throw new GridCacheTxRollbackException( "Failed to acquire lock " + "(transaction has been completed): " + msg.version()); } } // Add remote candidate before reordering. entry.addRemote( msg.nodeId(), null, msg.threadId(), msg.version(), msg.timeout(), tx != null && tx.ec(), tx != null, tx != null && tx.implicitSingle()); // Remote candidates for ordered lock queuing. entry.addRemoteCandidates( cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions()); // Double-check in case if sender node left the grid. if (ctx.discovery().node(msg.nodeId()) == null) { if (log.isDebugEnabled()) log.debug( "Node requesting lock left grid (lock request will be ignored): " + msg); if (tx != null) tx.rollback(); return; } res.setCandidates( i, entry.localCandidates(), ctx.tm().committedVersions(msg.version()), ctx.tm().rolledbackVersions(msg.version())); res.addValueBytes( entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx); // Entry is legit. 
break; } catch (GridCacheEntryRemovedException ignored) { assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry; if (log.isDebugEnabled()) log.debug( "Received entry removed exception (will retry on renewed entry): " + entry); if (tx != null) { tx.clearEntry(entry.key()); if (log.isDebugEnabled()) log.debug( "Cleared removed entry from remote transaction (will retry) [entry=" + entry + ", tx=" + tx + ']'); } } } } } else { String err = "Failed to acquire deployment class loader for message: " + msg; U.warn(log, err); res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err)); } } catch (GridCacheTxRollbackException e) { if (log.isDebugEnabled()) log.debug("Received lock request for completed transaction (will ignore): " + e); res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e); } catch (GridException e) { String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg; log.error(err, e); res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e)); if (tx != null) tx.rollback(); } catch (GridDistributedLockCancelledException ignored) { // Received lock request for cancelled lock. if (log.isDebugEnabled()) log.debug("Received lock request for cancelled lock (will ignore): " + msg); if (tx != null) tx.rollback(); // Don't send response back. return; } GridNode node = ctx.discovery().node(msg.nodeId()); boolean releaseAll = false; if (node != null) { try { // Reply back to sender. ctx.io().send(node, res); } catch (GridException e) { U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e); releaseAll = ldr != null; } } // If sender left grid, release all locks acquired so far. else releaseAll = ldr != null; // Release all locks because sender node left grid. if (releaseAll) { for (K key : msg.keys()) { while (true) { GridDistributedCacheEntry<K, V> entry = peekexx(key); try { if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId()); break; } catch (GridCacheEntryRemovedException ignore) { if (log.isDebugEnabled()) log.debug( "Attempted to remove lock on removed entry during failure " + "of replicated lock request handling (will retry): " + entry); } } } U.warn( log, "Sender node left grid in the midst of lock acquisition (locks will be released)."); } }
/** * This method is called to map or split a grid task into multiple grid jobs. This is the first * method that gets called when task execution starts. * * @param subgrid Nodes available for this task execution. Note that order of nodes is guaranteed * to be randomized by container. This ensures that every time you simply iterate through grid * nodes, the order of nodes will be random which over time should result in all nodes being * used equally. * @param data Task execution argument. Can be {@code null}. This is the same argument as the one * passed into {@code Grid#execute(...)} methods. * @return Map of grid jobs assigned to subgrid nodes. Unless {@link * GridComputeTaskContinuousMapper} is injected into task, if {@code null} or an empty map is * returned, an exception will be thrown. * @throws GridException If mapping could not complete successfully. This exception will be thrown * out of {@link GridComputeTaskFuture#get()} method. */ @Override public Map<? extends GridComputeJob, GridNode> map( List<GridNode> subgrid, @Nullable final Collection<Integer> data) throws GridException { assert !subgrid.isEmpty(); // Give preference to wanted node. Otherwise, take the first one. GridNode targetNode = F.find( subgrid, subgrid.get(0), new GridPredicate<GridNode>() { @Override public boolean apply(GridNode e) { return preferredNode.equals(e.id()); } }); return Collections.singletonMap( new GridComputeJobAdapter() { @GridLoggerResource private GridLogger log; @GridInstanceResource private Grid grid; @Override public Object execute() throws GridException { log.info("Going to put data: " + data.size()); GridCache<Object, Object> cache = grid.cache(cacheName); assert cache != null; Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data); for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) { T2<Integer, Collection<Integer>> pair = entry.getValue(); Object affKey = pair.get1(); // Group lock partition. try (GridCacheTx tx = cache.txStartPartition( cache.affinity().partition(affKey), optimistic ? OPTIMISTIC : PESSIMISTIC, REPEATABLE_READ, 0, pair.get2().size())) { for (Integer val : pair.get2()) cache.put(val, val); tx.commit(); } } log.info("Finished put data: " + data.size()); return data; } /** * Groups values by partitions. * * @param data Data to put. * @return Grouped map. */ private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) { GridCache<Object, Object> cache = grid.cache(cacheName); Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>(); for (Integer val : data) { int part = cache.affinity().partition(val); T2<Integer, Collection<Integer>> tup = res.get(part); if (tup == null) { tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>()); res.put(part, tup); } tup.get2().add(val); } return res; } }, targetNode); }
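// groupData() above builds a partition -> (affinity key, values) map so that each group can be written
// under a single partition lock (txStartPartition). A compact sketch of the same grouping with plain
// Java collections; partitionOf() is a placeholder for cache.affinity().partition(), and the Group
// holder is a stand-in for T2.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class GroupByPartitionSketch {
    /** One group of values mapping to the same partition; affKey is the first value seen for it. */
    static final class Group {
        final int affKey;
        final List<Integer> values = new ArrayList<>();

        Group(int affKey) { this.affKey = affKey; }
    }

    private GroupByPartitionSketch() {}

    /** Placeholder for cache.affinity().partition(key): any stable key-to-partition mapping works. */
    static int partitionOf(int key, int partitions) {
        return Math.floorMod(Integer.hashCode(key), partitions);
    }

    /** Groups values by partition so each group can later be written under one partition lock. */
    static Map<Integer, Group> group(Iterable<Integer> data, int partitions) {
        Map<Integer, Group> res = new HashMap<>();

        for (Integer val : data) {
            int part = partitionOf(val, partitions);

            Group grp = res.get(part);

            if (grp == null) {
                grp = new Group(val);

                res.put(part, grp);
            }

            grp.values.add(val);
        }

        return res;
    }
}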
/** * @param idx Index to get. * @return Write value at the given index (either a plain value or a transform closure), or {@code null} if no values were set. */ public CacheObject writeValue(int idx) { if (vals != null) return vals.get(idx); return null; }
/** * @param idx Key index. * @return Value. */ @SuppressWarnings("unchecked") public CacheObject value(int idx) { assert op == UPDATE : op; return vals.get(idx); }
/** * Checks whether any of the local partitions need to be evicted. * * @param updateSeq Update sequence. * @return {@code True} if local partition state was changed (at least one partition was scheduled for eviction). */ private boolean checkEvictions(long updateSeq) { assert lock.isWriteLockedByCurrentThread(); boolean changed = false; UUID locId = cctx.nodeId(); for (GridDhtLocalPartition part : locParts.values()) { GridDhtPartitionState state = part.state(); if (state.active()) { int p = part.id(); List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer); if (!affNodes.contains(cctx.localNode())) { Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING)); // If all affinity nodes are owners, then evict partition from local node. if (nodeIds.containsAll(F.nodeIds(affNodes))) { part.rent(false); updateLocal(part.id(), locId, part.state(), updateSeq); changed = true; if (log.isDebugEnabled()) log.debug("Evicted local partition (all affinity nodes are owners): " + part); } else { int ownerCnt = nodeIds.size(); int affCnt = affNodes.size(); if (ownerCnt > affCnt) { List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds)); // Sort by node orders in ascending order. Collections.sort(sorted, CU.nodeComparator(true)); int diff = sorted.size() - affCnt; for (int i = 0; i < diff; i++) { ClusterNode n = sorted.get(i); if (locId.equals(n.id())) { part.rent(false); updateLocal(part.id(), locId, part.state(), updateSeq); changed = true; if (log.isDebugEnabled()) log.debug( "Evicted local partition (this node is oldest non-affinity node): " + part); break; } } } } } } } return changed; }
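// When a partition has more owners than affinity nodes, checkEvictions() sorts the owners by node order
// (ascending) and lets the first (ownerCnt - affCnt) of them evict, so every node reaches the same
// deterministic decision without coordination. A small sketch of that selection; Node and its order
// field are illustrative stand-ins for ClusterNode and its cluster-wide join order.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

final class EvictionPickSketch {
    /** Illustrative stand-in for ClusterNode. */
    static final class Node {
        final String id;
        final long order;

        Node(String id, long order) { this.id = id; this.order = order; }
    }

    private EvictionPickSketch() {}

    /** Returns the owners that should evict the partition: the (ownerCnt - affCnt) oldest by order. */
    static List<Node> pickEvicting(List<Node> owners, int affinityNodeCnt) {
        int diff = owners.size() - affinityNodeCnt;

        if (diff <= 0)
            return new ArrayList<>();

        List<Node> sorted = new ArrayList<>(owners);

        // Ascending node order: every node sorts identically, so the choice is deterministic.
        sorted.sort(Comparator.comparingLong(n -> n.order));

        return new ArrayList<>(sorted.subList(0, diff));
    }
}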