/** @throws Exception If failed. */
public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++)
        gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
        String key = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

        assertEquals(primaryNodeId, partitioned.affinity(key));

        // Must go to primary node only. Since backup count is 0, value must be present on
        // the primary node only.
        partitioned.put(key, "val" + key);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

            if (primaryNodeId.equals(entry.getKey()))
                assertEquals("val" + key, val);
            else
                assertNull(val);
        }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
        String pinnedKey = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

        UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

        GridClientNode node = compute.node(pinnedNodeId);

        partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

            if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
                assertEquals("val" + pinnedKey, val);
            else
                assertNull(val);
        }
    }
}
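// The assertions above hinge on the client and the server computing the same key-to-node mapping
// for the partitioned cache. The sketch below is a simplified, self-contained illustration of such
// a deterministic mapping (hash modulo a sorted node list); it is NOT GridGain's actual affinity
// function, and the class and method names in it are hypothetical.
class SimpleKeyToNodeMapping {
    /** Maps a key to one of the given node IDs deterministically (assumes a non-empty node list). */
    static java.util.UUID mapKeyToNode(String key, java.util.List<java.util.UUID> nodeIds) {
        // Sort a copy so every caller sees the same order regardless of input ordering.
        java.util.List<java.util.UUID> sorted = new java.util.ArrayList<>(nodeIds);

        java.util.Collections.sort(sorted);

        // Non-negative index derived from the key hash; same key + same topology => same node.
        int idx = Math.abs(key.hashCode() % sorted.size());

        return sorted.get(idx);
    }
}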
/** {@inheritDoc} */
@SuppressWarnings({"unchecked"})
@Override public boolean equals(Object obj) {
    if (obj == this)
        return true;

    CancelMessageId other = (CancelMessageId)obj;

    return reqId == other.reqId && nodeId.equals(other.nodeId);
}
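// A hashCode() consistent with the equals() above must combine the same two fields, reqId and
// nodeId. The following is a hypothetical sketch (assuming reqId is a long), not necessarily the
// implementation CancelMessageId actually uses.
@Override public int hashCode() {
    int res = (int)(reqId ^ (reqId >>> 32));

    return 31 * res + nodeId.hashCode();
}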
/**
 * Processes lock request.
 *
 * @param nodeId Sender node ID.
 * @param msg Lock request.
 */
@SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
        ldr = ctx.deploy().globalLoader();

        if (ldr != null) {
            res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

            for (int i = 0; i < keys.size(); i++) {
                byte[] bytes = keys.get(i);
                K key = msg.keys().get(i);

                Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

                if (bytes == null)
                    continue;

                if (log.isDebugEnabled())
                    log.debug("Unmarshalled key: " + key);

                GridDistributedCacheEntry<K, V> entry = null;

                while (true) {
                    try {
                        entry = entryexx(key);

                        // Handle implicit locks for pessimistic transactions.
                        if (msg.inTx()) {
                            tx = ctx.tm().tx(msg.version());

                            if (tx != null) {
                                if (msg.txRead())
                                    tx.addRead(key, bytes);
                                else
                                    tx.addWrite(key, bytes);
                            }
                            else {
                                tx = new GridReplicatedTxRemote<K, V>(
                                    nodeId,
                                    msg.threadId(),
                                    msg.version(),
                                    null,
                                    PESSIMISTIC,
                                    msg.isolation(),
                                    msg.isInvalidate(),
                                    msg.timeout(),
                                    key,
                                    bytes,
                                    msg.txRead(),
                                    ctx);

                                tx = ctx.tm().onCreated(tx);

                                if (tx == null || !ctx.tm().onStarted(tx))
                                    throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                        "(transaction has been completed): " + msg.version());
                            }
                        }

                        // Add remote candidate before reordering.
                        entry.addRemote(
                            msg.nodeId(),
                            null,
                            msg.threadId(),
                            msg.version(),
                            msg.timeout(),
                            tx != null && tx.ec(),
                            tx != null,
                            tx != null && tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            cands,
                            msg.version(),
                            msg.committedVersions(),
                            msg.rolledbackVersions());

                        // Double-check in case the sender node left the grid.
                        if (ctx.discovery().node(msg.nodeId()) == null) {
                            if (log.isDebugEnabled())
                                log.debug("Node requesting lock left grid (lock request will be ignored): " + msg);

                            if (tx != null)
                                tx.rollback();

                            return;
                        }

                        res.setCandidates(
                            i,
                            entry.localCandidates(),
                            ctx.tm().committedVersions(msg.version()),
                            ctx.tm().rolledbackVersions(msg.version()));

                        res.addValueBytes(
                            entry.rawGet(),
                            msg.returnValue(i) ? entry.valueBytes(null) : null,
                            ctx);

                        // Entry is legit.
                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                        if (tx != null) {
                            tx.clearEntry(entry.key());

                            if (log.isDebugEnabled())
                                log.debug("Cleared removed entry from remote transaction (will retry) [entry=" +
                                    entry + ", tx=" + tx + ']');
                        }
                    }
                }
            }
        }
        else {
            String err = "Failed to acquire deployment class for message: " + msg;

            U.warn(log, err);

            res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Received lock request for completed transaction (will ignore): " + e);

        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    }
    catch (GridException e) {
        String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

        log.error(err, e);

        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

        if (tx != null)
            tx.rollback();
    }
    catch (GridDistributedLockCancelledException ignored) {
        // Received lock request for cancelled lock.
        if (log.isDebugEnabled())
            log.debug("Received lock request for canceled lock (will ignore): " + msg);

        if (tx != null)
            tx.rollback();

        // Don't send response back.
        return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
        try {
            // Reply back to sender.
            ctx.io().send(node, res);
        }
        catch (GridException e) {
            U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

            releaseAll = ldr != null;
        }
    }
    // If sender left grid, release all locks acquired so far.
    else
        releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
        for (K key : msg.keys()) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekexx(key);

                try {
                    if (entry != null)
                        entry.removeExplicitNodeLocks(msg.nodeId());

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock on removed entity during failure " +
                            "of replicated lock request handling (will retry): " + entry);
                }
            }
        }

        U.warn(log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
}
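// Both retry loops in processLockRequest() follow the same idiom: operate on a cache entry and,
// if it was removed concurrently, look the entry up again and retry. The stripped-down, self-contained
// sketch below illustrates only that pattern; EntryRemovedException, lookupEntry() and applyTo() are
// hypothetical stand-ins, not the GridGain types used above.
class RetryOnRemovedEntrySketch {
    /** Hypothetical exception signalling that an entry was removed concurrently. */
    static class EntryRemovedException extends Exception {
    }

    /** Hypothetical entry that may become obsolete while an operation is applied to it. */
    interface Entry {
        void applyTo() throws EntryRemovedException;
    }

    /** Hypothetical lookup that always returns a fresh (possibly renewed) entry for a key. */
    interface EntryLookup {
        Entry lookupEntry(Object key);
    }

    /** Retries the operation until it completes on an entry that was not removed underneath it. */
    static void withRetry(EntryLookup cache, Object key) {
        while (true) {
            Entry entry = cache.lookupEntry(key);

            try {
                entry.applyTo();

                // Entry stayed valid for the whole operation; we are done.
                break;
            }
            catch (EntryRemovedException ignored) {
                // Entry was removed concurrently; loop around and look it up again.
            }
        }
    }
}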
/**
 * Checks if this candidate matches version or thread-nodeId combination.
 *
 * @param ver Version to check.
 * @param nodeId Node ID to check.
 * @param threadId Thread ID to check.
 * @return {@code True} if matched.
 */
public boolean matches(GridCacheVersion ver, UUID nodeId, long threadId) {
    return ver.equals(this.ver) || (nodeId.equals(this.nodeId) && threadId == this.threadId);
}
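// matches() can be used both to find a candidate by its lock version and to find the candidate owned
// by a particular thread on a particular node. The helper below is a hypothetical usage sketch; it
// assumes matches() is defined on GridCacheMvccCandidate, which is not stated explicitly above.
private static <K> GridCacheMvccCandidate<K> findCandidate(Collection<GridCacheMvccCandidate<K>> cands,
    GridCacheVersion ver, UUID nodeId, long threadId) {
    for (GridCacheMvccCandidate<K> c : cands) {
        // Either the exact lock version or the same node-thread owner qualifies.
        if (c.matches(ver, nodeId, threadId))
            return c;
    }

    return null;
}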
/** {@inheritDoc} */
@Override public boolean isTaskNode() {
    return taskNodeId.equals(ctx.discovery().localNode().id());
}
/**
 * Updates value for single partition.
 *
 * @param p Partition.
 * @param nodeId Node ID.
 * @param state State.
 * @param updateSeq Update sequence.
 */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // In case a node joins, get topology at the time of the joining node.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node became the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                if (this.updateSeq.get() < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq + ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() +
                        ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
        node2part.put(nodeId, map = new GridDhtPartitionMap(nodeId, updateSeq,
            Collections.<Integer, GridDhtPartitionState>emptyMap(), false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null)
        part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
}
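// The oldest-node branch above keeps a global AtomicLong update sequence at least one ahead of the
// sequence observed in node2part. The class below is a minimal, self-contained sketch of that
// "advance to at least seq + 1" step using only JDK types; the names are hypothetical, and unlike the
// asserted single-writer case above (write lock held), this sketch tolerates concurrent callers.
class UpdateSequenceSketch {
    private final java.util.concurrent.atomic.AtomicLong updateSeq =
        new java.util.concurrent.atomic.AtomicLong();

    /** Ensures the counter is strictly greater than the observed sequence and returns the value to use. */
    long advanceTo(long observedSeq) {
        while (true) {
            long cur = updateSeq.get();

            // Counter is already ahead of the observed sequence; nothing to do.
            if (cur > observedSeq)
                return cur;

            // Bump the counter past the observed sequence; retry if another thread won the race.
            if (updateSeq.compareAndSet(cur, observedSeq + 1))
                return observedSeq + 1;
        }
    }
}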
/**
 * @param updateSeq Update sequence.
 * @return {@code True} if at least one local partition was scheduled for eviction.
 */
private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
        GridDhtPartitionState state = part.state();

        if (state.active()) {
            int p = part.id();

            List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

            if (!affNodes.contains(cctx.localNode())) {
                Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

                // If all affinity nodes are owners, then evict partition from local node.
                if (nodeIds.containsAll(F.nodeIds(affNodes))) {
                    part.rent(false);

                    updateLocal(part.id(), locId, part.state(), updateSeq);

                    changed = true;

                    if (log.isDebugEnabled())
                        log.debug("Evicted local partition (all affinity nodes are owners): " + part);
                }
                else {
                    int ownerCnt = nodeIds.size();
                    int affCnt = affNodes.size();

                    if (ownerCnt > affCnt) {
                        List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

                        // Sort by node orders in ascending order.
                        Collections.sort(sorted, CU.nodeComparator(true));

                        int diff = sorted.size() - affCnt;

                        for (int i = 0; i < diff; i++) {
                            ClusterNode n = sorted.get(i);

                            if (locId.equals(n.id())) {
                                part.rent(false);

                                updateLocal(part.id(), locId, part.state(), updateSeq);

                                changed = true;

                                if (log.isDebugEnabled())
                                    log.debug("Evicted local partition (this node is oldest non-affinity node): " +
                                        part);

                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    return changed;
}
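// The second eviction branch above evicts only from the "extra" owners: owners are sorted by node
// order and the first (ownerCount - affinityCount) of them give up the partition. The class below is
// a self-contained sketch of that selection rule using JDK types; the Node stand-in and method names
// are hypothetical, not GridGain's actual classes.
class ExtraOwnerSelectionSketch {
    /** Minimal stand-in for a cluster node: just an ID and a join order. */
    static class Node {
        final java.util.UUID id;
        final long order;

        Node(java.util.UUID id, long order) {
            this.id = id;
            this.order = order;
        }
    }

    /** Returns the owners that should evict the partition when there are more owners than affinity nodes. */
    static java.util.List<Node> ownersToEvict(java.util.List<Node> owners, int affCnt) {
        if (owners.size() <= affCnt)
            return java.util.Collections.emptyList();

        java.util.List<Node> sorted = new java.util.ArrayList<>(owners);

        // Sort by join order ascending (oldest nodes first), mirroring the "ascending order" sort above.
        java.util.Collections.sort(sorted, new java.util.Comparator<Node>() {
            @Override public int compare(Node a, Node b) {
                return Long.compare(a.order, b.order);
            }
        });

        // The first (ownerCount - affinityCount) nodes in that order give up the partition.
        return sorted.subList(0, owners.size() - affCnt);
    }
}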