/** {@inheritDoc} */
@SuppressWarnings({"unchecked"})
@Override public boolean equals(Object obj) {
    if (obj == this)
        return true;

    // Guard against null and foreign types so the equals() contract holds instead of
    // throwing ClassCastException/NullPointerException on the cast below.
    if (!(obj instanceof CancelMessageId))
        return false;

    CancelMessageId other = (CancelMessageId)obj;

    return reqId == other.reqId && nodeId.equals(other.nodeId);
}
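/**
 * Sketch only (not part of the original source): a {@code hashCode()} consistent with the
 * {@code equals()} above, assuming identity is fully defined by the two compared fields,
 * {@code reqId} (a numeric primitive) and {@code nodeId} (a {@code UUID}). The real class
 * declares its own {@code hashCode()}; this merely illustrates the equals/hashCode contract.
 */
@Override public int hashCode() {
    // Combine the same two fields that equals() compares.
    return 31 * Long.hashCode(reqId) + nodeId.hashCode();
}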
/**
 * @param meta Job metadata.
 * @return {@code true} if the local node is participating in job execution.
 */
public boolean isParticipating(HadoopJobMetadata meta) {
    UUID locNodeId = localNodeId();

    if (locNodeId.equals(meta.submitNodeId()))
        return true;

    HadoopMapReducePlan plan = meta.mapReducePlan();

    return plan.mapperNodeIds().contains(locNodeId) || plan.reducerNodeIds().contains(locNodeId) ||
        jobUpdateLeader();
}
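/**
 * Usage sketch (hypothetical, not from the original source): a listener-style guard that skips
 * job metadata the local node has no part in, using {@code isParticipating()} above. The method
 * name {@code onJobMetadataChanged} and the {@code processMetadata()} call are assumptions made
 * purely for illustration.
 *
 * @param meta Job metadata.
 */
private void onJobMetadataChanged(HadoopJobMetadata meta) {
    if (!isParticipating(meta))
        return; // Local node is neither submitter, mapper, reducer, nor job update leader.

    processMetadata(meta); // Hypothetical follow-up processing on a participating node.
}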
/**
 * Updates value for single partition.
 *
 * @param p Partition.
 * @param nodeId Node ID.
 * @param state State.
 * @param updateSeq Update sequence.
 */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // If a node joins, get the topology as of the time that node joined.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node became the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                if (this.updateSeq.get() < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq + ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() +
                        ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
        node2part.put(nodeId, map = new GridDhtPartitionMap(nodeId, updateSeq,
            Collections.<Integer, GridDhtPartitionState>emptyMap(), false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null)
        part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
}
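/*
 * Standalone sketch (a simplification, not the Ignite implementation): the update-sequence
 * reconciliation performed in updateLocal() above when this node has become the oldest one and
 * the full map's sequence 'seq' has run ahead of the caller's 'updateSeq'. The class and method
 * names below are invented for illustration.
 */
import java.util.concurrent.atomic.AtomicLong;

class UpdateSeqSketch {
    /** Shared counter standing in for the topology's global update sequence. */
    private final AtomicLong updateSeq = new AtomicLong();

    /**
     * @param updateSeq Sequence passed in by the caller.
     * @param seq Sequence currently recorded in the full partition map.
     * @return Sequence the caller should apply; never falls behind {@code seq}.
     */
    long reconcile(long updateSeq, long seq) {
        if (seq <= updateSeq)
            return updateSeq; // Caller is already in sync or ahead.

        long cur = this.updateSeq.get();

        if (cur < seq) {
            // Push the shared counter past the observed sequence so later updates stay monotonic.
            boolean b = this.updateSeq.compareAndSet(cur, seq + 1);

            assert b : "Concurrent counter update [cur=" + cur + ", seq=" + seq + ']';

            return seq + 1;
        }

        return seq;
    }
}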
/**
 * @param updateSeq Update sequence.
 * @return {@code true} if any local partition was scheduled for eviction.
 */
private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
        GridDhtPartitionState state = part.state();

        if (state.active()) {
            int p = part.id();

            List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

            if (!affNodes.contains(cctx.localNode())) {
                Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

                // If all affinity nodes are owners, then evict partition from local node.
                if (nodeIds.containsAll(F.nodeIds(affNodes))) {
                    part.rent(false);

                    updateLocal(part.id(), locId, part.state(), updateSeq);

                    changed = true;

                    if (log.isDebugEnabled())
                        log.debug("Evicted local partition (all affinity nodes are owners): " + part);
                }
                else {
                    int ownerCnt = nodeIds.size();
                    int affCnt = affNodes.size();

                    if (ownerCnt > affCnt) {
                        List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

                        // Sort by node orders in ascending order.
                        Collections.sort(sorted, CU.nodeComparator(true));

                        int diff = sorted.size() - affCnt;

                        for (int i = 0; i < diff; i++) {
                            ClusterNode n = sorted.get(i);

                            if (locId.equals(n.id())) {
                                part.rent(false);

                                updateLocal(part.id(), locId, part.state(), updateSeq);

                                changed = true;

                                if (log.isDebugEnabled())
                                    log.debug("Evicted local partition (this node is oldest non-affinity node): " +
                                        part);

                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    return changed;
}
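/*
 * Standalone sketch (a simplification, not the Ignite implementation) of the surplus-owner
 * selection used in checkEvictions() above: when more nodes own a partition than the affinity
 * requires, the owners are sorted by node order and only the first (owners - affinity) of them
 * give the partition up. The Node class and shouldEvict() method are invented for illustration;
 * Node.order stands in for ClusterNode.order().
 */
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.UUID;

class SurplusOwnerSketch {
    /** Minimal stand-in for a cluster node: only the ID and the join order matter here. */
    static class Node {
        private final UUID id;
        private final long order;

        Node(UUID id, long order) {
            this.id = id;
            this.order = order;
        }
    }

    /**
     * @param locId Local node ID.
     * @param owners Nodes currently owning the partition.
     * @param affCnt Number of nodes the affinity assigns to the partition.
     * @return {@code true} if the local node is among the surplus owners that should evict.
     */
    static boolean shouldEvict(UUID locId, List<Node> owners, int affCnt) {
        if (owners.size() <= affCnt)
            return false; // No surplus owners: nothing to evict.

        List<Node> sorted = new ArrayList<>(owners);

        // Ascending node order: oldest nodes first, mirroring CU.nodeComparator(true).
        Collections.sort(sorted, new Comparator<Node>() {
            @Override public int compare(Node n1, Node n2) {
                return Long.compare(n1.order, n2.order);
            }
        });

        int diff = sorted.size() - affCnt;

        // Only the first 'diff' owners in sort order give the partition up.
        for (int i = 0; i < diff; i++) {
            if (locId.equals(sorted.get(i).id))
                return true;
        }

        return false;
    }
}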