/**
 * @param nodes Nodes to send the full partition map to.
 * @param id Exchange ID.
 * @throws IgniteCheckedException If failed.
 */
private void sendAllPartitions(Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
    throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal()) {
            AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

            boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

            if (ready)
                m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
        log.debug("Sending full partition map [nodeIds=" + F.viewReadOnly(nodes, F.node2id()) +
            ", exchId=" + exchId + ", msg=" + m + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
}
/** {@inheritDoc} */
@Override public GridDhtPartitionFullMap partitionMap(boolean onlyActive) {
    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node2part [node2part: " + node2part +
            ", cache=" + cctx.name() +
            ", started=" + cctx.started() +
            ", stopping=" + stopping +
            ", locNodeId=" + cctx.localNode().id() +
            ", locName=" + cctx.gridName() + ']';

        GridDhtPartitionFullMap m = node2part;

        return new GridDhtPartitionFullMap(m.nodeId(), m.nodeOrder(), m.updateSequence(), m, onlyActive);
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * @param cctx Cache context.
 * @param prj Projection (optional).
 * @param part Partition (optional).
 * @return Collection of data nodes in provided projection (if any).
 */
private static Collection<ClusterNode> nodes(final GridCacheContext<?, ?> cctx,
    @Nullable final ClusterGroup prj, @Nullable final Integer part) {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx);

    if (prj == null && part == null)
        return affNodes;

    final Set<ClusterNode> owners = part == null ?
        Collections.<ClusterNode>emptySet() : new HashSet<>(cctx.topology().owners(part, topVer));

    return F.view(affNodes, new P1<ClusterNode>() {
        @Override public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name()) &&
                (prj == null || prj.node(n.id()) != null) &&
                (part == null || owners.contains(n));
        }
    });
}
/** @return Nodes to execute on. */
private Collection<ClusterNode> nodes() {
    CacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
        case LOCAL:
            if (prj != null)
                U.warn(log, "Ignoring query projection because it's executed over LOCAL cache " +
                    "(only local node will be queried): " + this);

            return Collections.singletonList(cctx.localNode());

        case REPLICATED:
            if (prj != null || partition() != null)
                return nodes(cctx, prj, partition());

            return cctx.affinityNode() ?
                Collections.singletonList(cctx.localNode()) :
                Collections.singletonList(F.rand(nodes(cctx, null, partition())));

        case PARTITIONED:
            return nodes(cctx, prj, partition());

        default:
            throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
}
/**
 * Updates partition map in all caches.
 *
 * @param msg Partitions single message.
 */
private void updatePartitionSingleMap(GridDhtPartitionsSingleMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
        Integer cacheId = entry.getKey();

        GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

        GridDhtPartitionTopology top = cacheCtx != null ? cacheCtx.topology() :
            cctx.exchange().clientTopology(cacheId, this);

        top.update(exchId, entry.getValue());
    }
}
/**
 * @param cacheCtx Cache context.
 * @throws IgniteCheckedException If failed.
 */
private void initTopology(GridCacheContext cacheCtx) throws IgniteCheckedException {
    if (stopping(cacheCtx.cacheId()))
        return;

    if (canCalculateAffinity(cacheCtx)) {
        if (log.isDebugEnabled())
            log.debug("Will recalculate affinity [locNodeId=" + cctx.localNodeId() + ", exchId=" + exchId + ']');

        cacheCtx.affinity().calculateAffinity(exchId.topologyVersion(), discoEvt);
    }
    else {
        if (log.isDebugEnabled())
            log.debug("Will request affinity from remote node [locNodeId=" + cctx.localNodeId() +
                ", exchId=" + exchId + ']');

        // Fetch affinity assignment from remote node.
        GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cacheCtx,
            exchId.topologyVersion(),
            CU.affinityNodes(cacheCtx, exchId.topologyVersion()));

        fetchFut.init();

        List<List<ClusterNode>> affAssignment = fetchFut.get();

        if (log.isDebugEnabled())
            log.debug("Fetched affinity from remote node, initializing affinity assignment [locNodeId=" +
                cctx.localNodeId() + ", topVer=" + exchId.topologyVersion() + ']');

        if (affAssignment == null) {
            affAssignment = new ArrayList<>(cacheCtx.affinity().partitions());

            List<ClusterNode> empty = Collections.emptyList();

            for (int i = 0; i < cacheCtx.affinity().partitions(); i++)
                affAssignment.add(empty);
        }

        cacheCtx.affinity().initializeAffinity(exchId.topologyVersion(), affAssignment);
    }
}
/**
 * @param cacheId Cache ID to check.
 * @param topVer Topology version.
 * @return {@code True} if cache was added during this exchange.
 */
public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) {
    if (!F.isEmpty(reqs)) {
        for (DynamicCacheChangeRequest req : reqs) {
            if (req.start() && !req.clientStartOnly()) {
                if (CU.cacheId(req.cacheName()) == cacheId)
                    return true;
            }
        }
    }

    GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

    return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer);
}
/**
 * Updates partition map in all caches.
 *
 * @param msg Partitions full message.
 */
private void updatePartitionFullMap(GridDhtPartitionsFullMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
        Integer cacheId = entry.getKey();

        GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

        if (cacheCtx != null)
            cacheCtx.topology().update(exchId, entry.getValue());
        else {
            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, AffinityTopologyVersion.NONE);

            if (oldest != null && oldest.isLocal())
                cctx.exchange().clientTopology(cacheId, this).update(exchId, entry.getValue());
        }
    }
}
/** {@inheritDoc} */
@Override public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer1=" + topVer +
            ", topVer2=" + this.topVer +
            ", cache=" + cctx.name() +
            ", node2part=" + node2part + ']';

        Collection<ClusterNode> nodes = null;

        Collection<UUID> nodeIds = part2node.get(p);

        if (!F.isEmpty(nodeIds)) {
            Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

            for (UUID nodeId : nodeIds) {
                if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
                    ClusterNode n = cctx.discovery().node(nodeId);

                    if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                        if (nodes == null) {
                            nodes = new ArrayList<>(affNodes.size() + 2);

                            nodes.addAll(affNodes);
                        }

                        nodes.add(n);
                    }
                }
            }
        }

        return nodes != null ? nodes : affNodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * @param cctx Context.
 * @param type Query type.
 * @param clsName Class name.
 * @param clause Clause.
 * @param filter Scan filter.
 * @param part Partition.
 * @param incMeta Include metadata flag.
 * @param keepPortable Keep portable flag.
 */
public GridCacheQueryAdapter(GridCacheContext<?, ?> cctx,
    GridCacheQueryType type,
    @Nullable String clsName,
    @Nullable String clause,
    @Nullable IgniteBiPredicate<Object, Object> filter,
    @Nullable Integer part,
    boolean incMeta,
    boolean keepPortable) {
    assert cctx != null;
    assert type != null;
    assert part == null || part >= 0;

    this.cctx = cctx;
    this.type = type;
    this.clsName = clsName;
    this.clause = clause;
    this.filter = filter;
    this.part = part;
    this.incMeta = incMeta;
    this.keepPortable = keepPortable;

    log = cctx.logger(getClass());

    metrics = new GridCacheQueryMetricsAdapter();
}
/** {@inheritDoc} */
@Override public void printMemoryStats(int threshold) {
    X.println(">>> Cache partition topology stats [grid=" + cctx.gridName() + ", cache=" + cctx.name() + ']');

    for (GridDhtLocalPartition part : locParts.values()) {
        int size = part.size();

        if (size >= threshold)
            X.println(">>> Local partition [part=" + part.id() + ", size=" + size + ']');
    }
}
/** @param cctx Context. */
GridDhtPartitionTopologyImpl(GridCacheContext<?, ?> cctx) {
    assert cctx != null;

    this.cctx = cctx;

    log = cctx.logger(getClass());
}
/**
 * @param p Partition.
 * @param topVer Topology version ({@code -1} for all nodes).
 * @param state Partition state.
 * @param states Additional partition states.
 * @return List of nodes for the partition.
 */
private List<ClusterNode> nodes(int p,
    AffinityTopologyVersion topVer,
    GridDhtPartitionState state,
    GridDhtPartitionState... states) {
    Collection<UUID> allIds = topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer=" + topVer +
            ", allIds=" + allIds +
            ", node2part=" + node2part +
            ", cache=" + cctx.name() + ']';

        Collection<UUID> nodeIds = part2node.get(p);

        // Node IDs can be null if both primary and backup nodes disappear.
        int size = nodeIds == null ? 0 : nodeIds.size();

        if (size == 0)
            return Collections.emptyList();

        List<ClusterNode> nodes = new ArrayList<>(size);

        for (UUID id : nodeIds) {
            if (topVer.topologyVersion() > 0 && !allIds.contains(id))
                continue;

            if (hasState(p, id, state, states)) {
                ClusterNode n = cctx.discovery().node(id);

                if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
                    nodes.add(n);
            }
        }

        return nodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * @param p Partition number.
 * @param topVer Topology version.
 * @param create Create flag.
 * @param updateSeq Update sequence.
 * @return Local partition.
 */
private GridDhtLocalPartition localPartition(int p,
    AffinityTopologyVersion topVer,
    boolean create,
    boolean updateSeq) {
    while (true) {
        boolean belongs = cctx.affinity().localNode(p, topVer);

        GridDhtLocalPartition loc = locParts.get(p);

        if (loc != null && loc.state() == EVICTED) {
            locParts.remove(p, loc);

            if (!create)
                return null;

            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Adding entry to evicted partition [part=" + p +
                    ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            continue;
        }

        if (loc == null && create) {
            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Creating partition which does not belong [part=" +
                    p + ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            lock.writeLock().lock();

            try {
                GridDhtLocalPartition old = locParts.putIfAbsent(p, loc = new GridDhtLocalPartition(cctx, p));

                if (old != null)
                    loc = old;
                else {
                    if (updateSeq)
                        this.updateSeq.incrementAndGet();

                    if (log.isDebugEnabled())
                        log.debug("Created local partition: " + loc);
                }
            }
            finally {
                lock.writeLock().unlock();
            }
        }

        return loc;
    }
}
/**
 * @param rmtReducer Optional reducer.
 * @param rmtTransform Optional transformer.
 * @param args Arguments.
 * @return Future.
 */
@SuppressWarnings("IfMayBeConditional")
private <R> CacheQueryFuture<R> execute(@Nullable IgniteReducer<T, R> rmtReducer,
    @Nullable IgniteClosure<T, R> rmtTransform, @Nullable Object... args) {
    Collection<ClusterNode> nodes = nodes();

    cctx.checkSecurity(SecurityPermission.CACHE_READ);

    if (nodes.isEmpty())
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), new ClusterGroupEmptyCheckedException());

    if (log.isDebugEnabled())
        log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
        try {
            cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
            cctx.deploy().registerClasses(args);
        }
        catch (IgniteCheckedException e) {
            return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
        }
    }

    if (subjId == null)
        subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    final GridCacheQueryBean bean = new GridCacheQueryBean(this,
        (IgniteReducer<Object, Object>)rmtReducer,
        (IgniteClosure<Object, Object>)rmtTransform,
        args);

    final GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS || type == SPI)
        return (CacheQueryFuture<R>)(loc ? qryMgr.queryFieldsLocal(bean) :
            qryMgr.queryFieldsDistributed(bean, nodes));
    else if (type == SCAN && part != null && nodes.size() > 1)
        return new CacheQueryFallbackFuture<>(nodes, bean, qryMgr);
    else
        return (CacheQueryFuture<R>)(loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
}
/** {@inheritDoc} */
@Override public GridDhtPartitionMap localPartitionMap() {
    lock.readLock().lock();

    try {
        return new GridDhtPartitionMap(cctx.nodeId(), updateSeq.get(),
            F.viewReadOnly(locParts, CU.part2state()), true);
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * @param cacheCtx Cache context.
 * @return {@code True} if local node can calculate affinity on its own for this partition map exchange.
 */
private boolean canCalculateAffinity(GridCacheContext cacheCtx) {
    AffinityFunction affFunc = cacheCtx.config().getAffinity();

    // Do not request affinity from remote nodes if affinity function is not centralized.
    if (!U.hasAnnotation(affFunc, AffinityCentralizedFunction.class))
        return true;

    // If local node did not initiate exchange or local node is the only cache node in grid.
    Collection<ClusterNode> affNodes = CU.affinityNodes(cacheCtx, exchId.topologyVersion());

    return !exchId.nodeId().equals(cctx.localNodeId()) ||
        (affNodes.size() == 1 && affNodes.contains(cctx.localNode()));
}
/** {@inheritDoc} */
@Override public AffinityTopologyVersion topologyVersion() {
    lock.readLock().lock();

    try {
        assert topVer.topologyVersion() > 0 : "Invalid topology version [topVer=" + topVer +
            ", cacheName=" + cctx.name() + ']';

        return topVer;
    }
    finally {
        lock.readLock().unlock();
    }
}
/** {@inheritDoc} */
@Override public void onRemoved(GridDhtCacheEntry e) {
    /*
     * Make sure not to acquire any locks here as this method
     * may be called from sensitive synchronization blocks.
     * ===================================================
     */

    GridDhtLocalPartition loc = localPartition(cctx.affinity().partition(e.key()), topologyVersion(), false);

    if (loc != null)
        loc.onRemoved(e);
}
/** @param nodeId Node to remove. */
private void removeNode(UUID nodeId) {
    assert nodeId != null;
    assert lock.writeLock().isHeldByCurrentThread();

    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    ClusterNode loc = cctx.localNode();

    if (node2part != null) {
        if (oldest.equals(loc) && !node2part.nodeId().equals(loc.id())) {
            updateSeq.setIfGreater(node2part.updateSequence());

            node2part = new GridDhtPartitionFullMap(loc.id(), loc.order(), updateSeq.incrementAndGet(),
                node2part, false);
        }
        else
            node2part = new GridDhtPartitionFullMap(node2part, node2part.updateSequence());

        part2node = new HashMap<>(part2node);

        GridDhtPartitionMap parts = node2part.remove(nodeId);

        if (parts != null) {
            for (Integer p : parts.keySet()) {
                Set<UUID> nodeIds = part2node.get(p);

                if (nodeIds != null) {
                    nodeIds.remove(nodeId);

                    if (nodeIds.isEmpty())
                        part2node.remove(p);
                }
            }
        }

        consistencyCheck();
    }
}
/**
 * @param node Destination node.
 * @param id Exchange ID.
 * @throws IgniteCheckedException If failed.
 */
private void sendLocalPartitions(ClusterNode node, @Nullable GridDhtPartitionExchangeId id)
    throws IgniteCheckedException {
    GridDhtPartitionsSingleMessage m = new GridDhtPartitionsSingleMessage(id,
        cctx.kernalContext().clientNode(),
        cctx.versions().last());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal())
            m.addLocalPartitionMap(cacheCtx.cacheId(), cacheCtx.topology().localPartitionMap());
    }

    if (log.isDebugEnabled())
        log.debug("Sending local partitions [nodeId=" + node.id() + ", exchId=" + exchId + ", msg=" + m + ']');

    cctx.io().send(node, m, SYSTEM_POOL);
}
/** {@inheritDoc} */
@Override public GridDhtLocalPartition onAdded(AffinityTopologyVersion topVer, GridDhtCacheEntry e) {
    /*
     * Make sure not to acquire any locks here as this method
     * may be called from sensitive synchronization blocks.
     * ===================================================
     */

    int p = cctx.affinity().partition(e.key());

    GridDhtLocalPartition loc = localPartition(p, topVer, true);

    assert loc != null;

    loc.onAdded(e);

    return loc;
}
/** {@inheritDoc} */
@Override public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
            if (m == null)
                m = new HashMap<>();

            m.put(cacheCtx.cacheId(),
                cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
        }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
        if (log.isDebugEnabled())
            log.debug("Completed partition exchange [localNode=" + cctx.localNodeId() +
                ", exchange=" + this + ']');

        initFut.onDone(err == null);

        GridTimeoutObject timeoutObj = this.timeoutObj;

        // Deschedule timeout object.
        if (timeoutObj != null)
            cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

        if (exchId.isLeft()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts())
                cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
        }

        return true;
    }

    return dummy;
}
/** {@inheritDoc} */
@Override public void onEvicted(GridDhtLocalPartition part, boolean updateSeq) {
    assert updateSeq || lock.isWriteLockedByCurrentThread();

    lock.writeLock().lock();

    try {
        if (stopping)
            return;

        assert part.state() == EVICTED;

        long seq = updateSeq ? this.updateSeq.incrementAndGet() : this.updateSeq.get();

        updateLocal(part.id(), cctx.localNodeId(), part.state(), seq);

        consistencyCheck();
    }
    finally {
        lock.writeLock().unlock();
    }
}
/** {@inheritDoc} */
@Override public boolean own(GridDhtLocalPartition part) {
    ClusterNode loc = cctx.localNode();

    lock.writeLock().lock();

    try {
        if (part.own()) {
            updateLocal(part.id(), loc.id(), part.state(), updateSeq.incrementAndGet());

            consistencyCheck();

            return true;
        }

        consistencyCheck();

        return false;
    }
    finally {
        lock.writeLock().unlock();
    }
}
/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    if (init.compareAndSet(false, true)) {
        if (isDone())
            return;

        try {
            // Wait for event to occur to make sure that discovery
            // will return corresponding nodes.
            U.await(evtLatch);

            assert discoEvt != null : this;
            assert !dummy && !forcePreload : this;

            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

            oldestNode.set(oldest);

            startCaches();

            // True if client node joined or failed.
            boolean clientNodeEvt;

            if (F.isEmpty(reqs)) {
                int type = discoEvt.type();

                assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt;

                clientNodeEvt = CU.clientNode(discoEvt.eventNode());
            }
            else {
                assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

                boolean clientOnlyStart = true;

                for (DynamicCacheChangeRequest req : reqs) {
                    if (!req.clientStartOnly()) {
                        clientOnlyStart = false;

                        break;
                    }
                }

                clientNodeEvt = clientOnlyStart;
            }

            if (clientNodeEvt) {
                ClusterNode node = discoEvt.eventNode();

                // Client needs to initialize affinity for local join event or for started client caches.
                if (!node.isLocal()) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                        if (cacheCtx.isLocal())
                            continue;

                        GridDhtPartitionTopology top = cacheCtx.topology();

                        top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

                        if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                            initTopology(cacheCtx);

                            top.beforeExchange(this);
                        }
                        else
                            cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
                    }

                    if (exchId.isLeft())
                        cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

                    onDone(exchId.topologyVersion());

                    skipPreload = cctx.kernalContext().clientNode();

                    return;
                }
            }

            if (cctx.kernalContext().clientNode()) {
                skipPreload = true;

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    GridDhtPartitionTopology top = cacheCtx.topology();

                    top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
                }

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    initTopology(cacheCtx);
                }

                if (oldestNode.get() != null) {
                    rmtNodes = new ConcurrentLinkedQueue<>(
                        CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

                    rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

                    ready.set(true);

                    initFut.onDone(true);

                    if (log.isDebugEnabled())
                        log.debug("Initialized future: " + this);

                    sendPartitions();
                }
                else
                    onDone(exchId.topologyVersion());

                return;
            }

            assert oldestNode.get() != null;

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
                    if (cacheCtx.discovery().cacheAffinityNodes(cacheCtx.name(), topologyVersion()).isEmpty())
                        U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
                }

                cacheCtx.preloader().onExchangeFutureAdded();
            }

            List<String> cachesWithoutNodes = null;

            if (exchId.isLeft()) {
                for (String name : cctx.cache().cacheNames()) {
                    if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
                        if (cachesWithoutNodes == null)
                            cachesWithoutNodes = new ArrayList<>();

                        cachesWithoutNodes.add(name);

                        // Fire event even if there is no client cache started.
                        if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                            Event evt = new CacheEvent(name, cctx.localNode(), cctx.localNode(),
                                "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false,
                                null, null, null, null, false, null, false, null, null, null);

                            cctx.gridEvents().record(evt);
                        }
                    }
                }
            }

            if (cachesWithoutNodes != null) {
                StringBuilder sb =
                    new StringBuilder("All server nodes for the following caches have left the cluster: ");

                for (int i = 0; i < cachesWithoutNodes.size(); i++) {
                    String cache = cachesWithoutNodes.get(i);

                    sb.append('\'').append(cache).append('\'');

                    if (i != cachesWithoutNodes.size() - 1)
                        sb.append(", ");
                }

                U.quietAndWarn(log, sb.toString());

                U.quietAndWarn(log, "Must have server nodes for caches to operate.");
            }

            assert discoEvt != null;
            assert exchId.nodeId().equals(discoEvt.eventNode().id());

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId());

                long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

                // Update before waiting for locks.
                if (!cacheCtx.isLocal())
                    cacheCtx.topology().updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
            }

            // Grab all alive remote nodes with order equal to or less than the last joined node's order.
            rmtNodes = new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            // If any single messages were already received, process them.
            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            // If any full messages were already received, process them.
            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            AffinityTopologyVersion topVer = exchId.topologyVersion();

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Must initialize topology after we get discovery event.
                initTopology(cacheCtx);

                cacheCtx.preloader().updateLastExchangeFuture(this);
            }

            IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

            // Assign to class variable so it will be included into toString() method.
            this.partReleaseFut = partReleaseFut;

            if (log.isDebugEnabled())
                log.debug("Before waiting for partition release future: " + this);

            while (true) {
                try {
                    partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    // Print pending transactions and locks that might have led to hang.
                    dumpPendingObjects();
                }
            }

            if (log.isDebugEnabled())
                log.debug("After waiting for partition release future: " + this);

            if (!F.isEmpty(reqs))
                blockGateways();

            if (exchId.isLeft())
                cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

            while (true) {
                try {
                    locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    U.warn(log, "Failed to wait for locks release future. " +
                        "Dumping pending objects that might be the cause: " + cctx.localNodeId());

                    U.warn(log, "Locked entries:");

                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                        cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                }
            }

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Notify replication manager.
                GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

                if (drCacheCtx.isDrEnabled())
                    drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

                // Partition release future is done so we can flush the write-behind store.
                cacheCtx.store().forceFlush();

                // Process queued undeploys prior to sending/spreading map.
                cacheCtx.preloader().unwindUndeploys();

                GridDhtPartitionTopology top = cacheCtx.topology();

                assert topVer.equals(top.topologyVersion()) :
                    "Topology version is updated only by instances of this class inside a single ExchangeWorker thread.";

                top.beforeExchange(this);
            }

            for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
                top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

                top.beforeExchange(this);
            }
        }
        catch (IgniteInterruptedCheckedException e) {
            onDone(e);

            throw e;
        }
        catch (Throwable e) {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);

            onDone(e);

            if (e instanceof Error)
                throw (Error)e;

            return;
        }

        if (F.isEmpty(rmtIds)) {
            onDone(exchId.topologyVersion());

            return;
        }

        ready.set(true);

        initFut.onDone(true);

        if (log.isDebugEnabled())
            log.debug("Initialized future: " + this);

        // If this node is not the oldest, send local partitions to the coordinator;
        // otherwise wait until all remote nodes have replied.
        if (!oldestNode.get().id().equals(cctx.localNodeId()))
            sendPartitions();
        else {
            boolean allReceived = allReceived();

            if (allReceived && replied.compareAndSet(false, true)) {
                if (spreadPartitions())
                    onDone(exchId.topologyVersion());
            }
        }

        scheduleRecheck();
    }
    else
        assert false : "Skipped init future: " + this;
}
/** {@inheritDoc} */
@Override public List<ClusterNode> moving(int p) {
    if (!cctx.rebalanceEnabled())
        return ownersAndMoving(p, AffinityTopologyVersion.NONE);

    return nodes(p, AffinityTopologyVersion.NONE, MOVING);
}
/** {@inheritDoc} */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
@Nullable @Override public GridDhtPartitionMap update(@Nullable GridDhtPartitionExchangeId exchId,
    GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
        log.debug("Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
        if (stopping)
            return null;

        if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale exchange id for full partition map update (will ignore) [lastExchId=" +
                    lastExchangeId + ", exchId=" + exchId + ']');

            return null;
        }

        if (node2part != null && node2part.compareTo(partMap) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale partition map for full partition map update (will ignore) [lastExchId=" +
                    lastExchangeId + ", exchId=" + exchId + ", curMap=" + node2part + ", newMap=" + partMap + ']');

            return null;
        }

        long updateSeq = this.updateSeq.incrementAndGet();

        if (exchId != null)
            lastExchangeId = exchId;

        if (node2part != null) {
            for (GridDhtPartitionMap part : node2part.values()) {
                GridDhtPartitionMap newPart = partMap.get(part.nodeId());

                // If the current map for some node has a newer update sequence,
                // then we keep the newer value.
                if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
                    if (log.isDebugEnabled())
                        log.debug("Overriding partition map in full update map [exchId=" + exchId +
                            ", curPart=" + mapString(part) + ", newPart=" + mapString(newPart) + ']');

                    partMap.put(part.nodeId(), part);
                }
            }

            for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
                UUID nodeId = it.next();

                if (!cctx.discovery().alive(nodeId)) {
                    if (log.isDebugEnabled())
                        log.debug("Removing left node from full map update [nodeId=" + nodeId +
                            ", partMap=" + partMap + ']');

                    it.remove();
                }
            }
        }

        node2part = partMap;

        Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

        for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
            for (Integer p : e.getValue().keySet()) {
                Set<UUID> ids = p2n.get(p);

                if (ids == null)
                    // Initialize HashSet to size 3 in anticipation that there won't be
                    // more than 3 nodes per partition.
                    p2n.put(p, ids = U.newHashSet(3));

                ids.add(e.getKey());
            }
        }

        part2node = p2n;

        boolean changed = checkEvictions(updateSeq);

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after full update: " + fullMapString());

        return changed ? localPartitionMap() : null;
    }
    finally {
        lock.writeLock().unlock();
    }
}
/** @throws IgniteCheckedException If query is invalid. */
public void validate() throws IgniteCheckedException {
    if ((type != SCAN && type != SET) && !GridQueryProcessor.isEnabled(cctx.config()))
        throw new IgniteCheckedException("Indexing is disabled for cache: " + cctx.cache().name());
}
/** {@inheritDoc} */
@Override public boolean isCacheTopologyValid(GridCacheContext cctx) {
    return cctx.config().getTopologyValidator() != null && cacheValidRes.containsKey(cctx.cacheId()) ?
        cacheValidRes.get(cctx.cacheId()) : true;
}