/**
 * @param p Partition number.
 * @param topVer Topology version.
 * @param create Create flag.
 * @param updateSeq Update sequence.
 * @return Local partition.
 */
private GridDhtLocalPartition localPartition(
    int p,
    AffinityTopologyVersion topVer,
    boolean create,
    boolean updateSeq
) {
    while (true) {
        boolean belongs = cctx.affinity().localNode(p, topVer);

        GridDhtLocalPartition loc = locParts.get(p);

        if (loc != null && loc.state() == EVICTED) {
            locParts.remove(p, loc);

            if (!create)
                return null;

            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Adding entry to evicted partition " +
                    "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            continue;
        }

        if (loc == null && create) {
            if (!belongs)
                throw new GridDhtInvalidPartitionException(p, "Creating partition which does not belong " +
                    "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.topVer + ']');

            lock.writeLock().lock();

            try {
                GridDhtLocalPartition old = locParts.putIfAbsent(p,
                    loc = new GridDhtLocalPartition(cctx, p));

                if (old != null)
                    loc = old;
                else {
                    if (updateSeq)
                        this.updateSeq.incrementAndGet();

                    if (log.isDebugEnabled())
                        log.debug("Created local partition: " + loc);
                }
            }
            finally {
                lock.writeLock().unlock();
            }
        }

        return loc;
    }
}
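/*
 * The method above is, at its core, a get-or-create retry loop over a ConcurrentMap: a stale
 * (EVICTED) entry is removed with remove(key, value) and the loop retries, while creation races
 * are resolved with putIfAbsent() under a short write lock. Below is a minimal, standalone sketch
 * of the same pattern using only JDK types; PartitionRegistry, getOrCreate and the stale predicate
 * are illustrative names, not Ignite API.
 */
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.IntFunction;
import java.util.function.Predicate;

class PartitionRegistry<V> {
    private final ConcurrentMap<Integer, V> parts = new ConcurrentHashMap<>();

    /** Returns the live entry for {@code id}, creating it if absent and discarding stale ones. */
    V getOrCreate(int id, IntFunction<V> factory, Predicate<V> stale) {
        while (true) {
            V cur = parts.get(id);

            // Stale entry: remove it only if it is still the mapped value, then retry.
            if (cur != null && stale.test(cur)) {
                parts.remove(id, cur);

                continue;
            }

            if (cur != null)
                return cur;

            V created = factory.apply(id);

            // Another thread may have won the race; prefer its instance.
            V prev = parts.putIfAbsent(id, created);

            return prev != null ? prev : created;
        }
    }
}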
/**
 * Waits for renting partitions.
 *
 * @return {@code True} if mapping was changed.
 * @throws IgniteCheckedException If failed.
 */
private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
        GridDhtLocalPartition p = it.next();

        GridDhtPartitionState state = p.state();

        if (state == RENTING || state == EVICTED) {
            if (log.isDebugEnabled())
                log.debug("Waiting for renting partition: " + p);

            // Wait for partition to empty out.
            p.rent(true).get();

            if (log.isDebugEnabled())
                log.debug("Finished waiting for renting partition: " + p);

            // Remove evicted partition.
            it.remove();

            changed = true;
        }
    }

    return changed;
}
/** {@inheritDoc} */
@Override public void printMemoryStats() {
    super.printMemoryStats();

    X.println(">>> threadsSize: " + threads.size());
    X.println(">>> futsSize: " + futs.size());
}
/**
 * Processes cache query request.
 *
 * @param sndId Sender node id.
 * @param req Query request.
 */
@SuppressWarnings("unchecked")
@Override void processQueryRequest(UUID sndId, GridCacheQueryRequest req) {
    if (req.cancel()) {
        cancelIds.add(new CancelMessageId(req.id(), sndId));

        if (req.fields())
            removeFieldsQueryResult(sndId, req.id());
        else
            removeQueryResult(sndId, req.id());
    }
    else {
        if (!cancelIds.contains(new CancelMessageId(req.id(), sndId))) {
            if (!F.eq(req.cacheName(), cctx.name())) {
                GridCacheQueryResponse res = new GridCacheQueryResponse(cctx.cacheId(), req.id(),
                    new IgniteCheckedException("Received request for incorrect cache [expected=" +
                        cctx.name() + ", actual=" + req.cacheName() + ']'));

                sendQueryResponse(sndId, res, 0);
            }
            else {
                threads.put(req.id(), Thread.currentThread());

                try {
                    GridCacheQueryInfo info = distributedQueryInfo(sndId, req);

                    if (info == null)
                        return;

                    if (req.fields())
                        runFieldsQuery(info);
                    else
                        runQuery(info);
                }
                catch (Throwable e) {
                    U.error(log(), "Failed to run query.", e);

                    sendQueryResponse(sndId,
                        new GridCacheQueryResponse(cctx.cacheId(), req.id(), e.getCause()), 0);

                    if (e instanceof Error)
                        throw (Error)e;
                }
                finally {
                    threads.remove(req.id());
                }
            }
        }
    }
}
/** {@inheritDoc} */
@Override void onCancelAtStop() {
    super.onCancelAtStop();

    for (GridCacheQueryFutureAdapter fut : futs.values()) {
        try {
            fut.cancel();
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to cancel running query future: " + fut, e);
        }
    }

    U.interrupt(threads.values());
}
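/*
 * processQueryRequest(...) and onCancelAtStop() above form a register-then-interrupt pair: each
 * request records its worker thread in a shared map for the duration of the query, and stop-time
 * cancellation interrupts whatever is still registered. A minimal sketch of that pattern with JDK
 * types only; RequestWorkers and its methods are illustrative names, not Ignite API.
 */
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class RequestWorkers {
    private final ConcurrentMap<Long, Thread> threads = new ConcurrentHashMap<>();

    /** Runs a task on the calling thread, keeping it interruptible by request id until it finishes. */
    void run(long reqId, Runnable task) {
        threads.put(reqId, Thread.currentThread());

        try {
            task.run();
        }
        finally {
            threads.remove(reqId);
        }
    }

    /** Called on shutdown: interrupt every worker that is still running. */
    void cancelAll() {
        for (Thread t : threads.values())
            t.interrupt();
    }
}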
/** {@inheritDoc} */
@Override public void printMemoryStats(int threshold) {
    X.println(">>> Cache partition topology stats [grid=" + cctx.gridName() +
        ", cache=" + cctx.name() + ']');

    for (GridDhtLocalPartition part : locParts.values()) {
        int size = part.size();

        if (size >= threshold)
            X.println(">>> Local partition [part=" + part.id() + ", size=" + size + ']');
    }
}
/** {@inheritDoc} */
@Override public Collection<GridDhtLocalPartition> currentLocalPartitions() {
    return locParts.values();
}
/** {@inheritDoc} */
@Override public List<GridDhtLocalPartition> localPartitions() {
    return new LinkedList<>(locParts.values());
}
/**
 * Checks if any of the local partitions need to be evicted.
 *
 * @param updateSeq Update sequence.
 * @return {@code True} if any local partition was scheduled for eviction.
 */
private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
        GridDhtPartitionState state = part.state();

        if (state.active()) {
            int p = part.id();

            List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

            if (!affNodes.contains(cctx.localNode())) {
                Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

                // If all affinity nodes are owners, then evict partition from local node.
                if (nodeIds.containsAll(F.nodeIds(affNodes))) {
                    part.rent(false);

                    updateLocal(part.id(), locId, part.state(), updateSeq);

                    changed = true;

                    if (log.isDebugEnabled())
                        log.debug("Evicted local partition (all affinity nodes are owners): " + part);
                }
                else {
                    int ownerCnt = nodeIds.size();
                    int affCnt = affNodes.size();

                    if (ownerCnt > affCnt) {
                        List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

                        // Sort by node orders in ascending order.
                        Collections.sort(sorted, CU.nodeComparator(true));

                        int diff = sorted.size() - affCnt;

                        for (int i = 0; i < diff; i++) {
                            ClusterNode n = sorted.get(i);

                            if (locId.equals(n.id())) {
                                part.rent(false);

                                updateLocal(part.id(), locId, part.state(), updateSeq);

                                changed = true;

                                if (log.isDebugEnabled())
                                    log.debug("Evicted local partition (this node is oldest non-affinity node): " +
                                        part);

                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    return changed;
}
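/*
 * The surplus-owner branch above boils down to: when a partition has more owners than affinity
 * nodes, sort the owners by node order (oldest first) and let the first (owners - affinity) of
 * them give the partition up; the local node evicts only if it falls into that prefix. A
 * simplified, self-contained sketch of that selection; Node and shouldEvict are illustrative
 * stand-ins, not Ignite API.
 */
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.UUID;

final class SurplusOwnerCheck {
    static final class Node {
        final UUID id;
        final long order; // Smaller order == joined the cluster earlier.

        Node(UUID id, long order) {
            this.id = id;
            this.order = order;
        }
    }

    /** @return {@code true} if the local node is among the surplus owners that should evict. */
    static boolean shouldEvict(UUID locId, List<Node> owners, int affCnt) {
        if (owners.size() <= affCnt)
            return false;

        List<Node> sorted = new ArrayList<>(owners);

        // Oldest nodes first, mirroring the ascending-order sort above.
        sorted.sort(Comparator.comparingLong(n -> n.order));

        int diff = sorted.size() - affCnt;

        for (int i = 0; i < diff; i++) {
            if (locId.equals(sorted.get(i).id))
                return true;
        }

        return false;
    }
}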
/**
 * Grabs local events and detects whether events were lost since the last poll.
 *
 * @param ignite Target grid.
 * @param evtOrderKey Unique key to take last order key from node local map.
 * @param evtThrottleCntrKey Unique key to take throttle count from node local map.
 * @param evtTypes Event types to collect.
 * @param evtMapper Closure to map grid events to Visor data transfer objects.
 * @return Collection of node events.
 */
public static Collection<VisorGridEvent> collectEvents(
    Ignite ignite,
    String evtOrderKey,
    String evtThrottleCntrKey,
    final int[] evtTypes,
    IgniteClosure<Event, VisorGridEvent> evtMapper
) {
    assert ignite != null;
    assert evtTypes != null && evtTypes.length > 0;

    ConcurrentMap<String, Long> nl = ignite.cluster().nodeLocalMap();

    final long lastOrder = getOrElse(nl, evtOrderKey, -1L);
    final long throttle = getOrElse(nl, evtThrottleCntrKey, 0L);

    // On the first visit to a node, grab only events that are not older than the configured
    // time window, to make sure we are not grabbing GBs of data accidentally.
    final long notOlderThan = System.currentTimeMillis() - EVENTS_COLLECT_TIME_WINDOW;

    // Flag for detecting gaps between events.
    final AtomicBoolean lastFound = new AtomicBoolean(lastOrder < 0);

    IgnitePredicate<Event> p = new IgnitePredicate<Event>() {
        /** */
        private static final long serialVersionUID = 0L;

        @Override public boolean apply(Event e) {
            // Detects that events were lost.
            if (!lastFound.get() && (lastOrder == e.localOrder()))
                lastFound.set(true);

            // Retains events by lastOrder, period and type.
            return e.localOrder() > lastOrder && e.timestamp() > notOlderThan && F.contains(evtTypes, e.type());
        }
    };

    Collection<Event> evts = ignite.events().localQuery(p);

    // Update latest order in node local map, if not empty.
    if (!evts.isEmpty()) {
        Event maxEvt = Collections.max(evts, EVTS_ORDER_COMPARATOR);

        nl.put(evtOrderKey, maxEvt.localOrder());
    }

    // Update throttle counter.
    if (!lastFound.get())
        nl.put(evtThrottleCntrKey, throttle == 0 ? EVENTS_LOST_THROTTLE : throttle - 1);

    boolean lost = !lastFound.get() && throttle == 0;

    Collection<VisorGridEvent> res = new ArrayList<>(evts.size() + (lost ? 1 : 0));

    if (lost)
        res.add(new VisorGridEventsLost(ignite.cluster().localNode().id()));

    for (Event e : evts) {
        VisorGridEvent visorEvt = evtMapper.apply(e);

        if (visorEvt != null)
            res.add(visorEvt);
    }

    return res;
}
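/*
 * The lastFound/throttle logic above detects lost events by remembering the highest localOrder
 * seen on the previous poll: if no event with exactly that order is still present in the node's
 * bounded event buffer, the gap means older events were dropped before they could be collected.
 * A minimal sketch of just that gap detection, without the throttling; GapDetector and
 * pollAndDetectLoss are illustrative names, not Ignite API.
 */
import java.util.Collection;
import java.util.concurrent.atomic.AtomicLong;

final class GapDetector {
    private final AtomicLong lastOrder = new AtomicLong(-1);

    /**
     * @param orders Local orders of the events currently retained by the node.
     * @return {@code true} if events may have been lost since the previous poll.
     */
    boolean pollAndDetectLoss(Collection<Long> orders) {
        long last = lastOrder.get();

        // First poll: nothing to compare against, so no loss can be detected.
        boolean lastFound = last < 0;

        long max = last;

        for (long order : orders) {
            if (order == last)
                lastFound = true; // The previously seen event is still buffered: no gap.

            if (order > max)
                max = order;
        }

        lastOrder.set(max);

        return !lastFound;
    }
}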