/**
 * Waits for renting partitions.
 *
 * @return {@code True} if mapping was changed.
 * @throws IgniteCheckedException If failed.
 */
private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
        GridDhtLocalPartition p = it.next();

        GridDhtPartitionState state = p.state();

        if (state == RENTING || state == EVICTED) {
            if (log.isDebugEnabled())
                log.debug("Waiting for renting partition: " + p);

            // Wait for partition to empty out.
            p.rent(true).get();

            if (log.isDebugEnabled())
                log.debug("Finished waiting for renting partition: " + p);

            // Remove evicted partition.
            it.remove();

            changed = true;
        }
    }

    return changed;
}
/** @throws Exception If failed. */
public void testRangeSplit() throws Exception {
    IgniteUuid affKey = IgniteUuid.randomUuid();

    IgfsFileAffinityRange range = new IgfsFileAffinityRange(0, 9999, affKey);

    Collection<IgfsFileAffinityRange> split = range.split(10000);

    assertEquals(1, split.size());
    assertTrue(range.regionEqual(F.first(split)));

    split = range.split(5000);

    assertEquals(2, split.size());

    Iterator<IgfsFileAffinityRange> it = split.iterator();

    IgfsFileAffinityRange part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(0, 4999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(5000, 9999, affKey)));

    split = range.split(3000);

    assertEquals(4, split.size());

    it = split.iterator();

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(0, 2999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(3000, 5999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(6000, 8999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(9000, 9999, affKey)));
}
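// Illustrative sketch (not part of the original test, assumes java.util imports): the expected
// boundaries asserted above follow from chopping the [0, 9999] range into chunks of at most
// maxSize bytes, i.e. ceil(10000 / 3000) = 4 sub-ranges with the last one truncated. The helper
// name below is hypothetical and uses only plain JDK types.
private static List<long[]> splitBounds(long start, long end, long maxSize) {
    List<long[]> res = new ArrayList<>();

    for (long off = start; off <= end; off += maxSize)
        res.add(new long[] {off, Math.min(off + maxSize - 1, end)});

    return res; // For (0, 9999, 3000): [0, 2999], [3000, 5999], [6000, 8999], [9000, 9999].
}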
/** Clears values for this partition. */
private void clearAll() {
    GridCacheVersion clearVer = cctx.versions().next();

    boolean swap = cctx.isSwapOrOffheapEnabled();

    boolean rec = cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED);

    Iterator<GridDhtCacheEntry> it = map.values().iterator();

    GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> swapIt = null;

    if (swap && GridQueryProcessor.isEnabled(cctx.config())) { // Indexing needs to unswap cache values.
        Iterator<GridDhtCacheEntry> unswapIt = null;

        try {
            swapIt = cctx.swap().iterator(id);
            unswapIt = unswapIterator(swapIt);
        }
        catch (Exception e) {
            U.error(log, "Failed to clear swap for evicted partition: " + this, e);
        }

        if (unswapIt != null)
            it = F.concat(it, unswapIt);
    }

    try {
        while (it.hasNext()) {
            GridDhtCacheEntry cached = it.next();

            try {
                if (cached.clearInternal(clearVer, swap)) {
                    map.remove(cached.key(), cached);

                    if (!cached.isInternal()) {
                        mapPubSize.decrement();

                        if (rec)
                            cctx.events().addEvent(cached.partition(),
                                cached.key(),
                                cctx.localNodeId(),
                                (IgniteUuid)null,
                                null,
                                EVT_CACHE_REBALANCE_OBJECT_UNLOADED,
                                null,
                                false,
                                cached.rawGet(),
                                cached.hasValue(),
                                null,
                                null,
                                null);
                    }
                }
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to clear cache entry for evicted partition: " + cached, e);
            }
        }
    }
    finally {
        U.close(swapIt, log);
    }
}
/** @throws Exception Thrown if test failed. */
public void testA() throws Exception {
    Collection<Integer> set = new GridConcurrentWeakHashSet<>();

    Integer i = 1;

    assert set.add(i);
    assert !set.add(i);

    assert set.contains(i);

    assert set.size() == 1;

    Collection<Integer> c = F.asList(2, 3, 4, 5);

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.containsAll(c);

    assert set.size() == 1 + c.size();

    assert set.remove(i);
    assert !set.remove(i);

    assert !set.contains(i);

    assert set.size() == c.size();

    assert set.removeAll(c);
    assert !set.removeAll(c);

    assert !set.containsAll(c);

    assert set.isEmpty();

    Collection<Integer> c1 = Arrays.asList(1, 3, 5, 7, 9);

    int cnt = 0;

    for (Iterator<Integer> iter = set.iterator(); iter.hasNext(); cnt++)
        c1.contains(iter.next());

    assert set.size() == cnt;

    assert set.size() == set.toArray().length;

    assert set.addAll(c1);

    assert set.retainAll(c);
    assert !set.retainAll(c);

    Collection<Integer> c2 = F.retain(c1, true, c);

    assert set.containsAll(c2);
    assert !set.containsAll(c1);
    assert !set.containsAll(c);

    assert set.size() == c2.size();

    set.clear();

    assert set.isEmpty();

    try {
        set.iterator().next();

        assert false;
    }
    catch (NoSuchElementException ignored) {
        assert true;
    }

    try {
        set.add(null);

        assert false;
    }
    catch (NullPointerException ignored) {
        assert true;
    }
}
/** @throws Exception Thrown if test failed. */
@SuppressWarnings({"UnusedAssignment"})
public void testB() throws Exception {
    Collection<SampleBean> set = new GridConcurrentWeakHashSet<>();

    SampleBean bean1 = new SampleBean(1);

    assert set.add(bean1);
    assert !set.add(bean1);

    assert set.size() == 1;

    assert set.contains(bean1);

    bean1 = null;

    gc();

    assert set.isEmpty();

    Collection<SampleBean> c = F.asList(new SampleBean(1), new SampleBean(2), new SampleBean(3), new SampleBean(4));

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.size() == c.size();

    assert set.containsAll(c);

    c = null;

    gc();

    assert set.isEmpty();

    SampleBean b1 = new SampleBean(1);
    SampleBean b2 = new SampleBean(2);
    SampleBean b3 = new SampleBean(3);
    SampleBean b4 = new SampleBean(4);
    SampleBean b5 = new SampleBean(5);

    set.add(b1);
    set.add(b2);
    set.add(b3);
    set.add(b4);
    set.add(b5);

    Iterator iter = set.iterator();

    assert iter.hasNext();

    b2 = null;
    b3 = null;
    b4 = null;

    gc();

    int cnt = 0;

    while (iter.hasNext()) {
        info(iter.next().toString());

        cnt++;
    }

    assert set.size() == cnt;
}
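// Illustrative sketch (not part of the original test, assumes java.util imports): the assertions in
// testB() rely on standard weak-reference semantics - once the last strong reference to an element
// is dropped and a garbage collection runs, the element may vanish from a weakly-referencing set.
// The method below is a hypothetical plain-JDK analogue; it asserts nothing because System.gc() is
// only a hint, whereas the test's gc() helper presumably retries until the set empties out.
private static void weakSetSketch() {
    Set<Object> weakSet = Collections.newSetFromMap(new WeakHashMap<>());

    Object key = new Object();

    weakSet.add(key);

    key = null;  // Drop the only strong reference.

    System.gc(); // After collection the entry disappears, which is what testB() checks via set.isEmpty().
}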
/** {@inheritDoc} */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
@Nullable @Override public GridDhtPartitionMap update(@Nullable GridDhtPartitionExchangeId exchId,
    GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
        log.debug("Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
        if (stopping)
            return null;

        if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale exchange id for full partition map update (will ignore) [lastExchId=" +
                    lastExchangeId + ", exchId=" + exchId + ']');

            return null;
        }

        if (node2part != null && node2part.compareTo(partMap) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale partition map for full partition map update (will ignore) [lastExchId=" +
                    lastExchangeId + ", exchId=" + exchId + ", curMap=" + node2part + ", newMap=" + partMap + ']');

            return null;
        }

        long updateSeq = this.updateSeq.incrementAndGet();

        if (exchId != null)
            lastExchangeId = exchId;

        if (node2part != null) {
            for (GridDhtPartitionMap part : node2part.values()) {
                GridDhtPartitionMap newPart = partMap.get(part.nodeId());

                // If for some nodes current partition has a newer map,
                // then we keep the newer value.
                if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
                    if (log.isDebugEnabled())
                        log.debug("Overriding partition map in full update map [exchId=" + exchId + ", curPart=" +
                            mapString(part) + ", newPart=" + mapString(newPart) + ']');

                    partMap.put(part.nodeId(), part);
                }
            }

            for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
                UUID nodeId = it.next();

                if (!cctx.discovery().alive(nodeId)) {
                    if (log.isDebugEnabled())
                        log.debug("Removing left node from full map update [nodeId=" + nodeId + ", partMap=" +
                            partMap + ']');

                    it.remove();
                }
            }
        }

        node2part = partMap;

        Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

        for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
            for (Integer p : e.getValue().keySet()) {
                Set<UUID> ids = p2n.get(p);

                if (ids == null)
                    // Initialize HashSet to size 3 in anticipation that there won't be
                    // more than 3 nodes per partition.
                    p2n.put(p, ids = U.newHashSet(3));

                ids.add(e.getKey());
            }
        }

        part2node = p2n;

        boolean changed = checkEvictions(updateSeq);

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after full update: " + fullMapString());

        return changed ? localPartitionMap() : null;
    }
    finally {
        lock.writeLock().unlock();
    }
}
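// Illustrative sketch (hypothetical name, plain JDK types only, assumes java.util imports): the
// per-node merge above follows a "newer update sequence wins" rule - an incoming full-map entry is
// overridden by the locally known one whenever the local entry carries a strictly higher sequence.
private static <K> void keepNewest(Map<K, Long> incomingSeq, Map<K, Long> localSeq) {
    for (Map.Entry<K, Long> e : localSeq.entrySet()) {
        Long incoming = incomingSeq.get(e.getKey());

        // Keep the local value if it is newer than what the incoming map reports.
        if (incoming != null && incoming < e.getValue())
            incomingSeq.put(e.getKey(), e.getValue());
    }
}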