/** {@inheritDoc} */
@Override public GridTuple3<GridAffinityMessage, GridAffinityMessage, GridAffinityAssignment> call()
    throws Exception {
    assert ignite != null;
    assert log != null;

    IgniteKernal kernal = ((IgniteKernal)ignite);

    GridCacheContext<Object, Object> cctx = kernal.internalCache(cacheName).context();

    assert cctx != null;

    GridKernalContext ctx = kernal.context();

    // Block until affinity for the requested topology version is calculated.
    cctx.affinity().affinityReadyFuture(topVer).get();

    AffinityAssignment assign0 = cctx.affinity().assignment(topVer);

    // Wrap the assignment into GridAffinityAssignment if the implementation differs,
    // so a uniform type is returned to the caller.
    GridAffinityAssignment assign = assign0 instanceof GridAffinityAssignment ?
        (GridAffinityAssignment)assign0 :
        new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment());

    return F.t(
        affinityMessage(ctx, cctx.config().getAffinity()),
        affinityMessage(ctx, cctx.config().getAffinityMapper()),
        assign);
}
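// Illustrative sketch (not part of the original file): how the tuple returned by call()
// might be consumed on the requesting node, assuming 'res' holds the job result. The tuple
// order follows F.t(...) above: affinity function message, affinity mapper message, assignment.
//
//     GridTuple3<GridAffinityMessage, GridAffinityMessage, GridAffinityAssignment> res = job.call();
//
//     GridAffinityMessage affMsg = res.get1();     // Marshalled affinity function.
//     GridAffinityMessage mapperMsg = res.get2();  // Marshalled affinity key mapper.
//     GridAffinityAssignment assign = res.get3();  // Partition-to-node assignment for topVer.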
/** @throws Exception If any error occurs. */
public void testTopologyVersion() throws Exception {
    clientFlagGlobal = false;

    startGridsMultiThreaded(GRID_CNT);

    long prev = 0;

    for (Ignite g : G.allGrids()) {
        IgniteKernal kernal = (IgniteKernal)g;

        long ver = kernal.context().discovery().topologyVersion();

        info("Top ver: " + ver);

        if (prev == 0)
            prev = ver;
        else
            // All nodes must agree on the discovery topology version
            // (the original left 'prev' unused; this restores the intended check).
            assertEquals(prev, ver);
    }

    info("Test finished.");
}
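// Illustrative follow-up (an assumption, not from the original test): the discovery topology
// version grows by one per join event, so after a clean start of GRID_CNT nodes with no
// leaves, each node is expected to report version GRID_CNT. A stricter check could be:
//
//     for (Ignite g : G.allGrids())
//         assertEquals(GRID_CNT, ((IgniteKernal)g).context().discovery().topologyVersion());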
/** @throws InterruptedException If interrupted. */
@SuppressWarnings("BusyWait")
protected void awaitPartitionMapExchange() throws InterruptedException {
    for (Ignite g : G.allGrids()) {
        IgniteKernal g0 = (IgniteKernal)g;

        for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
            CacheConfiguration cfg = c.context().config();

            if (cfg.getCacheMode() == PARTITIONED &&
                cfg.getRebalanceMode() != NONE &&
                g.cluster().nodes().size() > 1) {
                AffinityFunction aff = cfg.getAffinity();

                GridDhtCacheAdapter<?, ?> dht = dht(c);

                GridDhtPartitionTopology top = dht.topology();

                for (int p = 0; p < aff.partitions(); p++) {
                    long start = 0;

                    for (int i = 0; ; i++) {
                        boolean match = false;

                        AffinityTopologyVersion readyVer =
                            dht.context().shared().exchange().readyAffinityVersion();

                        if (readyVer.topologyVersion() > 0 && c.context().started()) {
                            // Must map on updated version of topology.
                            Collection<ClusterNode> affNodes =
                                g0.affinity(cfg.getName()).mapPartitionToPrimaryAndBackups(p);

                            int exp = affNodes.size();

                            GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                            // Read current owners only once the topology future is complete.
                            Collection<ClusterNode> owners = (topFut != null && topFut.isDone()) ?
                                top.nodes(p, AffinityTopologyVersion.NONE) :
                                Collections.<ClusterNode>emptyList();

                            int actual = owners.size();

                            if (exp != actual || !affNodes.containsAll(owners)) {
                                LT.warn(log(), null, "Waiting for topology map update [" +
                                    "grid=" + g.name() +
                                    ", cache=" + cfg.getName() +
                                    ", cacheId=" + dht.context().cacheId() +
                                    ", topVer=" + top.topologyVersion() +
                                    ", topFut=" + topFut +
                                    ", p=" + p +
                                    ", affNodesCnt=" + exp +
                                    ", ownersCnt=" + actual +
                                    ", affNodes=" + affNodes +
                                    ", owners=" + owners +
                                    ", locNode=" + g.cluster().localNode() + ']');
                            }
                            else
                                match = true;
                        }
                        else {
                            LT.warn(log(), null, "Waiting for topology map update [" +
                                "grid=" + g.name() +
                                ", cache=" + cfg.getName() +
                                ", cacheId=" + dht.context().cacheId() +
                                ", topVer=" + top.topologyVersion() +
                                ", started=" + dht.context().started() +
                                ", p=" + p +
                                ", readyVer=" + readyVer +
                                ", locNode=" + g.cluster().localNode() + ']');
                        }

                        if (!match) {
                            if (i == 0)
                                start = System.currentTimeMillis();

                            if (System.currentTimeMillis() - start > 30_000)
                                throw new IgniteException("Timeout of waiting for topology map update [" +
                                    "grid=" + g.name() +
                                    ", cache=" + cfg.getName() +
                                    ", cacheId=" + dht.context().cacheId() +
                                    ", topVer=" + top.topologyVersion() +
                                    ", p=" + p +
                                    ", readyVer=" + readyVer +
                                    ", locNode=" + g.cluster().localNode() + ']');

                            Thread.sleep(200); // Busy wait.

                            continue;
                        }

                        if (i > 0)
                            log().warning("Finished waiting for topology map update [grid=" + g.name() +
                                ", p=" + p +
                                ", duration=" + (System.currentTimeMillis() - start) + "ms]");

                        break;
                    }
                }
            }
        }
    }
}
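// Typical usage sketch (hypothetical test, not from the original file): topology-changing
// tests call awaitPartitionMapExchange() after starting or stopping nodes, so assertions run
// against a stable partition map rather than an in-flight rebalance.
//
//     public void testRebalanceAfterNodeJoin() throws Exception {
//         startGrid(GRID_CNT); // Trigger a topology change and rebalancing.
//
//         awaitPartitionMapExchange(); // Block until owners match the affinity mapping.
//
//         // ... assertions on primary/backup placement go here ...
//
//         stopGrid(GRID_CNT);
//
//         awaitPartitionMapExchange();
//     }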
/**
 * @param cacheMode Cache mode.
 * @param sameAff If {@code false} uses different number of partitions for caches.
 * @param concurrency Transaction concurrency.
 * @param isolation Transaction isolation.
 * @throws Exception If failed.
 */
private void crossCacheTxFailover(
    CacheMode cacheMode,
    boolean sameAff,
    final TransactionConcurrency concurrency,
    final TransactionIsolation isolation) throws Exception {
    IgniteKernal ignite0 = (IgniteKernal)ignite(0);

    final AtomicBoolean stop = new AtomicBoolean();

    try {
        ignite0.createCache(cacheConfiguration(CACHE1, cacheMode, 256));
        ignite0.createCache(cacheConfiguration(CACHE2, cacheMode, sameAff ? 256 : 128));

        final AtomicInteger threadIdx = new AtomicInteger();

        IgniteInternalFuture<?> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
            @Override public Void call() throws Exception {
                int idx = threadIdx.getAndIncrement();

                Ignite ignite = ignite(idx % GRID_CNT);

                log.info("Started update thread [node=" + ignite.name() +
                    ", client=" + ignite.configuration().isClientMode() + ']');

                IgniteCache<TestKey, TestValue> cache1 = ignite.cache(CACHE1);
                IgniteCache<TestKey, TestValue> cache2 = ignite.cache(CACHE2);

                assertNotSame(cache1, cache2);

                IgniteTransactions txs = ignite.transactions();

                ThreadLocalRandom rnd = ThreadLocalRandom.current();

                long iter = 0;

                while (!stop.get()) {
                    boolean sameKey = rnd.nextBoolean();

                    try {
                        try (Transaction tx = txs.txStart(concurrency, isolation)) {
                            if (sameKey) {
                                TestKey key = new TestKey(rnd.nextLong(KEY_RANGE));

                                cacheOperation(rnd, cache1, key);
                                cacheOperation(rnd, cache2, key);
                            }
                            else {
                                TestKey key1 = new TestKey(rnd.nextLong(KEY_RANGE));
                                TestKey key2 = new TestKey(key1.key() + 1);

                                cacheOperation(rnd, cache1, key1);
                                cacheOperation(rnd, cache2, key2);
                            }

                            tx.commit();
                        }
                    }
                    catch (CacheException | IgniteException e) {
                        log.info("Update error: " + e);
                    }

                    if (iter++ % 500 == 0)
                        log.info("Iteration: " + iter);
                }

                return null;
            }

            /**
             * @param rnd Random.
             * @param cache Cache.
             * @param key Key.
             */
            private void cacheOperation(ThreadLocalRandom rnd, IgniteCache<TestKey, TestValue> cache, TestKey key) {
                switch (rnd.nextInt(4)) {
                    case 0:
                        cache.put(key, new TestValue(rnd.nextLong()));

                        break;

                    case 1:
                        cache.remove(key);

                        break;

                    case 2:
                        cache.invoke(key, new TestEntryProcessor(rnd.nextBoolean() ? 1L : null));

                        break;

                    case 3:
                        cache.get(key);

                        break;

                    default:
                        assert false;
                }
            }
        }, 10, "tx-thread");

        long stopTime = System.currentTimeMillis() + 3 * 60_000;

        long topVer = ignite0.cluster().topologyVersion();

        boolean failed = false;

        while (System.currentTimeMillis() < stopTime) {
            log.info("Start node.");

            IgniteKernal ignite = (IgniteKernal)startGrid(GRID_CNT);

            assertFalse(ignite.configuration().isClientMode());

            topVer++;

            IgniteInternalFuture<?> affFut = ignite.context().cache().context().exchange().
                affinityReadyFuture(new AffinityTopologyVersion(topVer));

            try {
                if (affFut != null)
                    affFut.get(30_000);
            }
            catch (IgniteFutureTimeoutCheckedException e) {
                log.error("Failed to wait for affinity future after start: " + topVer);

                failed = true;

                break;
            }

            Thread.sleep(500);

            log.info("Stop node.");

            stopGrid(GRID_CNT);

            topVer++;

            affFut = ignite0.context().cache().context().exchange().
                affinityReadyFuture(new AffinityTopologyVersion(topVer));

            try {
                if (affFut != null)
                    affFut.get(30_000);
            }
            catch (IgniteFutureTimeoutCheckedException e) {
                log.error("Failed to wait for affinity future after stop: " + topVer);

                failed = true;

                break;
            }
        }

        stop.set(true);

        fut.get();

        assertFalse("Test failed, see log for details.", failed);
    }
    finally {
        stop.set(true);

        ignite0.destroyCache(CACHE1);
        ignite0.destroyCache(CACHE2);

        awaitPartitionMapExchange();
    }
}
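// Illustrative drivers (hypothetical method names; the concrete test matrix is not shown in
// this excerpt): each public test method would pin one combination of cache mode, affinity
// co-location, and transaction mode, e.g.:
//
//     public void testCrossCachePessimisticTxFailover() throws Exception {
//         crossCacheTxFailover(PARTITIONED, true, PESSIMISTIC, REPEATABLE_READ);
//     }
//
//     public void testCrossCacheOptimisticTxFailoverDifferentAffinity() throws Exception {
//         crossCacheTxFailover(PARTITIONED, false, OPTIMISTIC, REPEATABLE_READ);
//     }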