/**
 * Checks that the transaction sync-commit flags are in the correct state.
 *
 * @param msg Message.
 */
private void checkSyncFlags(GridIoMessage msg) {
    if (!commSpiEnabled)
        return;

    Object o = msg.message();

    if (!(o instanceof GridDistributedLockRequest))
        return;

    GridKernal g = (GridKernal) G.grid(nodeId);

    GridCacheTxManager<Object, Object> tm =
        g.internalCache(REPLICATED_ASYNC_CACHE_NAME).context().tm();

    GridCacheVersion v = ((GridCacheVersionable) o).version();

    GridCacheTxEx t = tm.tx(v);

    // Transactions writing "x1"/"x3" are expected to commit asynchronously,
    // while those writing "x2"/"x4" are expected to commit synchronously.
    if (t.hasWriteKey("x1")) {
        assertFalse(t.syncCommit());
    }
    else if (t.hasWriteKey("x2")) {
        assertTrue(t.syncCommit());
    }
    else if (t.hasWriteKey("x3")) {
        assertFalse(t.syncCommit());
    }
    else if (t.hasWriteKey("x4")) {
        assertTrue(t.syncCommit());
    }
}
/**
 * Starts and stops grids according to the given sequence and verifies that
 * all running nodes agree on the primary and backup assignments of every partition.
 *
 * @param seq Start/stop sequence.
 * @throws Exception If failed.
 */
private void checkSequence0(boolean[] seq) throws Exception {
    try {
        startGrid(0);

        TreeSet<Integer> started = new TreeSet<>();

        started.add(0);

        int topVer = 1;

        for (boolean start : seq) {
            if (start) {
                int nextIdx = nextIndex(started);

                startGrid(nextIdx);

                started.add(nextIdx);
            }
            else {
                int idx = started.last();

                stopGrid(idx);

                started.remove(idx);
            }

            topVer++;

            info("Grid 0: " + grid(0).localNode().id());

            // Wait until affinity is calculated for the new topology version on grid 0.
            ((GridKernal) grid(0)).internalCache().context().affinity().affinityReadyFuture(topVer).get();

            for (int i : started) {
                if (i != 0) {
                    GridEx grid = grid(i);

                    ((GridKernal) grid).internalCache().context().affinity().affinityReadyFuture(topVer).get();

                    info("Grid " + i + ": " + grid.localNode().id());

                    // Every started node must see the same ordered list of primary
                    // and backup nodes for each partition as grid 0.
                    for (int part = 0; part < parts; part++) {
                        List<GridNode> firstNodes = (List<GridNode>) grid(0).cache(null).affinity()
                            .mapPartitionToPrimaryAndBackups(part);

                        List<GridNode> secondNodes = (List<GridNode>) grid.cache(null).affinity()
                            .mapPartitionToPrimaryAndBackups(part);

                        assertEquals(firstNodes.size(), secondNodes.size());

                        for (int n = 0; n < firstNodes.size(); n++)
                            assertEquals(firstNodes.get(n), secondNodes.get(n));
                    }
                }
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
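/*
 * nextIndex(), referenced above, is not shown in this excerpt. A minimal sketch,
 * assuming it simply returns the smallest grid index that is not currently
 * started; the actual helper in the test class may differ.
 */
private int nextIndex(SortedSet<Integer> started) {
    // Scan upward from 0; the first index absent from 'started' is free to use.
    for (int i = 0; ; i++)
        if (!started.contains(i))
            return i;
}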