/** @throws Exception If failed. */
public void testInvalidateFlag() throws Exception {
    GridEx g0 = grid(0);

    GridCache<String, String> cache = g0.cache(PARTITIONED_CACHE_NAME);

    String key = null;

    for (int i = 0; i < 10_000; i++) {
        if (!cache.affinity().isPrimaryOrBackup(g0.localNode(), String.valueOf(i))) {
            key = String.valueOf(i);

            break;
        }
    }

    assertNotNull(key);

    // Create entry in near cache, it is invalidated if INVALIDATE flag is set.
    cache.put(key, key);

    assertNotNull(cache.peek(key));

    GridClientData d = client.data(PARTITIONED_CACHE_NAME);

    d.flagsOn(GridClientCacheFlag.INVALIDATE).put(key, "zzz");

    for (Grid g : G.allGrids()) {
        cache = g.cache(PARTITIONED_CACHE_NAME);

        if (cache.affinity().isPrimaryOrBackup(g.localNode(), key))
            assertEquals("zzz", cache.peek(key));
        else
            assertNull(cache.peek(key));
    }
}
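
/**
 * A hypothetical refactoring sketch, not part of the original test: factors the
 * key-search loop from testInvalidateFlag() into a reusable helper. It relies only
 * on the affinity API already used above; the helper name, the GridNode parameter
 * type and the key range are illustrative assumptions.
 *
 * @param cache Cache whose affinity is probed.
 * @param node Node that must hold neither a primary nor a backup copy of the key.
 * @return First key in [0, 10_000) not mapped to the given node, or {@code null} if none found.
 */
private String nonAffinityKey(GridCache<String, String> cache, GridNode node) {
    for (int i = 0; i < 10_000; i++) {
        String key = String.valueOf(i);

        // Such a key is served through the near cache on the given node,
        // which is exactly what the INVALIDATE flag check needs.
        if (!cache.affinity().isPrimaryOrBackup(node, key))
            return key;
    }

    return null;
}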

/**
 * Tests GGFS construction.
 *
 * @throws GridException In case of exception.
 */
public void testConfiguration() throws GridException {
    GridCache metaCache = getFieldValue(fs, "meta", "metaCache");
    GridCache dataCache = getFieldValue(fs, "data", "dataCache");

    assertNotNull(metaCache);
    assertEquals(META_CACHE_NAME, metaCache.name());
    assertEquals(REPLICATED, metaCache.configuration().getCacheMode());

    assertNotNull(dataCache);
    assertEquals(DATA_CACHE_NAME, dataCache.name());
    assertEquals(PARTITIONED, dataCache.configuration().getCacheMode());
}
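
/**
 * A sketch, not taken from the test class, of the configuration shape that
 * testConfiguration() asserts: GGFS wired to a REPLICATED meta cache and a
 * PARTITIONED data cache. The setter names follow the GridGain GGFS
 * configuration API as commonly used; treat them and the values as
 * illustrative assumptions rather than a verified snippet.
 */
private GridGgfsConfiguration ggfsConfigurationSketch() {
    GridGgfsConfiguration cfg = new GridGgfsConfiguration();

    cfg.setMetaCacheName(META_CACHE_NAME); // Must be backed by a REPLICATED cache.
    cfg.setDataCacheName(DATA_CACHE_NAME); // Must be backed by a PARTITIONED cache.

    return cfg;
}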

/** @throws Exception If failed. */
public void testNodeLeave() throws Exception {
    try {
        cache = true;

        for (int i = 0; i < 2; i++) {
            nearOnly = i == 0;

            startGrid(i);
        }

        for (int i = 0; i < 10; i++)
            grid(1).cache(null).put(i, i);

        final GridCache<Object, Object> nearOnly = grid(0).cache(null);

        // Populate near cache.
        for (int i = 0; i < 10; i++) {
            assertEquals(i, nearOnly.get(i));
            assertEquals(i, nearOnly.peek(i));
        }

        // Stop the only DHT node.
        stopGrid(1);

        for (int i = 0; i < 10; i++) {
            assertNull(nearOnly.peek(i));

            final int key = i;

            GridTestUtils.assertThrows(log, new Callable<Object>() {
                @Override public Object call() throws Exception {
                    return nearOnly.get(key);
                }
            }, GridTopologyException.class, null);
        }

        // Test optimistic transaction.
        GridTestUtils.assertThrows(log, new Callable<Object>() {
            @Override public Object call() throws Exception {
                try (GridCacheTx tx = nearOnly.txStart(OPTIMISTIC, REPEATABLE_READ)) {
                    nearOnly.putx("key", "val");

                    tx.commit();
                }

                return null;
            }
        }, GridTopologyException.class, null);

        // Test pessimistic transaction.
        GridTestUtils.assertThrows(log, new Callable<Object>() {
            @Override public Object call() throws Exception {
                try (GridCacheTx tx = nearOnly.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                    nearOnly.put("key", "val");

                    tx.commit();
                }

                return null;
            }
        }, GridTopologyException.class, null);
    }
    finally {
        stopAllGrids();
    }
}
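
/**
 * A sketch (an assumption, not code from the original class) of how the 'cache' and
 * 'nearOnly' fields flipped in testNodeLeave() are typically consumed: grid 0 starts
 * as a near-only node while grid 1 keeps the DHT partitions. GridCacheDistributionMode
 * is the GridGain enum for this; defaultCacheConfiguration() comes from the test
 * framework. Details may differ from the actual test class.
 */
@Override protected GridConfiguration getConfiguration(String gridName) throws Exception {
    GridConfiguration cfg = super.getConfiguration(gridName);

    if (cache) {
        GridCacheConfiguration ccfg = defaultCacheConfiguration();

        ccfg.setCacheMode(PARTITIONED);

        // A near-only node keeps a near cache but owns no partitions, which is why
        // stopping the single DHT node above makes subsequent get() calls fail.
        ccfg.setDistributionMode(nearOnly ? NEAR_ONLY : PARTITIONED_ONLY);

        cfg.setCacheConfiguration(ccfg);
    }

    return cfg;
}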

/** @throws Exception If failed. */
public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx)grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
        GridGgfs fs0 = grid(0).ggfs("ggfs");
        GridGgfs fs1 = grid(1).ggfs("ggfs");
        GridGgfs fs2 = grid(2).ggfs("ggfs");

        try (GridGgfsOutputStream out = fs0.create(path, 128, false, 1, CFG_GRP_SIZE,
            F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
            // 1.5 blocks.
            byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

            Arrays.fill(data, (byte)1);

            out.write(data);
        }

        try (GridGgfsOutputStream out = fs1.append(path, false)) {
            // 1.5 blocks.
            byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

            Arrays.fill(data, (byte)2);

            out.write(data);
        }

        // After this we should have the first two blocks colocated with grid 0 and the
        // last block colocated with grid 1.
        GridGgfsFileImpl fileImpl = (GridGgfsFileImpl)fs.info(path);

        GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

        GridGgfsFileInfo fileInfo = (GridGgfsFileInfo)metaCache.get(fileImpl.fileId());

        GridGgfsFileMap map = fileInfo.fileMap();

        List<GridGgfsFileAffinityRange> ranges = map.ranges();

        assertEquals(2, ranges.size());

        assertEquals(0, ranges.get(0).startOffset());
        assertEquals(2 * CFG_BLOCK_SIZE - 1, ranges.get(0).endOffset());
        assertEquals(2 * CFG_BLOCK_SIZE, ranges.get(1).startOffset());
        assertEquals(3 * CFG_BLOCK_SIZE - 1, ranges.get(1).endOffset());

        // Validate data read after colocated writes.
        try (GridGgfsInputStream in = fs2.open(path)) {
            // Validate first part of the file.
            for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++)
                assertEquals((byte)1, in.read());

            // Validate second part of the file.
            for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++)
                assertEquals((byte)2, in.read());

            assertEquals(-1, in.read());
        }
    }
    finally {
        GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

        boolean hasData = false;

        for (int i = 0; i < NODES_CNT; i++)
            hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

        assertTrue(hasData);

        fs.delete(path, true);
    }

    GridTestUtils.retryAssert(log, ASSERT_RETRIES, ASSERT_RETRY_INTERVAL, new CAX() {
        @Override public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
                assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
        }
    });
}
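
/**
 * A hypothetical helper, not in the original test: collapses the four offset
 * assertions above into one call per range. Offsets are inclusive on both ends,
 * which is why a range starting at 0 and covering N blocks ends at
 * N * CFG_BLOCK_SIZE - 1. Uses only the range accessors already exercised above.
 *
 * @param range Affinity range to check.
 * @param expStart Expected inclusive start offset.
 * @param expEnd Expected inclusive end offset.
 */
private static void assertRange(GridGgfsFileAffinityRange range, long expStart, long expEnd) {
    assertEquals(expStart, range.startOffset());
    assertEquals(expEnd, range.endOffset());
}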