/** @throws Exception If failed. */
public void testDisabledRest() throws Exception {
    restEnabled = false;

    final Grid g = startGrid("disabled-rest");

    try {
        Thread.sleep(2 * TOP_REFRESH_FREQ);

        // As long as we have round robin load balancer this will cause every node to be queried.
        for (int i = 0; i < NODES_CNT + 1; i++)
            assertEquals(NODES_CNT + 1, client.compute().refreshTopology(false, false).size());

        final GridClientData data = client.data(PARTITIONED_CACHE_NAME);

        // Check rest-disabled node is unavailable.
        try {
            String affKey;

            do {
                affKey = UUID.randomUUID().toString();
            }
            while (!data.affinity(affKey).equals(g.localNode().id()));

            data.put(affKey, "asdf");

            assertEquals("asdf", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
        }
        catch (GridServerUnreachableException e) {
            // Thrown for direct client-node connections.
            assertTrue("Unexpected exception message: " + e.getMessage(),
                e.getMessage().startsWith("No available endpoints to connect (is rest enabled for this node?)"));
        }
        catch (GridClientException e) {
            // Thrown for routed client-router-node connections.
            String msg = e.getMessage();

            assertTrue("Unexpected exception message: " + msg,
                protocol() == GridClientProtocol.TCP ?
                    msg.contains("No available endpoints to connect (is rest enabled for this node?)") : // TCP router.
                    msg.startsWith("No available nodes on the router for destination node ID")); // HTTP router.
        }

        // Check rest-enabled nodes are available.
        String affKey;

        do {
            affKey = UUID.randomUUID().toString();
        }
        while (data.affinity(affKey).equals(g.localNode().id()));

        data.put(affKey, "fdsa");

        assertEquals("fdsa", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
    }
    finally {
        restEnabled = true;

        G.stop(g.name(), true);
    }
}
/** @throws Exception If failed. */
public void testClientAffinity() throws Exception {
    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    Collection<Object> keys = new ArrayList<>();

    keys.addAll(Arrays.asList(Boolean.TRUE, Boolean.FALSE, 1, Integer.MAX_VALUE));

    Random rnd = new Random();
    StringBuilder sb = new StringBuilder();

    // Generate some random strings.
    for (int i = 0; i < 100; i++) {
        sb.setLength(0);

        for (int j = 0; j < 255; j++)
            // Only printable ASCII symbols for test.
            sb.append((char)(rnd.nextInt(0x7f - 0x20) + 0x20));

        keys.add(sb.toString());
    }

    // Generate some more keys to achieve better coverage.
    for (int i = 0; i < 100; i++)
        keys.add(UUID.randomUUID());

    for (Object key : keys) {
        UUID nodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        UUID clientNodeId = partitioned.affinity(key);

        assertEquals("Invalid affinity mapping for REST response for key: " + key, nodeId, clientNodeId);
    }
}
/** @throws Exception If failed. */
public void testInvalidateFlag() throws Exception {
    GridEx g0 = grid(0);

    GridCache<String, String> cache = g0.cache(PARTITIONED_CACHE_NAME);

    String key = null;

    for (int i = 0; i < 10_000; i++) {
        if (!cache.affinity().isPrimaryOrBackup(g0.localNode(), String.valueOf(i))) {
            key = String.valueOf(i);

            break;
        }
    }

    assertNotNull(key);

    cache.put(key, key); // Create entry in near cache, it is invalidated if INVALIDATE flag is set.

    assertNotNull(cache.peek(key));

    GridClientData d = client.data(PARTITIONED_CACHE_NAME);

    d.flagsOn(GridClientCacheFlag.INVALIDATE).put(key, "zzz");

    for (Grid g : G.allGrids()) {
        cache = g.cache(PARTITIONED_CACHE_NAME);

        if (cache.affinity().isPrimaryOrBackup(g.localNode(), key))
            assertEquals("zzz", cache.peek(key));
        else
            assertNull(cache.peek(key));
    }
}
/** @throws Exception If failed. */
public void testSyncCommitRollbackFlags() throws Exception {
    commSpiEnabled = true;

    try {
        GridClientData data = client.data(REPLICATED_ASYNC_CACHE_NAME);

        info("Before put x1");

        data.put("x1", "y1");

        info("Before put x2");

        data.flagsOn(GridClientCacheFlag.SYNC_COMMIT).put("x2", "y2");

        info("Before put x3");

        data.put("x3", "y3");

        info("Before put x4");

        data.flagsOn(GridClientCacheFlag.SYNC_COMMIT).put("x4", "y4");
    }
    finally {
        commSpiEnabled = false;
    }
}
/** @throws Exception If failed. */
public void testMultithreadedCommand() throws Exception {
    final GridClientData data = client.data(PARTITIONED_CACHE_NAME);
    final GridClientCompute compute = client.compute();

    final AtomicInteger cnt = new AtomicInteger(0);

    multithreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            for (int i = 0; i < 20; i++) {
                String key = UUID.randomUUID().toString();
                String val = UUID.randomUUID().toString();

                switch (cnt.incrementAndGet() % 4) {
                    case 0: {
                        assertTrue(data.put(key, val));
                        assertEquals(val, data.get(key));
                        assertTrue(data.remove(key));

                        break;
                    }

                    case 1: {
                        assertNotNull(data.metrics());

                        break;
                    }

                    case 2: {
                        String nodeId = compute.execute(TestTask.class.getName(), null);

                        assertNotNull(nodeId);
                        assertNotNull(compute.refreshNode(UUID.fromString(nodeId), true, true));

                        break;
                    }

                    case 3: {
                        assertEquals(NODES_CNT, compute.refreshTopology(true, true).size());

                        break;
                    }
                }
            }

            return null;
        }
    }, 50, "multithreaded-client-access");
}
/** @throws Exception If failed. */
public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);
    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++)
        gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
        String key = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

        assertEquals(primaryNodeId, partitioned.affinity(key));

        // Must go to primary node only. Since backup count is 0, value must be present on primary node only.
        partitioned.put(key, "val" + key);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

            if (primaryNodeId.equals(entry.getKey()))
                assertEquals("val" + key, val);
            else
                assertNull(val);
        }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
        String pinnedKey = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

        UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

        GridClientNode node = compute.node(pinnedNodeId);

        partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

            if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
                assertEquals("val" + pinnedKey, val);
            else
                assertNull(val);
        }
    }
}
/** @throws Exception If failed. */
public void testAffinityExecute() throws Exception {
    GridClientCompute dflt = client.compute();

    GridClientData data = client.data(PARTITIONED_CACHE_NAME);

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
        Grid g = grid(i);

        assert g != null;

        int affinityKey = -1;

        for (int key = 0; key < 10000; key++) {
            if (g.localNode().id().equals(data.affinity(key))) {
                affinityKey = key;

                break;
            }
        }

        if (affinityKey == -1)
            throw new Exception("Unable to find key for which node is primary: " + g.localNode().id());

        GridClientNode clientNode = dflt.node(g.localNode().id());

        assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

        String res = dflt.affinityExecute(TestTask.class.getName(), PARTITIONED_CACHE_NAME, affinityKey, null);

        assertNotNull(res);

        assertEquals(g.localNode().id().toString(), res);
    }
}