/**
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @throws Exception If test failed.
 */
private void checkTransactionTimeout(GridCacheTxConcurrency concurrency,
    GridCacheTxIsolation isolation) throws Exception {
    boolean wasEx = false;

    GridCacheTx tx = null;

    try {
        GridCache<Integer, String> cache = grid.cache(null);

        // Start transaction with a 50 ms timeout, then outlive it.
        tx = cache.txStart(concurrency, isolation, 50, 0);

        cache.put(1, "1");

        Thread.sleep(100);

        cache.put(1, "2");

        tx.commit();
    }
    catch (GridCacheTxOptimisticException e) {
        info("Received expected optimistic exception: " + e.getMessage());

        wasEx = true;

        tx.rollback();
    }
    catch (GridCacheTxTimeoutException e) {
        info("Received expected timeout exception: " + e.getMessage());

        wasEx = true;

        tx.rollback();
    }

    assert wasEx;
}
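/**
 * A minimal usage sketch (not in the original suite) showing how the helper above
 * might be driven across every concurrency/isolation combination. The method name
 * is hypothetical; the real suite likely declares one test method per combination.
 *
 * @throws Exception If test failed.
 */
public void testTransactionTimeoutAllCombinations() throws Exception {
    for (GridCacheTxConcurrency concurrency : GridCacheTxConcurrency.values())
        for (GridCacheTxIsolation isolation : GridCacheTxIsolation.values())
            checkTransactionTimeout(concurrency, isolation);
}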
/** @throws Exception If failed. */
public void testDisabledRest() throws Exception {
    restEnabled = false;

    final Grid g = startGrid("disabled-rest");

    try {
        Thread.sleep(2 * TOP_REFRESH_FREQ);

        // As long as we have round robin load balancer this will cause every node to be queried.
        for (int i = 0; i < NODES_CNT + 1; i++)
            assertEquals(NODES_CNT + 1, client.compute().refreshTopology(false, false).size());

        final GridClientData data = client.data(PARTITIONED_CACHE_NAME);

        // Check rest-disabled node is unavailable.
        try {
            String affKey;

            do {
                affKey = UUID.randomUUID().toString();
            }
            while (!data.affinity(affKey).equals(g.localNode().id()));

            data.put(affKey, "asdf");

            assertEquals("asdf", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
        }
        catch (GridServerUnreachableException e) {
            // Thrown for direct client-node connections.
            assertTrue("Unexpected exception message: " + e.getMessage(),
                e.getMessage().startsWith("No available endpoints to connect (is rest enabled for this node?)"));
        }
        catch (GridClientException e) {
            // Thrown for routed client-router-node connections.
            String msg = e.getMessage();

            assertTrue("Unexpected exception message: " + msg,
                protocol() == GridClientProtocol.TCP ?
                    // TCP router.
                    msg.contains("No available endpoints to connect (is rest enabled for this node?)") :
                    // HTTP router.
                    msg.startsWith("No available nodes on the router for destination node ID"));
        }

        // Check rest-enabled nodes are available.
        String affKey;

        do {
            affKey = UUID.randomUUID().toString();
        }
        while (data.affinity(affKey).equals(g.localNode().id()));

        data.put(affKey, "fdsa");

        assertEquals("fdsa", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
    }
    finally {
        restEnabled = true;

        G.stop(g.name(), true);
    }
}
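/**
 * A hypothetical sketch of how the restEnabled flag used above could feed node
 * configuration; the suite's actual getConfiguration(...) override may differ.
 */
@Override protected GridConfiguration getConfiguration(String gridName) throws Exception {
    GridConfiguration cfg = super.getConfiguration(gridName);

    // Nodes started while restEnabled is false reject client connections,
    // which is what testDisabledRest() relies on.
    cfg.setRestEnabled(restEnabled);

    return cfg;
}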
/** @throws Exception If failed. */
public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++)
        gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
        String key = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

        // Must go to primary node only. Since backup count is 0, value must be present on
        // primary node only.
        partitioned.put(key, "val" + key);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

            if (primaryNodeId.equals(entry.getKey()))
                assertEquals("val" + key, val);
            else
                assertNull(val);
        }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
        String pinnedKey = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

        UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

        GridClientNode node = compute.node(pinnedNodeId);

        partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

            if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
                assertEquals("val" + pinnedKey, val);
            else
                assertNull(val);
        }
    }
}
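/**
 * A hypothetical helper (not part of the original suite) that factors out the
 * per-node peek assertions repeated twice in testAffinityPut(): the value must
 * be present exactly on the expected nodes and absent everywhere else.
 */
private void assertValueOnNodesOnly(Map<UUID, Grid> gridsByLocNode, String key, String expVal,
    Collection<UUID> expNodeIds) {
    for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (expNodeIds.contains(entry.getKey()))
            assertEquals(expVal, val);
        else
            assertNull(val);
    }
}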
/**
 * JUnit.
 *
 * @throws Exception In case of error.
 */
@SuppressWarnings({"TooBroadScope"})
public void testH2Text() throws Exception {
    int duration = 60 * 1000;

    final int keyCnt = 5000;
    final int logFreq = 50;
    final String txt = "Value";

    final GridCache<Integer, H2TextValue> c = grid(0).cache(null);

    // Fill the cache asynchronously while queries run concurrently.
    GridFuture<?> fut1 = multithreadedAsync(new Callable<Object>() {
        @Override public Object call() throws Exception {
            for (int i = 0; i < keyCnt; i++) {
                c.putx(i, new H2TextValue(txt));

                if (i % logFreq == 0)
                    X.println("Stored values: " + i);
            }

            return null;
        }
    }, 1);

    // Create query.
    final GridCacheQuery<Map.Entry<Integer, H2TextValue>> qry =
        c.queries().createFullTextQuery(H2TextValue.class, txt);

    qry.enableDedup(false);
    qry.includeBackups(false);
    qry.timeout(TEST_TIMEOUT);

    final AtomicBoolean stop = new AtomicBoolean();

    // Repeatedly execute the full-text query until the writer is done and stop is set.
    GridFuture<?> fut2 = multithreadedAsync(new Callable<Object>() {
        @Override public Object call() throws Exception {
            int cnt = 0;

            while (!stop.get()) {
                Collection<Map.Entry<Integer, H2TextValue>> res = qry.execute().get();

                cnt++;

                if (cnt % logFreq == 0) {
                    X.println("Result set: " + res.size());
                    X.println("Executed queries: " + cnt);
                }
            }

            return null;
        }
    }, 1);

    Thread.sleep(duration);

    fut1.get();

    stop.set(true);

    fut2.get();
}
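/**
 * A minimal sketch of the value class assumed by testH2Text(); the real
 * H2TextValue may carry extra fields. The text-field annotation marks the
 * field for full-text indexing, which is what createFullTextQuery(...)
 * matches against (annotation name assumed from the GridGain cache query API).
 */
private static class H2TextValue implements Serializable {
    /** String matched by the full-text query. */
    @GridCacheQueryTextField
    private final String val;

    /** @param val Indexed text value. */
    H2TextValue(String val) {
        this.val = val;
    }
}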