/** @throws Exception If failed. */
public void testDisabledRest() throws Exception {
    restEnabled = false;

    final Grid g = startGrid("disabled-rest");

    try {
        Thread.sleep(2 * TOP_REFRESH_FREQ);

        // With the round-robin load balancer this causes every node to be queried.
        for (int i = 0; i < NODES_CNT + 1; i++)
            assertEquals(NODES_CNT + 1, client.compute().refreshTopology(false, false).size());

        final GridClientData data = client.data(PARTITIONED_CACHE_NAME);

        // Check that the REST-disabled node is unavailable.
        try {
            String affKey;

            do {
                affKey = UUID.randomUUID().toString();
            }
            while (!data.affinity(affKey).equals(g.localNode().id()));

            data.put(affKey, "asdf");

            assertEquals("asdf", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
        }
        catch (GridServerUnreachableException e) {
            // Thrown for direct client-node connections.
            assertTrue("Unexpected exception message: " + e.getMessage(),
                e.getMessage().startsWith("No available endpoints to connect (is rest enabled for this node?)"));
        }
        catch (GridClientException e) {
            // Thrown for routed client-router-node connections.
            String msg = e.getMessage();

            assertTrue("Unexpected exception message: " + msg,
                protocol() == GridClientProtocol.TCP ?
                    msg.contains("No available endpoints to connect (is rest enabled for this node?)") : // TCP router.
                    msg.startsWith("No available nodes on the router for destination node ID")); // HTTP router.
        }

        // Check that REST-enabled nodes are available.
        String affKey;

        do {
            affKey = UUID.randomUUID().toString();
        }
        while (data.affinity(affKey).equals(g.localNode().id()));

        data.put(affKey, "fdsa");

        assertEquals("fdsa", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
    }
    finally {
        restEnabled = true;

        G.stop(g.name(), true);
    }
}
/** @throws Exception If failed. */
public void testInvalidateFlag() throws Exception {
    GridEx g0 = grid(0);

    GridCache<String, String> cache = g0.cache(PARTITIONED_CACHE_NAME);

    String key = null;

    for (int i = 0; i < 10_000; i++) {
        if (!cache.affinity().isPrimaryOrBackup(g0.localNode(), String.valueOf(i))) {
            key = String.valueOf(i);

            break;
        }
    }

    assertNotNull(key);

    // Create an entry in the near cache; it should be invalidated when the INVALIDATE flag is set.
    cache.put(key, key);

    assertNotNull(cache.peek(key));

    GridClientData d = client.data(PARTITIONED_CACHE_NAME);

    d.flagsOn(GridClientCacheFlag.INVALIDATE).put(key, "zzz");

    for (Grid g : G.allGrids()) {
        cache = g.cache(PARTITIONED_CACHE_NAME);

        if (cache.affinity().isPrimaryOrBackup(g.localNode(), key))
            assertEquals("zzz", cache.peek(key));
        else
            assertNull(cache.peek(key));
    }
}
/** @throws Exception If failed. */
public void testProjectionRun() throws Exception {
    GridClientCompute dflt = client.compute();

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
        Grid g = grid(i);

        assert g != null;

        GridClientNode clientNode = dflt.node(g.localNode().id());

        assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

        GridClientCompute prj = dflt.projection(clientNode);

        String res = prj.execute(TestTask.class.getName(), null);

        assertNotNull(res);
        assertEquals(g.localNode().id().toString(), res);
    }
}
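/**
 * A minimal sketch of what the {@code TestTask} referenced above might look like: a task that
 * runs a single job and returns the ID of the node it executed on as a string, which is what the
 * assertions above check. The class name is taken from the tests above, but the body is an
 * assumption for illustration and is not from the original source.
 */
private static class TestTask extends GridComputeTaskSplitAdapter<Object, String> {
    /** {@inheritDoc} */
    @Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException {
        return Collections.singleton(new GridComputeJobAdapter() {
            /** Auto-injected grid instance. */
            @GridInstanceResource
            private Grid grid;

            @Override public Object execute() {
                // Return the executing node's ID so the caller can verify where the job ran.
                return grid.localNode().id().toString();
            }
        });
    }

    /** {@inheritDoc} */
    @Override public String reduce(List<GridComputeJobResult> results) throws GridException {
        return results.get(0).getData();
    }
}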
/**
 * Ensure that {@link GridComputeJobMasterLeaveAware} callback is invoked on a job which is
 * initiated by the master and is currently running on it.
 *
 * @throws Exception If failed.
 */
public void testLocalJobOnMaster() throws Exception {
    invokeLatch = new CountDownLatch(1);
    jobLatch = new CountDownLatch(1);

    Grid g = startGrid(0);

    g.compute().execute(new TestTask(1), null);

    jobLatch.await();

    // Count down the latch in a separate thread.
    new Thread(new Runnable() {
        @Override public void run() {
            try {
                U.sleep(500);
            }
            catch (GridInterruptedException ignore) {
                // No-op.
            }

            latch.countDown();
        }
    }).start();

    stopGrid(0, true);

    latch.countDown();

    assert invokeLatch.await(5000, MILLISECONDS);
}
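/**
 * A rough sketch (an assumption, not taken from the original source) of the kind of job the test
 * above relies on: it signals {@code jobLatch} when it starts, blocks until released, and counts
 * down {@code invokeLatch} when the master-leave callback fires. Class and latch names mirror the
 * fields used above; the exact implementation is hypothetical.
 */
private static class MasterLeaveAwareJob extends GridComputeJobAdapter implements GridComputeJobMasterLeaveAware {
    /** {@inheritDoc} */
    @Override public Object execute() {
        // Let the test know the job has started on this node.
        jobLatch.countDown();

        try {
            latch.await();
        }
        catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
        }

        return null;
    }

    /** {@inheritDoc} */
    @Override public void onMasterNodeLeft(GridComputeTaskSession ses) {
        // Record that the callback was invoked when the master left topology.
        invokeLatch.countDown();
    }
}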
/**
 * Listen to events coming from all grid nodes.
 *
 * @throws GridException If failed.
 */
private static void remoteListen() throws GridException {
    Grid g = GridGain.grid();

    GridCache<Integer, String> cache = g.cache(CACHE_NAME);

    // Register remote event listeners on all nodes running the cache.
    GridFuture<?> fut = g.forCache(CACHE_NAME).events().remoteListen(
        // This optional local callback is called for each event notification
        // that passed the remote predicate filter.
        new GridBiPredicate<UUID, GridCacheEvent>() {
            @Override public boolean apply(UUID nodeId, GridCacheEvent evt) {
                System.out.println();
                System.out.println("Received event [evt=" + evt.name() + ", key=" + evt.key() +
                    ", oldVal=" + evt.oldValue() + ", newVal=" + evt.newValue());

                return true; // Return true to continue listening.
            }
        },
        // Remote filter which accepts only events for keys that are greater than or
        // equal to 10 and for which the local node is primary.
        new GridPredicate<GridCacheEvent>() {
            /** Auto-injected grid instance. */
            @GridInstanceResource
            private Grid g;

            @Override public boolean apply(GridCacheEvent evt) {
                Integer key = evt.key();

                return key >= 10 && g.cache(CACHE_NAME).affinity().isPrimary(g.localNode(), key);
            }
        },
        // Types of events for which listeners are registered.
        EVT_CACHE_OBJECT_PUT,
        EVT_CACHE_OBJECT_READ,
        EVT_CACHE_OBJECT_REMOVED);

    // Wait until event listeners are subscribed on all nodes.
    fut.get();

    int keyCnt = 20;

    // Generate cache events.
    for (int i = 0; i < keyCnt; i++)
        cache.putx(i, Integer.toString(i));
}
/** @throws Exception If failed. */
public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr = new GridClientTopologyListener() {
        @Override public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
        }

        @Override public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
        }
    };

    client.addTopologyListener(lsnr);

    try {
        Grid g = startGrid(NODES_CNT + 1);

        UUID id = g.localNode().id();

        assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, added.size());
        assertEquals(id, F.first(added));

        stopGrid(NODES_CNT + 1);

        assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, rmvd.size());
        assertEquals(id, F.first(rmvd));
    }
    finally {
        client.removeTopologyListener(lsnr);

        stopGrid(NODES_CNT + 1);
    }
}
/**
 * @param concurrency Concurrency.
 * @param isolation Isolation.
 * @throws Exception If test failed.
 */
private void checkTransactionTimeout(GridCacheTxConcurrency concurrency,
    GridCacheTxIsolation isolation) throws Exception {
    boolean wasEx = false;

    GridCacheTx tx = null;

    try {
        GridCache<Integer, String> cache = grid.cache(null);

        tx = cache.txStart(concurrency, isolation, 50, 0);

        cache.put(1, "1");

        Thread.sleep(100);

        cache.put(1, "2");

        tx.commit();
    }
    catch (GridCacheTxOptimisticException e) {
        info("Received expected optimistic exception: " + e.getMessage());

        wasEx = true;

        tx.rollback();
    }
    catch (GridCacheTxTimeoutException e) {
        info("Received expected timeout exception: " + e.getMessage());

        wasEx = true;

        tx.rollback();
    }

    assert wasEx;
}
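/**
 * A hypothetical caller for the helper above, shown only to illustrate how it might be driven
 * for every concurrency/isolation combination. The test method name is an assumption and is not
 * taken from the original source.
 */
public void testTransactionTimeouts() throws Exception {
    for (GridCacheTxConcurrency concurrency : GridCacheTxConcurrency.values())
        for (GridCacheTxIsolation isolation : GridCacheTxIsolation.values())
            // Each combination must fail with either an optimistic or a timeout exception.
            checkTransactionTimeout(concurrency, isolation);
}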
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException {
    Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

    this.gridSize = gridSize;

    final String locNodeId = grid.localNode().id().toString();

    for (int i = 0; i < gridSize; i++) {
        jobs.add(new GridComputeJobAdapter() {
            @SuppressWarnings("OverlyStrongTypeCast")
            @Override public Object execute() {
                try {
                    Thread.sleep(1000);
                }
                catch (InterruptedException ignored) {
                    Thread.currentThread().interrupt();
                }

                return new GridBiTuple<>(locNodeId, 1);
            }
        });
    }

    return jobs;
}
/**
 * @param g Grid.
 * @return Non-system caches.
 */
private Collection<GridCacheConfiguration> caches(Grid g) {
    return F.view(Arrays.asList(g.configuration().getCacheConfiguration()),
        new GridPredicate<GridCacheConfiguration>() {
            @Override public boolean apply(GridCacheConfiguration c) {
                return c.getName() == null || !c.getName().equals(CU.UTILITY_CACHE_NAME);
            }
        });
}
/**
 * Runs JDBC example.
 *
 * @param args Command line arguments.
 * @throws Exception In case of error.
 */
public static void main(String[] args) throws Exception {
    Grid grid = G.start("examples/config/spring-cache.xml");

    Connection conn = null;

    try {
        // Populate cache with data.
        populate(grid.cache(CACHE_NAME));

        // Register JDBC driver.
        Class.forName("org.gridgain.jdbc.GridJdbcDriver");

        // Open JDBC connection.
        conn = DriverManager.getConnection("jdbc:gridgain://localhost/" + CACHE_NAME, configuration());

        X.println(">>>");

        // Query all persons.
        queryAllPersons(conn);

        X.println(">>>");

        // Query persons older than 30 years.
        queryPersons(conn, 30);

        X.println(">>>");

        // Query persons working in GridGain.
        queryPersonsInOrganization(conn, "GridGain");

        X.println(">>>");
    }
    finally {
        // Close JDBC connection.
        if (conn != null)
            conn.close();

        G.stop(true);
    }
}
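/**
 * A minimal sketch of how the {@code queryPersons} helper used above might be implemented with
 * plain JDBC. The SQL schema (a {@code Person} table with {@code name} and {@code age} columns)
 * is an assumption for illustration and is not taken from the original source.
 */
private static void queryPersons(Connection conn, int minAge) throws SQLException {
    try (PreparedStatement stmt = conn.prepareStatement("select name, age from Person where age > ?")) {
        stmt.setInt(1, minAge);

        try (ResultSet rs = stmt.executeQuery()) {
            // Print every matching row returned by the query.
            while (rs.next())
                X.println("Person [name=" + rs.getString(1) + ", age=" + rs.getInt(2) + ']');
        }
    }
}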
/**
 * Listen to events that happen only on the local node.
 *
 * @throws GridException If failed.
 */
private static void localListen() throws GridException {
    Grid g = GridGain.grid();

    // Register event listener for all local task execution events.
    g.events().localListen(new GridPredicate<GridEvent>() {
        @Override public boolean apply(GridEvent evt) {
            GridTaskEvent taskEvt = (GridTaskEvent)evt;

            System.out.println();
            System.out.println("Got event notification [evt=" + evt.name() +
                ", taskName=" + taskEvt.taskName() + ']');

            return true;
        }
    }, EVTS_TASK_EXECUTION);

    // Generate task events.
    g.compute().withName("example-event-task").run(new GridRunnable() {
        @Override public void run() {
            System.out.println();
            System.out.println("Executing sample job.");
        }
    }).get();
}
/** @throws Exception If failed. */
public void testKeyMappingOnComputeNode() throws Exception {
    try {
        cache = true;

        // Start four caching nodes; the first one holds a near-only cache.
        for (int i = 0; i < 4; i++) {
            nearOnly = i == 0;

            startGrid(i);
        }

        cache = false;

        // Start a compute-only node that does not cache data.
        Grid compute = startGrid(4);

        for (int i = 0; i < 100; i++) {
            GridNode node = compute.mapKeyToNode(null, i);

            // Keys must never be mapped to the compute-only node or to the near-only node.
            assertFalse("For key: " + i, node.id().equals(compute.localNode().id()));
            assertFalse("For key: " + i, node.id().equals(grid(0).localNode().id()));
        }
    }
    finally {
        stopAllGrids();
    }
}
/** @throws GridException If failed. */
public void testAffinity() throws GridException {
    Grid g1 = grid(1);
    Grid g2 = grid(2);

    assert caches(g1).size() == 0;
    assert F.first(caches(g2)).getCacheMode() == PARTITIONED;

    Map<GridNode, Collection<String>> map = g1.mapKeysToNodes(null, F.asList("1"));

    assertNotNull(map);
    assertEquals("Invalid map size: " + map.size(), 1, map.size());
    assertEquals(F.first(map.keySet()), g2.localNode());

    UUID id1 = g1.mapKeyToNode(null, "2").id();

    assertNotNull(id1);
    assertEquals(g2.localNode().id(), id1);

    UUID id2 = g1.mapKeyToNode(null, "3").id();

    assertNotNull(id2);
    assertEquals(g2.localNode().id(), id2);
}
/** @throws Exception If failed. */
public void testAffinityExecute() throws Exception {
    GridClientCompute dflt = client.compute();

    GridClientData data = client.data(PARTITIONED_CACHE_NAME);

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
        Grid g = grid(i);

        assert g != null;

        int affinityKey = -1;

        for (int key = 0; key < 10000; key++) {
            if (g.localNode().id().equals(data.affinity(key))) {
                affinityKey = key;

                break;
            }
        }

        if (affinityKey == -1)
            throw new Exception("Unable to find key for which node is primary: " + g.localNode().id());

        GridClientNode clientNode = dflt.node(g.localNode().id());

        assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

        String res = dflt.affinityExecute(TestTask.class.getName(), PARTITIONED_CACHE_NAME, affinityKey, null);

        assertNotNull(res);
        assertEquals(g.localNode().id().toString(), res);
    }
}
/**
 * Runs basic cache example.
 *
 * @param args Command line arguments, none required.
 * @throws Exception If example execution failed.
 */
public static void main(String[] args) throws Exception {
    final Grid g = args.length == 0 ? G.start("examples/config/spring-cache.xml") : G.start(args[0]);

    try {
        // Subscribe to events on every node, so we can visualize what's
        // happening in remote caches.
        g.run(BROADCAST, new CA() {
            @Override public void apply() {
                GridLocalEventListener lsnr = new GridLocalEventListener() {
                    @Override public void onEvent(GridEvent event) {
                        switch (event.type()) {
                            case EVT_CACHE_OBJECT_PUT:
                            case EVT_CACHE_OBJECT_READ:
                            case EVT_CACHE_OBJECT_REMOVED: {
                                GridCacheEvent e = (GridCacheEvent)event;

                                X.println("Cache event [name=" + e.name() + ", key=" + e.key() + ']');
                            }
                        }
                    }
                };

                GridNodeLocal<String, GridLocalEventListener> loc = g.nodeLocal();

                GridLocalEventListener prev = loc.remove("lsnr");

                // If there is a listener subscribed from previous runs, unsubscribe it.
                if (prev != null)
                    g.removeLocalEventListener(prev);

                // Record new listener, so we can check it on the next run.
                loc.put("lsnr", lsnr);

                // Subscribe listener.
                g.addLocalEventListener(lsnr, EVTS_CACHE);
            }
        });

        final GridCacheProjection<Integer, String> cache = g.cache(CACHE_NAME).projection(Integer.class, String.class);

        final int keyCnt = 20;

        // Store keys in cache.
        for (int i = 0; i < keyCnt; i++)
            cache.putx(i, Integer.toString(i));

        // Peek and get on local node.
        for (int i = 0; i < keyCnt; i++) {
            X.println("Peeked [key=" + i + ", val=" + cache.peek(i) + ']');
            X.println("Got [key=" + i + ", val=" + cache.get(i) + ']');
        }

        // Projection (view) for remote nodes.
        GridProjection rmts = g.remoteProjection();

        if (!rmts.isEmpty()) {
            // Peek and get on remote nodes (comment it out if output gets too crowded).
            g.remoteProjection().run(BROADCAST, new GridAbsClosureX() {
                @Override public void applyx() throws GridException {
                    for (int i = 0; i < keyCnt; i++) {
                        X.println("Peeked [key=" + i + ", val=" + cache.peek(i) + ']');
                        X.println("Got [key=" + i + ", val=" + cache.get(i) + ']');
                    }
                }
            });
        }
    }
    finally {
        G.stop(true);
    }
}