/** @throws Exception If failed. */
public void testDisabledRest() throws Exception {
    restEnabled = false;

    final Grid g = startGrid("disabled-rest");

    try {
        Thread.sleep(2 * TOP_REFRESH_FREQ);

        // As long as we have round robin load balancer this will cause every node to be queried.
        for (int i = 0; i < NODES_CNT + 1; i++)
            assertEquals(NODES_CNT + 1, client.compute().refreshTopology(false, false).size());

        final GridClientData data = client.data(PARTITIONED_CACHE_NAME);

        // Check rest-disabled node is unavailable.
        try {
            String affKey;

            do {
                affKey = UUID.randomUUID().toString();
            }
            while (!data.affinity(affKey).equals(g.localNode().id()));

            data.put(affKey, "asdf");

            assertEquals("asdf", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
        }
        catch (GridServerUnreachableException e) {
            // Thrown for direct client-node connections.
            assertTrue("Unexpected exception message: " + e.getMessage(),
                e.getMessage().startsWith("No available endpoints to connect (is rest enabled for this node?)"));
        }
        catch (GridClientException e) {
            // Thrown for routed client-router-node connections.
            String msg = e.getMessage();

            assertTrue("Unexpected exception message: " + msg,
                protocol() == GridClientProtocol.TCP ?
                    msg.contains("No available endpoints to connect (is rest enabled for this node?)") : // TCP router.
                    msg.startsWith("No available nodes on the router for destination node ID")); // HTTP router.
        }

        // Check rest-enabled nodes are available.
        String affKey;

        do {
            affKey = UUID.randomUUID().toString();
        }
        while (data.affinity(affKey).equals(g.localNode().id()));

        data.put(affKey, "fdsa");

        assertEquals("fdsa", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
    }
    finally {
        restEnabled = true;

        G.stop(g.name(), true);
    }
}
/** @throws Exception If failed. */ @SuppressWarnings("unchecked") public void testCancel() throws Exception { Grid grid = G.grid(getTestGridName()); grid.compute() .localDeployTask(GridCancelTestTask.class, U.detectClassLoader(GridCancelTestTask.class)); GridComputeTaskFuture<?> fut = grid.compute().execute(GridCancelTestTask.class.getName(), null); // Wait until jobs begin execution. boolean await = startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS); assert await : "Jobs did not start."; info("Test task result: " + fut); assert fut != null; // Only first job should successfully complete. Object res = fut.get(); assert (Integer) res == 1; // Wait for all jobs to finish. await = stopSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS); assert await : "Jobs did not stop."; // One is definitely processed. But there might be some more processed or cancelled or processed // and cancelled. // Thus total number should be at least SPLIT_COUNT and at most (SPLIT_COUNT - 1) *2 +1 assert (cancelCnt + processedCnt) >= SPLIT_COUNT && (cancelCnt + processedCnt) <= (SPLIT_COUNT - 1) * 2 + 1 : "Invalid cancel count value: " + cancelCnt; }
/** @throws Exception If failed. */
public void testInvalidateFlag() throws Exception {
    GridEx g0 = grid(0);

    GridCache<String, String> cache = g0.cache(PARTITIONED_CACHE_NAME);

    String key = null;

    for (int i = 0; i < 10_000; i++) {
        if (!cache.affinity().isPrimaryOrBackup(g0.localNode(), String.valueOf(i))) {
            key = String.valueOf(i);

            break;
        }
    }

    assertNotNull(key);

    // Create entry in near cache, it is invalidated if INVALIDATE flag is set.
    cache.put(key, key);

    assertNotNull(cache.peek(key));

    GridClientData d = client.data(PARTITIONED_CACHE_NAME);

    d.flagsOn(GridClientCacheFlag.INVALIDATE).put(key, "zzz");

    for (Grid g : G.allGrids()) {
        cache = g.cache(PARTITIONED_CACHE_NAME);

        if (cache.affinity().isPrimaryOrBackup(g.localNode(), key))
            assertEquals("zzz", cache.peek(key));
        else
            assertNull(cache.peek(key));
    }
}
/** @throws Exception If failed. */
public void testProjectionRun() throws Exception {
    GridClientCompute dflt = client.compute();

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
        Grid g = grid(i);

        assert g != null;

        GridClientNode clientNode = dflt.node(g.localNode().id());

        assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

        GridClientCompute prj = dflt.projection(clientNode);

        String res = prj.execute(TestTask.class.getName(), null);

        assertNotNull(res);
        assertEquals(g.localNode().id().toString(), res);
    }
}
/**
 * Ensure that {@link GridComputeJobMasterLeaveAware} callback is invoked on a job which was
 * initiated by the master node and is currently running on it.
 *
 * @throws Exception If failed.
 */
public void testLocalJobOnMaster() throws Exception {
    invokeLatch = new CountDownLatch(1);
    jobLatch = new CountDownLatch(1);

    Grid g = startGrid(0);

    g.compute().execute(new TestTask(1), null);

    jobLatch.await();

    // Count down the latch in a separate thread.
    new Thread(new Runnable() {
        @Override public void run() {
            try {
                U.sleep(500);
            }
            catch (GridInterruptedException ignore) {
                // No-op.
            }

            latch.countDown();
        }
    }).start();

    stopGrid(0, true);

    latch.countDown();

    assert invokeLatch.await(5000, MILLISECONDS);
}
/**
 * Calculates the length of a given phrase on the grid.
 *
 * @param phrase Phrase to count the number of letters in.
 * @throws GridException If failed.
 */
private static void countLettersReducer(String phrase) throws GridException {
    X.println(">>> Starting countLettersReducer() example...");

    Grid grid = G.grid();

    // Logger to use in your closure. Note that even though we assign it
    // to a local variable, GridGain still allows it to be used from
    // remotely executed code.
    final GridLogger log = grid.log();

    // Execute the letter-counting closure and reduce the results on the grid.
    int letterCnt = grid.reduce(
        BALANCE,
        // Create executable logic.
        new GridClosure<String, Integer>() {
            @Override public Integer apply(String word) {
                // Print out a given word, just so we can
                // see which node is doing what.
                log.info(">>> Calculating for word: " + word);

                // Return the length of a given word, i.e. number of letters.
                return word.length();
            }
        },
        Arrays.asList(phrase.split(" ")), // Collection of words.
        // Create custom reducer.
        // NOTE: Alternatively, you can use the existing reducer: F.sumIntReducer().
        new GridReducer<Integer, Integer>() {
            private int sum;

            @Override public boolean collect(Integer res) {
                sum += res;

                return true; // True means continue collecting until the last result.
            }

            @Override public Integer apply() {
                return sum;
            }
        });

    X.println(">>>");
    X.println(">>> Finished execution of counting letters with reducer based on GridGain 3.0 API.");
    X.println(">>> Total number of letters in the phrase is '" + letterCnt + "'.");
    X.println(">>> You should see individual words printed out on different nodes.");
    X.println(">>> Check all nodes for output (this node is also part of the grid).");
    X.println(">>>");
}
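/*
 * Illustrative driver (not part of the original source): a minimal sketch of how
 * countLettersReducer(...) might be invoked. The configuration path is an assumption
 * borrowed from the other examples in this file set.
 */
public static void main(String[] args) throws GridException {
    try (Grid g = GridGain.start("examples/config/example-compute.xml")) {
        // Any phrase works; the reducer sums the lengths of its words.
        countLettersReducer("Count characters using a GridGain reducer");
    }
}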
/** @throws Exception If failed. */
public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr = new GridClientTopologyListener() {
        @Override public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
        }

        @Override public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
        }
    };

    client.addTopologyListener(lsnr);

    try {
        Grid g = startGrid(NODES_CNT + 1);

        UUID id = g.localNode().id();

        assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, added.size());
        assertEquals(id, F.first(added));

        stopGrid(NODES_CNT + 1);

        assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, rmvd.size());
        assertEquals(id, F.first(rmvd));
    }
    finally {
        client.removeTopologyListener(lsnr);

        stopGrid(NODES_CNT + 1);
    }
}
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException {
    Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

    this.gridSize = gridSize;

    final String locNodeId = grid.localNode().id().toString();

    for (int i = 0; i < gridSize; i++) {
        jobs.add(new GridComputeJobAdapter() {
            @SuppressWarnings("OverlyStrongTypeCast")
            @Override public Object execute() {
                try {
                    Thread.sleep(1000);
                }
                catch (InterruptedException ignored) {
                    Thread.currentThread().interrupt();
                }

                return new GridBiTuple<>(locNodeId, 1);
            }
        });
    }

    return jobs;
}
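/*
 * Illustrative sketch (not from the original source) of how a matching reduce(...) step
 * could aggregate the GridBiTuple results produced by the jobs above, assuming the task
 * only needs the total counter value across all nodes.
 */
@Override public Object reduce(List<GridComputeJobResult> results) throws GridException {
    int total = 0;

    for (GridComputeJobResult res : results) {
        GridBiTuple<String, Integer> tuple = res.getData();

        // Sum up the per-job counters (the second tuple element).
        total += tuple.get2();
    }

    return total;
}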
/**
 * Executes example.
 *
 * @param args Command line arguments, none required.
 * @throws GridException If example execution failed.
 */
public static void main(String[] args) throws GridException {
    try (Grid g = GridGain.start("examples/config/example-compute.xml")) {
        System.out.println();
        System.out.println("Compute schedule example started.");

        // Schedule output message every minute.
        GridSchedulerFuture<?> fut = g.scheduler().scheduleLocal(
            new Callable<Integer>() {
                private int invocations;

                @Override public Integer call() {
                    invocations++;

                    try {
                        g.compute().broadcast(
                            new GridRunnable() {
                                @Override public void run() {
                                    System.out.println();
                                    System.out.println("Howdy! :) ");
                                }
                            }
                        ).get();
                    }
                    catch (GridException e) {
                        throw new GridRuntimeException(e);
                    }

                    return invocations;
                }
            },
            // Cron expression with an extended prefix: the '{5, 3}' part is understood here
            // to mean an initial delay of 5 seconds and 3 executions in total.
            "{5, 3} * * * * *"
        );

        while (!fut.isDone())
            System.out.println(">>> Invocation #: " + fut.get());

        System.out.println();
        System.out.println(">>> Schedule future is done and has been unscheduled.");
        System.out.println(">>> Check all nodes for hello message output.");
    }
}
/**
 * Executes example.
 *
 * @param args Command line arguments, none required.
 * @throws Exception If example execution failed.
 */
public static void main(String[] args) throws Exception {
    Timer timer = new Timer("priceBars");

    // Start grid.
    final Grid g = GridGain.start("examples/config/example-streamer.xml");

    System.out.println();
    System.out.println(">>> Streaming price bars example started.");

    try {
        TimerTask task = scheduleQuery(g, timer);

        streamData(g);

        // Force one more run to get final results.
        task.run();

        timer.cancel();

        // Reset all streamers on all nodes to make sure that
        // consecutive executions start from scratch.
        g.compute().broadcast(
            new Runnable() {
                @Override public void run() {
                    if (!ExamplesUtils.hasStreamer(g, "priceBars"))
                        System.err.println("Default streamer not found (is example-streamer.xml " +
                            "configuration used on all nodes?)");
                    else {
                        GridStreamer streamer = g.streamer("priceBars");

                        System.out.println("Clearing bars from streamer.");

                        streamer.reset();
                    }
                }
            }
        ).get();
    }
    finally {
        GridGain.stop(true);
    }
}
/** @throws Exception If failed. */
public void testKeyMappingOnComputeNode() throws Exception {
    try {
        cache = true;

        for (int i = 0; i < 4; i++) {
            nearOnly = i == 0;

            startGrid(i);
        }

        cache = false;

        Grid compute = startGrid(4);

        for (int i = 0; i < 100; i++) {
            GridNode node = compute.mapKeyToNode(null, i);

            assertFalse("For key: " + i, node.id().equals(compute.localNode().id()));
            assertFalse("For key: " + i, node.id().equals(grid(0).localNode().id()));
        }
    }
    finally {
        stopAllGrids();
    }
}
/**
 * Streams random prices into the system.
 *
 * @param g Grid.
 * @throws GridException If failed.
 */
private static void streamData(final Grid g) throws GridException {
    GridStreamer streamer = g.streamer("priceBars");

    for (int i = 0; i < CNT; i++) {
        for (int j = 0; j < INSTRUMENTS.length; j++) {
            // Use a Gaussian distribution so that small deviations
            // from the initial price are the most likely.
            double price = round2(INITIAL_PRICES[j] + RAND.nextGaussian());

            Quote quote = new Quote(INSTRUMENTS[j], price);

            streamer.addEvent(quote);
        }
    }
}
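/*
 * round2(...) is referenced above but not shown in this excerpt. A plausible
 * implementation (an assumption, not the original code) rounds a price to two
 * decimal places:
 */
private static double round2(double val) {
    return Math.floor(100 * val + 0.5) / 100;
}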
/** @throws Exception If failed. */
public void testAffinityExecute() throws Exception {
    GridClientCompute dflt = client.compute();

    GridClientData data = client.data(PARTITIONED_CACHE_NAME);

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
        Grid g = grid(i);

        assert g != null;

        int affinityKey = -1;

        for (int key = 0; key < 10000; key++) {
            if (g.localNode().id().equals(data.affinity(key))) {
                affinityKey = key;

                break;
            }
        }

        if (affinityKey == -1)
            throw new Exception("Unable to find key for which node is primary: " + g.localNode().id());

        GridClientNode clientNode = dflt.node(g.localNode().id());

        assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

        String res = dflt.affinityExecute(TestTask.class.getName(), PARTITIONED_CACHE_NAME, affinityKey, null);

        assertNotNull(res);
        assertEquals(g.localNode().id().toString(), res);
    }
}
/**
 * See <a href="http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html">
 * http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html</a> for more
 * information.
 *
 * @param str Virtual name by which the class is registered as a {@code startupClass} in the
 *      {@code config.xml} file.
 * @param params A hashtable that is made up of the name-value pairs supplied from the
 *      {@code startupArgs} property.
 * @return Result string (log message).
 * @throws Exception Thrown if error occurred.
 */
@SuppressWarnings({"unchecked", "CatchGenericClass"})
@Override public String startup(String str, Hashtable params) throws Exception {
    GridLogger log = new GridJavaLogger(LoggingHelper.getServerLogger());

    cfgFile = (String)params.get(cfgFilePathParam);

    if (cfgFile == null)
        throw new IllegalArgumentException("Failed to read property: " + cfgFilePathParam);

    String workMgrName = (String)params.get(workMgrParam);

    URL cfgUrl = U.resolveGridGainUrl(cfgFile);

    if (cfgUrl == null)
        throw new ServerLifecycleException("Failed to find Spring configuration file (path provided should be " +
            "either absolute, relative to GRIDGAIN_HOME, or relative to META-INF folder): " + cfgFile);

    GenericApplicationContext springCtx;

    try {
        springCtx = new GenericApplicationContext();

        XmlBeanDefinitionReader xmlReader = new XmlBeanDefinitionReader(springCtx);

        xmlReader.loadBeanDefinitions(new UrlResource(cfgUrl));

        springCtx.refresh();
    }
    catch (BeansException e) {
        throw new ServerLifecycleException("Failed to instantiate Spring XML application context: " +
            e.getMessage(), e);
    }

    Map cfgMap;

    try {
        // Note: Spring is not generics-friendly.
        cfgMap = springCtx.getBeansOfType(GridConfiguration.class);
    }
    catch (BeansException e) {
        throw new ServerLifecycleException("Failed to instantiate bean [type=" + GridConfiguration.class +
            ", err=" + e.getMessage() + ']', e);
    }

    if (cfgMap == null)
        throw new ServerLifecycleException("Failed to find a single grid factory configuration in: " + cfgUrl);

    if (cfgMap.isEmpty())
        throw new ServerLifecycleException("Can't find grid factory configuration in: " + cfgUrl);

    try {
        ExecutorService execSvc = null;

        MBeanServer mbeanSrv = null;

        for (GridConfiguration cfg : (Collection<GridConfiguration>)cfgMap.values()) {
            assert cfg != null;

            GridConfigurationAdapter adapter = new GridConfigurationAdapter(cfg);

            // Set logger.
            if (cfg.getGridLogger() == null)
                adapter.setGridLogger(log);

            if (cfg.getExecutorService() == null) {
                if (execSvc == null)
                    execSvc = workMgrName != null ?
                        new GridThreadWorkManagerExecutor(workMgrName) :
                        new GridThreadWorkManagerExecutor(J2EEWorkManager.getDefault());

                adapter.setExecutorService(execSvc);
            }

            if (cfg.getMBeanServer() == null) {
                if (mbeanSrv == null) {
                    InitialContext ctx = null;

                    try {
                        ctx = new InitialContext();

                        mbeanSrv = (MBeanServer)ctx.lookup("java:comp/jmx/runtime");
                    }
                    catch (Exception e) {
                        throw new IllegalArgumentException("MBean server was not provided and failed to obtain " +
                            "Weblogic MBean server.", e);
                    }
                    finally {
                        if (ctx != null)
                            ctx.close();
                    }
                }

                adapter.setMBeanServer(mbeanSrv);
            }

            Grid grid = G.start(adapter, springCtx);

            // A non-null grid means it started properly.
            if (grid != null)
                gridNames.add(grid.name());
        }

        return getClass().getSimpleName() + " started successfully.";
    }
    catch (GridException e) {
        // Stop only the grids that were started.
        for (String name : gridNames)
            G.stop(name, true);

        throw new ServerLifecycleException("Failed to start GridGain.", e);
    }
}