/**
 * Query all purchases made at a specific store for 3 specific products. This query uses
 * cross-cache joins between {@link DimStore}, {@link DimProduct} objects stored in {@code 'replicated'}
 * cache and {@link FactPurchase} objects stored in {@code 'partitioned'} cache.
 *
 * @throws IgniteException If failed.
 */
private static void queryProductPurchases() {
    IgniteCache<Integer, FactPurchase> factCache = Ignition.ignite().cache(PARTITIONED_CACHE_NAME);

    // All purchases for certain products made at store2.
    // ===================================================

    DimProduct p1 = rand(dataProduct.values());
    DimProduct p2 = rand(dataProduct.values());
    DimProduct p3 = rand(dataProduct.values());

    System.out.println(
        "IDs of products [p1=" + p1.getId() + ", p2=" + p2.getId() + ", p3=" + p3.getId() + ']');

    // Create cross cache query to get all purchases made at store2
    // for specified products.
    QueryCursor<Cache.Entry<Integer, FactPurchase>> prodPurchases =
        factCache.query(
            new SqlQuery(
                FactPurchase.class,
                "from \"" + REPLICATED_CACHE_NAME + "\".DimStore, \""
                    + REPLICATED_CACHE_NAME + "\".DimProduct, "
                    + "\"" + PARTITIONED_CACHE_NAME + "\".FactPurchase "
                    + "where DimStore.id=FactPurchase.storeId and DimProduct.id=FactPurchase.productId "
                    + "and DimStore.name=? and DimProduct.id in(?, ?, ?)")
                .setArgs("Store2", p1.getId(), p2.getId(), p3.getId()));

    printQueryResults(
        "All purchases made at store2 for 3 specific products:", prodPurchases.getAll());
}
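// NOTE: the rand(...) helper used above is not part of this snippet. Below is a minimal sketch of
// what it might look like, assuming it simply returns a random element of the given collection.
// The signature and the use of ThreadLocalRandom are assumptions, not the example's actual code
// (assumes java.util.Collection, java.util.Iterator and java.util.concurrent.ThreadLocalRandom
// imports).

/**
 * Returns a random element from the given non-empty collection.
 *
 * @param c Collection to pick from.
 * @return Random element of {@code c}.
 */
private static <T> T rand(Collection<? extends T> c) {
    int idx = ThreadLocalRandom.current().nextInt(c.size());

    Iterator<? extends T> it = c.iterator();

    // Skip idx elements and return the next one.
    for (int i = 0; i < idx; i++)
        it.next();

    return it.next();
}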
/**
 * Query all purchases made at a specific store. This query uses cross-cache joins between
 * {@link DimStore} objects stored in {@code 'replicated'} cache and {@link FactPurchase} objects
 * stored in {@code 'partitioned'} cache.
 *
 * @throws IgniteException If failed.
 */
private static void queryStorePurchases() {
    IgniteCache<Integer, FactPurchase> factCache = Ignition.ignite().cache(PARTITIONED_CACHE_NAME);

    // All purchases for store1.
    // =========================

    // Create cross cache query to get all purchases made at store1.
    QueryCursor<Cache.Entry<Integer, FactPurchase>> storePurchases =
        factCache.query(
            new SqlQuery(
                FactPurchase.class,
                "from \"" + REPLICATED_CACHE_NAME + "\".DimStore, \""
                    + PARTITIONED_CACHE_NAME + "\".FactPurchase "
                    + "where DimStore.id=FactPurchase.storeId and DimStore.name=?")
                .setArgs("Store1"));

    printQueryResults("All purchases made at store1:", storePurchases.getAll());
}
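// NOTE: both query methods above rely on the joined classes being registered as SQL-indexed types
// in their caches. A minimal configuration sketch follows, assuming the caches are created via
// setIndexedTypes; the key types, method name and creation point are assumptions and the example's
// real setup may differ (assumes org.apache.ignite.configuration.CacheConfiguration and
// org.apache.ignite.cache.CacheMode imports).

private static void sketchCacheConfiguration(Ignite ignite) {
    // Replicated cache holding both dimension types used in the joins.
    CacheConfiguration<Integer, Object> dimCfg = new CacheConfiguration<>(REPLICATED_CACHE_NAME);

    dimCfg.setCacheMode(CacheMode.REPLICATED);
    dimCfg.setIndexedTypes(Integer.class, DimStore.class, Integer.class, DimProduct.class);

    // Partitioned cache holding the fact table.
    CacheConfiguration<Integer, FactPurchase> factCfg =
        new CacheConfiguration<>(PARTITIONED_CACHE_NAME);

    factCfg.setCacheMode(CacheMode.PARTITIONED);
    factCfg.setIndexedTypes(Integer.class, FactPurchase.class);

    ignite.getOrCreateCache(dimCfg);
    ignite.getOrCreateCache(factCfg);
}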
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings({"TooBroadScope"})
public void testRestarts() throws Exception {
    int duration = 60 * 1000;
    int qryThreadNum = 10;
    final long nodeLifeTime = 2 * 1000;
    final int logFreq = 20;

    final IgniteCache<Integer, Integer> cache = grid(0).cache(null);

    assert cache != null;

    for (int i = 0; i < KEY_CNT; i++)
        cache.put(i, i);

    assertEquals(KEY_CNT, cache.localSize());

    final AtomicInteger qryCnt = new AtomicInteger();

    final AtomicBoolean done = new AtomicBoolean();

    IgniteInternalFuture<?> fut1 =
        multithreadedAsync(
            new CAX() {
                @Override public void applyx() throws IgniteCheckedException {
                    while (!done.get()) {
                        Collection<Cache.Entry<Integer, Integer>> res =
                            cache.query(new SqlQuery(Integer.class, "_val >= 0")).getAll();

                        assertFalse(res.isEmpty());

                        int c = qryCnt.incrementAndGet();

                        if (c % logFreq == 0)
                            info("Executed queries: " + c);
                    }
                }
            },
            qryThreadNum);

    final AtomicInteger restartCnt = new AtomicInteger();

    CollectingEventListener lsnr = new CollectingEventListener();

    for (int i = 0; i < GRID_CNT; i++)
        grid(i).events().localListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    IgniteInternalFuture<?> fut2 =
        multithreadedAsync(
            new Callable<Object>() {
                @SuppressWarnings({"BusyWait"})
                @Override public Object call() throws Exception {
                    while (!done.get()) {
                        int idx = GRID_CNT;

                        startGrid(idx);

                        Thread.sleep(nodeLifeTime);

                        stopGrid(idx);

                        int c = restartCnt.incrementAndGet();

                        if (c % logFreq == 0)
                            info("Node restarts: " + c);
                    }

                    return true;
                }
            },
            1);

    Thread.sleep(duration);

    done.set(true);

    fut1.get();
    fut2.get();

    info("Awaiting rebalance events [restartCnt=" + restartCnt.get() + ']');

    boolean success = lsnr.awaitEvents(GRID_CNT * 2 * restartCnt.get(), 15000);

    for (int i = 0; i < GRID_CNT; i++)
        grid(i).events().stopLocalListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    assert success;
}
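// NOTE: CollectingEventListener is referenced above but not shown in this snippet. Below is a
// sketch of what a counting listener with awaitEvents(...) could look like, assuming it is a plain
// IgnitePredicate<Event>; the field names and wait/notify synchronization are assumptions, not the
// actual test class (assumes org.apache.ignite.lang.IgnitePredicate and
// org.apache.ignite.events.Event imports).

/** Listener that counts received events and lets the test await a given number of them. */
private static class CollectingEventListener implements IgnitePredicate<Event> {
    /** Number of events received so far. */
    private int evtCnt;

    /** {@inheritDoc} */
    @Override public synchronized boolean apply(Event evt) {
        evtCnt++;

        notifyAll();

        return true; // Keep listening.
    }

    /**
     * Waits until at least {@code cnt} events are received or the timeout elapses.
     *
     * @param cnt Expected event count.
     * @param timeout Maximum time to wait, in milliseconds.
     * @return {@code true} if the expected number of events arrived in time.
     * @throws InterruptedException If the wait was interrupted.
     */
    public synchronized boolean awaitEvents(long cnt, long timeout) throws InterruptedException {
        long end = System.currentTimeMillis() + timeout;

        while (evtCnt < cnt) {
            long left = end - System.currentTimeMillis();

            if (left <= 0)
                return false;

            wait(left);
        }

        return true;
    }
}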
/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
            @Override public Object call() throws Exception {
                IgfsInputStream in0 = igfs.open(FILE);

                in0.seek(blockSize * 2);

                try {
                    in0.read(readBuf);
                }
                finally {
                    U.closeQuiet(in0);
                }

                return null;
            }
        },
        IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
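// NOTE: the containsKey/U.sleep loop above is a fixed 10-iteration poll. Below is a small helper
// sketch that expresses the same wait with an explicit timeout; the helper name and the use of
// java.util.function.BooleanSupplier are assumptions and not part of the original test.

/**
 * Polls the given condition until it holds or the timeout elapses.
 *
 * @param cond Condition to wait for.
 * @param timeoutMs Maximum time to wait, in milliseconds.
 * @return {@code true} if the condition became true within the timeout.
 * @throws IgniteInterruptedCheckedException If the sleep was interrupted.
 */
private static boolean waitFor(BooleanSupplier cond, long timeoutMs)
    throws IgniteInterruptedCheckedException {
    long end = System.currentTimeMillis() + timeoutMs;

    while (System.currentTimeMillis() < end) {
        if (cond.getAsBoolean())
            return true;

        U.sleep(100); // Same polling interval as the loop above.
    }

    return cond.getAsBoolean();
}

// Example usage replacing the loop above:
//     waitFor(() -> dataCache.containsKey(key), 1000);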