/**
 * Checks that gets work for explicit txs.
 *
 * @param ignite Ignite instance.
 * @param cache Cache to test.
 * @throws Exception If failed.
 */
private void checkExplicitTx(Ignite ignite, IgniteCache<String, String> cache) throws Exception {
    IgniteCache<String, String> asyncCache = cache.withAsync();

    Transaction tx = ignite.transactions().txStart();

    try {
        assertNull(cache.get("key1"));

        tx.commit();
    }
    finally {
        tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
        asyncCache.get("key2");

        assertNull(asyncCache.future().get());

        tx.commit();
    }
    finally {
        tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
        assertTrue(cache.getAll(F.asSet("key3", "key4")).isEmpty());

        tx.commit();
    }
    finally {
        tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
        asyncCache.getAll(F.asSet("key5", "key6"));

        assertTrue(((Map)asyncCache.future().get()).isEmpty());

        tx.commit();
    }
    finally {
        tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
        cache.put("key7", "key7");

        cache.remove("key7");

        assertNull(cache.get("key7"));

        tx.commit();
    }
    finally {
        tx.close();
    }

    checkEmpty(cache);
}
/**
 * Query all purchases made at a specific store for 3 specific products. This query uses
 * cross-cache joins between {@link DimStore}, {@link DimProduct} objects stored in
 * {@code 'replicated'} cache and {@link FactPurchase} objects stored in {@code 'partitioned'}
 * cache.
 *
 * @throws IgniteException If failed.
 */
private static void queryProductPurchases() {
    IgniteCache<Integer, FactPurchase> factCache = Ignition.ignite().cache(PARTITIONED_CACHE_NAME);

    // All purchases for certain product made at store2.
    // =================================================

    DimProduct p1 = rand(dataProduct.values());
    DimProduct p2 = rand(dataProduct.values());
    DimProduct p3 = rand(dataProduct.values());

    System.out.println("IDs of products [p1=" + p1.getId() + ", p2=" + p2.getId() + ", p3=" + p3.getId() + ']');

    // Create cross cache query to get all purchases made at store2
    // for specified products.
    QueryCursor<Cache.Entry<Integer, FactPurchase>> prodPurchases = factCache.query(new SqlQuery(
        FactPurchase.class,
        "from \"" + REPLICATED_CACHE_NAME + "\".DimStore, \"" + REPLICATED_CACHE_NAME + "\".DimProduct, " +
        "\"" + PARTITIONED_CACHE_NAME + "\".FactPurchase " +
        "where DimStore.id=FactPurchase.storeId and DimProduct.id=FactPurchase.productId " +
        "and DimStore.name=? and DimProduct.id in(?, ?, ?)")
        .setArgs("Store2", p1.getId(), p2.getId(), p3.getId()));

    printQueryResults("All purchases made at store2 for 3 specific products:", prodPurchases.getAll());
}
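// The rand(...) helper used above is not part of this snippet. Below is a
// minimal sketch of what it could look like, assuming it simply picks a
// uniformly random element from a collection; the actual example code may differ.

/**
 * Gets a random element from the given collection (hypothetical helper, see note above).
 *
 * @param c Collection to pick from.
 * @return Random element.
 */
private static <T> T rand(Collection<? extends T> c) {
    int n = ThreadLocalRandom.current().nextInt(c.size());

    int i = 0;

    for (T t : c) {
        if (i++ == n)
            return t;
    }

    throw new ConcurrentModificationException(); // Collection changed underneath us.
}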
/**
 * Tests offset and limit clauses for query.
 *
 * @throws Exception If failed.
 */
public void testOffsetLimit() throws Exception {
    IgniteCache<Integer, Integer> c =
        ignite(0).getOrCreateCache(cacheConfig("ints", true, Integer.class, Integer.class));

    try {
        List<Integer> res = new ArrayList<>();

        Random rnd = new GridRandom();

        for (int i = 0; i < 10; i++) {
            int val = rnd.nextInt(100);

            c.put(i, val);

            res.add(val);
        }

        Collections.sort(res);

        String qry = "select _val from Integer order by _val ";

        assertEqualsCollections(res, columnQuery(c, qry));
        assertEqualsCollections(res.subList(0, 0), columnQuery(c, qry + "limit ?", 0));
        assertEqualsCollections(res.subList(0, 3), columnQuery(c, qry + "limit ?", 3));
        assertEqualsCollections(res.subList(0, 9), columnQuery(c, qry + "limit ? offset ?", 9, 0));
        assertEqualsCollections(res.subList(3, 7), columnQuery(c, qry + "limit ? offset ?", 4, 3));
        assertEqualsCollections(res.subList(7, 9), columnQuery(c, qry + "limit ? offset ?", 2, 7));
        assertEqualsCollections(res.subList(8, 10), columnQuery(c, qry + "limit ? offset ?", 2, 8));
        assertEqualsCollections(res.subList(9, 10), columnQuery(c, qry + "limit ? offset ?", 1, 9));
        assertEqualsCollections(res.subList(10, 10), columnQuery(c, qry + "limit ? offset ?", 1, 10));
        assertEqualsCollections(res.subList(9, 10), columnQuery(c, qry + "limit ? offset abs(-(4 + ?))", 1, 5));
    }
    finally {
        c.destroy();
    }
}
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public void onEntryAccessed(boolean rmv, EvictableEntry<K, V> entry) { if (!entry.isCached()) return; IgniteCache<K, V> cache = entry.unwrap(IgniteCache.class); int size = cache.localSize(CachePeekMode.ONHEAP); for (int i = max; i < size; i++) { Cache.Entry<K, V> e = cache.randomEntry(); if (e != null) e.unwrap(EvictableEntry.class).evict(); } }
/** @throws Exception If failed. */
public void testGroupIndexOperations() throws Exception {
    IgniteCache<Integer, GroupIndexTestValue> c =
        ignite(0).getOrCreateCache(cacheConfig("grp", false, Integer.class, GroupIndexTestValue.class));

    try {
        // Check group index usage.
        String qry = "select 1 from GroupIndexTestValue ";

        String plan = columnQuery(c, "explain " + qry + "where a = 1 and b > 0").get(0).toString();

        info("Plan: " + plan);

        assertTrue(plan.contains("grpIdx"));

        // Sorted list.
        List<GroupIndexTestValue> list = F.asList(
            new GroupIndexTestValue(0, 0),
            new GroupIndexTestValue(0, 5),
            new GroupIndexTestValue(1, 1),
            new GroupIndexTestValue(1, 3),
            new GroupIndexTestValue(2, -1),
            new GroupIndexTestValue(2, 2));

        // Fill cache.
        for (int i = 0; i < list.size(); i++)
            c.put(i, list.get(i));

        // Check results.
        assertEquals(1, columnQuery(c, qry + "where a = 1 and b = 1").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b < 4").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b <= 3").size());
        assertEquals(1, columnQuery(c, qry + "where a = 1 and b < 3").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b > 0").size());
        assertEquals(1, columnQuery(c, qry + "where a = 1 and b > 1").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b >= 1").size());
        assertEquals(4, columnQuery(c, qry + "where a > 0 and b > 0").size());
        assertEquals(4, columnQuery(c, qry + "where a > 0 and b >= 1").size());
        assertEquals(3, columnQuery(c, qry + "where a > 0 and b > 1").size());
    }
    finally {
        c.destroy();
    }
}
/**
 * Query all purchases made at a specific store. This query uses cross-cache joins between
 * {@link DimStore} objects stored in {@code 'replicated'} cache and {@link FactPurchase}
 * objects stored in {@code 'partitioned'} cache.
 *
 * @throws IgniteException If failed.
 */
private static void queryStorePurchases() {
    IgniteCache<Integer, FactPurchase> factCache = Ignition.ignite().cache(PARTITIONED_CACHE_NAME);

    // All purchases for store1.
    // =========================

    // Create cross cache query to get all purchases made at store1.
    QueryCursor<Cache.Entry<Integer, FactPurchase>> storePurchases = factCache.query(new SqlQuery(
        FactPurchase.class,
        "from \"" + REPLICATED_CACHE_NAME + "\".DimStore, \"" + PARTITIONED_CACHE_NAME + "\".FactPurchase " +
        "where DimStore.id=FactPurchase.storeId and DimStore.name=?")
        .setArgs("Store1"));

    printQueryResults("All purchases made at store1:", storePurchases.getAll());
}
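// Both cross-cache query methods (queryProductPurchases and queryStorePurchases)
// delegate printing to a printQueryResults(...) helper that is not part of this
// snippet. A minimal sketch, assuming it just prints a header followed by each
// entry's value; the actual example may format output differently.

/**
 * Prints query results (hypothetical helper, see note above).
 *
 * @param msg Message to print before the results.
 * @param res Query results to print.
 */
private static void printQueryResults(String msg, Iterable<Cache.Entry<Integer, FactPurchase>> res) {
    System.out.println(msg);

    for (Cache.Entry<?, ?> e : res)
        System.out.println("    " + e.getValue());
}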
/**
 * Checks that cache is empty.
 *
 * @param cache Cache to check.
 * @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted while
 *     sleeping.
 */
@SuppressWarnings({"ErrorNotRethrown", "TypeMayBeWeakened"})
private void checkEmpty(IgniteCache<String, String> cache) throws IgniteInterruptedCheckedException {
    for (int i = 0; i < 3; i++) {
        try {
            assertFalse(cache.iterator().hasNext());

            break;
        }
        catch (AssertionError e) {
            if (i == 2)
                throw e;

            info(">>> Cache is not empty, flushing evictions.");

            U.sleep(1000);
        }
    }
}
/** {@inheritDoc} */
@Override public void testEvictExpired() throws Exception {
    IgniteCache<String, Integer> cache = jcache();

    String key = primaryKeysForCache(cache, 1).get(0);

    cache.put(key, 1);

    assertEquals((Integer)1, cache.get(key));

    long ttl = 500;

    grid(0).cache(null).withExpiryPolicy(new TouchedExpiryPolicy(new Duration(MILLISECONDS, ttl))).put(key, 1);

    Thread.sleep(ttl + 100);

    // Expired entry should not be swapped.
    cache.localEvict(Collections.singleton(key));

    assertNull(cache.localPeek(key, CachePeekMode.ONHEAP));

    cache.localPromote(Collections.singleton(key));

    assertNull(cache.localPeek(key, CachePeekMode.ONHEAP));

    assertEquals(0, cache.localSize());

    load(cache, key, true);

    Affinity<String> aff = ignite(0).affinity(null);

    for (int i = 0; i < gridCount(); i++) {
        if (aff.isPrimaryOrBackup(grid(i).cluster().localNode(), key))
            assertEquals((Integer)1, peek(jcache(i), key));
    }
}
/** {@inheritDoc} */
@Override public void testClear() throws Exception {
    IgniteCache<String, Integer> nearCache = jcache();
    IgniteCache<String, Integer> primary = fullCache();

    Collection<String> keys = primaryKeysForCache(primary, 3);

    Map<String, Integer> vals = new HashMap<>();

    int i = 0;

    for (String key : keys) {
        nearCache.put(key, i);

        vals.put(key, i);

        i++;
    }

    i = 0;

    for (String key : keys)
        assertEquals((Integer)i++, nearCache.localPeek(key, CachePeekMode.ONHEAP));

    nearCache.clear();

    for (String key : keys)
        assertNull(nearCache.localPeek(key, CachePeekMode.ONHEAP));

    for (Map.Entry<String, Integer> entry : vals.entrySet())
        nearCache.put(entry.getKey(), entry.getValue());

    i = 0;

    for (String key : keys)
        assertEquals((Integer)i++, nearCache.localPeek(key, CachePeekMode.ONHEAP));
}
/**
 * JUnit.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings({"TooBroadScope"})
public void testRestarts() throws Exception {
    int duration = 60 * 1000;
    int qryThreadNum = 10;

    final long nodeLifeTime = 2 * 1000;
    final int logFreq = 20;

    final IgniteCache<Integer, Integer> cache = grid(0).cache(null);

    assert cache != null;

    for (int i = 0; i < KEY_CNT; i++)
        cache.put(i, i);

    assertEquals(KEY_CNT, cache.localSize());

    final AtomicInteger qryCnt = new AtomicInteger();

    final AtomicBoolean done = new AtomicBoolean();

    IgniteInternalFuture<?> fut1 = multithreadedAsync(
        new CAX() {
            @Override public void applyx() throws IgniteCheckedException {
                while (!done.get()) {
                    Collection<Cache.Entry<Integer, Integer>> res =
                        cache.query(new SqlQuery(Integer.class, "_val >= 0")).getAll();

                    assertFalse(res.isEmpty());

                    int c = qryCnt.incrementAndGet();

                    if (c % logFreq == 0)
                        info("Executed queries: " + c);
                }
            }
        },
        qryThreadNum);

    final AtomicInteger restartCnt = new AtomicInteger();

    CollectingEventListener lsnr = new CollectingEventListener();

    for (int i = 0; i < GRID_CNT; i++)
        grid(i).events().localListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    IgniteInternalFuture<?> fut2 = multithreadedAsync(
        new Callable<Object>() {
            @SuppressWarnings({"BusyWait"})
            @Override public Object call() throws Exception {
                while (!done.get()) {
                    int idx = GRID_CNT;

                    startGrid(idx);

                    Thread.sleep(nodeLifeTime);

                    stopGrid(idx);

                    int c = restartCnt.incrementAndGet();

                    if (c % logFreq == 0)
                        info("Node restarts: " + c);
                }

                return true;
            }
        },
        1);

    Thread.sleep(duration);

    done.set(true);

    fut1.get();
    fut2.get();

    info("Awaiting rebalance events [restartCnt=" + restartCnt.get() + ']');

    boolean success = lsnr.awaitEvents(GRID_CNT * 2 * restartCnt.get(), 15000);

    for (int i = 0; i < GRID_CNT; i++)
        grid(i).events().stopLocalListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    assert success;
}
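// The CollectingEventListener used above is not shown in this snippet. A
// minimal sketch of what such a listener could look like, assuming it simply
// counts events and lets callers await a target count; the actual test helper
// may differ.

/** Listener that counts received events (hypothetical helper, see note above). */
private static class CollectingEventListener implements IgnitePredicate<Event> {
    /** Number of events observed so far. */
    private int evtCnt;

    /** {@inheritDoc} */
    @Override public synchronized boolean apply(Event evt) {
        evtCnt++;

        notifyAll();

        return true; // Return true to keep listening.
    }

    /**
     * Waits until at least {@code cnt} events were observed.
     *
     * @param cnt Expected event count.
     * @param timeout Timeout in milliseconds.
     * @return {@code true} if the expected count was reached before the timeout.
     * @throws InterruptedException If interrupted while waiting.
     */
    public synchronized boolean awaitEvents(long cnt, long timeout) throws InterruptedException {
        long end = System.currentTimeMillis() + timeout;

        while (evtCnt < cnt) {
            long left = end - System.currentTimeMillis();

            if (left <= 0)
                return false;

            wait(left);
        }

        return true;
    }
}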
/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
            @Override public Object call() throws Exception {
                IgfsInputStream in0 = igfs.open(FILE);

                in0.seek(blockSize * 2);

                try {
                    in0.read(readBuf);
                }
                finally {
                    U.closeQuiet(in0);
                }

                return null;
            }
        },
        IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
/**
 * Checks that gets work for implicit txs.
 *
 * @param cache Cache to test.
 * @throws Exception If failed.
 */
private void checkImplicitTx(IgniteCache<String, String> cache) throws Exception {
    assertNull(cache.get("key1"));

    IgniteCache<String, String> asyncCache = cache.withAsync();

    asyncCache.get("key2");

    assertNull(asyncCache.future().get());

    assertTrue(cache.getAll(F.asSet("key3", "key4")).isEmpty());

    asyncCache.getAll(F.asSet("key5", "key6"));

    assertTrue(((Map)asyncCache.future().get()).isEmpty());

    cache.put("key7", "key7");

    cache.remove("key7", "key7");

    assertNull(cache.get("key7"));

    checkEmpty(cache);
}
/**
 * @param c Cache.
 * @param qry Query.
 * @param args Arguments.
 * @return Column as list.
 */
private static <X> List<X> columnQuery(IgniteCache<?, ?> c, String qry, Object... args) {
    return column(0, c.query(new SqlFieldsQuery(qry).setArgs(args)).getAll());
}
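// The column(...) helper above is not part of this snippet. A minimal sketch,
// assuming it simply projects a single column out of the List<List<?>> rows
// returned by a SqlFieldsQuery cursor; the actual test helper may differ.

/**
 * Extracts one column from fields-query rows (hypothetical helper, see note above).
 *
 * @param idx Column index.
 * @param rows Rows, each a list of column values.
 * @return Values of the column at the given index.
 */
@SuppressWarnings("unchecked")
private static <X> List<X> column(int idx, List<List<?>> rows) {
    List<X> res = new ArrayList<>(rows.size());

    for (List<?> row : rows)
        res.add((X)row.get(idx));

    return res;
}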