/** {@inheritDoc} */
@Override public V unswap(K key) throws GridException {
    ctx.denyOnFlags(F.asList(READ, SKIP_SWAP));

    // Unswap only from DHT. Near cache does not have swap storage.
    return dht.unswap(key);
}
/** {@inheritDoc} */
@Override public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) {
    if (F.isEmpty(lsnr))
        synchronized (mux) {
            lsnrs.clear();
        }
    else
        synchronized (mux) {
            lsnrs.removeAll(F.asList(lsnr));
        }
}
/**
 * Checks entry for empty value.
 *
 * @param entry Entry to check.
 * @return {@code True} if entry is empty.
 */
private boolean empty(GridCacheEntry<K, V> entry) {
    try {
        return entry.peek(F.asList(GLOBAL)) == null;
    }
    catch (GridException e) {
        U.error(null, e.getMessage(), e);

        assert false : "Should never happen: " + e;

        return false;
    }
}
/**
 * @param nodeId Node ID.
 * @param retryCnt Number of retries.
 */
private void sendAllPartitions(final UUID nodeId, final int retryCnt) {
    ClusterNode n = cctx.node(nodeId);

    try {
        if (n != null)
            sendAllPartitions(F.asList(n), exchId);
    }
    catch (IgniteCheckedException e) {
        if (e instanceof ClusterTopologyCheckedException || !cctx.discovery().alive(n)) {
            log.debug("Failed to send full partition map to node, node left grid " +
                "[rmtNode=" + nodeId + ", exchangeId=" + exchId + ']');

            return;
        }

        if (retryCnt > 0) {
            long timeout = cctx.gridConfig().getNetworkSendRetryDelay();

            LT.error(log, e, "Failed to send full partition map to node (will retry after timeout) " +
                "[node=" + nodeId + ", exchangeId=" + exchId + ", timeout=" + timeout + ']');

            cctx.time().addTimeoutObject(new GridTimeoutObjectAdapter(timeout) {
                @Override public void onTimeout() {
                    sendAllPartitions(nodeId, retryCnt - 1);
                }
            });
        }
        else
            U.error(log, "Failed to send full partition map [node=" + n + ", exchangeId=" + exchId + ']', e);
    }
}
/**
 * Finds all files in a folder and in its sub-tree of specified depth.
 *
 * @param file Starting folder.
 * @param maxDepth Depth of the tree. If 1 - just look in the folder, no sub-folders.
 * @param filter File filter.
 * @return List of found files.
 */
public static List<VisorLogFile> fileTree(File file, int maxDepth, @Nullable FileFilter filter) {
    if (file.isDirectory()) {
        File[] files = (filter == null) ? file.listFiles() : file.listFiles(filter);

        if (files == null)
            return Collections.emptyList();

        List<VisorLogFile> res = new ArrayList<>(files.length);

        for (File f : files) {
            if (f.isFile() && f.length() > 0)
                res.add(new VisorLogFile(f));
            else if (maxDepth > 1)
                res.addAll(fileTree(f, maxDepth - 1, filter));
        }

        return res;
    }

    return F.asList(new VisorLogFile(file));
}
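// Hypothetical usage sketch, not taken from the original sources: collects non-empty *.log files
// up to two directory levels below an assumed "work/log" folder. Note that the filter is also
// applied to sub-directories, so it must accept them for the recursion to descend; a Java 8+
// lambda is used here for brevity (java.io.FileFilter is a functional interface).
public static void printLogFiles() {
    File logDir = new File("work/log"); // Example path, an assumption for illustration only.

    List<VisorLogFile> logFiles = fileTree(logDir, 2,
        f -> f.isDirectory() || f.getName().endsWith(".log"));

    for (VisorLogFile logFile : logFiles)
        System.out.println(logFile);
}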
/** @throws Exception If failed. */
public void testGroupIndexOperations() throws Exception {
    IgniteCache<Integer, GroupIndexTestValue> c = ignite(0).getOrCreateCache(
        cacheConfig("grp", false, Integer.class, GroupIndexTestValue.class));

    try {
        // Check group index usage.
        String qry = "select 1 from GroupIndexTestValue ";

        String plan = columnQuery(c, "explain " + qry + "where a = 1 and b > 0").get(0).toString();

        info("Plan: " + plan);

        assertTrue(plan.contains("grpIdx"));

        // Sorted list.
        List<GroupIndexTestValue> list = F.asList(
            new GroupIndexTestValue(0, 0),
            new GroupIndexTestValue(0, 5),
            new GroupIndexTestValue(1, 1),
            new GroupIndexTestValue(1, 3),
            new GroupIndexTestValue(2, -1),
            new GroupIndexTestValue(2, 2));

        // Fill cache.
        for (int i = 0; i < list.size(); i++)
            c.put(i, list.get(i));

        // Check results.
        assertEquals(1, columnQuery(c, qry + "where a = 1 and b = 1").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b < 4").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b <= 3").size());
        assertEquals(1, columnQuery(c, qry + "where a = 1 and b < 3").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b > 0").size());
        assertEquals(1, columnQuery(c, qry + "where a = 1 and b > 1").size());
        assertEquals(2, columnQuery(c, qry + "where a = 1 and b >= 1").size());
        assertEquals(4, columnQuery(c, qry + "where a > 0 and b > 0").size());
        assertEquals(4, columnQuery(c, qry + "where a > 0 and b >= 1").size());
        assertEquals(3, columnQuery(c, qry + "where a > 0 and b > 1").size());
    }
    finally {
        c.destroy();
    }
}
/** @throws GridException If failed. */
public void testAffinity() throws GridException {
    Grid g1 = grid(1);
    Grid g2 = grid(2);

    assert caches(g1).size() == 0;
    assert F.first(caches(g2)).getCacheMode() == PARTITIONED;

    Map<GridNode, Collection<String>> map = g1.mapKeysToNodes(null, F.asList("1"));

    assertNotNull(map);
    assertEquals("Invalid map size: " + map.size(), 1, map.size());
    assertEquals(F.first(map.keySet()), g2.localNode());

    UUID id1 = g1.mapKeyToNode(null, "2").id();

    assertNotNull(id1);
    assertEquals(g2.localNode().id(), id1);

    UUID id2 = g1.mapKeyToNode(null, "3").id();

    assertNotNull(id2);
    assertEquals(g2.localNode().id(), id2);
}
/**
 * Removes locks regardless of whether they are owned or not for given version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" +
                            ver + ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/** {@inheritDoc} */
@Override public Collection<GridRichNode> affinityNodes(K key) {
    return F.asList(ctx.localNode());
}
/** {@inheritDoc} */
@Override public GridFuture<?> addData(Map.Entry<K, V> entry) throws GridException, IllegalStateException {
    A.notNull(entry, "entry");

    return addData(F.asList(entry));
}
/**
 * @param nodeId Node ID to send message to.
 * @param msg Message to send.
 * @throws GridException If send failed.
 */
private void send0(UUID nodeId, Object msg) throws GridException {
    GridNode node = ctx.kernalContext().discovery().node(nodeId);

    ctx.kernalContext().io().sendUserMessage(F.asList(node), msg, GridTopic.TOPIC_HADOOP, false, 0);
}
/** @throws Exception Thrown if test failed. */
public void testA() throws Exception {
    Collection<Integer> set = new GridConcurrentWeakHashSet<>();

    Integer i = 1;

    assert set.add(i);
    assert !set.add(i);

    assert set.contains(i);

    assert set.size() == 1;

    Collection<Integer> c = F.asList(2, 3, 4, 5);

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.containsAll(c);

    assert set.size() == 1 + c.size();

    assert set.remove(i);
    assert !set.remove(i);

    assert !set.contains(i);

    assert set.size() == c.size();

    assert set.removeAll(c);
    assert !set.removeAll(c);

    assert !set.containsAll(c);

    assert set.isEmpty();

    Collection<Integer> c1 = Arrays.asList(1, 3, 5, 7, 9);

    int cnt = 0;

    for (Iterator<Integer> iter = set.iterator(); iter.hasNext(); cnt++)
        c1.contains(iter.next());

    assert set.size() == cnt;

    assert set.size() == set.toArray().length;

    assert set.addAll(c1);

    assert set.retainAll(c);
    assert !set.retainAll(c);

    Collection<Integer> c2 = F.retain(c1, true, c);

    assert set.containsAll(c2);
    assert !set.containsAll(c1);
    assert !set.containsAll(c);

    assert set.size() == c2.size();

    set.clear();

    assert set.isEmpty();

    try {
        set.iterator().next();

        assert false;
    }
    catch (NoSuchElementException ignored) {
        assert true;
    }

    try {
        set.add(null);

        assert false;
    }
    catch (NullPointerException ignored) {
        assert true;
    }
}
/** @throws Exception Thrown if test failed. */
@SuppressWarnings({"UnusedAssignment"})
public void testB() throws Exception {
    Collection<SampleBean> set = new GridConcurrentWeakHashSet<>();

    SampleBean bean1 = new SampleBean(1);

    assert set.add(bean1);
    assert !set.add(bean1);

    assert set.size() == 1;

    assert set.contains(bean1);

    bean1 = null;

    gc();

    assert set.isEmpty();

    Collection<SampleBean> c = F.asList(
        new SampleBean(1),
        new SampleBean(2),
        new SampleBean(3),
        new SampleBean(4));

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.size() == c.size();

    assert set.containsAll(c);

    c = null;

    gc();

    assert set.isEmpty();

    SampleBean b1 = new SampleBean(1);
    SampleBean b2 = new SampleBean(2);
    SampleBean b3 = new SampleBean(3);
    SampleBean b4 = new SampleBean(4);
    SampleBean b5 = new SampleBean(5);

    set.add(b1);
    set.add(b2);
    set.add(b3);
    set.add(b4);
    set.add(b5);

    Iterator iter = set.iterator();

    assert iter.hasNext();

    b2 = null;
    b3 = null;
    b4 = null;

    gc();

    int cnt = 0;

    while (iter.hasNext()) {
        info(iter.next().toString());

        cnt++;
    }

    assert set.size() == cnt;
}
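// The weak-set tests above rely on a gc() helper that is not shown in this excerpt; its job is to
// make the JVM actually collect the dropped references before the emptiness assertions run.
// A minimal sketch of such a helper (the sentinel approach and the one-second budget are
// assumptions for illustration, not taken from the original test base) could look like this:
private static void gc() throws InterruptedException {
    // Sentinel weak reference: once it is cleared, a collection cycle has clearly happened.
    java.lang.ref.WeakReference<Object> sentinel = new java.lang.ref.WeakReference<>(new Object());

    long deadline = System.currentTimeMillis() + 1_000;

    // Keep requesting GC until the sentinel is cleared or the time budget runs out.
    while (sentinel.get() != null && System.currentTimeMillis() < deadline) {
        System.gc();

        Thread.sleep(50);
    }
}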