/**
 * @param nodes Nodes.
 * @param id ID.
 * @throws IgniteCheckedException If failed.
 */
private void sendAllPartitions(Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
    throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal()) {
            AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

            boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

            if (ready)
                m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
        log.debug("Sending full partition map [nodeIds=" + F.viewReadOnly(nodes, F.node2id()) +
            ", exchId=" + exchId + ", msg=" + m + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
}
/** @throws Exception If failed. */
public void testCreateFileColocated() throws Exception {
    GridGgfsPath path = new GridGgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    GridUuid affKey;

    long idx = 0;

    while (true) {
        affKey = new GridUuid(uuid, idx);

        if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id()))
            break;

        idx++;
    }

    try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15M, should be enough to test distribution.
        for (int i = 0; i < 15; i++)
            out.write(new byte[1024 * 1024]);
    }

    GridGgfsFile info = fs.info(path);

    Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
/** @throws Exception If failed. */
public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++)
        gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
        String key = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

        assertEquals(primaryNodeId, partitioned.affinity(key));

        // Must go to the primary node only. Since backup count is 0, the value must be present
        // on the primary node only.
        partitioned.put(key, "val" + key);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

            if (primaryNodeId.equals(entry.getKey()))
                assertEquals("val" + key, val);
            else
                assertNull(val);
        }
    }

    // Now check that we will see the value in the near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
        String pinnedKey = "key" + i;

        UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

        UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

        GridClientNode node = compute.node(pinnedNodeId);

        partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

        for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
            Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

            if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
                assertEquals("val" + pinnedKey, val);
            else
                assertNull(val);
        }
    }
}
/** {@inheritDoc} */
@Override public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) {
    if (F.isEmpty(lsnr))
        synchronized (mux) {
            lsnrs.clear();
        }
    else
        synchronized (mux) {
            lsnrs.removeAll(F.asList(lsnr));
        }
}
/**
 * @param cacheId Cache ID.
 * @return {@code True} if local client has been added.
 */
public boolean isLocalClientAdded(int cacheId) {
    if (!F.isEmpty(reqs)) {
        for (DynamicCacheChangeRequest req : reqs) {
            if (req.start() && F.eq(req.initiatingNodeId(), cctx.localNodeId())) {
                if (CU.cacheId(req.cacheName()) == cacheId)
                    return true;
            }
        }
    }

    return false;
}
/**
 * Checks for explicit events configuration.
 *
 * @param ignite Grid instance.
 * @return {@code true} if all task events are explicitly specified in the configuration.
 */
public static boolean checkExplicitTaskMonitoring(Ignite ignite) {
    int[] evts = ignite.configuration().getIncludeEventTypes();

    if (F.isEmpty(evts))
        return false;

    for (int evt : VISOR_TASK_EVTS) {
        if (!F.contains(evts, evt))
            return false;
    }

    return true;
}
/**
 * @param cacheId Cache ID to check.
 * @param topVer Topology version.
 * @return {@code True} if cache was added during this exchange.
 */
public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) {
    if (!F.isEmpty(reqs)) {
        for (DynamicCacheChangeRequest req : reqs) {
            if (req.start() && !req.clientStartOnly()) {
                if (CU.cacheId(req.cacheName()) == cacheId)
                    return true;
            }
        }
    }

    GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

    return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer);
}
/** @throws Exception If failed. */
public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr = new GridClientTopologyListener() {
        @Override public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
        }

        @Override public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
        }
    };

    client.addTopologyListener(lsnr);

    try {
        Grid g = startGrid(NODES_CNT + 1);

        UUID id = g.localNode().id();

        assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, added.size());
        assertEquals(id, F.first(added));

        stopGrid(NODES_CNT + 1);

        assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, rmvd.size());
        assertEquals(id, F.first(rmvd));
    }
    finally {
        client.removeTopologyListener(lsnr);

        stopGrid(NODES_CNT + 1);
    }
}
/** {@inheritDoc} */
@Override public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer1=" + topVer +
            ", topVer2=" + this.topVer + ", cache=" + cctx.name() + ", node2part=" + node2part + ']';

        Collection<ClusterNode> nodes = null;

        Collection<UUID> nodeIds = part2node.get(p);

        if (!F.isEmpty(nodeIds)) {
            Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

            for (UUID nodeId : nodeIds) {
                if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
                    ClusterNode n = cctx.discovery().node(nodeId);

                    if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                        if (nodes == null) {
                            nodes = new ArrayList<>(affNodes.size() + 2);

                            nodes.addAll(affNodes);
                        }

                        nodes.add(n);
                    }
                }
            }
        }

        return nodes != null ? nodes : affNodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * Prints a phrase on the grid nodes by running anonymous callable objects and calculating the total
 * number of letters.
 *
 * @param phrase Phrase to print on one of the grid nodes.
 * @throws GridException If failed.
 */
private static void countLettersCallable(String phrase) throws GridException {
    X.println(">>> Starting countLettersCallable() example...");

    Collection<Callable<Integer>> calls = new HashSet<Callable<Integer>>();

    for (final String word : phrase.split(" "))
        calls.add(new GridCallable<Integer>() { // Create executable logic.
            @Override public Integer call() throws Exception {
                // Print out a given word, just so we can
                // see which node is doing what.
                X.println(">>> Executing word: " + word);

                // Return the length of a given word, i.e. number of letters.
                return word.length();
            }
        });

    // Explicitly execute the collection of callable objects and receive a result.
    Collection<Integer> results = G.grid().call(SPREAD, calls);

    // Add up all results using convenience 'sum()' method on GridFunc class.
    int letterCnt = F.sum(results);

    X.println(">>>");
    X.println(">>> Finished execution of counting letters with callables based on GridGain 3.0 API.");
    X.println(">>> You should see the phrase '" + phrase + "' printed out on the nodes.");
    X.println(">>> Total number of letters in the phrase is '" + letterCnt + "'.");
    X.println(">>> Check all nodes for output (this node is also part of the grid).");
    X.println(">>>");
}
/**
 * Prints a phrase on the grid nodes by running anonymous closure objects and calculating the total
 * number of letters.
 *
 * @param phrase Phrase to print on one of the grid nodes.
 * @throws GridException If failed.
 */
private static void countLettersClosure(String phrase) throws GridException {
    X.println(">>> Starting countLettersClosure() example...");

    // Explicitly execute the collection of callable objects and receive a result.
    Collection<Integer> results = G.grid().call(
        SPREAD,
        new GridClosure<String, Integer>() { // Create executable logic.
            @Override public Integer apply(String word) {
                // Print out a given word, just so we can
                // see which node is doing what.
                X.println(">>> Executing word: " + word);

                // Return the length of a given word, i.e. number of letters.
                return word.length();
            }
        },
        Arrays.asList(phrase.split(" "))); // Collection of arguments for closures.

    // Add up all results using convenience 'sum()' method.
    int letterCnt = F.sum(results);

    X.println(">>>");
    X.println(">>> Finished execution of counting letters with closure based on GridGain 3.0 API.");
    X.println(">>> You should see the phrase '" + phrase + "' printed out on the nodes.");
    X.println(">>> Total number of letters in the phrase is '" + letterCnt + "'.");
    X.println(">>> Check all nodes for output (this node is also part of the grid).");
    X.println(">>>");
}
/**
 * @param mapping Mapping to order.
 * @param committedVers Committed versions.
 * @param rolledbackVers Rolled back versions.
 */
void orderCompleted(GridDistributedTxMapping<K, V> mapping, Collection<GridCacheVersion> committedVers,
    Collection<GridCacheVersion> rolledbackVers) {
    for (GridCacheTxEntry<K, V> txEntry : F.concat(false, mapping.reads(), mapping.writes())) {
        while (true) {
            GridDistributedCacheEntry<K, V> entry = (GridDistributedCacheEntry<K, V>)txEntry.cached();

            try {
                // Handle explicit locks.
                GridCacheVersion base = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;

                entry.doneRemote(xidVer, base, committedVers, rolledbackVers);

                if (ec())
                    entry.recheck();

                break;
            }
            catch (GridCacheEntryRemovedException ignored) {
                assert entry.obsoleteVersion() != null;

                if (log.isDebugEnabled())
                    log.debug("Replacing obsolete entry in remote transaction [entry=" + entry +
                        ", tx=" + this + ']');

                // Replace the entry.
                txEntry.cached(cctx.cache().entryEx(txEntry.key()), entry.keyBytes());
            }
        }
    }
}
/**
 * Prints every word of a phrase on different nodes.
 *
 * @param phrase Phrase from which to print words on different nodes.
 * @throws GridException If failed.
 */
private static void spreadWordsClosure(String phrase) throws GridException {
    X.println(">>> Starting spreadWordsClosure() example...");

    // Splits the passed in phrase into words and prints every word
    // on an individual grid node. If there are more words than nodes -
    // some nodes will print more than one word.
    G.grid().run(SPREAD,
        F.yield(phrase.split(" "), new GridInClosure<String>() {
            @Override public void apply(String word) {
                X.println(word);
            }
        }));

    // NOTE:
    //
    // Alternatively, you can use existing closure 'F.println()' to
    // print any yield result in 'F.yield()' like so:
    //
    // G.grid().run(SPREAD, F.yield(phrase.split(" "), F.println()));
    //

    X.println(">>>");
    X.println(">>> Finished printing individual words on different nodes based on GridGain 3.0 API.");
    X.println(">>> Check all nodes for output (this node is also part of the grid).");
    X.println(">>>");
}
/**
 * Sends query request.
 *
 * @param fut Distributed future.
 * @param req Request.
 * @param nodes Nodes.
 * @throws IgniteCheckedException In case of error.
 */
@SuppressWarnings("unchecked")
private void sendRequest(final GridCacheDistributedQueryFuture<?, ?, ?> fut, final GridCacheQueryRequest req,
    Collection<ClusterNode> nodes) throws IgniteCheckedException {
    assert fut != null;
    assert req != null;
    assert nodes != null;

    final UUID locNodeId = cctx.localNodeId();

    ClusterNode locNode = null;

    Collection<ClusterNode> rmtNodes = null;

    for (ClusterNode n : nodes) {
        if (n.id().equals(locNodeId))
            locNode = n;
        else {
            if (rmtNodes == null)
                rmtNodes = new ArrayList<>(nodes.size());

            rmtNodes.add(n);
        }
    }

    // The request should be sent to remote nodes before the query is processed on the local node.
    // For example, a remote reducer has state, so we must not serialize and send a reducer
    // that has already been changed by local execution.
    if (!F.isEmpty(rmtNodes)) {
        cctx.io().safeSend(rmtNodes, req, cctx.ioPolicy(), new P1<ClusterNode>() {
            @Override public boolean apply(ClusterNode node) {
                fut.onNodeLeft(node.id());

                return !fut.isDone();
            }
        });
    }

    if (locNode != null) {
        cctx.closures().callLocalSafe(new Callable<Object>() {
            @Override public Object call() throws Exception {
                req.beforeLocalExecution(cctx);

                processQueryRequest(locNodeId, req);

                return null;
            }
        });
    }
}
/**
 * Resolve data affinity from string definition.
 *
 * @param affinity Data affinity string definition.
 * @return Resolved data affinity.
 * @throws GridClientException If loading failed.
 */
private static GridClientDataAffinity resolveAffinity(String affinity) throws GridClientException {
    if (F.isEmpty(affinity))
        return null;

    if ("partitioned".equals(affinity))
        return new GridClientPartitionAffinity();

    return newInstance(GridClientDataAffinity.class, affinity);
}
/**
 * Resolve load balancer from string definition.
 *
 * @param balancer Load balancer string definition.
 * @return Resolved load balancer.
 * @throws GridClientException If loading failed.
 */
private static GridClientLoadBalancer resolveBalancer(String balancer) throws GridClientException {
    if (F.isEmpty(balancer) || "random".equals(balancer))
        return new GridClientRandomBalancer();

    if ("roundrobin".equals(balancer))
        return new GridClientRoundRobinBalancer();

    return newInstance(GridClientLoadBalancer.class, balancer);
}
/** {@inheritDoc} */
@Override public GridDhtPartitionMap localPartitionMap() {
    lock.readLock().lock();

    try {
        return new GridDhtPartitionMap(cctx.nodeId(), updateSeq.get(),
            F.viewReadOnly(locParts, CU.part2state()), true);
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * Starts dynamic caches.
 *
 * @throws IgniteCheckedException If failed.
 */
private void startCaches() throws IgniteCheckedException {
    cctx.cache().prepareCachesStart(
        F.view(reqs, new IgnitePredicate<DynamicCacheChangeRequest>() {
            @Override public boolean apply(DynamicCacheChangeRequest req) {
                return req.start();
            }
        }),
        exchId.topologyVersion());
}
/**
 * Runs a command in a separate console.
 *
 * @param workFolder Work folder for the command.
 * @param args A string array containing the program and its arguments.
 * @return Started process.
 * @throws IOException If failed to start the process.
 */
public static Process openInConsole(@Nullable File workFolder, String... args) throws IOException {
    String[] commands = args;

    String cmd = F.concat(Arrays.asList(args), " ");

    if (U.isWindows())
        commands = F.asArray("cmd", "/c", String.format("start %s", cmd));

    if (U.isMacOs())
        commands = F.asArray("osascript", "-e",
            String.format("tell application \"Terminal\" to do script \"%s\"", cmd));

    if (U.isUnix())
        commands = F.asArray("xterm", "-sl", "1024", "-geometry", "200x50", "-e", cmd);

    ProcessBuilder pb = new ProcessBuilder(commands);

    if (workFolder != null)
        pb.directory(workFolder);

    return pb.start();
}
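/**
 * Usage sketch (added for illustration, not part of the original sources): shows how the
 * {@code openInConsole()} helper above could be invoked. The work folder and the
 * {@code java -version} command are hypothetical placeholders.
 *
 * @throws IOException If the console could not be started.
 */
private static void openInConsoleExample() throws IOException {
    // Assumed starting point: the user's home directory; any existing folder would do.
    File workDir = new File(System.getProperty("user.home"));

    // Opens a platform-specific console (cmd, Terminal or xterm) running the given command.
    Process proc = openInConsole(workDir, "java", "-version");

    // The returned process is only the launcher; the spawned console keeps running on its own.
    assert proc != null;
}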
/** @return Involved nodes. */
@Override public Collection<? extends GridNode> nodes() {
    return F.viewReadOnly(futures(), new GridClosure<GridFuture<?>, GridRichNode>() {
        @Nullable @Override public GridRichNode apply(GridFuture<?> f) {
            if (isMini(f))
                return ((MiniFuture)f).node();

            return cctx.rich().rich(cctx.discovery().localNode());
        }
    });
}
/**
 * @param cacheId Cache ID to check.
 * @return {@code True} if the cache is being stopped by this exchange.
 */
private boolean stopping(int cacheId) {
    boolean stopping = false;

    if (!F.isEmpty(reqs)) {
        for (DynamicCacheChangeRequest req : reqs) {
            if (cacheId == CU.cacheId(req.cacheName())) {
                stopping = req.stop();

                break;
            }
        }
    }

    return stopping;
}
/** @param mappings Mappings. */
void addEntryMapping(@Nullable Map<UUID, GridDistributedTxMapping<K, V>> mappings) {
    if (!F.isEmpty(mappings)) {
        this.mappings.putAll(mappings);

        if (log.isDebugEnabled())
            log.debug("Added mappings to transaction [locId=" + cctx.nodeId() + ", mappings=" + mappings +
                ", tx=" + this + ']');
    }
}
/**
 * @param nodeId Node ID.
 * @param retryCnt Number of retries.
 */
private void sendAllPartitions(final UUID nodeId, final int retryCnt) {
    ClusterNode n = cctx.node(nodeId);

    try {
        if (n != null)
            sendAllPartitions(F.asList(n), exchId);
    }
    catch (IgniteCheckedException e) {
        if (e instanceof ClusterTopologyCheckedException || !cctx.discovery().alive(n)) {
            log.debug("Failed to send full partition map to node, node left grid " +
                "[rmtNode=" + nodeId + ", exchangeId=" + exchId + ']');

            return;
        }

        if (retryCnt > 0) {
            long timeout = cctx.gridConfig().getNetworkSendRetryDelay();

            LT.error(log, e, "Failed to send full partition map to node (will retry after timeout) " +
                "[node=" + nodeId + ", exchangeId=" + exchId + ", timeout=" + timeout + ']');

            cctx.time().addTimeoutObject(new GridTimeoutObjectAdapter(timeout) {
                @Override public void onTimeout() {
                    sendAllPartitions(nodeId, retryCnt - 1);
                }
            });
        }
        else
            U.error(log, "Failed to send full partition map [node=" + n + ", exchangeId=" + exchId + ']', e);
    }
}
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param commit Commit flag.
 */
public GridNearTxFinishFuture(GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    assert cctx != null;

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
}
/**
 * @param p Partition.
 * @param topVer Topology version ({@code -1} for all nodes).
 * @param state Partition state.
 * @param states Additional partition states.
 * @return List of nodes for the partition.
 */
private List<ClusterNode> nodes(int p, AffinityTopologyVersion topVer, GridDhtPartitionState state,
    GridDhtPartitionState... states) {
    Collection<UUID> allIds = topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer=" + topVer +
            ", allIds=" + allIds + ", node2part=" + node2part + ", cache=" + cctx.name() + ']';

        Collection<UUID> nodeIds = part2node.get(p);

        // Node IDs can be null if both primary and backup nodes disappear.
        int size = nodeIds == null ? 0 : nodeIds.size();

        if (size == 0)
            return Collections.emptyList();

        List<ClusterNode> nodes = new ArrayList<>(size);

        for (UUID id : nodeIds) {
            if (topVer.topologyVersion() > 0 && !allIds.contains(id))
                continue;

            if (hasState(p, id, state, states)) {
                ClusterNode n = cctx.discovery().node(id);

                if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
                    nodes.add(n);
            }
        }

        return nodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * Processes cache query request.
 *
 * @param sndId Sender node id.
 * @param req Query request.
 */
@SuppressWarnings("unchecked")
@Override void processQueryRequest(UUID sndId, GridCacheQueryRequest req) {
    if (req.cancel()) {
        cancelIds.add(new CancelMessageId(req.id(), sndId));

        if (req.fields())
            removeFieldsQueryResult(sndId, req.id());
        else
            removeQueryResult(sndId, req.id());
    }
    else {
        if (!cancelIds.contains(new CancelMessageId(req.id(), sndId))) {
            if (!F.eq(req.cacheName(), cctx.name())) {
                GridCacheQueryResponse res = new GridCacheQueryResponse(cctx.cacheId(), req.id(),
                    new IgniteCheckedException("Received request for incorrect cache [expected=" + cctx.name() +
                        ", actual=" + req.cacheName()));

                sendQueryResponse(sndId, res, 0);
            }
            else {
                threads.put(req.id(), Thread.currentThread());

                try {
                    GridCacheQueryInfo info = distributedQueryInfo(sndId, req);

                    if (info == null)
                        return;

                    if (req.fields())
                        runFieldsQuery(info);
                    else
                        runQuery(info);
                }
                catch (Throwable e) {
                    U.error(log(), "Failed to run query.", e);

                    sendQueryResponse(sndId, new GridCacheQueryResponse(cctx.cacheId(), req.id(), e.getCause()), 0);

                    if (e instanceof Error)
                        throw (Error)e;
                }
                finally {
                    threads.remove(req.id());
                }
            }
        }
    }
}
/** {@inheritDoc} */
@Override public void isolated(boolean isolated) throws GridException {
    if (isolated())
        return;

    GridNode node = F.first(ctx.grid().forCache(cacheName).nodes());

    if (node == null)
        throw new GridException("Failed to get node for cache: " + cacheName);

    GridCacheAttributes a = U.cacheAttributes(node, cacheName);

    assert a != null;

    updater = a.atomicityMode() == GridCacheAtomicityMode.ATOMIC ?
        GridDataLoadCacheUpdaters.<K, V>batched() :
        GridDataLoadCacheUpdaters.<K, V>groupLocked();
}
/** @throws Exception If failed. */
public void testRangeSplit() throws Exception {
    IgniteUuid affKey = IgniteUuid.randomUuid();

    IgfsFileAffinityRange range = new IgfsFileAffinityRange(0, 9999, affKey);

    Collection<IgfsFileAffinityRange> split = range.split(10000);

    assertEquals(1, split.size());
    assertTrue(range.regionEqual(F.first(split)));

    split = range.split(5000);

    assertEquals(2, split.size());

    Iterator<IgfsFileAffinityRange> it = split.iterator();

    IgfsFileAffinityRange part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(0, 4999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(5000, 9999, affKey)));

    split = range.split(3000);

    assertEquals(4, split.size());

    it = split.iterator();

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(0, 2999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(3000, 5999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(6000, 8999, affKey)));

    part = it.next();

    assertTrue(part.regionEqual(new IgfsFileAffinityRange(9000, 9999, affKey)));
}
/**
 * Finds all files in a folder and in its sub-tree of specified depth.
 *
 * @param file Starting folder.
 * @param maxDepth Depth of the tree. If 1 - just look in the folder, no sub-folders.
 * @param filter File filter.
 * @return List of found files.
 */
public static List<VisorLogFile> fileTree(File file, int maxDepth, @Nullable FileFilter filter) {
    if (file.isDirectory()) {
        File[] files = (filter == null) ? file.listFiles() : file.listFiles(filter);

        if (files == null)
            return Collections.emptyList();

        List<VisorLogFile> res = new ArrayList<>(files.length);

        for (File f : files) {
            if (f.isFile() && f.length() > 0)
                res.add(new VisorLogFile(f));
            else if (maxDepth > 1)
                res.addAll(fileTree(f, maxDepth - 1, filter));
        }

        return res;
    }

    return F.asList(new VisorLogFile(file));
}
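/**
 * Usage sketch (added for illustration, not part of the original sources): collects non-empty
 * {@code .log} files up to two folder levels deep with the {@code fileTree()} helper above.
 * The starting folder and the extension are hypothetical placeholders.
 *
 * @param logDir Folder to scan.
 * @return Matched log files.
 */
public static List<VisorLogFile> findLogFilesExample(File logDir) {
    // The filter must accept directories as well, otherwise fileTree() cannot descend into them.
    FileFilter logFilter = new FileFilter() {
        @Override public boolean accept(File f) {
            return f.isDirectory() || f.getName().endsWith(".log");
        }
    };

    // Depth 2: the folder itself plus its immediate sub-folders.
    return fileTree(logDir, 2, logFilter);
}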
/** @return {@code True} if succeeded. */
private boolean spreadPartitions() {
    try {
        sendAllPartitions(rmtNodes, exchId);

        return true;
    }
    catch (IgniteCheckedException e) {
        scheduleRecheck();

        if (!X.hasCause(e, InterruptedException.class))
            U.error(log, "Failed to send full partition map to nodes (will retry after timeout) [nodes=" +
                F.nodeId8s(rmtNodes) + ", exchangeId=" + exchId + ']', e);

        return false;
    }
}