/**
 * @param newEntries Entries to add to the buffer.
 * @param lsnr Listener for the operation future.
 * @return Future for operation.
 * @throws GridInterruptedException If failed.
 */
@Nullable GridFutureAdapter<?> update(Iterable<Map.Entry<K, V>> newEntries,
    GridInClosure<GridFuture<?>> lsnr) throws GridInterruptedException {
    List<Map.Entry<K, V>> entries0 = null;

    GridFutureAdapter<Object> curFut0;

    synchronized (this) {
        curFut0 = curFut;

        curFut0.listenAsync(lsnr);

        for (Map.Entry<K, V> entry : newEntries)
            entries.add(entry);

        if (entries.size() >= bufSize) {
            entries0 = entries;

            entries = newEntries();

            curFut = new GridFutureAdapter<>(ctx);
            curFut.listenAsync(signalC);
        }
    }

    if (entries0 != null) {
        submit(entries0, curFut0);

        if (cancelled)
            curFut0.onDone(new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this));
    }

    return curFut0;
}
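/**
 * Illustrative sketch only (not part of the original class): how a caller might feed a
 * batch into the per-node buffer above and optionally block until it is flushed. The
 * {@code buf} and {@code batch} arguments are hypothetical.
 */
private void addBatch(Buffer buf, Collection<Map.Entry<K, V>> batch) throws GridException {
    GridFutureAdapter<?> fut = buf.update(batch, new GridInClosure<GridFuture<?>>() {
        @Override public void apply(GridFuture<?> f) {
            // Invoked once the buffered batch has been submitted and completed (or failed).
        }
    });

    if (fut != null)
        fut.get(); // Optionally wait for the batch to be acknowledged.
}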
/** Forcibly completes all outstanding futures with a cancellation error. */
void cancelAll() {
    GridException err = new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this);

    for (GridFuture<?> f : locFuts) {
        try {
            f.cancel();
        }
        catch (GridException e) {
            U.error(log, "Failed to cancel mini-future.", e);
        }
    }

    for (GridFutureAdapter<?> f : reqs.values())
        f.onDone(err);
}
/**
 * @param updateSeq Update sequence.
 * @return {@code True} if the partition has been transitioned to state EVICTED.
 */
boolean tryEvict(boolean updateSeq) {
    if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved())
        return false;

    // Attempt to evict partition entries from cache.
    clearAll();

    if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
        if (log.isDebugEnabled())
            log.debug("Evicted partition: " + this);

        if (!GridQueryProcessor.isEnabled(cctx.config()))
            clearSwap();

        if (cctx.isDrEnabled())
            cctx.dr().partitionEvicted(id);

        cctx.dataStructures().onPartitionEvicted(id);

        rent.onDone();

        ((GridDhtPreloader)cctx.preloader()).onPartitionEvicted(this, updateSeq);

        clearDeferredDeletes();

        return true;
    }

    return false;
}
/**
 * @param fut Future.
 * @param doneCb Closure invoked when the future completes.
 */
ChainFuture(GridFutureAdapter<R> fut, IgniteClosure<? super IgniteInternalFuture<R>, T> doneCb) {
    this.fut = fut;
    this.doneCb = doneCb;

    fut.listen(new GridFutureChainListener<>(this, doneCb));
}
/**
 * @param updateSeq Update sequence.
 * @return Future for evict attempt.
 */
IgniteInternalFuture<Boolean> tryEvictAsync(boolean updateSeq) {
    if (map.isEmpty() && !GridQueryProcessor.isEnabled(cctx.config()) &&
        state.compareAndSet(RENTING, EVICTED, 0, 0)) {
        if (log.isDebugEnabled())
            log.debug("Evicted partition: " + this);

        clearSwap();

        if (cctx.isDrEnabled())
            cctx.dr().partitionEvicted(id);

        cctx.dataStructures().onPartitionEvicted(id);

        rent.onDone();

        ((GridDhtPreloader)cctx.preloader()).onPartitionEvicted(this, updateSeq);

        clearDeferredDeletes();

        return new GridFinishedFuture<>(true);
    }

    return cctx.closures().callLocalSafe(new GPC<Boolean>() {
        @Override public Boolean call() {
            return tryEvict(true);
        }
    }, /*system pool*/true);
}
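/**
 * Illustrative sketch only: reacting to the asynchronous eviction attempt above without
 * blocking. The partition reference {@code part} (assumed here to be of type
 * GridDhtLocalPartition) and the logging are hypothetical; {@code CI1} is the in-closure
 * shorthand used elsewhere in this codebase.
 */
private void evictAsync(final GridDhtLocalPartition part) {
    part.tryEvictAsync(true).listen(new CI1<IgniteInternalFuture<Boolean>>() {
        @Override public void apply(IgniteInternalFuture<Boolean> f) {
            try {
                if (f.get() && log.isDebugEnabled())
                    log.debug("Partition evicted asynchronously: " + part.id());
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Eviction attempt failed: " + part.id(), e);
            }
        }
    });
}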
/**
 * @param cancel {@code True} to close with cancellation.
 * @throws GridException If failed.
 */
@Override public void close(boolean cancel) throws GridException {
    if (!closed.compareAndSet(false, true))
        return;

    busyLock.block();

    if (log.isDebugEnabled())
        log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']');

    GridException e = null;

    try {
        // Assuming that no methods are called on this loader after this method is called.
        if (cancel) {
            cancelled = true;

            for (Buffer buf : bufMappings.values())
                buf.cancelAll();
        }
        else
            doFlush();

        ctx.event().removeLocalEventListener(discoLsnr);

        ctx.io().removeMessageListener(topic);
    }
    catch (GridException e0) {
        e = e0;
    }

    fut.onDone(null, e);

    if (e != null)
        throw e;
}
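/**
 * Illustrative sketch only: ensuring a loader is always closed, flushing on success and
 * cancelling on failure. The {@code ldr} reference and the {@code loadAll} helper are
 * hypothetical.
 */
private void loadAndClose(GridDataLoaderImpl<K, V> ldr) throws GridException {
    boolean ok = false;

    try {
        loadAll(ldr); // Hypothetical helper that adds data via addData(...).

        ok = true;
    }
    finally {
        // Flush buffered entries on the happy path, cancel outstanding requests otherwise.
        ldr.close(!ok);
    }
}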
/** Forcibly completes all pending futures when the target node leaves topology. */
void onNodeLeft() {
    assert !isLocNode;
    assert bufMappings.get(node.id()) != this;

    if (log.isDebugEnabled())
        log.debug("Forcibly completing futures (node has left): " + node.id());

    Exception e = new GridTopologyException("Failed to wait for request completion " +
        "(node has left): " + node.id());

    for (GridFutureAdapter<Object> f : reqs.values())
        f.onDone(e);

    // Make sure to complete current future.
    GridFutureAdapter<Object> curFut0;

    synchronized (this) {
        curFut0 = curFut;
    }

    curFut0.onDone(e);
}
/** @param res Response. */
void onResponse(GridDataLoadResponse res) {
    if (log.isDebugEnabled())
        log.debug("Received data load response: " + res);

    GridFutureAdapter<?> f = reqs.remove(res.requestId());

    if (f == null) {
        if (log.isDebugEnabled())
            log.debug("Future for request has not been found: " + res.requestId());

        return;
    }

    Throwable err = null;

    byte[] errBytes = res.errorBytes();

    if (errBytes != null) {
        try {
            GridPeerDeployAware jobPda0 = jobPda;

            err = ctx.config().getMarshaller().unmarshal(errBytes,
                jobPda0 != null ? jobPda0.classLoader() : U.gridClassLoader());
        }
        catch (GridException e) {
            f.onDone(null, new GridException("Failed to unmarshal response.", e));

            return;
        }
    }

    f.onDone(null, err);

    if (log.isDebugEnabled())
        log.debug("Finished future [fut=" + f + ", reqId=" + res.requestId() + ", err=" + err + ']');
}
/** {@inheritDoc} */
@Override public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) {
    A.notEmpty(entries, "entries");

    enterBusy();

    try {
        GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx);

        activeFuts.add(resFut);

        resFut.listenAsync(rmvActiveFut);

        Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16);

        for (Map.Entry<K, V> entry : entries)
            keys.add(entry.getKey());

        load0(entries, resFut, keys, 0);

        return resFut;
    }
    finally {
        leaveBusy();
    }
}
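/**
 * Illustrative sketch only: pushing a small batch through {@code addData()} and blocking
 * until every entry has been stored on its mapped node. The loader reference and entry
 * values are hypothetical; {@code AbstractMap.SimpleEntry} is from {@code java.util}.
 */
private void loadSample(GridDataLoaderImpl<String, Integer> ldr) throws GridException {
    Collection<Map.Entry<String, Integer>> batch = new ArrayList<>();

    batch.add(new AbstractMap.SimpleEntry<>("key1", 1));
    batch.add(new AbstractMap.SimpleEntry<>("key2", 2));

    // The returned future completes once all entries were delivered (possibly after remaps).
    ldr.addData(batch).get();
}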
/** {@inheritDoc} */
@Override public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
            if (m == null)
                m = new HashMap<>();

            m.put(cacheCtx.cacheId(),
                cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
        }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
        if (log.isDebugEnabled())
            log.debug("Completed partition exchange [localNode=" + cctx.localNodeId() +
                ", exchange=" + this + ']');

        initFut.onDone(err == null);

        GridTimeoutObject timeoutObj = this.timeoutObj;

        // Deschedule timeout object.
        if (timeoutObj != null)
            cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

        if (exchId.isLeft()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts())
                cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
        }

        return true;
    }

    return dummy;
}
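/**
 * Illustrative sketch only: a minimal topology validator of the kind consulted above via
 * {@code config().getTopologyValidator()}. The two-node threshold and class name are
 * hypothetical; while a validator returns {@code false}, updates to that cache are
 * typically rejected until the topology becomes valid again.
 */
class MinServerNodesValidator implements TopologyValidator {
    /** {@inheritDoc} */
    @Override public boolean validate(Collection<ClusterNode> nodes) {
        return nodes.size() >= 2;
    }
}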
/** @param node Node. */
Buffer(GridNode node) {
    assert node != null;

    this.node = node;

    locFuts = new GridConcurrentHashSet<>();
    reqs = new ConcurrentHashMap8<>();

    // Cache local node flag.
    isLocNode = node.equals(ctx.discovery().localNode());

    entries = newEntries();

    curFut = new GridFutureAdapter<>(ctx);
    curFut.listenAsync(signalC);

    sem = new Semaphore(parallelOps);
}
/**
 * @return Future if any submitted.
 * @throws GridInterruptedException If thread has been interrupted.
 */
@Nullable GridFuture<?> flush() throws GridInterruptedException {
    List<Map.Entry<K, V>> entries0 = null;

    GridFutureAdapter<Object> curFut0 = null;

    synchronized (this) {
        if (!entries.isEmpty()) {
            entries0 = entries;
            curFut0 = curFut;

            entries = newEntries();

            curFut = new GridFutureAdapter<>(ctx);
            curFut.listenAsync(signalC);
        }
    }

    if (entries0 != null)
        submit(entries0, curFut0);

    // Create compound future for this flush.
    GridCompoundFuture<Object, Object> res = null;

    for (GridFuture<Object> f : locFuts) {
        if (res == null)
            res = new GridCompoundFuture<>(ctx);

        res.add(f);
    }

    for (GridFuture<Object> f : reqs.values()) {
        if (res == null)
            res = new GridCompoundFuture<>(ctx);

        res.add(f);
    }

    if (res != null)
        res.markInitialized();

    return res;
}
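/**
 * Illustrative sketch only: draining every per-node buffer and waiting on the compound
 * futures returned by {@code flush()}. The iteration over {@code bufMappings} mirrors how
 * the enclosing loader tracks buffers; error handling is left to the caller.
 */
private void flushAll() throws GridException {
    for (Buffer buf : bufMappings.values()) {
        GridFuture<?> fut = buf.flush();

        if (fut != null)
            fut.get(); // Wait until every entry buffered for this node is acknowledged.
    }
}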
/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    if (init.compareAndSet(false, true)) {
        if (isDone())
            return;

        try {
            // Wait for event to occur to make sure that discovery will return corresponding nodes.
            U.await(evtLatch);

            assert discoEvt != null : this;
            assert !dummy && !forcePreload : this;

            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

            oldestNode.set(oldest);

            startCaches();

            // True if client node joined or failed.
            boolean clientNodeEvt;

            if (F.isEmpty(reqs)) {
                int type = discoEvt.type();

                assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt;

                clientNodeEvt = CU.clientNode(discoEvt.eventNode());
            }
            else {
                assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

                boolean clientOnlyStart = true;

                for (DynamicCacheChangeRequest req : reqs) {
                    if (!req.clientStartOnly()) {
                        clientOnlyStart = false;

                        break;
                    }
                }

                clientNodeEvt = clientOnlyStart;
            }

            if (clientNodeEvt) {
                ClusterNode node = discoEvt.eventNode();

                // Client needs to initialize affinity for local join event or for started client caches.
                if (!node.isLocal()) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                        if (cacheCtx.isLocal())
                            continue;

                        GridDhtPartitionTopology top = cacheCtx.topology();

                        top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

                        if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                            initTopology(cacheCtx);

                            top.beforeExchange(this);
                        }
                        else
                            cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
                    }

                    if (exchId.isLeft())
                        cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

                    onDone(exchId.topologyVersion());

                    skipPreload = cctx.kernalContext().clientNode();

                    return;
                }
            }

            if (cctx.kernalContext().clientNode()) {
                skipPreload = true;

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    GridDhtPartitionTopology top = cacheCtx.topology();

                    top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
                }

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    initTopology(cacheCtx);
                }

                if (oldestNode.get() != null) {
                    rmtNodes = new ConcurrentLinkedQueue<>(
                        CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

                    rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

                    ready.set(true);

                    initFut.onDone(true);

                    if (log.isDebugEnabled())
                        log.debug("Initialized future: " + this);

                    sendPartitions();
                }
                else
                    onDone(exchId.topologyVersion());

                return;
            }

            assert oldestNode.get() != null;

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
                    if (cacheCtx.discovery().cacheAffinityNodes(cacheCtx.name(), topologyVersion()).isEmpty())
                        U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
                }

                cacheCtx.preloader().onExchangeFutureAdded();
            }

            List<String> cachesWithoutNodes = null;

            if (exchId.isLeft()) {
                for (String name : cctx.cache().cacheNames()) {
                    if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
                        if (cachesWithoutNodes == null)
                            cachesWithoutNodes = new ArrayList<>();

                        cachesWithoutNodes.add(name);

                        // Fire event even if there is no client cache started.
                        if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                            Event evt = new CacheEvent(name, cctx.localNode(), cctx.localNode(),
                                "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT,
                                0, false, null, null, null, null, false, null, false, null, null, null);

                            cctx.gridEvents().record(evt);
                        }
                    }
                }
            }

            if (cachesWithoutNodes != null) {
                StringBuilder sb =
                    new StringBuilder("All server nodes for the following caches have left the cluster: ");

                for (int i = 0; i < cachesWithoutNodes.size(); i++) {
                    String cache = cachesWithoutNodes.get(i);

                    sb.append('\'').append(cache).append('\'');

                    if (i != cachesWithoutNodes.size() - 1)
                        sb.append(", ");
                }

                U.quietAndWarn(log, sb.toString());
                U.quietAndWarn(log, "Must have server nodes for caches to operate.");
            }

            assert discoEvt != null;
            assert exchId.nodeId().equals(discoEvt.eventNode().id());

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId());

                long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

                // Update before waiting for locks.
                if (!cacheCtx.isLocal())
                    cacheCtx.topology().updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
            }

            // Grab all alive remote nodes with order of equal or less than last joined node.
            rmtNodes = new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            // If received any messages, process them.
            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                onReceive(m.getKey(), m.getValue());

            AffinityTopologyVersion topVer = exchId.topologyVersion();

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Must initialize topology after we get discovery event.
                initTopology(cacheCtx);

                cacheCtx.preloader().updateLastExchangeFuture(this);
            }

            IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

            // Assign to class variable so it will be included into toString() method.
            this.partReleaseFut = partReleaseFut;

            if (log.isDebugEnabled())
                log.debug("Before waiting for partition release future: " + this);

            while (true) {
                try {
                    partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    // Print pending transactions and locks that might have led to hang.
                    dumpPendingObjects();
                }
            }

            if (log.isDebugEnabled())
                log.debug("After waiting for partition release future: " + this);

            if (!F.isEmpty(reqs))
                blockGateways();

            if (exchId.isLeft())
                cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

            while (true) {
                try {
                    locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                }
                catch (IgniteFutureTimeoutCheckedException ignored) {
                    U.warn(log, "Failed to wait for locks release future. " +
                        "Dumping pending objects that might be the cause: " + cctx.localNodeId());

                    U.warn(log, "Locked entries:");

                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                        cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                }
            }

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Notify replication manager.
                GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

                if (drCacheCtx.isDrEnabled())
                    drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

                // Partition release future is done so we can flush the write-behind store.
                cacheCtx.store().forceFlush();

                // Process queued undeploys prior to sending/spreading map.
                cacheCtx.preloader().unwindUndeploys();

                GridDhtPartitionTopology top = cacheCtx.topology();

                assert topVer.equals(top.topologyVersion()) :
                    "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

                top.beforeExchange(this);
            }

            for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
                top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

                top.beforeExchange(this);
            }
        }
        catch (IgniteInterruptedCheckedException e) {
            onDone(e);

            throw e;
        }
        catch (Throwable e) {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);

            onDone(e);

            if (e instanceof Error)
                throw (Error)e;

            return;
        }

        if (F.isEmpty(rmtIds)) {
            onDone(exchId.topologyVersion());

            return;
        }

        ready.set(true);

        initFut.onDone(true);

        if (log.isDebugEnabled())
            log.debug("Initialized future: " + this);

        // If this node is not oldest.
        if (!oldestNode.get().id().equals(cctx.localNodeId()))
            sendPartitions();
        else {
            boolean allReceived = allReceived();

            if (allReceived && replied.compareAndSet(false, true)) {
                if (spreadPartitions())
                    onDone(exchId.topologyVersion());
            }
        }

        scheduleRecheck();
    }
    else
        assert false : "Skipped init future: " + this;
}
/** @param nodeId Left node id. */
public void onNodeLeft(final UUID nodeId) {
    if (isDone())
        return;

    if (!enterBusy())
        return;

    try {
        // Wait for initialization part of this future to complete.
        initFut.listen(new CI1<IgniteInternalFuture<?>>() {
            @Override public void apply(IgniteInternalFuture<?> f) {
                if (isDone())
                    return;

                if (!enterBusy())
                    return;

                try {
                    // Pretend to have received message from this node.
                    rcvdIds.add(nodeId);

                    Collection<UUID> rmtIds = GridDhtPartitionsExchangeFuture.this.rmtIds;

                    assert rmtIds != null;

                    ClusterNode oldest = oldestNode.get();

                    if (oldest.id().equals(nodeId)) {
                        if (log.isDebugEnabled())
                            log.debug("Oldest node left or failed on partition exchange " +
                                "(will restart exchange process) [oldestNodeId=" + oldest.id() +
                                ", exchangeId=" + exchId + ']');

                        boolean set = false;

                        ClusterNode newOldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

                        if (newOldest != null) {
                            // If local node is now oldest.
                            if (newOldest.id().equals(cctx.localNodeId())) {
                                synchronized (mux) {
                                    if (oldestNode.compareAndSet(oldest, newOldest)) {
                                        // If local node is just joining.
                                        if (exchId.nodeId().equals(cctx.localNodeId())) {
                                            try {
                                                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                                                    if (!cacheCtx.isLocal())
                                                        cacheCtx.topology()
                                                            .beforeExchange(GridDhtPartitionsExchangeFuture.this);
                                                }
                                            }
                                            catch (IgniteCheckedException e) {
                                                onDone(e);

                                                return;
                                            }
                                        }

                                        set = true;
                                    }
                                }
                            }
                            else {
                                synchronized (mux) {
                                    set = oldestNode.compareAndSet(oldest, newOldest);
                                }

                                if (set && log.isDebugEnabled())
                                    log.debug("Reassigned oldest node [this=" + cctx.localNodeId() +
                                        ", old=" + oldest.id() + ", new=" + newOldest.id() + ']');
                            }
                        }

                        if (set) {
                            // If received any messages, process them.
                            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                                onReceive(m.getKey(), m.getValue());

                            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                                onReceive(m.getKey(), m.getValue());

                            // Reassign oldest node and resend.
                            recheck();
                        }
                    }
                    else if (rmtIds.contains(nodeId)) {
                        if (log.isDebugEnabled())
                            log.debug("Remote node left or failed during partition exchange (will ignore) " +
                                "[rmtNode=" + nodeId + ", exchangeId=" + exchId + ']');

                        assert rmtNodes != null;

                        for (Iterator<ClusterNode> it = rmtNodes.iterator(); it.hasNext(); ) {
                            if (it.next().id().equals(nodeId))
                                it.remove();
                        }

                        if (allReceived() && ready.get() && replied.compareAndSet(false, true))
                            if (spreadPartitions())
                                onDone(exchId.topologyVersion());
                    }
                }
                finally {
                    leaveBusy();
                }
            }
        });
    }
    finally {
        leaveBusy();
    }
}
/**
 * @param nodeId Sender node ID.
 * @param msg Full partition info.
 */
public void onReceive(final UUID nodeId, final GridDhtPartitionsFullMessage msg) {
    assert msg != null;

    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Received message for finished future [msg=" + msg + ", fut=" + this + ']');

        return;
    }

    if (log.isDebugEnabled())
        log.debug("Received full partition map from node [nodeId=" + nodeId + ", msg=" + msg + ']');

    assert exchId.topologyVersion().equals(msg.topologyVersion());

    initFut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
        @Override public void apply(IgniteInternalFuture<Boolean> t) {
            ClusterNode curOldest = oldestNode.get();

            if (!nodeId.equals(curOldest.id())) {
                if (log.isDebugEnabled())
                    log.debug("Received full partition map from unexpected node [oldest=" + curOldest.id() +
                        ", unexpectedNodeId=" + nodeId + ']');

                ClusterNode snd = cctx.discovery().node(nodeId);

                if (snd == null) {
                    if (log.isDebugEnabled())
                        log.debug("Sender node left grid, will ignore message from unexpected node [nodeId=" +
                            nodeId + ", exchId=" + msg.exchangeId() + ']');

                    return;
                }

                // Will process message later if sender node becomes oldest node.
                if (snd.order() > curOldest.order())
                    fullMsgs.put(nodeId, msg);

                return;
            }

            assert msg.exchangeId().equals(exchId);
            assert msg.lastVersion() != null;

            cctx.versions().onReceived(nodeId, msg.lastVersion());

            updatePartitionFullMap(msg);

            onDone(exchId.topologyVersion());
        }
    });
}
/**
 * @param nodeId Sender node id.
 * @param msg Single partition info.
 */
public void onReceive(final UUID nodeId, final GridDhtPartitionsSingleMessage msg) {
    assert msg != null;
    assert msg.exchangeId().equals(exchId);

    // Update last seen version.
    while (true) {
        GridCacheVersion old = lastVer.get();

        if (old == null || old.compareTo(msg.lastVersion()) < 0) {
            if (lastVer.compareAndSet(old, msg.lastVersion()))
                break;
        }
        else
            break;
    }

    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Received message for finished future (will reply only to sender) [msg=" + msg +
                ", fut=" + this + ']');

        sendAllPartitions(nodeId, cctx.gridConfig().getNetworkSendRetryCount());
    }
    else {
        initFut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
            @Override public void apply(IgniteInternalFuture<Boolean> t) {
                try {
                    if (!t.get()) // Just to check if there was an error.
                        return;

                    ClusterNode loc = cctx.localNode();

                    singleMsgs.put(nodeId, msg);

                    boolean match = true;

                    // Check if oldest node has changed.
                    if (!oldestNode.get().equals(loc)) {
                        match = false;

                        synchronized (mux) {
                            // Double check.
                            if (oldestNode.get().equals(loc))
                                match = true;
                        }
                    }

                    if (match) {
                        boolean allReceived;

                        synchronized (rcvdIds) {
                            if (rcvdIds.add(nodeId))
                                updatePartitionSingleMap(msg);

                            allReceived = allReceived();
                        }

                        // If got all replies, and initialization finished, and reply has not been sent yet.
                        if (allReceived && ready.get() && replied.compareAndSet(false, true)) {
                            spreadPartitions();

                            onDone(exchId.topologyVersion());
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Exchange future full map is not sent [allReceived=" + allReceived() +
                                ", ready=" + ready + ", replied=" + replied.get() + ", init=" + init.get() +
                                ", fut=" + this + ']');
                    }
                }
                catch (IgniteCheckedException e) {
                    U.error(log, "Failed to initialize exchange future: " + this, e);
                }
            }
        });
    }
}
/** @return {@code True} if the loader is closed. */
boolean isClosed() {
    return fut.isDone();
}
/**
 * @param entries Entries.
 * @param resFut Result future.
 * @param activeKeys Active keys.
 * @param remaps Remaps count.
 */
private void load0(
    Collection<? extends Map.Entry<K, V>> entries,
    final GridFutureAdapter<Object> resFut,
    final Collection<K> activeKeys,
    final int remaps
) {
    assert entries != null;

    if (remaps >= MAX_REMAP_CNT) {
        resFut.onDone(new GridException("Failed to finish operation (too many remaps): " + remaps));

        return;
    }

    Map<GridNode, Collection<Map.Entry<K, V>>> mappings = new HashMap<>();

    boolean initPda = ctx.deploy().enabled() && jobPda == null;

    for (Map.Entry<K, V> entry : entries) {
        GridNode node;

        try {
            K key = entry.getKey();

            assert key != null;

            if (initPda) {
                jobPda = new DataLoaderPda(key, entry.getValue(), updater);

                initPda = false;
            }

            node = ctx.affinity().mapKeyToNode(cacheName, key);
        }
        catch (GridException e) {
            resFut.onDone(e);

            return;
        }

        if (node == null) {
            resFut.onDone(new GridTopologyException("Failed to map key to node " +
                "(no nodes with cache found in topology) [infos=" + entries.size() +
                ", cacheName=" + cacheName + ']'));

            return;
        }

        Collection<Map.Entry<K, V>> col = mappings.get(node);

        if (col == null)
            mappings.put(node, col = new ArrayList<>());

        col.add(entry);
    }

    for (final Map.Entry<GridNode, Collection<Map.Entry<K, V>>> e : mappings.entrySet()) {
        final UUID nodeId = e.getKey().id();

        Buffer buf = bufMappings.get(nodeId);

        if (buf == null) {
            Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));

            if (old != null)
                buf = old;
        }

        final Collection<Map.Entry<K, V>> entriesForNode = e.getValue();

        GridInClosure<GridFuture<?>> lsnr = new GridInClosure<GridFuture<?>>() {
            @Override public void apply(GridFuture<?> t) {
                try {
                    t.get();

                    for (Map.Entry<K, V> e : entriesForNode)
                        activeKeys.remove(e.getKey());

                    if (activeKeys.isEmpty())
                        resFut.onDone();
                }
                catch (GridException e1) {
                    if (log.isDebugEnabled())
                        log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');

                    if (cancelled) {
                        resFut.onDone(new GridException("Data loader has been cancelled: " +
                            GridDataLoaderImpl.this, e1));
                    }
                    else
                        load0(entriesForNode, resFut, activeKeys, remaps + 1);
                }
            }
        };

        GridFutureAdapter<?> f;

        try {
            f = buf.update(entriesForNode, lsnr);
        }
        catch (GridInterruptedException e1) {
            resFut.onDone(e1);

            return;
        }

        if (ctx.discovery().node(nodeId) == null) {
            if (bufMappings.remove(nodeId, buf))
                buf.onNodeLeft();

            if (f != null)
                f.onDone(new GridTopologyException("Failed to wait for request completion " +
                    "(node has left): " + nodeId));
        }
    }
}