/**
 * Completes the future: deregisters it from the MVCC atomic-futures map, notifies the
 * continuous query manager for every mapped key (skip-events on failure, update-events on
 * success), and invokes the completion callback when the write mode is FULL_SYNC.
 *
 * @param res Result (unused, always {@code null}).
 * @param err Error, if the update failed; {@code null} on success.
 * @return {@code true} if this invocation completed the future.
 */
@Override public boolean onDone(@Nullable Void res, @Nullable Throwable err) {
    // Guard clause: only the first completion performs the notification work.
    if (!super.onDone(res, err))
        return false;

    cctx.mvcc().removeAtomicFuture(version());

    if (err != null) {
        if (!mappings.isEmpty()) {
            // HashSet: membership checks are O(1) vs O(n) per key with the former list scan.
            Collection<KeyCacheObject> hndKeys = new java.util.HashSet<>(keys.size());

            exit:
            for (GridDhtAtomicUpdateRequest req : mappings.values()) {
                for (int i = 0; i < req.size(); i++) {
                    KeyCacheObject key = req.key(i);

                    // A key may appear in several per-node requests; handle each key once.
                    if (hndKeys.add(key)) {
                        updateRes.addFailedKey(key, err);

                        cctx.continuousQueries().skipUpdateEvent(key,
                            req.partitionId(i),
                            req.updateCounter(i),
                            updateReq.topologyVersion());

                        // All keys handled — no need to scan remaining requests.
                        if (hndKeys.size() == keys.size())
                            break exit;
                    }
                }
            }
        }
        else {
            for (KeyCacheObject key : keys)
                updateRes.addFailedKey(key, err);
        }
    }
    else {
        Collection<KeyCacheObject> hndKeys = new java.util.HashSet<>(keys.size());

        exit:
        for (GridDhtAtomicUpdateRequest req : mappings.values()) {
            for (int i = 0; i < req.size(); i++) {
                KeyCacheObject key = req.key(i);

                if (hndKeys.add(key)) {
                    try {
                        cctx.continuousQueries().onEntryUpdated(key,
                            req.value(i),
                            req.localPreviousValue(i),
                            key.internal() || !cctx.userCache(),
                            req.partitionId(i),
                            true,
                            false,
                            req.updateCounter(i),
                            updateReq.topologyVersion());
                    }
                    catch (IgniteCheckedException e) {
                        // Best-effort notification: log and continue with remaining keys.
                        U.warn(log, "Failed to send continuous query message. [key=" + key
                            + ", newVal=" + req.value(i) + ", err=" + e + "]");
                    }

                    if (hndKeys.size() == keys.size())
                        break exit;
                }
            }
        }
    }

    if (updateReq.writeSynchronizationMode() == FULL_SYNC)
        completionCb.apply(updateReq, updateRes);

    return true;
}
/**
 * Resolves the continuous query manager for the cache this message targets.
 *
 * @param ctx Kernal context.
 * @return Continuous query manager, or {@code null} if the cache context is unavailable.
 */
private CacheContinuousQueryManager manager(GridKernalContext ctx) {
    GridCacheContext<K, V> cacheCtx = cacheContext(ctx);

    if (cacheCtx == null)
        return null;

    return cacheCtx.continuousQueries();
}
/** * @param entry Entry to map. * @param val Value to write. * @param entryProcessor Entry processor. * @param ttl TTL (optional). * @param conflictExpireTime Conflict expire time (optional). * @param conflictVer Conflict version (optional). * @param updateCntr Partition update counter. */ public void addWriteEntry( GridDhtCacheEntry entry, @Nullable CacheObject val, EntryProcessor<Object, Object, Object> entryProcessor, long ttl, long conflictExpireTime, @Nullable GridCacheVersion conflictVer, boolean addPrevVal, @Nullable CacheObject prevVal, long updateCntr) { AffinityTopologyVersion topVer = updateReq.topologyVersion(); Collection<ClusterNode> dhtNodes = cctx.dht().topology().nodes(entry.partition(), topVer); if (log.isDebugEnabled()) log.debug( "Mapping entry to DHT nodes [nodes=" + U.nodeIds(dhtNodes) + ", entry=" + entry + ']'); CacheWriteSynchronizationMode syncMode = updateReq.writeSynchronizationMode(); keys.add(entry.key()); for (ClusterNode node : dhtNodes) { UUID nodeId = node.id(); if (!nodeId.equals(cctx.localNodeId())) { GridDhtAtomicUpdateRequest updateReq = mappings.get(nodeId); if (updateReq == null) { updateReq = new GridDhtAtomicUpdateRequest( cctx.cacheId(), nodeId, futVer, writeVer, syncMode, topVer, forceTransformBackups, this.updateReq.subjectId(), this.updateReq.taskNameHash(), forceTransformBackups ? this.updateReq.invokeArguments() : null, cctx.deploymentEnabled(), this.updateReq.keepBinary()); mappings.put(nodeId, updateReq); } updateReq.addWriteValue( entry.key(), val, entryProcessor, ttl, conflictExpireTime, conflictVer, addPrevVal, entry.partition(), prevVal, updateCntr); } else if (dhtNodes.size() == 1) { try { cctx.continuousQueries() .onEntryUpdated( entry.key(), val, prevVal, entry.key().internal() || !cctx.userCache(), entry.partition(), true, false, updateCntr, updateReq.topologyVersion()); } catch (IgniteCheckedException e) { U.warn( log, "Failed to send continuous query message. 
[key=" + entry.key() + ", newVal=" + val + ", err=" + e + "]"); } } } }