/**
 * Processes lock request.
 *
 * @param nodeId Sender node ID.
 * @param msg Lock request.
 */
@SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
        ldr = ctx.deploy().globalLoader();

        if (ldr != null) {
            res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

            for (int i = 0; i < keys.size(); i++) {
                byte[] bytes = keys.get(i);

                K key = msg.keys().get(i);

                Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

                if (bytes == null)
                    continue;

                if (log.isDebugEnabled())
                    log.debug("Unmarshalled key: " + key);

                GridDistributedCacheEntry<K, V> entry = null;

                while (true) {
                    try {
                        entry = entryexx(key);

                        // Handle implicit locks for pessimistic transactions.
                        if (msg.inTx()) {
                            tx = ctx.tm().tx(msg.version());

                            if (tx != null) {
                                if (msg.txRead())
                                    tx.addRead(key, bytes);
                                else
                                    tx.addWrite(key, bytes);
                            }
                            else {
                                tx = new GridReplicatedTxRemote<K, V>(
                                    nodeId,
                                    msg.threadId(),
                                    msg.version(),
                                    null,
                                    PESSIMISTIC,
                                    msg.isolation(),
                                    msg.isInvalidate(),
                                    msg.timeout(),
                                    key,
                                    bytes,
                                    msg.txRead(),
                                    ctx);

                                tx = ctx.tm().onCreated(tx);

                                if (tx == null || !ctx.tm().onStarted(tx))
                                    throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                        "(transaction has been completed): " + msg.version());
                            }
                        }

                        // Add remote candidate before reordering.
                        entry.addRemote(
                            msg.nodeId(),
                            null,
                            msg.threadId(),
                            msg.version(),
                            msg.timeout(),
                            tx != null && tx.ec(),
                            tx != null,
                            tx != null && tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            cands,
                            msg.version(),
                            msg.committedVersions(),
                            msg.rolledbackVersions());

                        // Double-check in case the sender node left the grid.
                        if (ctx.discovery().node(msg.nodeId()) == null) {
                            if (log.isDebugEnabled())
                                log.debug("Node requesting lock left grid (lock request will be ignored): " + msg);

                            if (tx != null)
                                tx.rollback();

                            return;
                        }

                        res.setCandidates(
                            i,
                            entry.localCandidates(),
                            ctx.tm().committedVersions(msg.version()),
                            ctx.tm().rolledbackVersions(msg.version()));

                        res.addValueBytes(
                            entry.rawGet(),
                            msg.returnValue(i) ? entry.valueBytes(null) : null,
                            ctx);

                        // Entry is legit.
                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                        if (tx != null) {
                            tx.clearEntry(entry.key());

                            if (log.isDebugEnabled())
                                log.debug("Cleared removed entry from remote transaction (will retry) [entry=" +
                                    entry + ", tx=" + tx + ']');
                        }
                    }
                }
            }
        }
        else {
            String err = "Failed to acquire deployment class loader for message: " + msg;

            U.warn(log, err);

            res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
        }
    }
    catch (GridCacheTxRollbackException e) {
        if (log.isDebugEnabled())
            log.debug("Received lock request for completed transaction (will ignore): " + e);

        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    }
    catch (GridException e) {
        String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

        log.error(err, e);

        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

        if (tx != null)
            tx.rollback();
    }
    catch (GridDistributedLockCancelledException ignored) {
        // Received lock request for cancelled lock.
        if (log.isDebugEnabled())
            log.debug("Received lock request for canceled lock (will ignore): " + msg);

        if (tx != null)
            tx.rollback();

        // Don't send response back.
        return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
        try {
            // Reply back to sender.
            ctx.io().send(node, res);
        }
        catch (GridException e) {
            U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

            releaseAll = ldr != null;
        }
    }
    // If sender left grid, release all locks acquired so far.
    else
        releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
        for (K key : msg.keys()) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekexx(key);

                try {
                    if (entry != null)
                        entry.removeExplicitNodeLocks(msg.nodeId());

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock on removed entry during failure " +
                            "of replicated lock request handling (will retry): " + entry);
                }
            }
        }

        U.warn(log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
}
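// Illustrative sketch, not part of the original class: both processLockRequest(...) above and
// startRemoteTx(...) below rely on the same retry idiom -- perform the per-entry work, and if the
// entry was concurrently removed, loop to obtain a renewed entry and try again. The EntryOperation
// interface and retryOnRemovedEntry(...) helper below are hypothetical names introduced only to
// show the shape of that idiom; they assume they sit next to processLockRequest(...) so that
// entryexx(...) and log are in scope.
private interface EntryOperation<K, V> {
    /** Applies the operation to the given entry, which may turn out to be removed. */
    void apply(GridDistributedCacheEntry<K, V> entry) throws GridCacheEntryRemovedException;
}

/** Retries {@code op} on a renewed entry until it completes on an entry that is not removed. */
private void retryOnRemovedEntry(K key, EntryOperation<K, V> op) {
    while (true) {
        GridDistributedCacheEntry<K, V> entry = entryexx(key);

        try {
            op.apply(entry);

            // Entry was valid and the operation applied.
            break;
        }
        catch (GridCacheEntryRemovedException ignored) {
            // Entry was removed concurrently; loop to retry on a renewed entry.
            if (log.isDebugEnabled())
                log.debug("Retrying operation on renewed entry: " + entry);
        }
    }
}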
/**
 * Starts a near remote transaction for the given DHT lock request.
 *
 * @param nodeId Primary node ID.
 * @param req Request.
 * @return Remote transaction.
 * @throws GridException If failed.
 * @throws GridDistributedLockCancelledException If lock has been cancelled.
 */
@SuppressWarnings({"RedundantTypeArguments"})
@Nullable public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
    throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
        for (int i = 0; i < nearKeyBytes.size(); i++) {
            byte[] bytes = nearKeyBytes.get(i);

            if (bytes == null)
                continue;

            K key = req.nearKeys().get(i);

            Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

            if (log.isDebugEnabled())
                log.debug("Unmarshalled key: " + key);

            GridNearCacheEntry<K, V> entry = null;

            while (true) {
                try {
                    entry = peekExx(key);

                    if (entry != null) {
                        entry.keyBytes(bytes);

                        // Handle implicit locks for pessimistic transactions.
                        if (req.inTx()) {
                            tx = ctx.tm().tx(req.version());

                            if (tx != null)
                                tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                            else {
                                tx = new GridNearTxRemote<K, V>(
                                    nodeId,
                                    req.nearNodeId(),
                                    req.threadId(),
                                    req.version(),
                                    null,
                                    PESSIMISTIC,
                                    req.isolation(),
                                    req.isInvalidate(),
                                    req.timeout(),
                                    key,
                                    bytes,
                                    null, // Value.
                                    null, // Value bytes.
                                    ctx);

                                if (tx.empty())
                                    return tx;

                                tx = ctx.tm().onCreated(tx);

                                if (tx == null || !ctx.tm().onStarted(tx))
                                    throw new GridCacheTxRollbackException("Failed to acquire lock " +
                                        "(transaction has been completed): " + req.version());
                            }
                        }

                        // Add remote candidate before reordering.
                        entry.addRemote(
                            req.nodeId(),
                            nodeId,
                            req.threadId(),
                            req.version(),
                            req.timeout(),
                            tx != null && tx.ec(),
                            tx != null,
                            tx != null && tx.implicitSingle());

                        // Remote candidates for ordered lock queuing.
                        entry.addRemoteCandidates(
                            cands,
                            req.version(),
                            req.committedVersions(),
                            req.rolledbackVersions());

                        entry.orderOwned(req.version(), req.owned(entry.key()));
                    }

                    // Double-check in case the sender node left the grid.
                    if (ctx.discovery().node(req.nodeId()) == null) {
                        if (log.isDebugEnabled())
                            log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

                        if (tx != null)
                            tx.rollback();

                        return null;
                    }

                    // Entry is legit.
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                    if (log.isDebugEnabled())
                        log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                    if (tx != null) {
                        tx.clearEntry(entry.key());

                        if (log.isDebugEnabled())
                            log.debug("Cleared removed entry from remote transaction (will retry) [entry=" +
                                entry + ", tx=" + tx + ']');
                    }
                }
            }
        }
    }
    else {
        String err = "Failed to acquire deployment class loader for message: " + req;

        U.warn(log, err);

        throw new GridException(err);
    }

    return tx;
}
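// Illustrative sketch (assumption, not from the original source): both lock handlers above resolve
// the implicit pessimistic transaction the same way -- look up an existing remote transaction by
// the lock version, and only if none is found create one and register it with the transaction
// manager, rejecting the request if registration shows the transaction has already completed.
// Distilled to its core (constructor arguments elided, names as in startRemoteTx above):
//
//     GridNearTxRemote<K, V> tx = ctx.tm().tx(req.version());
//
//     if (tx == null) {
//         tx = new GridNearTxRemote<K, V>(/* constructor arguments as above */);
//
//         tx = ctx.tm().onCreated(tx);
//
//         if (tx == null || !ctx.tm().onStarted(tx))
//             throw new GridCacheTxRollbackException("Failed to acquire lock " +
//                 "(transaction has been completed): " + req.version());
//     }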
/**
 * This method is called to map or split a grid task into multiple grid jobs. This is the first
 * method that gets called when task execution starts.
 *
 * @param subgrid Nodes available for this task execution. Note that order of nodes is guaranteed
 *      to be randomized by container. This ensures that every time you simply iterate through grid
 *      nodes, the order of nodes will be random which over time should result in all nodes being
 *      used equally.
 * @param data Task execution argument. Can be {@code null}. This is the same argument as the one
 *      passed into {@code Grid#execute(...)} methods.
 * @return Map of grid jobs assigned to subgrid nodes. Unless {@link GridComputeTaskContinuousMapper}
 *      is injected into task, if {@code null} or empty map is returned, exception will be thrown.
 * @throws GridException If mapping could not complete successfully. This exception will be thrown
 *      out of {@link GridComputeTaskFuture#get()} method.
 */
@Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid,
    @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to the wanted node. Otherwise, take the first one.
    GridNode targetNode = F.find(subgrid, subgrid.get(0), new GridPredicate<GridNode>() {
        @Override public boolean apply(GridNode e) {
            return preferredNode.equals(e.id());
        }
    });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
            /** Injected logger. */
            @GridLoggerResource
            private GridLogger log;

            /** Injected grid instance. */
            @GridInstanceResource
            private Grid grid;

            @Override public Object execute() throws GridException {
                log.info("Going to put data: " + data.size());

                GridCache<Object, Object> cache = grid.cache(cacheName);

                assert cache != null;

                Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

                for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
                    T2<Integer, Collection<Integer>> pair = entry.getValue();

                    Object affKey = pair.get1();

                    // Start a group-lock transaction for this partition.
                    try (GridCacheTx tx = cache.txStartPartition(cache.affinity().partition(affKey),
                        optimistic ? OPTIMISTIC : PESSIMISTIC, REPEATABLE_READ, 0, pair.get2().size())) {
                        for (Integer val : pair.get2())
                            cache.put(val, val);

                        tx.commit();
                    }
                }

                log.info("Finished put data: " + data.size());

                return data;
            }

            /**
             * Groups values by partitions.
             *
             * @param data Data to put.
             * @return Grouped map.
             */
            private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
                GridCache<Object, Object> cache = grid.cache(cacheName);

                Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

                for (Integer val : data) {
                    int part = cache.affinity().partition(val);

                    T2<Integer, Collection<Integer>> tup = res.get(part);

                    if (tup == null) {
                        tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                        res.put(part, tup);
                    }

                    tup.get2().add(val);
                }

                return res;
            }
        },
        targetNode);
}
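// Illustrative usage sketch (assumption, not taken from the original source): a task built around
// the map(...) implementation above is submitted through the compute API with the collection of
// values to put as the task argument. The task class name PutTask, its constructor arguments and
// the exact submission entry point (Grid#execute(...) vs. grid.compute().execute(...)) are
// hypothetical here and depend on the GridGain version in use; Arrays is from java.util.
//
//     Grid grid = GridGain.grid();
//
//     Collection<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
//
//     // Runs a single job on the preferred node; each partition group is written in its own
//     // group-lock transaction, as shown in execute() above.
//     grid.compute().execute(new PutTask(preferredNodeId, cacheName, true), data).get();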