/** @param node Node to remove. */
public void removeMappedNode(GridNode node) {
    if (mappedDhtNodes.contains(node))
        mappedDhtNodes = new ArrayList<>(F.view(mappedDhtNodes, F.notEqualTo(node)));

    if (mappedNearNodes != null && mappedNearNodes.contains(node))
        mappedNearNodes = new ArrayList<>(F.view(mappedNearNodes, F.notEqualTo(node)));
}
/** {@inheritDoc} */ @SuppressWarnings("TypeMayBeWeakened") @Nullable private Collection<Object> unmarshalFieldsCollection( @Nullable Collection<byte[]> byteCol, GridCacheContext<K, V> ctx, ClassLoader ldr) throws GridException { assert ctx != null; assert ldr != null; Collection<Object> col = unmarshalCollection(byteCol, ctx, ldr); Collection<Object> col0 = null; if (col != null) { col0 = new ArrayList<>(col.size()); for (Object o : col) { List<Object> list = (List<Object>) o; List<Object> list0 = new ArrayList<>(list.size()); for (Object obj : list) list0.add(obj != null ? ctx.marshaller().unmarshal((byte[]) obj, ldr) : null); col0.add(list0); } } return col0; }
/** @throws Exception If failed. */
public void testCreateFileColocated() throws Exception {
    GridGgfsPath path = new GridGgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    GridUuid affKey;

    long idx = 0;

    while (true) {
        affKey = new GridUuid(uuid, idx);

        if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id()))
            break;

        idx++;
    }

    try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15M, should be enough to test distribution.
        for (int i = 0; i < 15; i++)
            out.write(new byte[1024 * 1024]);
    }

    GridGgfsFile info = fs.info(path);

    Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
/**
 * Schedules runnable task for execution.
 *
 * @param w Runnable task.
 * @throws GridException Thrown if any exception occurred.
 */
@SuppressWarnings({"CatchGenericClass", "ProhibitedExceptionThrown"})
public void execute(final GridWorker w) throws GridException {
    workers.add(w);

    try {
        exec.execute(new Runnable() {
            @Override public void run() {
                try {
                    w.run();
                }
                finally {
                    workers.remove(w);
                }
            }
        });
    }
    catch (RejectedExecutionException e) {
        workers.remove(w);

        throw new GridComputeExecutionRejectedException("Failed to execute worker due to execution rejection.", e);
    }
    catch (RuntimeException e) {
        workers.remove(w);

        throw new GridException("Failed to execute worker due to runtime exception.", e);
    }
    catch (Error e) {
        workers.remove(w);

        throw e;
    }
}
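// A minimal, self-contained sketch of the bookkeeping pattern execute() above relies on: the
// task is registered before submission and unregistered both on normal completion and on every
// failure path, so the tracking collection never leaks entries. All names here
// (TrackingExecutor, tasks) are hypothetical and only illustrate the idea.
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class TrackingExecutor {
    private final ExecutorService exec = Executors.newFixedThreadPool(4);
    private final Set<Runnable> tasks = ConcurrentHashMap.newKeySet();

    void execute(final Runnable task) {
        tasks.add(task); // Register first so the task is visible to observers immediately.

        try {
            exec.execute(new Runnable() {
                @Override public void run() {
                    try {
                        task.run();
                    }
                    finally {
                        tasks.remove(task); // Unregister on normal or exceptional completion.
                    }
                }
            });
        }
        catch (RuntimeException | Error e) {
            tasks.remove(task); // The wrapper never ran, so clean up here.

            throw e;
        }
    }
}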
/** {@inheritDoc} */ @Override public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds) throws GridSpiException { assert !F.isEmpty(nodeIds); long now = U.currentTimeMillis(); Collection<UUID> expired = new LinkedList<>(); for (UUID id : nodeIds) { GridNodeMetrics nodeMetrics = metricsMap.get(id); Long ts = tsMap.get(id); if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime) expired.add(id); } if (!expired.isEmpty()) { Map<UUID, GridNodeMetrics> refreshed = metrics0(expired); for (UUID id : refreshed.keySet()) tsMap.put(id, now); metricsMap.putAll(refreshed); } return F.view(metricsMap, F.contains(nodeIds)); }
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, String arg) throws GridException {
    Collection<GridComputeJobAdapter> jobs = new ArrayList<>(jobCnt);

    for (int i = 0; i < jobCnt; i++)
        jobs.add(new TestJob());

    return jobs;
}
/**
 * Adds information about a key and version to the request.
 *
 * <p>The other version has to be provided if the suspect lock is DHT-local.
 *
 * @param key Key.
 * @param cand Candidate.
 */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
void addCandidate(K key, GridCacheDgcLockCandidate cand) {
    Collection<GridCacheDgcLockCandidate> col =
        F.addIfAbsent(map, key, new ArrayList<GridCacheDgcLockCandidate>());

    assert col != null;

    col.add(cand);
}
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException {
    if (log.isInfoEnabled())
        log.info("Splitting job [job=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

    Collection<GridComputeJob> jobs = new ArrayList<>(SPLIT_COUNT);

    for (int i = 1; i <= SPLIT_COUNT; i++)
        jobs.add(new GridCancelTestJob(i));

    return jobs;
}
/** {@inheritDoc} */ @SuppressWarnings("all") @Override public boolean writeTo(ByteBuffer buf) { commState.setBuffer(buf); if (!super.writeTo(buf)) return false; if (!commState.typeWritten) { if (!commState.putByte(directType())) return false; commState.typeWritten = true; } switch (commState.idx) { case 2: if (!commState.putBoolean(err)) return false; commState.idx++; case 3: if (!commState.putLong(futId)) return false; commState.idx++; case 4: if (rejectedKeyBytes != null) { if (commState.it == null) { if (!commState.putInt(rejectedKeyBytes.size())) return false; commState.it = rejectedKeyBytes.iterator(); } while (commState.it.hasNext() || commState.cur != NULL) { if (commState.cur == NULL) commState.cur = commState.it.next(); if (!commState.putByteArray((byte[]) commState.cur)) return false; commState.cur = NULL; } commState.it = null; } else { if (!commState.putInt(-1)) return false; } commState.idx++; } return true; }
/**
 * @param rmtReducer Optional reducer.
 * @param rmtTransform Optional transformer.
 * @param args Arguments.
 * @return Future.
 */
@SuppressWarnings("IfMayBeConditional")
private <R> GridCacheQueryFuture<R> execute(
    @Nullable GridReducer<T, R> rmtReducer,
    @Nullable GridClosure<T, R> rmtTransform,
    @Nullable Object... args
) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
        return new GridCacheQueryErrorFuture<>(
            cctx.kernalContext(),
            new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.name()));

    if (log.isDebugEnabled())
        log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
        try {
            cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
            cctx.deploy().registerClasses(args);
        }
        catch (GridException e) {
            return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
        }
    }

    if (subjId == null)
        subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean = new GridCacheQueryBean(
        this,
        (GridReducer<Object, Object>)rmtReducer,
        (GridClosure<Object, Object>)rmtTransform,
        args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
        return (GridCacheQueryFuture<R>)(loc ? qryMgr.queryFieldsLocal(bean) :
            qryMgr.queryFieldsDistributed(bean, nodes));
    else
        return (GridCacheQueryFuture<R>)(loc ? qryMgr.queryLocal(bean) :
            qryMgr.queryDistributed(bean, nodes));
}
/** {@inheritDoc} */
@Override public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec = new ThreadPoolExecutor(
        threadsCnt,
        threadsCnt,
        0L,
        MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(batchQueueSize),
        new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
        while (iter.hasNext()) {
            if (Thread.currentThread().isInterrupted()) {
                U.warn(log, "Working thread was interrupted while loading data.");

                break;
            }

            buf.add(iter.next());

            if (buf.size() == batchSize) {
                exec.submit(new Worker(c, buf, args));

                buf = new ArrayList<>(batchSize);
            }
        }

        if (!buf.isEmpty())
            exec.submit(new Worker(c, buf, args));
    }
    catch (RejectedExecutionException ignored) {
        // Never expected because of the custom RejectedExecutionHandler.
        assert false : "RejectedExecutionException should not have been thrown.";
    }
    finally {
        exec.shutdown();

        try {
            exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
        }
        catch (InterruptedException ignored) {
            U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

            Thread.currentThread().interrupt();
        }
    }
}
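// loadCache() above relies on a rejection handler that blocks the submitting thread instead of
// throwing, which turns the bounded ArrayBlockingQueue into back-pressure on the producer. The
// source of BlockingRejectedExecutionHandler is not shown here; a plausible minimal
// implementation of such a handler looks like this (a sketch, not the actual class):
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

class BlockingRejectedExecutionHandlerSketch implements RejectedExecutionHandler {
    @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        if (executor.isShutdown())
            throw new RejectedExecutionException("Executor has been shut down.");

        try {
            // Block until a queue slot frees up, throttling the producer thread.
            executor.getQueue().put(r);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();

            throw new RejectedExecutionException("Interrupted while waiting for queue space.", e);
        }
    }
}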
/** {@inheritDoc} */ @SuppressWarnings("TypeMayBeWeakened") @Nullable private Collection<byte[]> marshalFieldsCollection( @Nullable Collection<Object> col, GridCacheContext<K, V> ctx) throws GridException { assert ctx != null; if (col == null) return null; Collection<List<Object>> col0 = new ArrayList<>(col.size()); for (Object o : col) { List<GridIndexingEntity<?>> list = (List<GridIndexingEntity<?>>) o; List<Object> list0 = new ArrayList<>(list.size()); for (GridIndexingEntity<?> ent : list) { if (ent.bytes() != null) list0.add(ent.bytes()); else { if (ctx.deploymentEnabled()) prepareObject(ent.value(), ctx); list0.add(CU.marshal(ctx, ent.value())); } } col0.add(list0); } return marshalCollection(col0, ctx); }
/**
 * Maps list by node ID.
 *
 * @param subgrid Subgrid.
 * @return Map.
 */
private Map<UUID, GridNode> mapSubgrid(Collection<GridNode> subgrid) {
    Map<UUID, GridNode> res = U.newHashMap(subgrid.size());

    for (GridNode node : subgrid)
        res.put(node.id(), node);

    return res;
}
/**
 * @param in Object input.
 * @return Read collection.
 * @throws IOException If failed.
 * @throws ClassNotFoundException If failed.
 */
private Collection<Object> readFieldsCollection(ObjectInput in) throws IOException, ClassNotFoundException {
    assert fields;

    int size = in.readInt();

    if (size == -1)
        return null;

    Collection<Object> res = new ArrayList<>(size);

    for (int i = 0; i < size; i++) {
        int size0 = in.readInt();

        Collection<Object> col = new ArrayList<>(size0);

        for (int j = 0; j < size0; j++)
            col.add(in.readObject());

        assert col.size() == size0;

        res.add(col);
    }

    assert res.size() == size;

    return res;
}
/** {@inheritDoc} */ @SuppressWarnings("all") @Override public boolean readFrom(ByteBuffer buf) { commState.setBuffer(buf); if (!super.readFrom(buf)) return false; switch (commState.idx) { case 2: if (buf.remaining() < 1) return false; err = commState.getBoolean(); commState.idx++; case 3: if (buf.remaining() < 8) return false; futId = commState.getLong(); commState.idx++; case 4: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (rejectedKeyBytes == null) rejectedKeyBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; rejectedKeyBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; } return true; }
/**
 * @param cctx Registry.
 * @param keys Keys to lock.
 * @param tx Transaction.
 * @param read Read flag.
 * @param retval Flag to return value or not.
 * @param timeout Lock acquisition timeout.
 * @param filter Filter.
 */
public GridNearLockFuture(
    GridCacheContext<K, V> cctx,
    Collection<? extends K> keys,
    @Nullable GridNearTxLocal<K, V> tx,
    boolean read,
    boolean retval,
    long timeout,
    GridPredicate<GridCacheEntry<K, V>>[] filter
) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
        timeoutObj = new LockTimeoutObject();

        cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
}
/** {@inheritDoc} */ @Override public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) { A.notEmpty(entries, "entries"); enterBusy(); try { GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx); activeFuts.add(resFut); resFut.listenAsync(rmvActiveFut); Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16); for (Map.Entry<K, V> entry : entries) keys.add(entry.getKey()); load0(entries, resFut, keys, 0); return resFut; } finally { leaveBusy(); } }
/**
 * @param out Object output.
 * @throws IOException If failed.
 */
@SuppressWarnings("TypeMayBeWeakened")
private void writeFieldsCollection(ObjectOutput out) throws IOException {
    assert fields;

    out.writeInt(data != null ? data.size() : -1);

    if (data == null)
        return;

    for (Object o : data) {
        List<GridIndexingEntity<?>> list = (List<GridIndexingEntity<?>>)o;

        out.writeInt(list.size());

        for (GridIndexingEntity<?> idxEnt : list) {
            try {
                out.writeObject(idxEnt.value());
            }
            catch (GridSpiException e) {
                throw new IOException("Failed to write indexing entity: " + idxEnt, e);
            }
        }
    }
}
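// writeFieldsCollection() and readFieldsCollection() above share a simple wire convention: a
// leading int holds the outer size, -1 encodes a null collection, and each row is written as
// its own size-prefixed list. A self-contained sketch of that convention (hypothetical names):
import java.io.*;
import java.util.*;

final class SizePrefixedLists {
    /** Writes a nested list, encoding {@code null} as size -1. */
    static void write(ObjectOutput out, List<List<Object>> rows) throws IOException {
        out.writeInt(rows != null ? rows.size() : -1);

        if (rows == null)
            return;

        for (List<Object> row : rows) {
            out.writeInt(row.size());

            for (Object cell : row)
                out.writeObject(cell);
        }
    }

    /** Reads a nested list written by {@link #write}; returns {@code null} for size -1. */
    static List<List<Object>> read(ObjectInput in) throws IOException, ClassNotFoundException {
        int size = in.readInt();

        if (size == -1)
            return null;

        List<List<Object>> rows = new ArrayList<>(size);

        for (int i = 0; i < size; i++) {
            int rowSize = in.readInt();

            List<Object> row = new ArrayList<>(rowSize);

            for (int j = 0; j < rowSize; j++)
                row.add(in.readObject());

            rows.add(row);
        }

        return rows;
    }
}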
/** {@inheritDoc} */ @Override public Class<?> deployClass() { if (cls == null) { Class<?> cls0 = null; if (depCls != null) cls0 = depCls; else { for (Iterator<Object> it = objs.iterator(); (cls0 == null || U.isJdk(cls0)) && it.hasNext(); ) { Object o = it.next(); if (o != null) cls0 = U.detectClass(o); } if (cls0 == null || U.isJdk(cls0)) cls0 = GridDataLoaderImpl.class; } assert cls0 != null : "Failed to detect deploy class [objs=" + objs + ']'; cls = cls0; } return cls; }
/** {@inheritDoc} */
@Override public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
        throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null)
        prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
        throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
        Collection<GridNode> nodes = prj.nodes();

        GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

        assert node != null;

        if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
            if (node.id().equals(ctx.localNodeId()))
                U.warn(log, "Continuous query for " + mode + " cache can be run only on local node. " +
                    "Will execute query locally: " + this);
            else
                U.warn(log, "Continuous query for " + mode + " cache can be run only on single node. " +
                    "Will execute query on remote node [qry=" + this + ", node=" + node + ']');
        }

        prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
        if (routineId != null)
            throw new IllegalStateException("Continuous query can't be executed twice.");

        guard.block();

        GridContinuousHandler hnd = new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb, filter, prjPred);

        routineId = ctx.kernalContext().continuous().startRoutine(hnd, bufSize, timeInterval,
            autoUnsubscribe, prj.predicate()).get();
    }
    finally {
        closeLock.unlock();
    }
}
/** {@inheritDoc} */ @SuppressWarnings("all") @Override public boolean readFrom(ByteBuffer buf) { commState.setBuffer(buf); if (!super.readFrom(buf)) return false; switch (commState.idx) { case 2: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (dataBytes == null) dataBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; dataBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; case 3: byte[] errBytes0 = commState.getByteArray(); if (errBytes0 == BYTE_ARR_NOT_READ) return false; errBytes = errBytes0; commState.idx++; case 4: if (buf.remaining() < 1) return false; fields = commState.getBoolean(); commState.idx++; case 5: if (buf.remaining() < 1) return false; finished = commState.getBoolean(); commState.idx++; case 6: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (metaDataBytes == null) metaDataBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; metaDataBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; case 7: commState.idx++; case 8: if (buf.remaining() < 8) return false; reqId = commState.getLong(); commState.idx++; } return true; }
/**
 * Test file creation.
 *
 * @param path Path to file to store.
 * @param size Size of file to store.
 * @param salt Salt for file content generation.
 * @throws Exception In case of any exception.
 */
private void testCreateFile(final GridGgfsPath path, final long size, final int salt) throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');

    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<GridGgfsPath> cleanUp = new ConcurrentLinkedQueue<>();

    long time = runMultiThreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            int id = cnt.incrementAndGet();

            GridGgfsPath f = new GridGgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));

            try (GridGgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                assertNotNull(out);

                cleanUp.add(f); // Add all created into cleanup list.

                U.copy(new GridGgfsTestInputStream(size, salt), out);
            }

            return null;
        }
    }, WRITING_THREADS_CNT, "perform-multi-thread-writing");

    if (time > 0) {
        double rate = size * 1000. / time / 1024 / 1024;

        info(String.format("Write file [path=%s, size=%d kB, rate=%2.1f MB/s]", path,
            WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }

    info("Read and validate saved file: " + path);

    final InputStream expIn = new GridGgfsTestInputStream(size, salt);
    final GridGgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);

    // Validate continuous reading of whole file.
    assertEqualStreams(expIn, actIn, size, null);

    // Validate random seek and reading.
    final Random rnd = new Random();

    runMultiThreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));

            assertEqualStreams(new GridGgfsTestInputStream(size, salt), actIn, range, skip);

            return null;
        }
    }, READING_THREADS_CNT, "validate-multi-thread-reading");

    expIn.close();
    actIn.close();

    info("Get stored file info: " + path);

    GridGgfsFile desc = fs.info(path);

    info("Validate stored file info: " + desc);

    assertNotNull(desc);

    if (log.isDebugEnabled())
        log.debug("File descriptor: " + desc);

    Collection<GridGgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());

    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());

    int blockSize = desc.blockSize();

    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    //assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    //assertEquals("Permission sticky bit marks this is file", false, desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());

    info("Cleanup files: " + cleanUp);

    for (GridGgfsPath f : cleanUp) {
        fs.delete(f, true);

        assertNull(fs.info(f));
    }
}
/**
 * Maps keys to nodes. Note that we can not simply group keys by nodes and send lock request as
 * such approach does not preserve order of lock acquisition. Instead, keys are split in
 * continuous groups belonging to one primary node and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext();) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();
            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() :
                                        "Got removed exception for non-obsolete entry: " + dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                            dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
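// The Javadoc of map() above stresses that keys must be split into *contiguous* runs per
// primary node rather than grouped arbitrarily, so the original lock-acquisition order is
// preserved. A self-contained sketch of that grouping step (all names hypothetical):
import java.util.*;
import java.util.function.Function;

final class OrderPreservingGrouping {
    /** One contiguous run of keys that map to the same node. */
    static final class Run<K, N> {
        final N node;
        final List<K> keys = new ArrayList<>();

        Run(N node) { this.node = node; }
    }

    /**
     * Splits {@code keys} into contiguous runs by primary node. Unlike a plain
     * group-by, a node may own several runs if its keys are interleaved with
     * another node's keys, which keeps the overall acquisition order intact.
     */
    static <K, N> List<Run<K, N>> split(Iterable<K> keys, Function<K, N> primary) {
        List<Run<K, N>> runs = new ArrayList<>();

        Run<K, N> cur = null;

        for (K key : keys) {
            N node = primary.apply(key);

            // Start a new run whenever the primary node changes.
            if (cur == null || !cur.node.equals(node)) {
                cur = new Run<>(node);

                runs.add(cur);
            }

            cur.keys.add(key);
        }

        return runs;
    }
}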
/**
 * Add rejected key to response.
 *
 * @param key Evicted key.
 */
void addRejected(K key) {
    assert key != null;

    rejectedKeys.add(key);
}
/**
 * Creates node predicate that evaluates to {@code true} for all provided node IDs. Implementation
 * will make a defensive copy.
 *
 * @param ids Optional node IDs. If none provided - predicate will always return {@code false}.
 */
public GridNodePredicate(@Nullable Collection<UUID> ids) {
    this.ids = F.isEmpty(ids) ? Collections.<UUID>emptySet() :
        ids.size() == 1 ? Collections.singleton(F.first(ids)) :
            new HashSet<>(ids);
}
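// The constructor above specializes the defensive copy by cardinality: a shared empty set for
// no IDs, a singleton for one ID, and a full HashSet copy otherwise. The same pattern as a
// standalone helper (hypothetical name; java.util.Set.copyOf offers similar behavior on newer
// JDKs):
import java.util.*;

final class Sets {
    /** Returns an immutable-by-convention defensive copy sized to the input. */
    static <T> Set<T> defensiveCopy(Collection<T> c) {
        if (c == null || c.isEmpty())
            return Collections.emptySet(); // Shared immutable instance: no allocation.

        if (c.size() == 1)
            return Collections.singleton(c.iterator().next()); // Cheapest single-element set.

        return new HashSet<>(c); // Full copy; caller mutations can no longer leak in.
    }
}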