/**
 * @param in Object input.
 * @return Read collection.
 * @throws IOException If failed.
 * @throws ClassNotFoundException If failed.
 */
private Collection<Object> readFieldsCollection(ObjectInput in) throws IOException, ClassNotFoundException {
    assert fields;

    int size = in.readInt();

    if (size == -1)
        return null;

    Collection<Object> res = new ArrayList<>(size);

    for (int i = 0; i < size; i++) {
        int size0 = in.readInt();

        Collection<Object> col = new ArrayList<>(size0);

        for (int j = 0; j < size0; j++)
            col.add(in.readObject());

        assert col.size() == size0;

        res.add(col);
    }

    assert res.size() == size;

    return res;
}
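For orientation, the write side of this wire format would mirror the same layout: the outer size (with -1 standing for a null collection), then each inner collection's size followed by its elements. The sketch below is only an assumed counterpart; the name writeFieldsCollection and its exact signature are illustrative, not taken from the original class.

/**
 * Hypothetical counterpart of {@code readFieldsCollection}, shown for illustration only:
 * writes the outer size (or -1 for null), then each inner collection's size and elements.
 */
private void writeFieldsCollection(ObjectOutput out, @Nullable Collection<Collection<Object>> col)
    throws IOException {
    if (col == null) {
        out.writeInt(-1);

        return;
    }

    out.writeInt(col.size());

    for (Collection<Object> inner : col) {
        out.writeInt(inner.size());

        for (Object obj : inner)
            out.writeObject(obj);
    }
}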
/** {@inheritDoc} */
@Override public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds) throws GridSpiException {
    assert !F.isEmpty(nodeIds);

    long now = U.currentTimeMillis();

    Collection<UUID> expired = new LinkedList<>();

    for (UUID id : nodeIds) {
        GridNodeMetrics nodeMetrics = metricsMap.get(id);
        Long ts = tsMap.get(id);

        if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime)
            expired.add(id);
    }

    if (!expired.isEmpty()) {
        Map<UUID, GridNodeMetrics> refreshed = metrics0(expired);

        for (UUID id : refreshed.keySet())
            tsMap.put(id, now);

        metricsMap.putAll(refreshed);
    }

    return F.view(metricsMap, F.contains(nodeIds));
}
/** {@inheritDoc} */ @SuppressWarnings("TypeMayBeWeakened") @Nullable private Collection<Object> unmarshalFieldsCollection( @Nullable Collection<byte[]> byteCol, GridCacheContext<K, V> ctx, ClassLoader ldr) throws GridException { assert ctx != null; assert ldr != null; Collection<Object> col = unmarshalCollection(byteCol, ctx, ldr); Collection<Object> col0 = null; if (col != null) { col0 = new ArrayList<>(col.size()); for (Object o : col) { List<Object> list = (List<Object>) o; List<Object> list0 = new ArrayList<>(list.size()); for (Object obj : list) list0.add(obj != null ? ctx.marshaller().unmarshal((byte[]) obj, ldr) : null); col0.add(list0); } } return col0; }
/**
 * Schedules runnable task for execution.
 *
 * @param w Runnable task.
 * @throws GridException Thrown if any exception occurred.
 */
@SuppressWarnings({"CatchGenericClass", "ProhibitedExceptionThrown"})
public void execute(final GridWorker w) throws GridException {
    workers.add(w);

    try {
        exec.execute(new Runnable() {
            @Override public void run() {
                try {
                    w.run();
                }
                finally {
                    workers.remove(w);
                }
            }
        });
    }
    catch (RejectedExecutionException e) {
        workers.remove(w);

        throw new GridComputeExecutionRejectedException("Failed to execute worker due to execution rejection.", e);
    }
    catch (RuntimeException e) {
        workers.remove(w);

        throw new GridException("Failed to execute worker due to runtime exception.", e);
    }
    catch (Error e) {
        workers.remove(w);

        throw e;
    }
}
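The cleanup discipline above (register the worker before submitting, remove it in finally once it has run, and remove it again on any submission failure) is what keeps the workers collection consistent with the pool. Below is a minimal standalone sketch of the same pattern using only java.util and java.util.concurrent types; nothing is assumed about GridWorker beyond being a Runnable, and the class name is illustrative.

// Illustrative pattern only (not part of the original class): track a task before
// submission and always untrack it, whether it completes, is rejected, or the
// submission itself fails.
class TrackedExecutor {
    private final ExecutorService exec;

    private final Collection<Runnable> active =
        Collections.newSetFromMap(new ConcurrentHashMap<Runnable, Boolean>());

    TrackedExecutor(ExecutorService exec) {
        this.exec = exec;
    }

    void execute(final Runnable task) {
        active.add(task);

        try {
            exec.execute(new Runnable() {
                @Override public void run() {
                    try {
                        task.run();
                    }
                    finally {
                        active.remove(task); // Untrack once the task has finished.
                    }
                }
            });
        }
        catch (RejectedExecutionException e) {
            active.remove(task); // Wrapper never ran, so untrack here.

            throw e;
        }
    }

    /** @return Snapshot of tasks that are currently queued or running. */
    Collection<Runnable> active() {
        return new ArrayList<>(active);
    }
}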
/**
 * Add information about key and version to request.
 * <p>
 * Other version has to be provided if suspect lock is DHT local.
 *
 * @param key Key.
 * @param cand Candidate.
 */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
void addCandidate(K key, GridCacheDgcLockCandidate cand) {
    Collection<GridCacheDgcLockCandidate> col =
        F.addIfAbsent(map, key, new ArrayList<GridCacheDgcLockCandidate>());

    assert col != null;

    col.add(cand);
}
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, String arg) throws GridException {
    Collection<GridComputeJobAdapter> jobs = new ArrayList<>(jobCnt);

    for (int i = 0; i < jobCnt; i++)
        jobs.add(new TestJob());

    return jobs;
}
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException {
    if (log.isInfoEnabled())
        log.info("Splitting job [job=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

    Collection<GridComputeJob> jobs = new ArrayList<>(SPLIT_COUNT);

    for (int i = 1; i <= SPLIT_COUNT; i++)
        jobs.add(new GridCancelTestJob(i));

    return jobs;
}
/** {@inheritDoc} */ @SuppressWarnings("all") @Override public boolean readFrom(ByteBuffer buf) { commState.setBuffer(buf); if (!super.readFrom(buf)) return false; switch (commState.idx) { case 2: if (buf.remaining() < 1) return false; err = commState.getBoolean(); commState.idx++; case 3: if (buf.remaining() < 8) return false; futId = commState.getLong(); commState.idx++; case 4: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (rejectedKeyBytes == null) rejectedKeyBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; rejectedKeyBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; } return true; }
/** {@inheritDoc} */
@Override public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec = new ThreadPoolExecutor(threadsCnt, threadsCnt, 0L, MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(batchQueueSize), new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
        while (iter.hasNext()) {
            if (Thread.currentThread().isInterrupted()) {
                U.warn(log, "Working thread was interrupted while loading data.");

                break;
            }

            buf.add(iter.next());

            if (buf.size() == batchSize) {
                exec.submit(new Worker(c, buf, args));

                buf = new ArrayList<>(batchSize);
            }
        }

        if (!buf.isEmpty())
            exec.submit(new Worker(c, buf, args));
    }
    catch (RejectedExecutionException ignored) {
        // Because of custom RejectedExecutionHandler.
        assert false : "RejectedExecutionException was thrown while it shouldn't.";
    }
    finally {
        exec.shutdown();

        try {
            exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
        }
        catch (InterruptedException ignored) {
            U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

            Thread.currentThread().interrupt();
        }
    }
}
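BlockingRejectedExecutionHandler is referenced but not shown here. The usual way to get this back-pressure behavior (which is why the catch block in loadCache above treats RejectedExecutionException as impossible) is to block the submitting thread until the bounded queue has room again. The class below is a guess at that behavior for illustration, not the project's actual implementation.

/** Illustrative sketch only: blocks the submitter until the bounded queue accepts the task. */
class BlockingRejectedExecutionHandler implements RejectedExecutionHandler {
    @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor exec) {
        try {
            if (!exec.isShutdown())
                exec.getQueue().put(r); // Back-pressure: wait for free space instead of failing.
        }
        catch (InterruptedException ignored) {
            // Restore interrupt status; the loader checks the interrupt flag and stops.
            Thread.currentThread().interrupt();
        }
    }
}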
/** {@inheritDoc} */
@Override public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) {
    A.notEmpty(entries, "entries");

    enterBusy();

    try {
        GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx);

        activeFuts.add(resFut);

        resFut.listenAsync(rmvActiveFut);

        Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16);

        for (Map.Entry<K, V> entry : entries)
            keys.add(entry.getKey());

        load0(entries, resFut, keys, 0);

        return resFut;
    }
    finally {
        leaveBusy();
    }
}
/** {@inheritDoc} */ @SuppressWarnings("all") @Override public boolean readFrom(ByteBuffer buf) { commState.setBuffer(buf); if (!super.readFrom(buf)) return false; switch (commState.idx) { case 2: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (dataBytes == null) dataBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; dataBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; case 3: byte[] errBytes0 = commState.getByteArray(); if (errBytes0 == BYTE_ARR_NOT_READ) return false; errBytes = errBytes0; commState.idx++; case 4: if (buf.remaining() < 1) return false; fields = commState.getBoolean(); commState.idx++; case 5: if (buf.remaining() < 1) return false; finished = commState.getBoolean(); commState.idx++; case 6: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (metaDataBytes == null) metaDataBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; metaDataBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; case 7: commState.idx++; case 8: if (buf.remaining() < 8) return false; reqId = commState.getLong(); commState.idx++; } return true; }
/**
 * Add rejected key to response.
 *
 * @param key Evicted key.
 */
void addRejected(K key) {
    assert key != null;

    rejectedKeys.add(key);
}
/**
 * Maps keys to nodes. Note that we can not simply group keys by nodes and send lock request as
 * such approach does not preserve order of lock acquisition. Instead, keys are split in
 * continuous groups belonging to one primary node and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();

            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() : "Got removed exception for non-obsolete entry: " +
                                        dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                            dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(topVer, cctx.nodeId(), threadId, futId, lockVer,
                                        inTx(), implicitTx(), implicitSingleTx(), read, isolation(), isInvalidate(),
                                        timeout, syncCommit(), syncRollback(), mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(), inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(), inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
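The Javadoc's point about ordering can be shown in isolation: grouping keys per node and then iterating the groups would reorder lock acquisitions, whereas starting a new group only when the primary node changes keeps the original key order intact. Below is a simplified sketch under assumed names (the primary lookup is a plain Map here); it is not the real map(key, mapping, topVer) logic.

// Simplified illustration: walk keys in order and start a new group whenever the
// primary node changes, so groups can be processed sequentially without changing
// the order in which locks are acquired.
static <K> List<Map.Entry<UUID, List<K>>> splitIntoContiguousGroups(Iterable<K> keys, Map<K, UUID> primary) {
    List<Map.Entry<UUID, List<K>>> groups = new ArrayList<>();

    UUID curNode = null;
    List<K> curGrp = null;

    for (K key : keys) {
        UUID node = primary.get(key); // Assumed key -> primary node ID lookup.

        if (curGrp == null || !node.equals(curNode)) {
            curNode = node;
            curGrp = new ArrayList<>();

            groups.add(new AbstractMap.SimpleEntry<UUID, List<K>>(node, curGrp));
        }

        curGrp.add(key);
    }

    return groups;
}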