/**
 * @param in Object input.
 * @return Read collection.
 * @throws IOException If failed.
 * @throws ClassNotFoundException If failed.
 */
private Collection<Object> readFieldsCollection(ObjectInput in) throws IOException, ClassNotFoundException {
    assert fields;

    int size = in.readInt();

    if (size == -1)
        return null;

    Collection<Object> res = new ArrayList<>(size);

    for (int i = 0; i < size; i++) {
        int size0 = in.readInt();

        Collection<Object> col = new ArrayList<>(size0);

        for (int j = 0; j < size0; j++)
            col.add(in.readObject());

        assert col.size() == size0;

        res.add(col);
    }

    assert res.size() == size;

    return res;
}
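// Hypothetical counterpart sketch (not shown in this section): the write side
// implied by the format read above -- a -1 size marks a null collection, and
// each nested collection is prefixed by its own size. Names are assumptions.
private void writeFieldsCollection(ObjectOutput out, @Nullable Collection<Object> col) throws IOException {
    assert fields;

    if (col == null) {
        out.writeInt(-1);

        return;
    }

    out.writeInt(col.size());

    for (Object o : col) {
        Collection<?> col0 = (Collection<?>)o;

        out.writeInt(col0.size());

        for (Object obj : col0)
            out.writeObject(obj);
    }
}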
/** @throws Exception If failed. */
public void testClientAffinity() throws Exception {
    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    Collection<Object> keys = new ArrayList<>();

    keys.addAll(Arrays.asList(Boolean.TRUE, Boolean.FALSE, 1, Integer.MAX_VALUE));

    Random rnd = new Random();

    StringBuilder sb = new StringBuilder();

    // Generate some random strings.
    for (int i = 0; i < 100; i++) {
        sb.setLength(0);

        for (int j = 0; j < 255; j++)
            // Only printable ASCII symbols for test.
            sb.append((char)(rnd.nextInt(0x7f - 0x20) + 0x20));

        keys.add(sb.toString());
    }

    // Generate some more keys to achieve better coverage.
    for (int i = 0; i < 100; i++)
        keys.add(UUID.randomUUID());

    for (Object key : keys) {
        UUID nodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

        UUID clientNodeId = partitioned.affinity(key);

        assertEquals("Invalid affinity mapping for REST response for key: " + key, nodeId, clientNodeId);
    }
}
/** {@inheritDoc} */
@Override public void explicitUndeploy(UUID nodeId, String rsrcName) {
    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    synchronized (mux) {
        for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            List<SharedDeployment> deps = i1.next();

            for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                SharedDeployment dep = i2.next();

                if (dep.hasName(rsrcName)) {
                    if (!dep.isUndeployed()) {
                        dep.undeploy();

                        dep.onRemoved();

                        // Undeploy.
                        i2.remove();

                        undeployed.add(dep);

                        if (log.isInfoEnabled())
                            log.info("Undeployed per-version class loader: " + dep);
                    }

                    break;
                }
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    recordUndeployed(null, undeployed);
}
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException {
    Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

    this.gridSize = gridSize;

    final String locNodeId = grid.localNode().id().toString();

    for (int i = 0; i < gridSize; i++) {
        jobs.add(new GridComputeJobAdapter() {
            @SuppressWarnings("OverlyStrongTypeCast")
            @Override public Object execute() {
                try {
                    Thread.sleep(1000);
                }
                catch (InterruptedException ignored) {
                    Thread.currentThread().interrupt();
                }

                return new GridBiTuple<>(locNodeId, 1);
            }
        });
    }

    return jobs;
}
/** {@inheritDoc} */ @SuppressWarnings("TypeMayBeWeakened") @Nullable private Collection<Object> unmarshalFieldsCollection( @Nullable Collection<byte[]> byteCol, GridCacheContext<K, V> ctx, ClassLoader ldr) throws GridException { assert ctx != null; assert ldr != null; Collection<Object> col = unmarshalCollection(byteCol, ctx, ldr); Collection<Object> col0 = null; if (col != null) { col0 = new ArrayList<>(col.size()); for (Object o : col) { List<Object> list = (List<Object>) o; List<Object> list0 = new ArrayList<>(list.size()); for (Object obj : list) list0.add(obj != null ? ctx.marshaller().unmarshal((byte[]) obj, ldr) : null); col0.add(list0); } } return col0; }
/** {@inheritDoc} */
@Override protected Collection<E> dequeue0(int cnt) {
    WindowHolder tup = ref.get();

    AtomicInteger size = tup.size();
    Collection<T> evts = tup.collection();

    Collection<E> resCol = new ArrayList<>(cnt);

    while (true) {
        int curSize = size.get();

        if (curSize > 0) {
            if (size.compareAndSet(curSize, curSize - 1)) {
                E res = pollInternal(evts, tup.set());

                if (res != null) {
                    resCol.add(res);

                    if (resCol.size() >= cnt)
                        return resCol;
                }
                else {
                    // Poll returned nothing: restore the slot we reserved via CAS and stop.
                    size.incrementAndGet();

                    return resCol;
                }
            }
        }
        else
            return resCol;
    }
}
/**
 * Check whether provided path must be excluded from evictions.
 *
 * @param path Path.
 * @return {@code True} if blocks of the file denoted by this path must be excluded from evictions.
 * @throws GridException In case of faulty patterns.
 */
public boolean exclude(GridGgfsPath path) throws GridException {
    assert path != null;

    Collection<Pattern> excludePatterns0;

    if (excludeRecompile.compareAndSet(true, false)) {
        // Recompile.
        Collection<String> excludePaths0 = excludePaths;

        if (excludePaths0 != null) {
            excludePatterns0 = new HashSet<>(excludePaths0.size(), 1.0f);

            for (String excludePath : excludePaths0) {
                try {
                    excludePatterns0.add(Pattern.compile(excludePath));
                }
                catch (PatternSyntaxException ignore) {
                    throw new GridException("Invalid regex pattern: " + excludePath);
                }
            }

            excludePatterns = excludePatterns0;
        }
        else
            excludePatterns0 = excludePatterns = null;
    }
    else
        excludePatterns0 = excludePatterns;

    if (excludePatterns0 != null) {
        String pathStr = path.toString();

        for (Pattern pattern : excludePatterns0) {
            if (pattern.matcher(pathStr).matches())
                return true;
        }
    }

    return false;
}
/**
 * @param cacheCtx Cache context.
 * @param key Key to add to read set.
 * @param op Cache operation.
 * @param val Value.
 * @param drVer Data center replication version.
 * @param skipStore Skip store flag.
 * @throws IgniteCheckedException If failed.
 * @return {@code True} if entry has been enlisted.
 */
public boolean addEntry(
    GridCacheContext cacheCtx,
    IgniteTxKey key,
    GridCacheOperation op,
    CacheObject val,
    @Nullable GridCacheVersion drVer,
    boolean skipStore
) throws IgniteCheckedException {
    checkInternal(key);

    GridNearCacheEntry cached = cacheCtx.near().peekExx(key.key());

    try {
        if (cached == null) {
            evicted.add(key);

            return false;
        }
        else {
            cached.unswap();

            CacheObject peek = cached.peek(true, false, false, null);

            if (peek == null && cached.evictInternal(false, xidVer, null)) {
                cached.context().cache().removeIfObsolete(key.key());

                evicted.add(key);

                return false;
            }
            else {
                IgniteTxEntry txEntry = new IgniteTxEntry(cacheCtx, this, op, val, -1L, -1L, cached, drVer, skipStore);

                writeMap.put(key, txEntry);

                return true;
            }
        }
    }
    catch (GridCacheEntryRemovedException ignore) {
        evicted.add(key);

        if (log.isDebugEnabled())
            log.debug("Got removed entry when adding reads to remote transaction (will ignore): " + cached);

        return false;
    }
}
/**
 * Sends query request.
 *
 * @param fut Distributed future.
 * @param req Request.
 * @param nodes Nodes.
 * @throws IgniteCheckedException In case of error.
 */
@SuppressWarnings("unchecked")
private void sendRequest(
    final GridCacheDistributedQueryFuture<?, ?, ?> fut,
    final GridCacheQueryRequest req,
    Collection<ClusterNode> nodes
) throws IgniteCheckedException {
    assert fut != null;
    assert req != null;
    assert nodes != null;

    final UUID locNodeId = cctx.localNodeId();

    ClusterNode locNode = null;

    Collection<ClusterNode> rmtNodes = null;

    for (ClusterNode n : nodes) {
        if (n.id().equals(locNodeId))
            locNode = n;
        else {
            if (rmtNodes == null)
                rmtNodes = new ArrayList<>(nodes.size());

            rmtNodes.add(n);
        }
    }

    // Request should be sent to remote nodes before the query is processed on the local node.
    // For example, a remote reducer has a state, we should not serialize and then send
    // the reducer changed by the local node.
    if (!F.isEmpty(rmtNodes)) {
        cctx.io().safeSend(rmtNodes, req, cctx.ioPolicy(), new P1<ClusterNode>() {
            @Override public boolean apply(ClusterNode node) {
                fut.onNodeLeft(node.id());

                return !fut.isDone();
            }
        });
    }

    if (locNode != null) {
        cctx.closures().callLocalSafe(new Callable<Object>() {
            @Override public Object call() throws Exception {
                req.beforeLocalExecution(cctx);

                processQueryRequest(locNodeId, req);

                return null;
            }
        });
    }
}
/** {@inheritDoc} */
@Override public Collection<UUID> nodeIds() {
    Collection<UUID> ids = new GridLeanSet<UUID>();

    ids.add(cctx.nodeId());
    ids.addAll(mappings.keySet());

    return ids;
}
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, String arg) throws GridException {
    Collection<GridComputeJobAdapter> jobs = new ArrayList<>(jobCnt);

    for (int i = 0; i < jobCnt; i++)
        jobs.add(new TestJob());

    return jobs;
}
/**
 * @param entry Entry to enlist.
 * @throws IgniteCheckedException If failed.
 * @return {@code True} if entry was enlisted.
 */
private boolean addEntry(IgniteTxEntry entry) throws IgniteCheckedException {
    checkInternal(entry.txKey());

    GridCacheContext cacheCtx = entry.context();

    if (!cacheCtx.isNear())
        cacheCtx = cacheCtx.dht().near().context();

    GridNearCacheEntry cached = cacheCtx.near().peekExx(entry.key());

    if (cached == null) {
        evicted.add(entry.txKey());

        return false;
    }
    else {
        cached.unswap();

        try {
            CacheObject val = cached.peek(true, false, false, null);

            if (val == null && cached.evictInternal(false, xidVer, null)) {
                evicted.add(entry.txKey());

                return false;
            }
            else {
                // Initialize cache entry.
                entry.cached(cached);

                writeMap.put(entry.txKey(), entry);

                addExplicit(entry);

                return true;
            }
        }
        catch (GridCacheEntryRemovedException ignore) {
            evicted.add(entry.txKey());

            if (log.isDebugEnabled())
                log.debug("Got removed entry when adding to remote transaction (will ignore): " + cached);

            return false;
        }
    }
}
/** {@inheritDoc} */
@Override public Collection<GridDeployment> getDeployments() {
    Collection<GridDeployment> deps = new LinkedList<GridDeployment>();

    synchronized (mux) {
        for (List<SharedDeployment> list : cache.values())
            for (SharedDeployment d : list)
                deps.add(d);
    }

    return deps;
}
/** {@inheritDoc} */
@Override protected Collection<E> pollEvicted0(int cnt) {
    Collection<E> res = new ArrayList<>(cnt);

    for (int i = 0; i < cnt; i++) {
        E evicted = pollEvictedInternal();

        if (evicted == null)
            return res;

        res.add(evicted);
    }

    return res;
}
/** {@inheritDoc} */
@Override public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer1=" + topVer +
            ", topVer2=" + this.topVer + ", cache=" + cctx.name() + ", node2part=" + node2part + ']';

        Collection<ClusterNode> nodes = null;

        Collection<UUID> nodeIds = part2node.get(p);

        if (!F.isEmpty(nodeIds)) {
            Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

            for (UUID nodeId : nodeIds) {
                if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
                    ClusterNode n = cctx.discovery().node(nodeId);

                    if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                        if (nodes == null) {
                            nodes = new ArrayList<>(affNodes.size() + 2);

                            nodes.addAll(affNodes);
                        }

                        nodes.add(n);
                    }
                }
            }
        }

        return nodes != null ? nodes : affNodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
/** {@inheritDoc} */
@Override public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec = new ThreadPoolExecutor(
        threadsCnt,
        threadsCnt,
        0L,
        MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(batchQueueSize),
        new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
        while (iter.hasNext()) {
            if (Thread.currentThread().isInterrupted()) {
                U.warn(log, "Working thread was interrupted while loading data.");

                break;
            }

            buf.add(iter.next());

            if (buf.size() == batchSize) {
                exec.submit(new Worker(c, buf, args));

                buf = new ArrayList<>(batchSize);
            }
        }

        if (!buf.isEmpty())
            exec.submit(new Worker(c, buf, args));
    }
    catch (RejectedExecutionException ignored) {
        // Should never happen because of the custom RejectedExecutionHandler.
        assert false : "RejectedExecutionException must not be thrown here.";
    }
    finally {
        exec.shutdown();

        try {
            exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
        }
        catch (InterruptedException ignored) {
            U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

            Thread.currentThread().interrupt();
        }
    }
}
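// The BlockingRejectedExecutionHandler used above is not shown in this section.
// A minimal sketch of such a handler, assuming it simply blocks the submitting
// thread until the bounded queue has room (so RejectedExecutionException can
// effectively surface only after shutdown):
private static class BlockingRejectedExecutionHandler implements RejectedExecutionHandler {
    /** {@inheritDoc} */
    @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        try {
            if (!executor.isShutdown())
                executor.getQueue().put(r); // Block until space frees up in the queue.
        }
        catch (InterruptedException ignored) {
            Thread.currentThread().interrupt();
        }
    }
}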
/** {@inheritDoc} */
@Override public Collection<GridDeployment> getDeployments() {
    Collection<GridDeployment> deps = new ArrayList<GridDeployment>();

    synchronized (mux) {
        for (List<GridDeployment> depList : cache.values()) {
            for (GridDeployment d : depList) {
                if (!deps.contains(d))
                    deps.add(d);
            }
        }

        return deps;
    }
}
/** {@inheritDoc} */
@Override public void start0() throws GridException {
    aff = new GridAffinityAssignmentCache(cctx, cctx.namex(), cctx.config().getAffinity(),
        cctx.config().getAffinityMapper(), cctx.config().getBackups());

    // Generate internal keys for partitions.
    int partCnt = partitions();

    partAffKeys = new GridPartitionLockKey[partCnt];

    Collection<Integer> found = new HashSet<>();

    long affKey = 0;

    while (true) {
        GridPartitionLockKey key = new GridPartitionLockKey(affKey);

        int part = aff.partition(key);

        if (found.add(part)) {
            // This is a key for not yet calculated partition.
            key.partitionId(part);

            partAffKeys[part] = key;

            if (found.size() == partCnt)
                break;
        }

        affKey++;

        if (affKey > partCnt * MAX_PARTITION_KEY_ATTEMPT_RATIO)
            throw new IllegalStateException("Failed to calculate partition affinity keys for given affinity " +
                "function [attemptCnt=" + affKey + ", found=" + found + ", cacheName=" + cctx.name() + ']');
    }
}
/**
 * Processes cache query request.
 *
 * @param sndId Sender node id.
 * @param req Query request.
 */
@SuppressWarnings("unchecked")
@Override void processQueryRequest(UUID sndId, GridCacheQueryRequest req) {
    if (req.cancel()) {
        cancelIds.add(new CancelMessageId(req.id(), sndId));

        if (req.fields())
            removeFieldsQueryResult(sndId, req.id());
        else
            removeQueryResult(sndId, req.id());
    }
    else {
        if (!cancelIds.contains(new CancelMessageId(req.id(), sndId))) {
            if (!F.eq(req.cacheName(), cctx.name())) {
                GridCacheQueryResponse res = new GridCacheQueryResponse(
                    cctx.cacheId(),
                    req.id(),
                    new IgniteCheckedException("Received request for incorrect cache [expected=" + cctx.name() +
                        ", actual=" + req.cacheName() + ']'));

                sendQueryResponse(sndId, res, 0);
            }
            else {
                threads.put(req.id(), Thread.currentThread());

                try {
                    GridCacheQueryInfo info = distributedQueryInfo(sndId, req);

                    if (info == null)
                        return;

                    if (req.fields())
                        runFieldsQuery(info);
                    else
                        runQuery(info);
                }
                catch (Throwable e) {
                    U.error(log(), "Failed to run query.", e);

                    sendQueryResponse(sndId, new GridCacheQueryResponse(cctx.cacheId(), req.id(), e.getCause()), 0);

                    if (e instanceof Error)
                        throw (Error)e;
                }
                finally {
                    threads.remove(req.id());
                }
            }
        }
    }
}
/** {@inheritDoc} */
@Override public Map<GridRichNode, Collection<K>> mapKeysToNodes(Collection<? extends K> keys) {
    Map<GridRichNode, Collection<K>> map = new HashMap<GridRichNode, Collection<K>>();

    for (K key : keys) {
        Collection<GridRichNode> nodes = ctx.allNodes(key);

        for (GridRichNode node : nodes) {
            Collection<K> keyCol = map.get(node);

            if (keyCol == null)
                map.put(node, keyCol = new LinkedList<K>());

            keyCol.add(key);
        }
    }

    return map;
}
/** {@inheritDoc} */
@Override public void stop() {
    Collection<SharedDeployment> copy = new HashSet<SharedDeployment>();

    synchronized (mux) {
        for (List<SharedDeployment> deps : cache.values())
            for (SharedDeployment dep : deps) {
                // Mark undeployed.
                dep.undeploy();

                copy.add(dep);
            }

        cache.clear();
    }

    for (SharedDeployment dep : copy)
        dep.recordUndeployed(null);

    if (log.isDebugEnabled())
        log.debug(stopInfo());
}
/**
 * @param ldr Class loader to undeploy.
 * @param recEvt Whether or not to record the event.
 */
private void undeploy(ClassLoader ldr, boolean recEvt) {
    Collection<GridDeployment> doomed = new HashSet<GridDeployment>();

    synchronized (mux) {
        for (Iterator<LinkedList<GridDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            LinkedList<GridDeployment> deps = i1.next();

            for (Iterator<GridDeployment> i2 = deps.iterator(); i2.hasNext();) {
                GridDeployment dep = i2.next();

                if (dep.classLoader() == ldr) {
                    dep.undeploy();

                    i2.remove();

                    doomed.add(dep);

                    if (log.isInfoEnabled())
                        log.info("Removed undeployed class: " + dep);
                }
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    for (GridDeployment dep : doomed) {
        if (dep.isObsolete()) {
            // Resource cleanup.
            ctx.resource().onUndeployed(dep);
        }

        if (recEvt)
            recordUndeploy(dep, true);
    }
}
/**
 * Decode file charset.
 *
 * @param f File to process.
 * @return File charset.
 * @throws IOException in case of error.
 */
public static Charset decode(File f) throws IOException {
    SortedMap<String, Charset> charsets = Charset.availableCharsets();

    String[] firstCharsets = {Charset.defaultCharset().name(), "US-ASCII", "UTF-8", "UTF-16BE", "UTF-16LE"};

    Collection<Charset> orderedCharsets = U.newLinkedHashSet(charsets.size());

    // Try the most likely charsets first.
    for (String c : firstCharsets)
        if (charsets.containsKey(c))
            orderedCharsets.add(charsets.get(c));

    orderedCharsets.addAll(charsets.values());

    try (RandomAccessFile raf = new RandomAccessFile(f, "r")) {
        FileChannel ch = raf.getChannel();

        ByteBuffer buf = ByteBuffer.allocate(4096);

        ch.read(buf);

        buf.flip();

        for (Charset charset : orderedCharsets) {
            CharsetDecoder decoder = charset.newDecoder();

            decoder.reset();

            // Rewind so each decoder sees the whole sample: a failed decode
            // leaves the buffer partially consumed.
            buf.rewind();

            try {
                decoder.decode(buf);

                return charset;
            }
            catch (CharacterCodingException ignored) {
                // No-op: try next charset.
            }
        }
    }

    return Charset.defaultCharset();
}
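// Hypothetical usage sketch for decode(): open a reader over the file using
// whatever charset was detected. The helper name is illustrative only.
public static BufferedReader openWithDetectedCharset(File f) throws IOException {
    Charset cs = decode(f);

    return new BufferedReader(new InputStreamReader(new FileInputStream(f), cs));
}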
/** {@inheritDoc} */
@Override public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) {
    A.notEmpty(entries, "entries");

    enterBusy();

    try {
        GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx);

        activeFuts.add(resFut);

        resFut.listenAsync(rmvActiveFut);

        Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16);

        for (Map.Entry<K, V> entry : entries)
            keys.add(entry.getKey());

        load0(entries, resFut, keys, 0);

        return resFut;
    }
    finally {
        leaveBusy();
    }
}
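// Hypothetical usage sketch for addData(): batch a few entries and block until
// the loader flushes them. The loader parameter and names are illustrative only.
private void loadBatch(GridDataLoader<K, V> ldr, K key, V val) throws GridException {
    Collection<Map.Entry<K, V>> batch = new ArrayList<>();

    batch.add(new AbstractMap.SimpleEntry<>(key, val));

    GridFuture<?> fut = ldr.addData(batch);

    fut.get(); // Wait for the whole batch to be loaded.
}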
/** {@inheritDoc} */
@Override public void onKernalStart() throws GridException {
    discoLsnr = new GridLocalEventListener() {
        @Override public void onEvent(GridEvent evt) {
            assert evt instanceof GridDiscoveryEvent;
            assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED;

            GridDiscoveryEvent discoEvt = (GridDiscoveryEvent)evt;

            Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

            if (log.isDebugEnabled())
                log.debug("Processing node departure event: " + evt);

            synchronized (mux) {
                for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
                    List<SharedDeployment> deps = i1.next();

                    for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                        SharedDeployment dep = i2.next();

                        dep.removeParticipant(discoEvt.eventNodeId());

                        if (!dep.hasParticipants()) {
                            if (dep.deployMode() == SHARED) {
                                if (!dep.isUndeployed()) {
                                    dep.undeploy();

                                    // Undeploy.
                                    i2.remove();

                                    assert !dep.isRemoved();

                                    dep.onRemoved();

                                    undeployed.add(dep);

                                    if (log.isDebugEnabled())
                                        log.debug("Undeployed class loader as there are no participating " +
                                            "nodes: " + dep);
                                }
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Preserving deployment without node participants: " + dep);
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Keeping deployment as it still has participants: " + dep);
                    }

                    if (deps.isEmpty())
                        i1.remove();
                }
            }

            recordUndeployed(discoEvt.eventNodeId(), undeployed);
        }
    };

    ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT);

    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    synchronized (mux) {
        for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            List<SharedDeployment> deps = i1.next();

            for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                SharedDeployment dep = i2.next();

                for (UUID nodeId : dep.getParticipantNodeIds())
                    if (ctx.discovery().node(nodeId) == null)
                        dep.removeParticipant(nodeId);

                if (!dep.hasParticipants()) {
                    if (dep.deployMode() == SHARED) {
                        if (!dep.isUndeployed()) {
                            dep.undeploy();

                            // Undeploy.
                            i2.remove();

                            dep.onRemoved();

                            undeployed.add(dep);

                            if (log.isDebugEnabled())
                                log.debug("Undeployed class loader as there are no participating nodes: " + dep);
                        }
                    }
                    else if (log.isDebugEnabled())
                        log.debug("Preserving deployment without node participants: " + dep);
                }
                else if (log.isDebugEnabled())
                    log.debug("Keeping deployment as it still has participants: " + dep);
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    recordUndeployed(null, undeployed);

    if (log.isDebugEnabled())
        log.debug("Registered deployment discovery listener: " + discoLsnr);
}
/** {@inheritDoc} */ @SuppressWarnings("all") @Override public boolean readFrom(ByteBuffer buf) { commState.setBuffer(buf); if (!super.readFrom(buf)) return false; switch (commState.idx) { case 2: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (dataBytes == null) dataBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; dataBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; case 3: byte[] errBytes0 = commState.getByteArray(); if (errBytes0 == BYTE_ARR_NOT_READ) return false; errBytes = errBytes0; commState.idx++; case 4: if (buf.remaining() < 1) return false; fields = commState.getBoolean(); commState.idx++; case 5: if (buf.remaining() < 1) return false; finished = commState.getBoolean(); commState.idx++; case 6: if (commState.readSize == -1) { if (buf.remaining() < 4) return false; commState.readSize = commState.getInt(); } if (commState.readSize >= 0) { if (metaDataBytes == null) metaDataBytes = new ArrayList<>(commState.readSize); for (int i = commState.readItems; i < commState.readSize; i++) { byte[] _val = commState.getByteArray(); if (_val == BYTE_ARR_NOT_READ) return false; metaDataBytes.add((byte[]) _val); commState.readItems++; } } commState.readSize = -1; commState.readItems = 0; commState.idx++; case 7: commState.idx++; case 8: if (buf.remaining() < 8) return false; reqId = commState.getLong(); commState.idx++; } return true; }
/** {@inheritDoc} */
@Override void onQueryFutureCanceled(long reqId) {
    cancelled.add(reqId);
}
/**
 * Grabs local events and detects whether events were lost since the last poll.
 *
 * @param ignite Target grid.
 * @param evtOrderKey Unique key to take last order key from node local map.
 * @param evtThrottleCntrKey Unique key to take throttle count from node local map.
 * @param evtTypes Event types to collect.
 * @param evtMapper Closure to map grid events to Visor data transfer objects.
 * @return Collection of node events.
 */
public static Collection<VisorGridEvent> collectEvents(Ignite ignite, String evtOrderKey, String evtThrottleCntrKey,
    final int[] evtTypes, IgniteClosure<Event, VisorGridEvent> evtMapper) {
    assert ignite != null;
    assert evtTypes != null && evtTypes.length > 0;

    ConcurrentMap<String, Long> nl = ignite.cluster().nodeLocalMap();

    final long lastOrder = getOrElse(nl, evtOrderKey, -1L);
    final long throttle = getOrElse(nl, evtThrottleCntrKey, 0L);

    // When we arrive on a node for the first time to get its local events,
    // grab only events that are not older than the given period to make sure
    // we are not grabbing GBs of data accidentally.
    final long notOlderThan = System.currentTimeMillis() - EVENTS_COLLECT_TIME_WINDOW;

    // Flag for detecting gaps between events.
    final AtomicBoolean lastFound = new AtomicBoolean(lastOrder < 0);

    IgnitePredicate<Event> p = new IgnitePredicate<Event>() {
        /** */
        private static final long serialVersionUID = 0L;

        @Override public boolean apply(Event e) {
            // Detects that events were lost.
            if (!lastFound.get() && (lastOrder == e.localOrder()))
                lastFound.set(true);

            // Retains events by lastOrder, period and type.
            return e.localOrder() > lastOrder && e.timestamp() > notOlderThan && F.contains(evtTypes, e.type());
        }
    };

    Collection<Event> evts = ignite.events().localQuery(p);

    // Update latest order in node local, if not empty.
    if (!evts.isEmpty()) {
        Event maxEvt = Collections.max(evts, EVTS_ORDER_COMPARATOR);

        nl.put(evtOrderKey, maxEvt.localOrder());
    }

    // Update throttle counter.
    if (!lastFound.get())
        nl.put(evtThrottleCntrKey, throttle == 0 ? EVENTS_LOST_THROTTLE : throttle - 1);

    boolean lost = !lastFound.get() && throttle == 0;

    Collection<VisorGridEvent> res = new ArrayList<>(evts.size() + (lost ? 1 : 0));

    if (lost)
        res.add(new VisorGridEventsLost(ignite.cluster().localNode().id()));

    for (Event e : evts) {
        VisorGridEvent visorEvt = evtMapper.apply(e);

        if (visorEvt != null)
            res.add(visorEvt);
    }

    return res;
}
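// Hypothetical usage sketch for collectEvents(): gather task events as Visor
// DTOs. The node-local key names are illustrative, and the mapper body is left
// as a stub (returning null filters the event out).
private static Collection<VisorGridEvent> collectTaskEvents(Ignite ignite) {
    return collectEvents(
        ignite,
        "VISOR_EVT_ORDER",    // Node-local key with the last collected event order.
        "VISOR_EVT_THROTTLE", // Node-local key with the "events lost" throttle counter.
        new int[] {EVT_TASK_STARTED, EVT_TASK_FINISHED},
        new IgniteClosure<Event, VisorGridEvent>() {
            @Override public VisorGridEvent apply(Event e) {
                return null; // Convert e to a concrete VisorGridEvent subclass here.
            }
        });
}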
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes)" +
                                    " [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/**
 * Maps keys to nodes. Note that we can not simply group keys by nodes and send lock request as
 * such approach does not preserve order of lock acquisition. Instead, keys are split in
 * continuous groups belonging to one primary node and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext();) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();
            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() : "Got removed exception for non-obsolete entry: " +
                                        dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " +
                                            dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
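// Sketch (an assumption, not shown in this section) of the contiguous-grouping
// overload map(key, mapping, topVer) called above: a new mapping is started
// whenever the primary node for the current key differs from the node of the
// current mapping, so the original key order is preserved across groups. The
// affinity call and the GridNearLockMapping methods are assumed signatures.
private GridNearLockMapping<K, V> map(K key, @Nullable GridNearLockMapping<K, V> mapping, long topVer)
    throws GridException {
    GridNode primary = cctx.affinity().primary(key, topVer);

    if (mapping == null || !mapping.node().equals(primary))
        mapping = new GridNearLockMapping<>(primary, key);
    else
        mapping.addKey(key);

    return mapping;
}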