/** @throws Exception If failed. */
public void testCreateFileColocated() throws Exception {
    GridGgfsPath path = new GridGgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    GridUuid affKey;

    long idx = 0;

    while (true) {
        affKey = new GridUuid(uuid, idx);

        if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id()))
            break;

        idx++;
    }

    try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15M, should be enough to test distribution.
        for (int i = 0; i < 15; i++)
            out.write(new byte[1024 * 1024]);
    }

    GridGgfsFile info = fs.info(path);

    Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
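/*
 * The while-loop above finds an affinity key that maps to the local node by incrementing
 * a counter until the mapping matches. A minimal, self-contained sketch of the same
 * brute-force search against a plain hash-based bucket function (the bucket function and
 * all names here are illustrative stand-ins, not the GridGain affinity API):
 */
import java.util.*;

class AffinityKeySearchSketch {
    /** Stand-in for affinity mapping: (uuid, idx) -> bucket out of N. */
    static int bucket(UUID uuid, long idx, int buckets) {
        return Math.floorMod(Objects.hash(uuid, idx), buckets);
    }

    public static void main(String[] args) {
        UUID uuid = UUID.randomUUID();

        int target = 0; // We want a key that lands in bucket 0 ("the local node").

        long idx = 0;

        // Terminates in practice because the hash varies with idx.
        while (bucket(uuid, idx, 4) != target)
            idx++;

        System.out.println("Found idx=" + idx + " mapping to bucket " + target);
    }
}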
/** @throws Exception If failed. */
public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr = new GridClientTopologyListener() {
        @Override public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
        }

        @Override public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
        }
    };

    client.addTopologyListener(lsnr);

    try {
        Grid g = startGrid(NODES_CNT + 1);

        UUID id = g.localNode().id();

        assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, added.size());
        assertEquals(id, F.first(added));

        stopGrid(NODES_CNT + 1);

        assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

        assertEquals(1, rmvd.size());
        assertEquals(id, F.first(rmvd));
    }
    finally {
        client.removeTopologyListener(lsnr);

        stopGrid(NODES_CNT + 1);
    }
}
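/*
 * The test above relies on a common async-assertion idiom: the listener records events
 * into a shared collection and counts down a latch, while the test thread awaits the
 * latch with a timeout before asserting. A minimal, self-contained sketch of that idiom
 * using plain java.util.concurrent (no GridGain types; all names here are illustrative):
 */
import java.util.*;
import java.util.concurrent.*;

class TopologyListenerIdiomSketch {
    /** Stand-in for a client topology listener interface. */
    interface NodeAddedListener {
        void onNodeAdded(UUID nodeId);
    }

    public static void main(String[] args) throws InterruptedException {
        final Collection<UUID> added = Collections.synchronizedList(new ArrayList<UUID>());
        final CountDownLatch addedLatch = new CountDownLatch(1);

        final NodeAddedListener lsnr = new NodeAddedListener() {
            @Override public void onNodeAdded(UUID nodeId) {
                added.add(nodeId);

                addedLatch.countDown();
            }
        };

        final UUID id = UUID.randomUUID();

        // Simulate an asynchronous topology event arriving from another thread.
        new Thread(new Runnable() {
            @Override public void run() {
                lsnr.onNodeAdded(id);
            }
        }).start();

        // Await with a timeout so a missed event fails fast instead of hanging the test.
        if (!addedLatch.await(2, TimeUnit.SECONDS))
            throw new AssertionError("Node-added event was not received in time.");

        if (added.size() != 1 || !id.equals(added.iterator().next()))
            throw new AssertionError("Unexpected listener state: " + added);
    }
}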
/** @throws Exception If failed. */
public void testProjectionRun() throws Exception {
    GridClientCompute dflt = client.compute();

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
        Grid g = grid(i);

        assert g != null;

        GridClientNode clientNode = dflt.node(g.localNode().id());

        assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

        GridClientCompute prj = dflt.projection(clientNode);

        String res = prj.execute(TestTask.class.getName(), null);

        assertNotNull(res);
        assertEquals(g.localNode().id().toString(), res);
    }
}
/**
 * Check whether provided path must be excluded from evictions.
 *
 * @param path Path.
 * @return {@code True} if blocks of the file under this path must be excluded from eviction.
 * @throws GridException In case of faulty patterns.
 */
public boolean exclude(GridGgfsPath path) throws GridException {
    assert path != null;

    Collection<Pattern> excludePatterns0;

    if (excludeRecompile.compareAndSet(true, false)) {
        // Recompile.
        Collection<String> excludePaths0 = excludePaths;

        if (excludePaths0 != null) {
            excludePatterns0 = new HashSet<>(excludePaths0.size(), 1.0f);

            for (String excludePath : excludePaths0) {
                try {
                    excludePatterns0.add(Pattern.compile(excludePath));
                }
                catch (PatternSyntaxException ignore) {
                    throw new GridException("Invalid regex pattern: " + excludePath);
                }
            }

            excludePatterns = excludePatterns0;
        }
        else
            excludePatterns0 = excludePatterns = null;
    }
    else
        excludePatterns0 = excludePatterns;

    if (excludePatterns0 != null) {
        String pathStr = path.toString();

        for (Pattern pattern : excludePatterns0) {
            if (pattern.matcher(pathStr).matches())
                return true;
        }
    }

    return false;
}
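/*
 * The recompile branch above is a cheap invalidation idiom: writers flip an AtomicBoolean
 * "dirty" flag, and the first reader to win compareAndSet(true, false) rebuilds the cached
 * derived state (the compiled patterns) on behalf of everyone else. A minimal sketch of
 * the idiom under those assumptions (all names illustrative, not GridGain API; a losing
 * reader may briefly see the previous snapshot, which this pattern tolerates):
 */
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;

class LazyRecompileSketch {
    /** Set back to true by configuration writers together with the new raw paths. */
    private final AtomicBoolean dirty = new AtomicBoolean(true);

    /** Raw configuration; replaced as a whole by writers. */
    private volatile Collection<String> rawPatterns = Arrays.asList("/tmp/.*", "/logs/.*");

    /** Cached derived state, rebuilt on demand. */
    private volatile Collection<Pattern> compiled;

    Collection<Pattern> patterns() {
        // Only one thread wins the CAS and pays the recompilation cost.
        if (dirty.compareAndSet(true, false)) {
            Collection<Pattern> res = new HashSet<>();

            for (String p : rawPatterns)
                res.add(Pattern.compile(p));

            compiled = res;
        }

        return compiled;
    }

    public static void main(String[] args) {
        LazyRecompileSketch s = new LazyRecompileSketch();

        System.out.println(s.patterns().size()); // 2: first caller compiles.
        System.out.println(s.patterns().size()); // 2: cached, no recompilation.
    }
}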
/** {@inheritDoc} */
@Override protected Collection<E> dequeue0(int cnt) {
    WindowHolder tup = ref.get();

    AtomicInteger size = tup.size();
    Collection<T> evts = tup.collection();

    Collection<E> resCol = new ArrayList<>(cnt);

    while (true) {
        int curSize = size.get();

        if (curSize > 0) {
            if (size.compareAndSet(curSize, curSize - 1)) {
                E res = pollInternal(evts, tup.set());

                if (res != null) {
                    resCol.add(res);

                    if (resCol.size() >= cnt)
                        return resCol;
                }
                else {
                    size.incrementAndGet();

                    return resCol;
                }
            }
        }
        else
            return resCol;
    }
}
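/*
 * dequeue0 uses a reserve-then-poll idiom: the size counter is decremented with a CAS
 * *before* polling, and incremented back if the poll comes up empty, so the counter never
 * goes negative under concurrent dequeues. A minimal sketch of the same idiom against a
 * plain ConcurrentLinkedQueue (an assumed stand-in for the window collection):
 */
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

class ReserveThenPollSketch<E> {
    private final ConcurrentLinkedQueue<E> evts = new ConcurrentLinkedQueue<>();
    private final AtomicInteger size = new AtomicInteger();

    void enqueue(E e) {
        evts.add(e);

        size.incrementAndGet();
    }

    Collection<E> dequeue(int cnt) {
        Collection<E> res = new ArrayList<>(cnt);

        while (true) {
            int cur = size.get();

            if (cur <= 0)
                return res; // Nothing left to reserve.

            // Reserve one element before touching the underlying collection.
            if (size.compareAndSet(cur, cur - 1)) {
                E e = evts.poll();

                if (e == null) {
                    size.incrementAndGet(); // Undo the reservation.

                    return res;
                }

                res.add(e);

                if (res.size() >= cnt)
                    return res;
            }
        }
    }

    public static void main(String[] args) {
        ReserveThenPollSketch<String> q = new ReserveThenPollSketch<>();

        q.enqueue("a");
        q.enqueue("b");
        q.enqueue("c");

        System.out.println(q.dequeue(2)); // [a, b]
    }
}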
/**
 * @param cacheCtx Cache context.
 * @return {@code True} if local node can calculate affinity on its own for this partition map
 *      exchange.
 */
private boolean canCalculateAffinity(GridCacheContext cacheCtx) {
    AffinityFunction affFunc = cacheCtx.config().getAffinity();

    // Do not request affinity from remote nodes if affinity function is not centralized.
    if (!U.hasAnnotation(affFunc, AffinityCentralizedFunction.class))
        return true;

    // If local node did not initiate exchange or local node is the only cache node in grid.
    Collection<ClusterNode> affNodes = CU.affinityNodes(cacheCtx, exchId.topologyVersion());

    return !exchId.nodeId().equals(cctx.localNodeId()) ||
        (affNodes.size() == 1 && affNodes.contains(cctx.localNode()));
}
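/*
 * The first check above gates behavior on a class-level marker annotation. A minimal,
 * self-contained sketch of that technique with plain reflection (the annotation and
 * classes here are illustrative stand-ins for AffinityCentralizedFunction and the
 * affinity function implementations):
 */
import java.lang.annotation.*;

class CentralizedCheckSketch {
    @Retention(RetentionPolicy.RUNTIME)
    @interface Centralized {}

    @Centralized static class CentralizedFn {}

    static class PlainFn {}

    /** Mirrors the decision above: non-centralized functions can be computed locally. */
    static boolean canCalculateLocally(Object affFunc) {
        return !affFunc.getClass().isAnnotationPresent(Centralized.class);
    }

    public static void main(String[] args) {
        System.out.println(canCalculateLocally(new PlainFn()));       // true
        System.out.println(canCalculateLocally(new CentralizedFn())); // false
    }
}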
/**
 * Returns a compact representation of the given object: enums and primitive wrappers are
 * kept readable, collections and arrays are compacted element-wise, and any other object
 * is reduced to its compact class name.
 *
 * @param obj Object to compact.
 * @return Compact representation.
 */
@Nullable public static Object compactObject(Object obj) {
    if (obj == null)
        return null;

    if (obj instanceof Enum)
        return obj.toString();

    if (obj instanceof String || obj instanceof Boolean || obj instanceof Number)
        return obj;

    if (obj instanceof Collection) {
        Collection col = (Collection)obj;

        Object[] res = new Object[col.size()];

        int i = 0;

        for (Object elm : col)
            res[i++] = compactObject(elm);

        return res;
    }

    if (obj.getClass().isArray()) {
        Class<?> arrType = obj.getClass().getComponentType();

        if (arrType.isPrimitive()) {
            if (obj instanceof boolean[])
                return Arrays.toString((boolean[])obj);
            if (obj instanceof byte[])
                return Arrays.toString((byte[])obj);
            if (obj instanceof short[])
                return Arrays.toString((short[])obj);
            if (obj instanceof int[])
                return Arrays.toString((int[])obj);
            if (obj instanceof long[])
                return Arrays.toString((long[])obj);
            if (obj instanceof float[])
                return Arrays.toString((float[])obj);
            if (obj instanceof double[])
                return Arrays.toString((double[])obj);
        }

        Object[] arr = (Object[])obj;

        int iMax = arr.length - 1;

        StringBuilder sb = new StringBuilder("[");

        for (int i = 0; i <= iMax; i++) {
            sb.append(compactObject(arr[i]));

            if (i != iMax)
                sb.append(", ");
        }

        sb.append("]");

        return sb.toString();
    }

    return U.compact(obj.getClass().getName());
}
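/*
 * A runnable demonstration of the recursive type-dispatch technique above, reduced to a
 * few representative branches so it stands alone (this is a simplified sketch, not the
 * Visor utility itself; expected outputs are shown in comments):
 */
import java.util.*;

class CompactDemo {
    /** Simplified dispatch: primitive arrays become strings, collections become Object[],
     *  pass-through for simple values, everything else falls back to the class name. */
    static Object compact(Object obj) {
        if (obj == null || obj instanceof String || obj instanceof Number || obj instanceof Boolean)
            return obj;

        if (obj instanceof int[])
            return Arrays.toString((int[])obj);

        if (obj instanceof Collection) {
            Collection<?> col = (Collection<?>)obj;

            Object[] res = new Object[col.size()];

            int i = 0;

            for (Object e : col)
                res[i++] = compact(e); // Recurse into elements.

            return res;
        }

        return obj.getClass().getSimpleName();
    }

    public static void main(String[] args) {
        System.out.println(compact(new int[] {1, 2, 3}));         // [1, 2, 3]

        Object[] col = (Object[])compact(Arrays.asList("a", 42, new int[] {7}));

        System.out.println(Arrays.toString(col));                 // [a, 42, [7]]

        System.out.println(compact(new StringBuilder("x")));      // StringBuilder
    }
}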
/**
 * Increases priority of a job if it has been bumped down.
 *
 * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
 *      for execution.
 * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
 */
private void bumpPriority(Collection<GridCollisionJobContext> waitJobs,
    List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
        GridCollisionJobContext ctx = passiveJobs.get(i);

        if (i > indexOf(waitJobs, ctx))
            ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
}
/**
 * @param cctx Registry.
 * @param keys Keys to lock.
 * @param tx Transaction.
 * @param read Read flag.
 * @param retval Flag to return value or not.
 * @param timeout Lock acquisition timeout.
 * @param filter Filter.
 */
public GridNearLockFuture(
    GridCacheContext<K, V> cctx,
    Collection<? extends K> keys,
    @Nullable GridNearTxLocal<K, V> tx,
    boolean read,
    boolean retval,
    long timeout,
    GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());

    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
        timeoutObj = new LockTimeoutObject();

        cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
}
/** @throws Exception If failed. */
public void testAffinityExecute() throws Exception {
    GridClientCompute dflt = client.compute();

    GridClientData data = client.data(PARTITIONED_CACHE_NAME);

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
        Grid g = grid(i);

        assert g != null;

        int affinityKey = -1;

        for (int key = 0; key < 10000; key++) {
            if (g.localNode().id().equals(data.affinity(key))) {
                affinityKey = key;

                break;
            }
        }

        if (affinityKey == -1)
            throw new Exception("Unable to find key for which node is primary: " + g.localNode().id());

        GridClientNode clientNode = dflt.node(g.localNode().id());

        assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

        String res = dflt.affinityExecute(TestTask.class.getName(), PARTITIONED_CACHE_NAME, affinityKey, null);

        assertNotNull(res);
        assertEquals(g.localNode().id().toString(), res);
    }
}
/** {@inheritDoc} */
@Override public void onCollision(Collection<GridCollisionJobContext> waitJobs,
    Collection<GridCollisionJobContext> activeJobs) {
    assert waitJobs != null;
    assert activeJobs != null;

    int activeSize = F.size(activeJobs, RUNNING_JOBS);

    waitingCnt.set(waitJobs.size());
    runningCnt.set(activeSize);
    heldCnt.set(activeJobs.size() - activeSize);

    int waitSize = waitJobs.size();

    int activateCnt = parallelJobsNum - activeSize;

    if (activateCnt > 0 && !waitJobs.isEmpty()) {
        if (waitJobs.size() <= activateCnt) {
            for (GridCollisionJobContext waitJob : waitJobs) {
                waitJob.activate();

                waitSize--;
            }
        }
        else {
            List<GridCollisionJobContext> passiveList = new ArrayList<GridCollisionJobContext>(waitJobs);

            Collections.sort(passiveList, new Comparator<GridCollisionJobContext>() {
                /** {@inheritDoc} */
                @Override public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                    int p1 = getJobPriority(o1);
                    int p2 = getJobPriority(o2);

                    return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
                }
            });

            if (preventStarvation)
                bumpPriority(waitJobs, passiveList);

            for (int i = 0; i < activateCnt; i++) {
                passiveList.get(i).activate();

                waitSize--;
            }
        }
    }

    if (waitSize > waitJobsNum) {
        List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs);

        // Put jobs with highest priority first.
        Collections.sort(waitList, new Comparator<GridCollisionJobContext>() {
            /** {@inheritDoc} */
            @Override public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                int p1 = getJobPriority(o1);
                int p2 = getJobPriority(o2);

                return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
            }
        });

        int skip = waitJobs.size() - waitSize;

        int i = 0;

        for (GridCollisionJobContext waitCtx : waitList) {
            if (++i >= skip) {
                waitCtx.cancel();

                if (--waitSize <= waitJobsNum)
                    break;
            }
        }
    }
}
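/*
 * The descending-priority comparator above appears twice; the core activation rule is:
 * sort waiting jobs by priority, highest first, and activate the first
 * (parallelJobsNum - activeSize) of them. A minimal sketch of that selection step with
 * plain Java types (the Job class and its priority field are illustrative stand-ins):
 */
import java.util.*;

class PrioritySelectionSketch {
    static class Job {
        final String name;
        final int priority;

        Job(String name, int priority) {
            this.name = name;
            this.priority = priority;
        }
    }

    /** Picks the jobs to activate: highest priority first, up to the free slot count. */
    static List<Job> selectForActivation(List<Job> waiting, int parallelJobsNum, int activeSize) {
        List<Job> passive = new ArrayList<>(waiting);

        // Descending by priority: higher priority sorts first, mirroring the SPI comparator.
        Collections.sort(passive, new Comparator<Job>() {
            @Override public int compare(Job o1, Job o2) {
                return o1.priority < o2.priority ? 1 : o1.priority == o2.priority ? 0 : -1;
            }
        });

        int activateCnt = Math.max(0, parallelJobsNum - activeSize);

        return passive.subList(0, Math.min(activateCnt, passive.size()));
    }

    public static void main(String[] args) {
        List<Job> waiting = Arrays.asList(new Job("a", 1), new Job("b", 5), new Job("c", 3));

        for (Job j : selectForActivation(waiting, 4, 2))
            System.out.println(j.name); // Prints "b" then "c": two free slots, top priorities.
    }
}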
/** @throws Exception If failed. */
public void testEmptyProjections() throws Exception {
    final GridClientCompute dflt = client.compute();

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    Iterator<? extends GridClientNode> iter = nodes.iterator();

    final GridClientCompute singleNodePrj = dflt.projection(Collections.singletonList(iter.next()));

    final GridClientNode second = iter.next();

    final GridClientPredicate<GridClientNode> noneFilter = new GridClientPredicate<GridClientNode>() {
        @Override public boolean apply(GridClientNode node) {
            return false;
        }
    };

    final GridClientPredicate<GridClientNode> targetFilter = new GridClientPredicate<GridClientNode>() {
        @Override public boolean apply(GridClientNode node) {
            return node.nodeId().equals(second.nodeId());
        }
    };

    GridTestUtils.assertThrows(log(), new Callable<Object>() {
        @Override public Object call() throws Exception {
            return dflt.projection(noneFilter).log(-1, -1);
        }
    }, GridServerUnreachableException.class, null);

    GridTestUtils.assertThrows(log(), new Callable<Object>() {
        @Override public Object call() throws Exception {
            return singleNodePrj.projection(second);
        }
    }, GridClientException.class, null);

    GridTestUtils.assertThrows(log(), new Callable<Object>() {
        @Override public Object call() throws Exception {
            return singleNodePrj.projection(targetFilter);
        }
    }, GridClientException.class, null);
}
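/*
 * GridTestUtils.assertThrows above follows the classic pattern: run a Callable, fail if
 * it completes normally, pass only if it throws the expected type. A minimal stand-alone
 * version of such a helper (a hypothetical sketch, not GridGain's actual implementation):
 */
import java.util.concurrent.Callable;

class AssertThrowsSketch {
    static void assertThrows(Callable<?> c, Class<? extends Throwable> expected) {
        try {
            c.call();
        }
        catch (Throwable t) {
            if (expected.isInstance(t))
                return; // Expected failure: assertion passes.

            throw new AssertionError("Unexpected exception type: " + t, t);
        }

        throw new AssertionError("Expected " + expected.getSimpleName() + " was not thrown.");
    }

    public static void main(String[] args) {
        assertThrows(new Callable<Object>() {
            @Override public Object call() {
                throw new IllegalStateException("boom");
            }
        }, IllegalStateException.class);

        System.out.println("assertThrows passed.");
    }
}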
/**
 * Maps keys to nodes. Note that we cannot simply group keys by nodes and send lock requests as
 * such approach does not preserve order of lock acquisition. Instead, keys are split in
 * continuous groups belonging to one primary node and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 */
private void map(Iterable<? extends K> keys) {
    try {
        GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

        assert snapshot != null;

        long topVer = snapshot.topologyVersion();

        assert topVer > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new GridTopologyException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

        // Assign keys to primary nodes.
        GridNearLockMapping<K, V> map = null;

        for (K key : keys) {
            GridNearLockMapping<K, V> updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map)
                mappings.add(updated);

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        // Create mini futures.
        for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping<K, V> mapping = iter.next();

            GridNode node = mapping.node();
            Collection<K> mappedKeys = mapping.mappedKeys();

            assert !mappedKeys.isEmpty();

            GridNearLockRequest<K, V> req = null;

            Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

            boolean explicit = false;

            for (K key : mappedKeys) {
                while (true) {
                    GridNearCacheEntry<K, V> entry = null;

                    try {
                        entry = cctx.near().entryExx(key, topVer);

                        if (!cctx.isAll(entry.wrap(false), filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        // Removed exception may be thrown here.
                        GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

                        if (isDone()) {
                            if (log.isDebugEnabled())
                                log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                    "[fut=" + this + ", entry=" + entry + ']');

                            return;
                        }

                        if (cand != null) {
                            if (tx == null && !cand.reentry())
                                cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                            GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                            if (val == null) {
                                GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                                try {
                                    if (dhtEntry != null)
                                        val = dhtEntry.versionedValue(topVer);
                                }
                                catch (GridCacheEntryRemovedException ignored) {
                                    assert dhtEntry.obsolete() : "Got removed exception for non-obsolete entry: " + dhtEntry;

                                    if (log.isDebugEnabled())
                                        log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                                }
                            }

                            GridCacheVersion dhtVer = null;

                            if (val != null) {
                                dhtVer = val.get1();

                                valMap.put(key, val);
                            }

                            if (!cand.reentry()) {
                                if (req == null) {
                                    req = new GridNearLockRequest<>(
                                        topVer,
                                        cctx.nodeId(),
                                        threadId,
                                        futId,
                                        lockVer,
                                        inTx(),
                                        implicitTx(),
                                        implicitSingleTx(),
                                        read,
                                        isolation(),
                                        isInvalidate(),
                                        timeout,
                                        syncCommit(),
                                        syncRollback(),
                                        mappedKeys.size(),
                                        inTx() ? tx.size() : mappedKeys.size(),
                                        inTx() ? tx.groupLockKey() : null,
                                        inTx() && tx.partitionLock(),
                                        inTx() ? tx.subjectId() : null);

                                    mapping.request(req);
                                }

                                distributedKeys.add(key);

                                GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                                if (tx != null)
                                    tx.addKeyMapping(key, mapping.node());

                                req.addKeyBytes(
                                    key,
                                    node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                                    retval && dhtVer == null,
                                    dhtVer, // Include DHT version to match remote DHT entry.
                                    writeEntry,
                                    inTx() ? tx.entry(key).drVersion() : null,
                                    cctx);

                                // Clear transfer required flag since we are sending message.
                                if (writeEntry != null)
                                    writeEntry.transferRequired(false);
                            }

                            if (cand.reentry())
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                        }
                        else
                            // Ignore reentries within transactions.
                            explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                        if (explicit)
                            tx.addKeyMapping(key, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty())
                mapping.distributedKeys(distributedKeys);
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping(mappings);
    }
    catch (GridException ex) {
        onError(ex);
    }
}
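/*
 * The Javadoc above stresses that keys are split into *contiguous* runs per primary node
 * rather than grouped into one batch per node, so lock acquisition order is preserved.
 * A minimal sketch of that splitting step (the owner map is an illustrative stand-in for
 * affinity-based primary node resolution):
 */
import java.util.*;

class ContiguousMappingSketch {
    /** One run of consecutive keys owned by the same node. */
    static class Mapping {
        final String node;
        final List<Integer> keys = new ArrayList<>();

        Mapping(String node) {
            this.node = node;
        }
    }

    /** Splits keys into contiguous per-node runs, preserving the original key order. */
    static List<Mapping> map(Iterable<Integer> keys, Map<Integer, String> owner) {
        List<Mapping> mappings = new ArrayList<>();

        Mapping cur = null;

        for (Integer key : keys) {
            String node = owner.get(key);

            // Start a new run whenever the primary node changes.
            if (cur == null || !cur.node.equals(node)) {
                cur = new Mapping(node);

                mappings.add(cur);
            }

            cur.keys.add(key);
        }

        return mappings;
    }

    public static void main(String[] args) {
        Map<Integer, String> owner = new HashMap<>();

        owner.put(1, "A");
        owner.put(2, "A");
        owner.put(3, "B");
        owner.put(4, "A");

        // Keys 1,2 -> A; key 3 -> B; key 4 -> A again as a *separate* run:
        // grouping all A-keys together would acquire the lock on key 4 before key 3.
        for (Mapping m : map(Arrays.asList(1, 2, 3, 4), owner))
            System.out.println(m.node + " -> " + m.keys);
    }
}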
/**
 * Grabs local events and detects whether events were lost since the last poll.
 *
 * @param ignite Target grid.
 * @param evtOrderKey Unique key to take last order key from node local map.
 * @param evtThrottleCntrKey Unique key to take throttle count from node local map.
 * @param evtTypes Event types to collect.
 * @param evtMapper Closure to map grid events to Visor data transfer objects.
 * @return Collection of node events.
 */
public static Collection<VisorGridEvent> collectEvents(Ignite ignite, String evtOrderKey, String evtThrottleCntrKey,
    final int[] evtTypes, IgniteClosure<Event, VisorGridEvent> evtMapper) {
    assert ignite != null;
    assert evtTypes != null && evtTypes.length > 0;

    ConcurrentMap<String, Long> nl = ignite.cluster().nodeLocalMap();

    final long lastOrder = getOrElse(nl, evtOrderKey, -1L);
    final long throttle = getOrElse(nl, evtThrottleCntrKey, 0L);

    // When we arrive at a node for the first time to get its local events,
    // grab only those events that are not older than the given period to make sure
    // we are not grabbing GBs of data accidentally.
    final long notOlderThan = System.currentTimeMillis() - EVENTS_COLLECT_TIME_WINDOW;

    // Flag for detecting gaps between events.
    final AtomicBoolean lastFound = new AtomicBoolean(lastOrder < 0);

    IgnitePredicate<Event> p = new IgnitePredicate<Event>() {
        /** */
        private static final long serialVersionUID = 0L;

        @Override public boolean apply(Event e) {
            // Detects that events were lost.
            if (!lastFound.get() && (lastOrder == e.localOrder()))
                lastFound.set(true);

            // Retains events by lastOrder, period and type.
            return e.localOrder() > lastOrder && e.timestamp() > notOlderThan && F.contains(evtTypes, e.type());
        }
    };

    Collection<Event> evts = ignite.events().localQuery(p);

    // Update latest order in node local map, if not empty.
    if (!evts.isEmpty()) {
        Event maxEvt = Collections.max(evts, EVTS_ORDER_COMPARATOR);

        nl.put(evtOrderKey, maxEvt.localOrder());
    }

    // Update throttle counter.
    if (!lastFound.get())
        nl.put(evtThrottleCntrKey, throttle == 0 ? EVENTS_LOST_THROTTLE : throttle - 1);

    boolean lost = !lastFound.get() && throttle == 0;

    Collection<VisorGridEvent> res = new ArrayList<>(evts.size() + (lost ? 1 : 0));

    if (lost)
        res.add(new VisorGridEventsLost(ignite.cluster().localNode().id()));

    for (Event e : evts) {
        VisorGridEvent visorEvt = evtMapper.apply(e);

        if (visorEvt != null)
            res.add(visorEvt);
    }

    return res;
}
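/*
 * The gap-detection idiom above: remember the order of the last event seen, then on the
 * next poll check that this exact order value still exists in the event buffer. If it
 * does not, the buffer has wrapped and events were lost. A minimal sketch against a
 * plain list of ordered events (all names here are illustrative):
 */
import java.util.*;

class EventGapDetectionSketch {
    static class Evt {
        final long order;

        Evt(long order) {
            this.order = order;
        }
    }

    /** Returns events newer than lastOrder; flags a gap if lastOrder was evicted. */
    static List<Evt> pollSince(List<Evt> buf, long lastOrder, boolean[] lostOut) {
        boolean lastFound = lastOrder < 0; // First poll: nothing to anchor on, no gap.

        List<Evt> res = new ArrayList<>();

        for (Evt e : buf) {
            if (!lastFound && e.order == lastOrder)
                lastFound = true; // Anchor event still present: no gap.

            if (e.order > lastOrder)
                res.add(e);
        }

        lostOut[0] = !lastFound;

        return res;
    }

    public static void main(String[] args) {
        // Buffer where events with orders 0..4 were already evicted; caller last saw order 2.
        List<Evt> buf = Arrays.asList(new Evt(5), new Evt(6), new Evt(7));

        boolean[] lost = new boolean[1];

        List<Evt> evts = pollSince(buf, 2, lost);

        System.out.println("got=" + evts.size() + ", lost=" + lost[0]); // got=3, lost=true
    }
}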