/** {@inheritDoc} */
@Override public Map<K, V> peekAll(@Nullable Collection<? extends K> keys,
    @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty())
        return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    if (map.size() + skipped.size() != keys.size()) {
        map.putAll(dht.peekAll(F.view(keys, new P1<K>() {
            @Override public boolean apply(K k) {
                return !map.containsKey(k) && !skipped.contains(k);
            }
        }), filter));
    }

    return map;
}
/** {@inheritDoc} */
@Override public void explicitUndeploy(UUID nodeId, String rsrcName) {
    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    synchronized (mux) {
        for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            List<SharedDeployment> deps = i1.next();

            for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                SharedDeployment dep = i2.next();

                if (dep.hasName(rsrcName)) {
                    if (!dep.isUndeployed()) {
                        dep.undeploy();

                        dep.onRemoved();

                        // Undeploy.
                        i2.remove();

                        undeployed.add(dep);

                        if (log.isInfoEnabled())
                            log.info("Undeployed per-version class loader: " + dep);
                    }

                    break;
                }
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    recordUndeployed(null, undeployed);
}
/** {@inheritDoc} */
@Override public Map<?, ?> waitForAttributes(Collection<?> keys, long timeout)
    throws InterruptedException {
    A.notNull(keys, "keys");

    if (keys.isEmpty())
        return Collections.emptyMap();

    if (timeout == 0)
        timeout = Long.MAX_VALUE;

    long now = System.currentTimeMillis();

    // Prevent overflow.
    long end = now + timeout < 0 ? Long.MAX_VALUE : now + timeout;

    // Don't wait longer than session timeout.
    if (end > endTime)
        end = endTime;

    synchronized (mux) {
        while (!closed && !attrs.keySet().containsAll(keys) && now < end) {
            mux.wait(end - now);

            now = System.currentTimeMillis();
        }

        if (closed)
            throw new InterruptedException("Session was closed: " + this);

        Map<Object, Object> retVal = new HashMap<Object, Object>(keys.size());

        for (Object key : keys)
            retVal.put(key, attrs.get(key));

        return retVal;
    }
}
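The overflow guard above is easy to get wrong, so here is a minimal standalone sketch of the same deadline-clamping idiom (class and variable names are illustrative, not from the surrounding class):

public class DeadlineClampDemo {
    public static void main(String[] args) {
        long timeout = Long.MAX_VALUE; // Caller passed 0, meaning "wait forever".
        long now = System.currentTimeMillis();

        long naive = now + timeout; // Overflows to a negative value.

        // Clamp to Long.MAX_VALUE on overflow, as waitForAttributes() does.
        long end = naive < 0 ? Long.MAX_VALUE : naive;

        System.out.println(naive < 0);             // true
        System.out.println(end == Long.MAX_VALUE); // true
    }
}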
/** {@inheritDoc} */
@Override public Map<K, V> peekAll(@Nullable Collection<? extends K> keys,
    @Nullable Collection<GridCachePeekMode> modes) throws GridException {
    if (keys == null || keys.isEmpty())
        return emptyMap();

    // Guard against dereferencing 'modes', which is declared @Nullable.
    boolean partitionedOnly = modes != null && modes.contains(PARTITIONED_ONLY);
    boolean nearOnly = modes != null && modes.contains(NEAR_ONLY);

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = !partitionedOnly ?
        peekAll0(keys, modes, ctx.tm().localTxx(), skipped) :
        new GridLeanMap<K, V>(0);

    if (map.size() != keys.size() && !nearOnly) {
        map.putAll(dht.peekAll(F.view(keys, new P1<K>() {
            @Override public boolean apply(K k) {
                return !map.containsKey(k) && !skipped.contains(k);
            }
        }), modes));
    }

    return map;
}
/**
 * The added suite will always be executed locally, but in parallel with other locally or
 * remotely running tests. This comes in handy for tests that cannot be distributed for some
 * environmental reasons, but would still benefit from parallel execution.
 * <p>
 * Note that local suites will be executed on the local node even if grid topology only allows
 * remote nodes.
 *
 * @param localSuite Test to execute locally in parallel with other local or distributed tests.
 */
@SuppressWarnings({"TypeMayBeWeakened"})
public void addTest(GridJunit3LocalTestSuite localSuite) {
    if (!locTests.contains(localSuite.getName()))
        locTests.add(localSuite.getName());

    addTest((Test)localSuite);
}
/** {@inheritDoc} */
@Override public Collection<UUID> nodeIds() {
    Collection<UUID> ids = new GridLeanSet<UUID>();

    ids.add(cctx.nodeId());

    ids.addAll(mappings.keySet());

    return ids;
}
/**
 * Removes locks regardless of whether they are owned or not for given version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    try {
        // Send request to remove from remote nodes.
        GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

        req.version(ver);

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekexx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                // If the local node is the only node in this lock's topology,
                                // there is no reason to distribute the request.
                                if (nodes.isEmpty())
                                    continue;

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" +
                            ver + ", entry=" + entry + ']');
                }
            }
        }

        if (nodes.isEmpty())
            return;

        req.completedVersions(ctx.tm().committedVersions(ver), ctx.tm().rolledbackVersions(ver));

        if (!req.keyBytes().isEmpty())
            // We don't wait for reply to this message.
            ctx.io().safeSend(nodes, req, null);
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/** {@inheritDoc} */
@Override public Collection<GridDeployment> getDeployments() {
    Collection<GridDeployment> deps = new LinkedList<GridDeployment>();

    synchronized (mux) {
        for (List<SharedDeployment> list : cache.values())
            for (SharedDeployment d : list)
                deps.add(d);
    }

    return deps;
}
/** @param siblings Siblings. */
public void addJobSiblings(Collection<GridJobSibling> siblings) {
    assert isTaskNode();

    synchronized (mux) {
        Collection<GridJobSibling> tmp = new ArrayList<GridJobSibling>(this.siblings);

        tmp.addAll(siblings);

        this.siblings = Collections.unmodifiableCollection(tmp);
    }
}
/** {@inheritDoc} */
@Override public Collection<GridDeployment> getDeployments() {
    Collection<GridDeployment> deps = new ArrayList<GridDeployment>();

    synchronized (mux) {
        for (List<GridDeployment> depList : cache.values())
            for (GridDeployment d : depList)
                if (!deps.contains(d))
                    deps.add(d);

        return deps;
    }
}
/** {@inheritDoc} */
@Override public V peek(K key, @Nullable Collection<GridCachePeekMode> modes) throws GridException {
    V val = null;

    // Guard against dereferencing 'modes', which is declared @Nullable.
    boolean partitionedOnly = modes != null && modes.contains(PARTITIONED_ONLY);
    boolean nearOnly = modes != null && modes.contains(NEAR_ONLY);

    if (!partitionedOnly) {
        try {
            val = peek0(true, key, modes, ctx.tm().txx());
        }
        catch (GridCacheFilterFailedException ignored) {
            if (log.isDebugEnabled())
                log.debug("Filter validation failed for key: " + key);

            return null;
        }
    }

    return val == null && !nearOnly ? dht.peek(key, modes) : val;
}
/** {@inheritDoc} */
@Override public Map<GridRichNode, Collection<K>> mapKeysToNodes(Collection<? extends K> keys) {
    Map<GridRichNode, Collection<K>> map = new HashMap<GridRichNode, Collection<K>>();

    for (K key : keys) {
        Collection<GridRichNode> nodes = ctx.allNodes(key);

        for (GridRichNode node : nodes) {
            Collection<K> keyCol = map.get(node);

            if (keyCol == null)
                map.put(node, keyCol = new LinkedList<K>());

            keyCol.add(key);
        }
    }

    return map;
}
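mapKeysToNodes builds a node-to-keys inverse index over the affinity results. A self-contained sketch of the same grouping idiom with plain strings and a stubbed affinity function (all names hypothetical):

import java.util.*;

public class GroupKeysByNode {
    /** Groups each key under every node that the stubbed affinity map returns for it. */
    static Map<String, Collection<String>> mapKeysToNodes(Collection<String> keys,
        Map<String, List<String>> affinity) {
        Map<String, Collection<String>> map = new HashMap<>();

        for (String key : keys)
            for (String node : affinity.getOrDefault(key, List.of()))
                map.computeIfAbsent(node, n -> new LinkedList<>()).add(key);

        return map;
    }

    public static void main(String[] args) {
        Map<String, List<String>> affinity = Map.of(
            "k1", List.of("nodeA", "nodeB"),
            "k2", List.of("nodeB"));

        // Prints {nodeA=[k1], nodeB=[k1, k2]} (map order may vary).
        System.out.println(mapKeysToNodes(List.of("k1", "k2"), affinity));
    }
}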
/** {@inheritDoc} */
@Override public void stop() {
    Collection<SharedDeployment> copy = new HashSet<SharedDeployment>();

    synchronized (mux) {
        for (List<SharedDeployment> deps : cache.values())
            for (SharedDeployment dep : deps) {
                // Mark undeployed.
                dep.undeploy();

                copy.add(dep);
            }

        cache.clear();
    }

    for (SharedDeployment dep : copy)
        dep.recordUndeployed(null);

    if (log.isDebugEnabled())
        log.debug(stopInfo());
}
/**
 * @param ldr Class loader to undeploy.
 * @param recEvt Whether or not to record the event.
 */
private void undeploy(ClassLoader ldr, boolean recEvt) {
    Collection<GridDeployment> doomed = new HashSet<GridDeployment>();

    synchronized (mux) {
        for (Iterator<LinkedList<GridDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            LinkedList<GridDeployment> deps = i1.next();

            for (Iterator<GridDeployment> i2 = deps.iterator(); i2.hasNext();) {
                GridDeployment dep = i2.next();

                if (dep.classLoader() == ldr) {
                    dep.undeploy();

                    i2.remove();

                    doomed.add(dep);

                    if (log.isInfoEnabled())
                        log.info("Removed undeployed class: " + dep);
                }
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    for (GridDeployment dep : doomed) {
        if (dep.isObsolete())
            // Resource cleanup.
            ctx.resource().onUndeployed(dep);

        if (recEvt)
            recordUndeploy(dep, true);
    }
}
/**
 * Adds a test to be executed on the grid. In case of a test suite, all tests inside the suite
 * will be executed sequentially on some remote node.
 *
 * @param test Test to add.
 */
@Override public void addTest(Test test) {
    if (copy == null)
        copy = new TestSuite(getName());

    // Add test to the list of local ones.
    if (test instanceof GridJunit3LocalTestSuite) {
        String testName = ((TestSuite)test).getName();

        if (!locTests.contains(testName))
            locTests.add(testName);
    }

    if (test instanceof GridJunit3TestSuite) {
        copy.addTest(((GridJunit3TestSuite)test).copy);

        super.addTest(new GridJunit3TestSuiteProxy(((GridJunit3TestSuite)test).copy, factory));
    }
    else if (test instanceof GridJunit3TestSuiteProxy) {
        copy.addTest(((GridJunit3TestSuiteProxy)test).getOriginal());

        super.addTest(test);
    }
    else if (test instanceof GridJunit3TestCaseProxy) {
        copy.addTest(((GridJunit3TestCaseProxy)test).getGridGainJunit3OriginalTestCase());

        super.addTest(test);
    }
    else if (test instanceof TestSuite) {
        copy.addTest(test);

        super.addTest(new GridJunit3TestSuiteProxy((TestSuite)test, factory));
    }
    else {
        assert test instanceof TestCase : "Test must be either instance of TestSuite or TestCase: " + test;

        copy.addTest(test);

        super.addTest(factory.createProxy((TestCase)test));
    }
}
/**
 * @param meta Request metadata.
 * @return {@code True} if class loader is obsolete.
 */
private boolean isDeadClassLoader(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    // Callers are expected to already hold 'mux' (see assert above); the reentrant
    // synchronized block keeps this method safe when assertions are disabled.
    synchronized (mux) {
        if (deadClsLdrs.contains(meta.classLoaderId())) {
            if (log.isDebugEnabled())
                log.debug("Ignoring request for obsolete class loader: " + meta);

            return true;
        }

        return false;
    }
}
/**
 * Increases priority if a job has been bumped down.
 *
 * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
 *      for execution.
 * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
 */
private void bumpPriority(Collection<GridCollisionJobContext> waitJobs,
    List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
        GridCollisionJobContext ctx = passiveJobs.get(i);

        // If the job ended up further back in the priority-ordered list than its
        // original position, bump its priority by the starvation increment.
        if (i > indexOf(waitJobs, ctx))
            ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
}
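To make the bump condition concrete, here is a runnable toy example: a job that sorts further back in the priority-ordered list than its arrival position in the wait list is considered starved and gets bumped (job names and the increment are illustrative):

import java.util.List;

public class StarvationBumpDemo {
    public static void main(String[] args) {
        List<String> waitJobs = List.of("A", "B", "C");    // Arrival order: A came first.
        List<String> passiveJobs = List.of("B", "C", "A"); // Priority order: A sorted last.

        int starvationInc = 1;

        for (int i = 0; i < passiveJobs.size(); i++) {
            String job = passiveJobs.get(i);

            // Moved back relative to arrival position => job is being starved.
            if (i > waitJobs.indexOf(job))
                System.out.println(job + ": bump priority by " + starvationInc);
        }
        // Prints only "A: bump priority by 1" (B and C moved forward or stayed put).
    }
}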
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(),
                        Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes)" +
                                    " [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/** {@inheritDoc} */
@Override public void onKernalStart() throws GridException {
    discoLsnr = new GridLocalEventListener() {
        @Override public void onEvent(GridEvent evt) {
            assert evt instanceof GridDiscoveryEvent;
            assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED;

            GridDiscoveryEvent discoEvt = (GridDiscoveryEvent)evt;

            Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

            if (log.isDebugEnabled())
                log.debug("Processing node departure event: " + evt);

            synchronized (mux) {
                for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
                    List<SharedDeployment> deps = i1.next();

                    for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                        SharedDeployment dep = i2.next();

                        dep.removeParticipant(discoEvt.eventNodeId());

                        if (!dep.hasParticipants()) {
                            if (dep.deployMode() == SHARED) {
                                if (!dep.isUndeployed()) {
                                    dep.undeploy();

                                    // Undeploy.
                                    i2.remove();

                                    assert !dep.isRemoved();

                                    dep.onRemoved();

                                    undeployed.add(dep);

                                    if (log.isDebugEnabled())
                                        log.debug("Undeployed class loader as there are no participating " +
                                            "nodes: " + dep);
                                }
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Preserving deployment without node participants: " + dep);
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Keeping deployment as it still has participants: " + dep);
                    }

                    if (deps.isEmpty())
                        i1.remove();
                }
            }

            recordUndeployed(discoEvt.eventNodeId(), undeployed);
        }
    };

    ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT);

    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    // Clean up participants for nodes that may have left before the listener was registered.
    synchronized (mux) {
        for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            List<SharedDeployment> deps = i1.next();

            for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                SharedDeployment dep = i2.next();

                for (UUID nodeId : dep.getParticipantNodeIds())
                    if (ctx.discovery().node(nodeId) == null)
                        dep.removeParticipant(nodeId);

                if (!dep.hasParticipants()) {
                    if (dep.deployMode() == SHARED) {
                        if (!dep.isUndeployed()) {
                            dep.undeploy();

                            // Undeploy.
                            i2.remove();

                            dep.onRemoved();

                            undeployed.add(dep);

                            if (log.isDebugEnabled())
                                log.debug("Undeployed class loader as there are no participating nodes: " + dep);
                        }
                    }
                    else if (log.isDebugEnabled())
                        log.debug("Preserving deployment without node participants: " + dep);
                }
                else if (log.isDebugEnabled())
                    log.debug("Keeping deployment as it still has participants: " + dep);
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    recordUndeployed(null, undeployed);

    if (log.isDebugEnabled())
        log.debug("Registered deployment discovery listener: " + discoLsnr);
}
/**
 * See <a href="http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html">
 * http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html</a> for more
 * information.
 *
 * @param str Virtual name by which the class is registered as a {@code startupClass} in the
 *      {@code config.xml} file.
 * @param params A hashtable that is made up of the name-value pairs supplied from the
 *      {@code startupArgs} property.
 * @return Result string (log message).
 * @throws Exception Thrown if an error occurred.
 */
@SuppressWarnings({"unchecked", "CatchGenericClass"})
@Override public String startup(String str, Hashtable params) throws Exception {
    GridLogger log = new GridJavaLogger(LoggingHelper.getServerLogger());

    cfgFile = (String)params.get(cfgFilePathParam);

    if (cfgFile == null)
        throw new IllegalArgumentException("Failed to read property: " + cfgFilePathParam);

    String workMgrName = (String)params.get(workMgrParam);

    URL cfgUrl = U.resolveGridGainUrl(cfgFile);

    if (cfgUrl == null)
        throw new ServerLifecycleException("Failed to find Spring configuration file (path provided should be " +
            "either absolute, relative to GRIDGAIN_HOME, or relative to META-INF folder): " + cfgFile);

    GenericApplicationContext springCtx;

    try {
        springCtx = new GenericApplicationContext();

        XmlBeanDefinitionReader xmlReader = new XmlBeanDefinitionReader(springCtx);

        xmlReader.loadBeanDefinitions(new UrlResource(cfgUrl));

        springCtx.refresh();
    }
    catch (BeansException e) {
        throw new ServerLifecycleException("Failed to instantiate Spring XML application context: " +
            e.getMessage(), e);
    }

    Map cfgMap;

    try {
        // Note: Spring is not generics-friendly.
        cfgMap = springCtx.getBeansOfType(GridConfiguration.class);
    }
    catch (BeansException e) {
        throw new ServerLifecycleException("Failed to instantiate bean [type=" + GridConfiguration.class +
            ", err=" + e.getMessage() + ']', e);
    }

    if (cfgMap == null)
        throw new ServerLifecycleException("Failed to find a single grid factory configuration in: " + cfgUrl);

    if (cfgMap.isEmpty())
        throw new ServerLifecycleException("Can't find grid factory configuration in: " + cfgUrl);

    try {
        ExecutorService execSvc = null;

        MBeanServer mbeanSrv = null;

        for (GridConfiguration cfg : (Collection<GridConfiguration>)cfgMap.values()) {
            assert cfg != null;

            GridConfigurationAdapter adapter = new GridConfigurationAdapter(cfg);

            // Set logger.
            if (cfg.getGridLogger() == null)
                adapter.setGridLogger(log);

            if (cfg.getExecutorService() == null) {
                if (execSvc == null)
                    execSvc = workMgrName != null ?
                        new GridThreadWorkManagerExecutor(workMgrName) :
                        new GridThreadWorkManagerExecutor(J2EEWorkManager.getDefault());

                adapter.setExecutorService(execSvc);
            }

            if (cfg.getMBeanServer() == null) {
                if (mbeanSrv == null) {
                    InitialContext ctx = null;

                    try {
                        ctx = new InitialContext();

                        mbeanSrv = (MBeanServer)ctx.lookup("java:comp/jmx/runtime");
                    }
                    catch (Exception e) {
                        throw new IllegalArgumentException("MBean server was not provided and failed to obtain " +
                            "Weblogic MBean server.", e);
                    }
                    finally {
                        if (ctx != null)
                            ctx.close();
                    }
                }

                adapter.setMBeanServer(mbeanSrv);
            }

            Grid grid = G.start(adapter, springCtx);

            // Check that grid is not null, i.e. that it started properly.
            if (grid != null)
                gridNames.add(grid.name());
        }

        return getClass().getSimpleName() + " started successfully.";
    }
    catch (GridException e) {
        // Stop started grids only.
        for (String name : gridNames)
            G.stop(name, true);

        throw new ServerLifecycleException("Failed to start GridGain.", e);
    }
}
/** {@inheritDoc} */
@Override public void onCollision(Collection<GridCollisionJobContext> waitJobs,
    Collection<GridCollisionJobContext> activeJobs) {
    assert waitJobs != null;
    assert activeJobs != null;

    int activeSize = F.size(activeJobs, RUNNING_JOBS);

    waitingCnt.set(waitJobs.size());
    runningCnt.set(activeSize);
    heldCnt.set(activeJobs.size() - activeSize);

    int waitSize = waitJobs.size();

    int activateCnt = parallelJobsNum - activeSize;

    // Comparator that puts jobs with the highest priority first
    // (shared by both sorting passes below).
    Comparator<GridCollisionJobContext> prioCmp = new Comparator<GridCollisionJobContext>() {
        /** {@inheritDoc} */
        @Override public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
            int p1 = getJobPriority(o1);
            int p2 = getJobPriority(o2);

            return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
        }
    };

    if (activateCnt > 0 && !waitJobs.isEmpty()) {
        if (waitJobs.size() <= activateCnt) {
            for (GridCollisionJobContext waitJob : waitJobs) {
                waitJob.activate();

                waitSize--;
            }
        }
        else {
            List<GridCollisionJobContext> passiveList = new ArrayList<GridCollisionJobContext>(waitJobs);

            Collections.sort(passiveList, prioCmp);

            if (preventStarvation)
                bumpPriority(waitJobs, passiveList);

            for (int i = 0; i < activateCnt; i++) {
                passiveList.get(i).activate();

                waitSize--;
            }
        }
    }

    if (waitSize > waitJobsNum) {
        List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs);

        // Put jobs with the highest priority first.
        Collections.sort(waitList, prioCmp);

        int skip = waitJobs.size() - waitSize;

        int i = 0;

        for (GridCollisionJobContext waitCtx : waitList) {
            if (++i >= skip) {
                waitCtx.cancel();

                if (--waitSize <= waitJobsNum)
                    break;
            }
        }
    }
}
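The two phases above (activate top-priority jobs up to the parallelism limit, then cancel the overflow beyond the wait-queue limit) can be seen in isolation in this toy sketch; all limits, job names, and priorities are made up:

import java.util.*;

public class PriorityCollisionDemo {
    public static void main(String[] args) {
        // Hypothetical limits: at most 2 jobs run in parallel, at most 2 may wait.
        int parallelJobsNum = 2, waitJobsNum = 2, activeSize = 1;

        // Waiting jobs with their priorities.
        Map<String, Integer> prio = Map.of("low", 1, "high", 9, "mid", 5, "tiny", 0);

        List<String> waitJobs = new ArrayList<>(prio.keySet());

        // Highest priority first, mirroring the comparator above.
        waitJobs.sort(Comparator.comparingInt((String j) -> prio.get(j)).reversed());

        int activateCnt = parallelJobsNum - activeSize;

        // Phase 1: activate top-priority jobs up to the parallelism limit.
        System.out.println("activate: " + waitJobs.subList(0, activateCnt)); // [high]

        // Phase 2: cancel lowest-priority jobs overflowing the wait queue limit.
        List<String> stillWaiting = waitJobs.subList(activateCnt, waitJobs.size());

        System.out.println("cancel: " +
            stillWaiting.subList(waitJobsNum, stillWaiting.size())); // [tiny]
    }
}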
/** {@inheritDoc} */ @SuppressWarnings({"unchecked"}) @Override public void unlockAll( Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) { if (keys == null || keys.isEmpty()) return; Collection<? extends GridNode> nodes = ctx.remoteNodes(keys); try { GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size()); for (K key : keys) { GridDistributedCacheEntry<K, V> entry = entryexx(key); if (!ctx.isAll(entry.wrap(false), filter)) continue; // Unlock local lock first. GridCacheMvccCandidate<K> rmv = entry.removeLock(); if (rmv != null && !nodes.isEmpty()) { if (!rmv.reentry()) { req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx); // We are assuming that lock ID is the same for all keys. req.version(rmv.version()); if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv); } else { if (log.isDebugEnabled()) log.debug( "Locally unlocked lock reentry without distributing to other nodes [removed=" + rmv + ", entry=" + entry + ']'); } } else { if (log.isDebugEnabled()) log.debug( "Current thread still owns lock (or there are no other nodes) [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']'); } } // Don't proceed of no keys to unlock. if (req.keyBytes().isEmpty()) { if (log.isDebugEnabled()) log.debug("No keys to unlock locally (was it reentry unlock?): " + keys); return; } // We don't wait for reply to this message. Receiving side will have // to make sure that unlock requests don't come before lock requests. ctx.io().safeSend(nodes, req, null); } catch (GridException e) { U.error(log, "Failed to unlock keys: " + keys, e); } }
/** {@inheritDoc} */ @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"}) @Override protected GridFuture<Boolean> lockAllAsync( Collection<? extends K> keys, long timeout, GridCacheTxLocalEx<K, V> tx, boolean isInvalidate, boolean isRead, boolean retval, GridCacheTxIsolation isolation, GridPredicate<? super GridCacheEntry<K, V>>[] filter) { if (keys.isEmpty()) return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true); Collection<GridRichNode> nodes = ctx.remoteNodes(keys); final GridReplicatedLockFuture<K, V> fut = new GridReplicatedLockFuture<K, V>(ctx, keys, tx, this, nodes, timeout, filter); GridDistributedLockRequest<K, V> req = new GridDistributedLockRequest<K, V>( locNodeId, Thread.currentThread().getId(), fut.futureId(), fut.version(), tx != null, isRead, isolation, isInvalidate, timeout, keys.size()); try { // Must add future before redying locks. if (!ctx.mvcc().addFuture(fut)) throw new IllegalStateException("Duplicate future ID: " + fut); boolean distribute = false; for (K key : keys) { while (true) { GridDistributedCacheEntry<K, V> entry = null; try { entry = entryexx(key); if (!ctx.isAll(entry.wrap(false), filter)) { if (log.isDebugEnabled()) log.debug("Entry being locked did not pass filter (will not lock): " + entry); fut.onDone(false); return fut; } // Removed exception may be thrown here. GridCacheMvccCandidate<K> cand = fut.addEntry(entry); if (cand != null) { req.addKeyBytes( key, cand.reentry() ? null : entry.getOrMarshalKeyBytes(), retval, entry.localCandidates(fut.version()), ctx); req.completedVersions( ctx.tm().committedVersions(fut.version()), ctx.tm().rolledbackVersions(fut.version())); distribute = !cand.reentry(); } else if (fut.isDone()) return fut; break; } catch (GridCacheEntryRemovedException ignored) { if (log.isDebugEnabled()) log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry); } } } // If nothing to distribute at this point, // then all locks are reentries. if (!distribute) fut.complete(true); if (nodes.isEmpty()) fut.readyLocks(); // No reason to send request if all locks are locally re-entered, // or if timeout is negative and local locks could not be acquired. if (fut.isDone()) return fut; try { ctx.io() .safeSend( fut.nodes(), req, new P1<GridNode>() { @Override public boolean apply(GridNode node) { fut.onNodeLeft(node.id()); return !fut.isDone(); } }); } catch (GridException e) { U.error( log, "Failed to send lock request to node [nodes=" + U.toShortString(nodes) + ", req=" + req + ']', e); fut.onError(e); } return fut; } catch (GridException e) { Throwable err = new GridException("Failed to acquire asynchronous lock for keys: " + keys, e); // Clean-up. fut.onError(err); ctx.mvcc().removeFuture(fut); return fut; } }
/**
 * Removes locks regardless of whether they are owned or not for given version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" + ver +
                            ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/**
 * Runs all tests belonging to this test suite on the grid.
 *
 * @param result Test result collector.
 */
@Override public void run(TestResult result) {
    if (isDisabled)
        copy.run(result);
    else {
        GridTestRouter router = createRouter();

        Grid grid = startGrid();

        try {
            List<GridTaskFuture<?>> futs = new ArrayList<GridTaskFuture<?>>(testCount());

            List<GridJunit3SerializableTest> tests = new ArrayList<GridJunit3SerializableTest>(testCount());

            for (int i = 0; i < testCount(); i++) {
                Test junit = testAt(i);

                GridJunit3SerializableTest test;

                if (junit instanceof TestSuite)
                    test = new GridJunit3SerializableTestSuite((TestSuite)junit);
                else {
                    assert junit instanceof TestCase : "Test must be either TestSuite or TestCase: " + junit;

                    test = new GridJunit3SerializableTestCase((TestCase)junit);
                }

                tests.add(test);

                if (clsLdr == null)
                    clsLdr = U.detectClassLoader(junit.getClass());

                futs.add(grid.execute(
                    new GridJunit3Task(junit.getClass(), clsLdr),
                    new GridJunit3Argument(router, test, locTests.contains(test.getName())),
                    timeout));
            }

            for (int i = 0; i < testCount(); i++) {
                GridTaskFuture<?> fut = futs.get(i);

                GridJunit3SerializableTest origTest = tests.get(i);

                try {
                    GridJunit3SerializableTest resTest = (GridJunit3SerializableTest)fut.get();

                    origTest.setResult(resTest);

                    origTest.getTest().run(result);
                }
                catch (GridException e) {
                    handleFail(result, origTest, e);
                }
            }
        }
        finally {
            stopGrid();
        }
    }
}
/** @param p Projection to get metrics for. */
GridProjectionMetricsImpl(GridProjection p) {
    assert p != null;

    Collection<GridRichNode> nodes = p.nodes();

    // Projection is assumed to be non-empty ('size' and the neighborhood
    // size are used as divisors below).
    int size = nodes.size();

    for (GridRichNode node : nodes) {
        GridNodeMetrics m = node.metrics();

        minActJobs = min(minActJobs, m.getCurrentActiveJobs());
        maxActJobs = max(maxActJobs, m.getCurrentActiveJobs());
        avgActJobs += m.getCurrentActiveJobs();

        minCancelJobs = min(minCancelJobs, m.getCurrentCancelledJobs());
        maxCancelJobs = max(maxCancelJobs, m.getCurrentCancelledJobs());
        avgCancelJobs += m.getCurrentCancelledJobs();

        minRejectJobs = min(minRejectJobs, m.getCurrentRejectedJobs());
        maxRejectJobs = max(maxRejectJobs, m.getCurrentRejectedJobs());
        avgRejectJobs += m.getCurrentRejectedJobs();

        minWaitJobs = min(minWaitJobs, m.getCurrentWaitingJobs());
        maxWaitJobs = max(maxWaitJobs, m.getCurrentWaitingJobs());
        avgWaitJobs += m.getCurrentWaitingJobs();

        minJobExecTime = min(minJobExecTime, m.getCurrentJobExecuteTime());
        maxJobExecTime = max(maxJobExecTime, m.getCurrentJobExecuteTime());
        avgJobExecTime += m.getCurrentJobExecuteTime();

        minJobWaitTime = min(minJobWaitTime, m.getCurrentJobWaitTime());
        maxJobWaitTime = max(maxJobWaitTime, m.getCurrentJobWaitTime());
        avgJobWaitTime += m.getCurrentJobWaitTime();

        minDaemonThreadCnt = min(minDaemonThreadCnt, m.getCurrentDaemonThreadCount());
        maxDaemonThreadCnt = max(maxDaemonThreadCnt, m.getCurrentDaemonThreadCount());
        avgDaemonThreadCnt += m.getCurrentDaemonThreadCount();

        minThreadCnt = min(minThreadCnt, m.getCurrentThreadCount());
        maxThreadCnt = max(maxThreadCnt, m.getCurrentThreadCount());
        avgThreadCnt += m.getCurrentThreadCount();

        minIdleTime = min(minIdleTime, m.getCurrentIdleTime());
        maxIdleTime = max(maxIdleTime, m.getCurrentIdleTime());
        avgIdleTime += m.getCurrentIdleTime();

        minBusyTimePerc = min(minBusyTimePerc, m.getBusyTimePercentage());
        maxBusyTimePerc = max(maxBusyTimePerc, m.getBusyTimePercentage());
        avgBusyTimePerc += m.getBusyTimePercentage();

        minCpuLoad = min(minCpuLoad, m.getCurrentCpuLoad());
        maxCpuLoad = max(maxCpuLoad, m.getCurrentCpuLoad());
        avgCpuLoad += m.getCurrentCpuLoad();

        minHeapMemCmt = min(minHeapMemCmt, m.getHeapMemoryCommitted());
        maxHeapMemCmt = max(maxHeapMemCmt, m.getHeapMemoryCommitted());
        avgHeapMemCmt += m.getHeapMemoryCommitted();

        minHeapMemUsed = min(minHeapMemUsed, m.getHeapMemoryUsed());
        maxHeapMemUsed = max(maxHeapMemUsed, m.getHeapMemoryUsed());
        avgHeapMemUsed += m.getHeapMemoryUsed();

        minHeapMemMax = min(minHeapMemMax, m.getHeapMemoryMaximum());
        maxHeapMemMax = max(maxHeapMemMax, m.getHeapMemoryMaximum());
        avgHeapMemMax += m.getHeapMemoryMaximum();

        minHeapMemInit = min(minHeapMemInit, m.getHeapMemoryInitialized());
        maxHeapMemInit = max(maxHeapMemInit, m.getHeapMemoryInitialized());
        avgHeapMemInit += m.getHeapMemoryInitialized();

        minNonHeapMemCmt = min(minNonHeapMemCmt, m.getNonHeapMemoryCommitted());
        maxNonHeapMemCmt = max(maxNonHeapMemCmt, m.getNonHeapMemoryCommitted());
        avgNonHeapMemCmt += m.getNonHeapMemoryCommitted();

        minNonHeapMemUsed = min(minNonHeapMemUsed, m.getNonHeapMemoryUsed());
        maxNonHeapMemUsed = max(maxNonHeapMemUsed, m.getNonHeapMemoryUsed());
        avgNonHeapMemUsed += m.getNonHeapMemoryUsed();

        minNonHeapMemMax = min(minNonHeapMemMax, m.getNonHeapMemoryMaximum());
        maxNonHeapMemMax = max(maxNonHeapMemMax, m.getNonHeapMemoryMaximum());
        avgNonHeapMemMax += m.getNonHeapMemoryMaximum();

        minNonHeapMemInit = min(minNonHeapMemInit, m.getNonHeapMemoryInitialized());
        maxNonHeapMemInit = max(maxNonHeapMemInit, m.getNonHeapMemoryInitialized());
        avgNonHeapMemInit += m.getNonHeapMemoryInitialized();

        minUpTime = min(minUpTime, m.getUpTime());
        maxUpTime = max(maxUpTime, m.getUpTime());
        avgUpTime += m.getUpTime();

        minCpusPerNode = min(minCpusPerNode, m.getTotalCpus());
        maxCpusPerNode = max(maxCpusPerNode, m.getTotalCpus());
        avgCpusPerNode += m.getTotalCpus();
    }

    avgActJobs /= size;
    avgCancelJobs /= size;
    avgRejectJobs /= size;
    avgWaitJobs /= size;
    avgJobExecTime /= size;
    avgJobWaitTime /= size;
    avgDaemonThreadCnt /= size;
    avgThreadCnt /= size;
    avgIdleTime /= size;
    avgBusyTimePerc /= size;
    avgCpuLoad /= size;
    avgHeapMemCmt /= size;
    avgHeapMemUsed /= size;
    avgHeapMemMax /= size;
    avgHeapMemInit /= size;
    avgNonHeapMemCmt /= size;
    avgNonHeapMemUsed /= size;
    avgNonHeapMemMax /= size;
    avgNonHeapMemInit /= size;
    avgUpTime /= size;
    avgCpusPerNode /= size;

    // Note that since we are accessing the projection again, it can get out
    // of sync with the previous measurements, as the projection could have
    // changed in the meantime.
    //
    // We accept this for simplicity and performance reasons. Metrics are not
    // guaranteed to be "transactional".
    youngestNodeStartTime = p.youngest().metrics().getNodeStartTime();
    oldestNodeStartTime = p.oldest().metrics().getNodeStartTime();

    Collection<GridProjection> neighborhood = p.neighborhood();

    for (GridProjection neighbors : neighborhood) {
        minNodesPerHost = min(minNodesPerHost, neighbors.size());
        maxNodesPerHost = max(maxNodesPerHost, neighbors.size());
        avgNodesPerHost += neighbors.size();
    }

    avgNodesPerHost /= neighborhood.size();

    totalCpus = p.cpus();
    totalHosts = p.hosts();
    totalNodes = p.size();
}