/** @param keyMap Key map to register. */
void addKeyMapping(Map<GridRichNode, Collection<K>> keyMap) {
    for (Map.Entry<GridRichNode, Collection<K>> mapping : keyMap.entrySet()) {
        GridRichNode n = mapping.getKey();

        for (K key : mapping.getValue()) {
            GridCacheTxEntry<K, V> txEntry = txMap.get(key);

            assert txEntry != null;

            GridDistributedTxMapping<K, V> m = mappings.get(n.id());

            if (m == null)
                mappings.put(n.id(), m = new GridDistributedTxMapping<K, V>(n));

            txEntry.nodeId(n.id());

            m.add(txEntry);
        }
    }

    if (log.isDebugEnabled())
        log.debug("Added mappings to transaction [locId=" + cctx.nodeId() + ", mappings=" + keyMap +
            ", tx=" + this + ']');
}
/** @param m Mapping. */
@SuppressWarnings({"unchecked"})
private void finish(GridDistributedTxMapping<K, V> m) {
    GridRichNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest<K, V> req = new GridNearTxFinishRequest<K, V>(
        futId,
        tx.xidVersion(),
        tx.commitVersion(),
        tx.threadId(),
        commit,
        tx.isInvalidate(),
        m.explicitLock(),
        tx.topologyVersion(),
        null,
        null,
        null,
        commit && tx.pessimistic() ? m.writes() : null,
        tx.syncCommit() && commit || tx.syncRollback() && !commit);

    // If this is the primary node for the keys.
    if (n.isLocal()) {
        req.miniId(GridUuid.randomUuid());

        if (CU.DHT_ENABLED) {
            GridFuture<GridCacheTx> fut = commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req);

            // Add new future.
            add(fut);
        }
        else
            // Add done future for testing.
            add(new GridFinishedFuture<GridCacheTx>(ctx));
    }
    else {
        MiniFuture fut = new MiniFuture(m);

        req.miniId(fut.futureId());

        add(fut); // Append new future.

        try {
            cctx.io().send(n, req);

            // If we don't wait for the result, then mark the future as done.
            if (!isSync() && !m.explicitLock())
                fut.onDone();
        }
        catch (GridTopologyException e) {
            // Remove previous mapping.
            mappings.remove(m.node().id());

            fut.onResult(e);
        }
        catch (GridException e) {
            // Fail the whole thing.
            fut.onResult(e);
        }
    }
}
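// A condensed, hypothetical sketch (not GridGain API) of the dispatch pattern in finish() above:
// a local primary is finished by calling straight into the DHT layer, while a remote primary gets
// a finish request plus a tracking future that is completed by the reply, completed eagerly when
// no reply is awaited, or failed if the send throws. All names below (dhtFinish, sendFinishRequest,
// waitForReply) are placeholders introduced for illustration only.
class FinishDispatchSketch {
    java.util.concurrent.CompletableFuture<Void> finish(boolean primaryIsLocal, boolean waitForReply) {
        if (primaryIsLocal)
            return dhtFinish(); // Commit/rollback directly against the local DHT cache.

        java.util.concurrent.CompletableFuture<Void> miniFut = new java.util.concurrent.CompletableFuture<Void>();

        try {
            sendFinishRequest();

            // If the caller does not wait for the reply, complete the future right after the send.
            if (!waitForReply)
                miniFut.complete(null);
        }
        catch (RuntimeException e) {
            // Fail the future on a send error.
            miniFut.completeExceptionally(e);
        }

        return miniFut;
    }

    private java.util.concurrent.CompletableFuture<Void> dhtFinish() {
        return java.util.concurrent.CompletableFuture.completedFuture(null); // Placeholder.
    }

    private void sendFinishRequest() {
        // Placeholder for the actual message send.
    }
}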
/**
 * @param entry Transaction entry.
 * @param nodes Nodes.
 */
private void map(GridCacheTxEntry<K, V> entry, Collection<GridRichNode> nodes) {
    GridRichNode primary = CU.primary0(cctx.affinity(entry.key(), nodes));

    GridDistributedTxMapping<K, V> t = mappings.get(primary.id());

    if (t == null)
        mappings.put(primary.id(), t = new GridDistributedTxMapping<K, V>(primary));

    t.add(entry);
}
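// A minimal, standalone sketch of the get-or-create pattern shared by addKeyMapping() and map()
// above: resolve the primary node for an entry, then append the entry to that node's bucket,
// creating the bucket lazily. The N and E type parameters are hypothetical placeholders for
// GridRichNode and GridCacheTxEntry; the affinity lookup itself is assumed to happen elsewhere.
class MappingSketch<N, E> {
    private final java.util.Map<N, java.util.List<E>> mappings = new java.util.HashMap<N, java.util.List<E>>();

    void map(E entry, N primary) {
        java.util.List<E> bucket = mappings.get(primary);

        if (bucket == null)
            mappings.put(primary, bucket = new java.util.ArrayList<E>());

        bucket.add(entry);
    }

    java.util.Map<N, java.util.List<E>> mappings() {
        return mappings;
    }
}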
/**
 * Removes locks regardless of whether they are owned or not for given version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" + ver +
                            ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to remove locks for keys: " + keys, ex);
    }
}
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand = entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int)Math.ceil((double)keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes) [lock=" +
                                    rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock keys: " + keys, ex);
    }
}
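// A sketch of the per-node batching that removeLocks() and unlockAll() both use: each request is
// sized with a rough ceil(totalKeys / nodeCount) estimate, keys owned by the local node are
// collected separately (no network hop), and remote keys are appended to a lazily created request
// per primary node. UnlockRequest and the N/K type parameters are hypothetical placeholders, not
// the actual GridNearUnlockRequest message class.
class UnlockBatcherSketch<N, K> {
    static class UnlockRequest<T> {
        final java.util.List<T> keys;

        UnlockRequest(int expKeys) {
            keys = new java.util.ArrayList<T>(expKeys);
        }
    }

    java.util.Map<N, UnlockRequest<K>> batch(java.util.Map<K, N> primaryByKey, N locNode, int nodeCnt,
        java.util.Collection<K> locKeys) {
        // Rough per-node capacity estimate, mirroring ceil(keys.size() / affNodes.size()).
        int keyCnt = (int)Math.ceil((double)primaryByKey.size() / nodeCnt);

        java.util.Map<N, UnlockRequest<K>> map = new java.util.HashMap<N, UnlockRequest<K>>();

        for (java.util.Map.Entry<K, N> e : primaryByKey.entrySet()) {
            N primary = e.getValue();

            if (primary.equals(locNode)) {
                // Local keys are unlocked directly, no message needed.
                locKeys.add(e.getKey());

                continue;
            }

            UnlockRequest<K> req = map.get(primary);

            if (req == null)
                map.put(primary, req = new UnlockRequest<K>(keyCnt));

            req.keys.add(e.getKey());
        }

        return map; // One request per remote primary node.
    }
}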
/** @param p Projection to get metrics for. */
GridProjectionMetricsImpl(GridProjection p) {
    assert p != null;

    Collection<GridRichNode> nodes = p.nodes();

    int size = nodes.size();

    for (GridRichNode node : nodes) {
        GridNodeMetrics m = node.metrics();

        minActJobs = min(minActJobs, m.getCurrentActiveJobs());
        maxActJobs = max(maxActJobs, m.getCurrentActiveJobs());
        avgActJobs += m.getCurrentActiveJobs();

        minCancelJobs = min(minCancelJobs, m.getCurrentCancelledJobs());
        maxCancelJobs = max(maxCancelJobs, m.getCurrentCancelledJobs());
        avgCancelJobs += m.getCurrentCancelledJobs();

        minRejectJobs = min(minRejectJobs, m.getCurrentRejectedJobs());
        maxRejectJobs = max(maxRejectJobs, m.getCurrentRejectedJobs());
        avgRejectJobs += m.getCurrentRejectedJobs();

        minWaitJobs = min(minWaitJobs, m.getCurrentWaitingJobs());
        maxWaitJobs = max(maxWaitJobs, m.getCurrentWaitingJobs());
        avgWaitJobs += m.getCurrentWaitingJobs();

        minJobExecTime = min(minJobExecTime, m.getCurrentJobExecuteTime());
        maxJobExecTime = max(maxJobExecTime, m.getCurrentJobExecuteTime());
        avgJobExecTime += m.getCurrentJobExecuteTime();

        minJobWaitTime = min(minJobWaitTime, m.getCurrentJobWaitTime());
        maxJobWaitTime = max(maxJobWaitTime, m.getCurrentJobWaitTime());
        avgJobWaitTime += m.getCurrentJobWaitTime();

        minDaemonThreadCnt = min(minDaemonThreadCnt, m.getCurrentDaemonThreadCount());
        maxDaemonThreadCnt = max(maxDaemonThreadCnt, m.getCurrentDaemonThreadCount());
        avgDaemonThreadCnt += m.getCurrentDaemonThreadCount();

        minThreadCnt = min(minThreadCnt, m.getCurrentThreadCount());
        maxThreadCnt = max(maxThreadCnt, m.getCurrentThreadCount());
        avgThreadCnt += m.getCurrentThreadCount();

        minIdleTime = min(minIdleTime, m.getCurrentIdleTime());
        maxIdleTime = max(maxIdleTime, m.getCurrentIdleTime());
        avgIdleTime += m.getCurrentIdleTime();

        minBusyTimePerc = min(minBusyTimePerc, m.getBusyTimePercentage());
        maxBusyTimePerc = max(maxBusyTimePerc, m.getBusyTimePercentage());
        avgBusyTimePerc += m.getBusyTimePercentage();

        minCpuLoad = min(minCpuLoad, m.getCurrentCpuLoad());
        maxCpuLoad = max(maxCpuLoad, m.getCurrentCpuLoad());
        avgCpuLoad += m.getCurrentCpuLoad();

        minHeapMemCmt = min(minHeapMemCmt, m.getHeapMemoryCommitted());
        maxHeapMemCmt = max(maxHeapMemCmt, m.getHeapMemoryCommitted());
        avgHeapMemCmt += m.getHeapMemoryCommitted();

        minHeapMemUsed = min(minHeapMemUsed, m.getHeapMemoryUsed());
        maxHeapMemUsed = max(maxHeapMemUsed, m.getHeapMemoryUsed());
        avgHeapMemUsed += m.getHeapMemoryUsed();

        minHeapMemMax = min(minHeapMemMax, m.getHeapMemoryMaximum());
        maxHeapMemMax = max(maxHeapMemMax, m.getHeapMemoryMaximum());
        avgHeapMemMax += m.getHeapMemoryMaximum();

        minHeapMemInit = min(minHeapMemInit, m.getHeapMemoryInitialized());
        maxHeapMemInit = max(maxHeapMemInit, m.getHeapMemoryInitialized());
        avgHeapMemInit += m.getHeapMemoryInitialized();

        minNonHeapMemCmt = min(minNonHeapMemCmt, m.getNonHeapMemoryCommitted());
        maxNonHeapMemCmt = max(maxNonHeapMemCmt, m.getNonHeapMemoryCommitted());
        avgNonHeapMemCmt += m.getNonHeapMemoryCommitted();

        minNonHeapMemUsed = min(minNonHeapMemUsed, m.getNonHeapMemoryUsed());
        maxNonHeapMemUsed = max(maxNonHeapMemUsed, m.getNonHeapMemoryUsed());
        avgNonHeapMemUsed += m.getNonHeapMemoryUsed();

        minNonHeapMemMax = min(minNonHeapMemMax, m.getNonHeapMemoryMaximum());
        maxNonHeapMemMax = max(maxNonHeapMemMax, m.getNonHeapMemoryMaximum());
        avgNonHeapMemMax += m.getNonHeapMemoryMaximum();

        minNonHeapMemInit = min(minNonHeapMemInit, m.getNonHeapMemoryInitialized());
        maxNonHeapMemInit = max(maxNonHeapMemInit, m.getNonHeapMemoryInitialized());
        avgNonHeapMemInit += m.getNonHeapMemoryInitialized();

        minUpTime = min(minUpTime, m.getUpTime());
        maxUpTime = max(maxUpTime, m.getUpTime());
        avgUpTime += m.getUpTime();

        minCpusPerNode = min(minCpusPerNode, m.getTotalCpus());
        maxCpusPerNode = max(maxCpusPerNode, m.getTotalCpus());
        avgCpusPerNode += m.getTotalCpus();
    }

    avgActJobs /= size;
    avgCancelJobs /= size;
    avgRejectJobs /= size;
    avgWaitJobs /= size;
    avgJobExecTime /= size;
    avgJobWaitTime /= size;
    avgDaemonThreadCnt /= size;
    avgThreadCnt /= size;
    avgIdleTime /= size;
    avgBusyTimePerc /= size;
    avgCpuLoad /= size;
    avgHeapMemCmt /= size;
    avgHeapMemUsed /= size;
    avgHeapMemMax /= size;
    avgHeapMemInit /= size;
    avgNonHeapMemCmt /= size;
    avgNonHeapMemUsed /= size;
    avgNonHeapMemMax /= size;
    avgNonHeapMemInit /= size;
    avgUpTime /= size;
    avgCpusPerNode /= size;

    // Note that since we access the projection again here, it can get out of sync
    // with the measurements above, as the projection could have changed in the meantime.
    //
    // We accept this for simplicity and performance reasons. Metrics are not
    // guaranteed to be "transactional".
    youngestNodeStartTime = p.youngest().metrics().getNodeStartTime();
    oldestNodeStartTime = p.oldest().metrics().getNodeStartTime();

    Collection<GridProjection> neighborhood = p.neighborhood();

    for (GridProjection neighbors : neighborhood) {
        minNodesPerHost = min(minNodesPerHost, neighbors.size());
        maxNodesPerHost = max(maxNodesPerHost, neighbors.size());
        avgNodesPerHost += neighbors.size();
    }

    avgNodesPerHost /= neighborhood.size();

    totalCpus = p.cpus();
    totalHosts = p.hosts();
    totalNodes = p.size();
}
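// A minimal sketch of the fold the constructor above applies to every metric, shown for a single
// one: a single pass keeps running min/max/sum values and the sum is divided by the node count
// afterwards. The activeJobs array is a hypothetical input standing in for the per-node
// GridNodeMetrics snapshots.
class MetricsFoldSketch {
    static void fold(int[] activeJobs) {
        int minActJobs = Integer.MAX_VALUE;
        int maxActJobs = Integer.MIN_VALUE;
        double avgActJobs = 0;

        for (int jobs : activeJobs) {
            minActJobs = Math.min(minActJobs, jobs);
            maxActJobs = Math.max(maxActJobs, jobs);
            avgActJobs += jobs;
        }

        // Average is the accumulated sum divided by the node count.
        avgActJobs /= activeJobs.length;

        System.out.println("active jobs: min=" + minActJobs + ", max=" + maxActJobs + ", avg=" + avgActJobs);
    }
}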