/** {@inheritDoc} */
@Override
public CacheStatistics getStatistics() {
  final CacheStatistics stats = CacheStatistics.get();
  for (Entry<String, PVCache> entry : cachedPVs.entrySet()) {
    stats.addPVStats(entry.getKey(), entry.getValue().getStatistics());
  }
  return stats;
}
/**
 * Computes the status code for a given set of cache statistics.
 *
 * @param statistics Cache statistics.
 * @return {@link StatusCode#WARN} if eviction count is above threshold or if percent free space
 *     is below threshold, otherwise {@link StatusCode#OK}.
 */
protected StatusCode status(final CacheStatistics statistics) {
  final StatusCode code;
  if (statistics.getEvictions() > this.evictionThreshold) {
    code = StatusCode.WARN;
  } else if (statistics.getPercentFree() < this.warnFreeThreshold) {
    code = StatusCode.WARN;
  } else {
    code = StatusCode.OK;
  }
  return code;
}
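// A minimal usage sketch, assuming status(...) backs a health check living in the same
// class as getStatistics() above and buildDescription(...) further below. The Result type
// and its constructor are hypothetical stand-ins for whatever framework hosts this check.
protected Result check() {
  final CacheStatistics stats = getStatistics();
  final StatusCode code = status(stats);
  return new Result(code, buildDescription("PV cache check", stats));
}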
@Override
public void clear(String namespace, Collection<String> keys) {
  long startTime = System.currentTimeMillis();
  CacheStatistics stats = null;
  LOG.trace("clear(%s, %s)", namespace, keys);
  if (cacheStatistics != null) {
    stats = cacheStatistics.getCacheStatistics(namespace);
    stats.incrementClears(keys.size());
  }
  provider.clear(namespace, keys, stats);
  recordElapsedTime(
      stats, startTime, keys.size(), CacheOperation.CLEAR_KEYS, CacheOperation.CLEAR_OPERATIONS);
}
@Override
public void set(String namespace, Collection<CacheStore<byte[]>> stores) {
  long startTime = System.currentTimeMillis();
  CacheStatistics stats = null;
  LOG.trace("set(%s, %s)", namespace, stores);
  if (cacheStatistics != null) {
    stats = cacheStatistics.getCacheStatistics(namespace);
    stats.incrementStores(stores.size());
  }
  provider.set(namespace, stores, stats);
  recordElapsedTime(
      stats, startTime, stores.size(), CacheOperation.STORE_KEYS, CacheOperation.STORE_OPERATIONS);
}
@Override
public Map<String, byte[]> get(String namespace, Collection<String> keys) {
  long startTime = System.currentTimeMillis();
  CacheStatistics stats = null;
  if (cacheStatistics != null) {
    stats = cacheStatistics.getCacheStatistics(namespace);
    stats.incrementFetches(keys.size());
  }
  Map<String, byte[]> result = provider.get(namespace, keys, stats);
  if (stats != null) {
    stats.incrementHits(result.size());
  }
  recordElapsedTime(
      stats, startTime, keys.size(), CacheOperation.FETCH_KEYS, CacheOperation.FETCH_OPERATIONS);
  LOG.trace("get(%s, %s) hit %d", namespace, keys, result.size());
  return result;
}
private void recordElapsedTime(
    CacheStatistics stats,
    long startTime,
    int keyCount,
    CacheOperation keysOperation,
    CacheOperation callsOperation) {
  if (stats != null) {
    long elapsed = System.currentTimeMillis() - startTime;
    stats.recordElapsedTime(elapsed, keyCount, keysOperation, callsOperation);
  }
}
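// Illustrative only: a hit ratio derived from the counters that get(...) maintains above
// (fetches counted per requested key, hits per key actually found). The getFetches() and
// getHits() accessor names are assumptions, not confirmed CacheStatistics API.
private static double hitRatio(CacheStatistics stats) {
  long fetches = stats.getFetches();
  return fetches == 0 ? 0.0 : (double) stats.getHits() / fetches;
}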
/**
 * Builds the description string for the retrieved statistics.
 *
 * @param desc the base description, or {@code null} for none
 * @param statistics the statistics to append, if any
 * @return the description followed by a formatted summary of the given statistics
 */
private static String buildDescription(final String desc, final CacheStatistics... statistics) {
  if (statistics == null || statistics.length == 0) {
    return desc;
  }
  final StringBuilder sb = new StringBuilder();
  if (desc != null) {
    sb.append(desc);
    if (!desc.endsWith(".")) {
      sb.append('.');
    }
    sb.append(' ');
  }
  sb.append("Cache statistics: [");
  int i = 0;
  for (final CacheStatistics stats : statistics) {
    if (i++ > 0) {
      sb.append('|');
    }
    stats.toString(sb);
  }
  sb.append(']');
  return sb.toString();
}
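// Worked example (the format follows directly from the code above):
//   buildDescription("Check passed", statsA, statsB)
// returns
//   "Check passed. Cache statistics: [<statsA>|<statsB>]"
// where each <stats> segment is appended via CacheStatistics.toString(StringBuilder),
// and a '.' is only added when the description does not already end with one.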
protected void setContent(String value) throws SAXException {
  if (insideCacheConfiguration) {
    if ("clearOnFlush".equalsIgnoreCase(inside)) cc.setClearOnFlush(toBooleanValue(value, true));
    else if ("diskExpiryThreadIntervalSeconds".equalsIgnoreCase(inside))
      cc.setDiskExpiryThreadIntervalSeconds(toInt(value, 0));
    else if ("diskPersistent".equalsIgnoreCase(inside))
      cc.setDiskPersistent(toBooleanValue(value, false));
    else if ("diskSpoolBufferSizeMB".equalsIgnoreCase(inside))
      // Value is given in MB; convert to bytes.
      cc.setDiskSpoolBufferSize(toInt(value, 0) * 1024L * 1024L);
    else if ("eternal".equalsIgnoreCase(inside)) cc.setEternal(toBooleanValue(value, false));
    else if ("maxElementsInMemory".equalsIgnoreCase(inside))
      cc.setMaxElementsInMemory(toInt(value, 0));
    else if ("maxElementsOnDisk".equalsIgnoreCase(inside))
      cc.setMaxElementsOnDisk(toInt(value, 0));
    else if ("name".equalsIgnoreCase(inside)) cc.setName(value);
    else if ("overflowToDisk".equalsIgnoreCase(inside))
      cc.setOverflowToDisk(toBooleanValue(value, true));
    else if ("timeToIdleSeconds".equalsIgnoreCase(inside))
      cc.setTimeToIdleSeconds(toInt(value, 0));
    else if ("timeToLiveSeconds".equalsIgnoreCase(inside))
      cc.setTimeToLiveSeconds(toInt(value, 0));
  } else if (insideStatistics) {
    if ("averageGetTime".equalsIgnoreCase(inside)) cs.setAverageGetTime(toDoubleValue(value, 0));
    else if ("cacheHits".equalsIgnoreCase(inside)) cs.setCacheHits(toInt(value, 0));
    else if ("diskStoreSize".equalsIgnoreCase(inside)) cs.setDiskStoreSize(toInt(value, 0));
    else if ("evictionCount".equalsIgnoreCase(inside)) cs.setEvictionCount(toInt(value, 0));
    else if ("inMemoryHits".equalsIgnoreCase(inside)) cs.setInMemoryHits(toInt(value, 0));
    else if ("memoryStoreSize".equalsIgnoreCase(inside)) cs.setMemoryStoreSize(toInt(value, 0));
    else if ("misses".equalsIgnoreCase(inside)) cs.setMisses(toInt(value, 0));
    else if ("onDiskHits".equalsIgnoreCase(inside)) cs.setOnDiskHits(toInt(value, 0));
    else if ("size".equalsIgnoreCase(inside)) cs.setSize(toInt(value, 0));
    else if ("statisticsAccuracy".equalsIgnoreCase(inside)) cs.setStatisticsAccuracy(value);
  } else {
    // System.err.println(inside + ":" + value);
  }
}
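// The handler above consumes Ehcache-style XML. An illustrative input fragment, using
// only element names handled by the branches above (the wrapping element names are
// inferred from the insideCacheConfiguration/insideStatistics flags and are assumptions):
//
//   <cacheConfiguration>
//     <name>userCache</name>
//     <maxElementsInMemory>10000</maxElementsInMemory>
//     <eternal>false</eternal>
//     <overflowToDisk>true</overflowToDisk>
//     <timeToLiveSeconds>600</timeToLiveSeconds>
//   </cacheConfiguration>
//   <statistics>
//     <cacheHits>42</cacheHits>
//     <misses>7</misses>
//     <evictionCount>3</evictionCount>
//   </statistics>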
/**
 * Transfers cache removals or clears. This allows explicit cache cleanup to be propagated to the
 * shared cache even in the event of rollback - useful if the cause of a problem is the shared
 * cache value.
 */
public void afterRollback() {
  TransactionData txnData = getTransactionData();
  try {
    if (txnData.isClearOn) {
      // Clear the shared cache.
      final long startNanos = cacheStatsEnabled ? System.nanoTime() : 0;
      sharedCache.clear();
      final long endNanos = cacheStatsEnabled ? System.nanoTime() : 0;
      if (cacheStatsEnabled) {
        TransactionStats stats = txnData.stats;
        stats.record(startNanos, endNanos, OpType.CLEAR);
      }
      if (isDebugEnabled) {
        logger.debug("Clear notification received in rollback - clearing shared cache");
      }
    } else {
      // Transfer any removed items.
      for (Serializable key : txnData.removedItemsCache) {
        final long startNanos = System.nanoTime();
        sharedCache.remove(key);
        final long endNanos = System.nanoTime();
        TransactionStats stats = txnData.stats;
        stats.record(startNanos, endNanos, OpType.REMOVE);
      }
      if (isDebugEnabled) {
        logger.debug(
            "Removed "
                + txnData.removedItemsCache.size()
                + " values from shared cache in rollback");
      }
    }
  } catch (Throwable e) {
    throw new AlfrescoRuntimeException("Failed to transfer updates to shared cache", e);
  } finally {
    removeCaches(txnData);
    // Aggregate this transaction's stats with the centralised cache stats.
    if (cacheStatsEnabled) {
      cacheStats.add(name, txnData.stats);
    }
  }
}
/** Merge the transactional caches into the shared cache. */
public void afterCommit() {
  if (isDebugEnabled) {
    logger.debug("Processing after-commit");
  }
  TransactionData txnData = getTransactionData();
  try {
    if (txnData.isClearOn) {
      // Clear the shared cache.
      final long startNanos = cacheStatsEnabled ? System.nanoTime() : 0;
      sharedCache.clear();
      final long endNanos = cacheStatsEnabled ? System.nanoTime() : 0;
      if (cacheStatsEnabled) {
        TransactionStats stats = txnData.stats;
        stats.record(startNanos, endNanos, OpType.CLEAR);
      }
      if (isDebugEnabled) {
        logger.debug("Clear notification received in commit - clearing shared cache");
      }
    } else {
      // Transfer any removed items.
      for (Serializable key : txnData.removedItemsCache) {
        final long startNanos = System.nanoTime();
        sharedCache.remove(key);
        final long endNanos = System.nanoTime();
        TransactionStats stats = txnData.stats;
        stats.record(startNanos, endNanos, OpType.REMOVE);
      }
      if (isDebugEnabled) {
        logger.debug(
            "Removed "
                + txnData.removedItemsCache.size()
                + " values from shared cache in commit");
      }
    }
    // Transfer updates.
    Set<Serializable> keys = (Set<Serializable>) txnData.updatedItemsCache.keySet();
    for (Map.Entry<Serializable, CacheBucket<V>> entry :
        (Set<Map.Entry<Serializable, CacheBucket<V>>>) txnData.updatedItemsCache.entrySet()) {
      Serializable key = entry.getKey();
      CacheBucket<V> bucket = entry.getValue();
      try {
        bucket.doPostCommit(
            sharedCache,
            key,
            this.isMutable,
            this.allowEqualsChecks,
            txnData.isReadOnly,
            txnData.stats);
      } catch (Exception e) {
        // MNT-10486: NPE in NodeEntity during post-commit write-through to the shared cache.
        // This try-catch is diagnostic in nature: we need to know the names of the caches and
        // details of the values involved. The causal exception is rethrown.
        throw new AlfrescoRuntimeException(
            "CacheBucket postCommit transfer to shared cache failed: \n"
                + "   Cache:       " + sharedCache + "\n"
                + "   Key:         " + key + "\n"
                + "   New Value:   " + bucket.getValue() + "\n"
                + "   Cache Value: " + sharedCache.get(key),
            e);
      }
    }
    if (isDebugEnabled) {
      logger.debug("Post-commit called for " + keys.size() + " values.");
    }
  } catch (Throwable e) {
    throw new AlfrescoRuntimeException("Failed to transfer updates to shared cache", e);
  } finally {
    removeCaches(txnData);
    // Aggregate this transaction's stats with the centralised cache stats.
    if (cacheStatsEnabled) {
      cacheStats.add(name, txnData.stats);
    }
  }
}
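// A rough sketch (an assumption, not the Alfresco source) of the CacheBucket contract
// that afterCommit() relies on: each bucket knows how to write its value through to the
// shared cache once the transaction commits. The parameter list mirrors the
// doPostCommit(...) call site above; the SimpleCache type for the shared cache is assumed.
interface CacheBucketSketch<V> {
  V getValue();

  void doPostCommit(
      SimpleCache<Serializable, V> sharedCache,
      Serializable key,
      boolean mutable,
      boolean allowEqualsChecks,
      boolean readOnly,
      TransactionStats stats);
}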