private void initCaches(FilterConfig filterConfig) {
  ArtifactorySystemProperties properties =
      ((ArtifactoryHome)
              filterConfig.getServletContext().getAttribute(ArtifactoryHome.SERVLET_CTX_ATTR))
          .getArtifactoryProperties();
  ConstantValues idleTimeSecsProp = ConstantValues.securityAuthenticationCacheIdleTimeSecs;
  long cacheIdleSecs = properties.getLongProperty(idleTimeSecsProp);
  ConstantValues initSizeProp = ConstantValues.securityAuthenticationCacheInitSize;
  long initSize = properties.getLongProperty(initSizeProp);
  nonUiAuthCache =
      CacheBuilder.newBuilder()
          .softValues()
          .initialCapacity((int) initSize)
          .expireAfterWrite(cacheIdleSecs, TimeUnit.SECONDS)
          .<AuthCacheKey, Authentication>build()
          .asMap();
  userChangedCache =
      CacheBuilder.newBuilder()
          .softValues()
          .initialCapacity((int) initSize)
          .expireAfterWrite(cacheIdleSecs, TimeUnit.SECONDS)
          .<String, AuthenticationCache>build()
          .asMap();
  SecurityService securityService = context.beanForType(SecurityService.class);
  securityService.addListener(this);
}
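// A minimal sketch (hypothetical names, not from the original sources) of the
// build().asMap() idiom above: the cache is exposed as a plain ConcurrentMap
// whose entries remain subject to the builder's eviction policy -- here, soft
// values plus a write expiry.
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

class EvictingMapSketch {
  private final ConcurrentMap<String, String> tokens =
      CacheBuilder.newBuilder()
          .softValues()
          .initialCapacity(100)
          .expireAfterWrite(300, TimeUnit.SECONDS)
          .<String, String>build()
          .asMap();

  void remember(String user, String token) {
    // The entry can vanish after 300s, or earlier under memory pressure,
    // so readers must treat get() as nullable.
    tokens.put(user, token);
  }
}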
public StandardTitanTx(
    StandardTitanGraph graph, TransactionConfig config, BackendTransaction txHandle) {
  Preconditions.checkNotNull(graph);
  Preconditions.checkArgument(graph.isOpen());
  Preconditions.checkNotNull(config);
  Preconditions.checkNotNull(txHandle);
  this.graph = graph;
  this.config = config;
  this.idInspector = graph.getIDInspector();
  this.txHandle = txHandle;

  temporaryID = new AtomicLong(-1);

  int concurrencyLevel;
  if (config.isSingleThreaded()) {
    vertexCache = new SimpleVertexCache();
    addedRelations = new SimpleBufferAddedRelations();
    concurrencyLevel = 1;
    typeCache = new HashMap<String, TitanType>();
    newVertexIndexEntries = new SimpleIndexCache();
  } else {
    vertexCache = new ConcurrentVertexCache();
    addedRelations = new ConcurrentBufferAddedRelations();
    concurrencyLevel = 4;
    typeCache = new ConcurrentHashMap<String, TitanType>();
    newVertexIndexEntries = new ConcurrentIndexCache();
  }
  for (SystemType st : SystemKey.values()) {
    typeCache.put(st.getName(), st);
  }

  indexCache =
      CacheBuilder.newBuilder()
          .weigher(
              new Weigher<StandardElementQuery, List<Object>>() {
                @Override
                public int weigh(StandardElementQuery q, List<Object> r) {
                  return 2 + r.size();
                }
              })
          .concurrencyLevel(concurrencyLevel)
          .maximumWeight(DEFAULT_CACHE_SIZE)
          .build();

  uniqueLocks = UNINITIALIZED_LOCKS;
  deletedRelations = EMPTY_DELETED_RELATIONS;
  this.isOpen = true;
}
public JWKSetCacheService() {
  this.validators =
      CacheBuilder.newBuilder()
          .expireAfterWrite(1, TimeUnit.HOURS) // expires 1 hour after fetch
          .maximumSize(100)
          .build(new JWKSetVerifierFetcher());
  this.encrypters =
      CacheBuilder.newBuilder()
          .expireAfterWrite(1, TimeUnit.HOURS) // expires 1 hour after fetch
          .maximumSize(100)
          .build(new JWKSetEncryptorFetcher());
}
public class LocationGuesserServiceImpl implements LocationGuesserService {
  private final FloorplanLoader loader_;
  private final GuesserStrategyFactory strategies_;

  // Caching floorplans here may not be necessary...or wanted...
  private final LoadingCache<String, Floorplan> floorplans_ =
      CacheBuilder.newBuilder()
          .maximumSize(1000)
          .build(
              new CacheLoader<String, Floorplan>() {
                public Floorplan load(String id) throws Exception {
                  return loader_.load(id);
                }
              });

  @Inject
  public LocationGuesserServiceImpl(GuesserStrategyFactory strategies, FloorplanLoader loader) {
    loader_ = loader;
    strategies_ = strategies;
  }

  @Override
  public LocationGuesser getGuesser(String floorplanId) {
    try {
      final Floorplan fp = floorplans_.get(floorplanId);
      return new LocationGuesserImpl(strategies_, fp, fp.getRootType());
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}
public DaemonicParserState(
    TypeCoercerFactory typeCoercerFactory,
    ConstructorArgMarshaller marshaller,
    int parsingThreads) {
  this.typeCoercerFactory = typeCoercerFactory;
  this.marshaller = marshaller;
  this.allRawNodes = new OptimisticLoadingCache<>(parsingThreads);
  this.targetsCornucopia = HashMultimap.create();
  this.allTargetNodes = new OptimisticLoadingCache<>(parsingThreads);
  this.hasCachedTargetNodeForBuildTargetPredicate =
      new Predicate<BuildTarget>() {
        @Override
        public boolean apply(BuildTarget buildTarget) {
          return hasCachedTargetNodeForBuildTarget(buildTarget);
        }
      };
  this.buildFileTrees =
      CacheBuilder.newBuilder()
          .build(
              new CacheLoader<Cell, BuildFileTree>() {
                @Override
                public BuildFileTree load(Cell cell) throws Exception {
                  return new FilesystemBackedBuildFileTree(
                      cell.getFilesystem(), cell.getBuildFileName());
                }
              });
  this.buildFileDependents = HashMultimap.create();
  this.cachedEnvironment = ImmutableMap.of();
  this.cachedIncludes = new ConcurrentHashMap<>();
  this.knownCells = Collections.synchronizedSet(new HashSet<Cell>());
  this.cachedStateLock = new AutoCloseableReadWriteUpdateLock();
}
public UserInfoFetcher(HttpClient httpClient) {
  cache =
      CacheBuilder.newBuilder()
          .expireAfterWrite(1, TimeUnit.HOURS) // expires 1 hour after fetch
          .maximumSize(100)
          .build(new UserInfoLoader(httpClient));
}
public CachedCodeService() {
  cache =
      CacheBuilder.newBuilder()
          .maximumSize(1000)
          .expireAfterAccess(8, TimeUnit.HOURS)
          .build(new CodeLoader());
}
private PresentWhenExtensionAnnotationMatchesExtensionSet whenExtensionsAndAliasesInRegionInclude(
    String region, final Set<Extension> extensions, final Multimap<URI, URI> aliases) {
  final LoadingCache<String, Set<? extends Extension>> extensionsForRegion =
      CacheBuilder.newBuilder()
          .build(
              CacheLoader.from(
                  Functions.forMap(
                      ImmutableMap.<String, Set<? extends Extension>>of(
                          region, extensions,
                          "differentregion", ImmutableSet.<Extension>of()))));
  PresentWhenExtensionAnnotationMatchesExtensionSet fn =
      Guice.createInjector(
              new AbstractModule() {
                @Override
                protected void configure() {
                  MapBinder<URI, URI> aliasBindings =
                      MapBinder.newMapBinder(binder(), URI.class, URI.class, NamespaceAliases.class)
                          .permitDuplicates();
                  for (URI key : aliases.keySet()) {
                    for (URI value : aliases.get(key)) {
                      aliasBindings.addBinding(key).toInstance(value);
                    }
                  }
                }

                @Provides
                LoadingCache<String, Set<? extends Extension>> getExtensions() {
                  return extensionsForRegion;
                }
              })
          .getInstance(PresentWhenExtensionAnnotationMatchesExtensionSet.class);
  return fn;
}
public BlockPattern.PatternHelper func_177681_a(World p_177681_1_, BlockPos p_177681_2_) {
  LoadingCache var3 = CacheBuilder.newBuilder().build(new BlockPattern.CacheLoader(p_177681_1_));
  int var4 = Math.max(Math.max(this.field_177686_d, this.field_177688_c), this.field_177687_b);
  Iterator var5 =
      BlockPos.func_177980_a(p_177681_2_, p_177681_2_.func_177982_a(var4 - 1, var4 - 1, var4 - 1))
          .iterator();

  while (var5.hasNext()) {
    BlockPos var6 = (BlockPos) var5.next();
    EnumFacing[] var7 = EnumFacing.values();
    int var8 = var7.length;

    for (int var9 = 0; var9 < var8; ++var9) {
      EnumFacing var10 = var7[var9];
      EnumFacing[] var11 = EnumFacing.values();
      int var12 = var11.length;

      for (int var13 = 0; var13 < var12; ++var13) {
        EnumFacing var14 = var11[var13];
        if (var14 != var10 && var14 != var10.func_176734_d()) {
          BlockPattern.PatternHelper var15 = this.func_177682_a(var6, var10, var14, var3);
          if (var15 != null) {
            return var15;
          }
        }
      }
    }
  }

  return null;
}
private ExecutorServiceParallelExecutor(
    int targetParallelism,
    Map<PValue, Collection<AppliedPTransform<?, ?, ?>>> valueToConsumers,
    Set<PValue> keyedPValues,
    RootProviderRegistry rootProviderRegistry,
    TransformEvaluatorRegistry registry,
    @SuppressWarnings("rawtypes")
        Map<Class<? extends PTransform>, Collection<ModelEnforcementFactory>> transformEnforcements,
    EvaluationContext context) {
  this.targetParallelism = targetParallelism;
  this.executorService = Executors.newFixedThreadPool(targetParallelism);
  this.valueToConsumers = valueToConsumers;
  this.keyedPValues = keyedPValues;
  this.rootProviderRegistry = rootProviderRegistry;
  this.registry = registry;
  this.transformEnforcements = transformEnforcements;
  this.evaluationContext = context;

  // Weak values allow TransformExecutorServices that are no longer in use to be reclaimed.
  // Executing TransformExecutors hold a strong reference to their TransformExecutorService,
  // which stops the TransformExecutorServices from being prematurely garbage collected.
  executorServices =
      CacheBuilder.newBuilder().weakValues().build(serialTransformExecutorServiceCacheLoader());

  this.allUpdates = new ConcurrentLinkedQueue<>();
  this.visibleUpdates = new LinkedBlockingQueue<>();

  parallelExecutorService = TransformExecutorServices.parallel(executorService);
  defaultCompletionCallback =
      new TimerIterableCompletionCallback(Collections.<TimerData>emptyList());
  this.pendingRootBundles = new ConcurrentHashMap<>();
}
@GwtIncompatible("CacheTesting") public void testConcurrencyLevel_small() { LoadingCache<?, ?> cache = CacheBuilder.newBuilder().concurrencyLevel(1).build(identityLoader()); LocalCache<?, ?> map = CacheTesting.toLocalCache(cache); assertEquals(1, map.segments.length); }
@Override
public List statisticByTop(final String resourceId, final DateSeries dateSeries, TopFilter top) {
  Cache<DateSeries, Map<TopFilter, List>> cache = topCache.getIfPresent(resourceId);
  if (cache == null) {
    cache = CacheBuilder.newBuilder().maximumSize(1024).expireAfterWrite(1, TimeUnit.DAYS).build();
    topCache.put(resourceId, cache);
  }
  if (dateSeries != DateSeries.today) {
    try {
      Map<TopFilter, List> map =
          cache.get(
              dateSeries,
              new Callable<Map<TopFilter, List>>() {
                @Override
                public Map<TopFilter, List> call() throws Exception {
                  Map<TopFilter, List> map = newHashMap();
                  for (TopFilter t : TopFilter.values()) {
                    map.put(t, statisticTopReport.statisticByTop(resourceId, dateSeries, t));
                  }
                  return map;
                }
              });
      return map.get(top);
    } catch (ExecutionException e) {
      throw new RuntimeException(e);
    }
  } else {
    // Today's data is still changing, so don't cache it.
    return statisticTopReport.statisticByTop(resourceId, dateSeries, top);
  }
}
private static void registerProviderClass(Class<?> cls) {
  Cache<EventDAO, Object> cache = CACHE_MAP.get(cls);
  if (cache == null) {
    cache = CacheBuilder.newBuilder().weakKeys().softValues().build();
    CACHE_MAP.putIfAbsent(cls, cache);
  }
}
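// A minimal sketch (hypothetical names and types) of the race-free variant of
// the putIfAbsent idiom above. Discarding the putIfAbsent result is safe only
// while the freshly built cache is never used afterwards; when the cache is
// returned or used, fall back to the instance another thread may have
// installed first:
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class ProviderCachesSketch {
  private static final ConcurrentMap<Class<?>, Cache<Object, Object>> CACHE_MAP =
      new ConcurrentHashMap<>();

  static Cache<Object, Object> cacheFor(Class<?> cls) {
    Cache<Object, Object> cache = CACHE_MAP.get(cls);
    if (cache == null) {
      cache = CacheBuilder.newBuilder().weakKeys().softValues().build();
      Cache<Object, Object> raced = CACHE_MAP.putIfAbsent(cls, cache);
      if (raced != null) {
        cache = raced; // another thread won the race; use its instance
      }
    }
    return cache;
  }
}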
public RegionReplicaSinkWriter(
    RegionReplicaOutputSink sink,
    ClusterConnection connection,
    ExecutorService pool,
    int operationTimeout) {
  this.sink = sink;
  this.connection = connection;
  this.operationTimeout = operationTimeout;
  this.rpcRetryingCallerFactory =
      RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
  this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
  this.pool = pool;

  int nonExistentTableCacheExpiryMs =
      connection
          .getConfiguration()
          .getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
  // A cache for non-existent tables, with a default expiry of 5 sec. This means that if a
  // table is re-created with the same name, we might fail to replicate for that amount of
  // time. But this cache prevents a meta lookup for every edit from a deleted table.
  disabledAndDroppedTables =
      CacheBuilder.newBuilder()
          .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
          .initialCapacity(10)
          .maximumSize(1000)
          .build();
}
public RegionReplicaOutputSink(
    PipelineController controller,
    TableDescriptors tableDescriptors,
    EntryBuffers entryBuffers,
    ClusterConnection connection,
    ExecutorService pool,
    int numWriters,
    int operationTimeout) {
  super(controller, entryBuffers, numWriters);
  this.sinkWriter = new RegionReplicaSinkWriter(this, connection, pool, operationTimeout);
  this.tableDescriptors = tableDescriptors;

  // A cache for each table's "memstore replication enabled" flag, with a default expiry of
  // 5 sec. This means that if the table is altered with a different flag value, we might
  // fail to replicate correctly for that amount of time. But this cache avoids the slow
  // lookup and parsing of the TableDescriptor on every edit.
  int memstoreReplicationEnabledCacheExpiryMs =
      connection
          .getConfiguration()
          .getInt(
              "hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000);
  this.memstoreReplicationEnabled =
      CacheBuilder.newBuilder()
          .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS)
          .initialCapacity(10)
          .maximumSize(1000)
          .build();
}
@Override
public void init(DeviceId deviceId, PipelinerContext context) {
  log.debug("Initiate OLT pipeline");
  this.serviceDirectory = context.directory();
  this.deviceId = deviceId;

  flowRuleService = serviceDirectory.get(FlowRuleService.class);
  coreService = serviceDirectory.get(CoreService.class);
  groupService = serviceDirectory.get(GroupService.class);
  flowObjectiveStore = context.store();

  appId = coreService.registerApplication("org.onosproject.driver.OLTPipeline");

  pendingGroups =
      CacheBuilder.newBuilder()
          .expireAfterWrite(20, TimeUnit.SECONDS)
          .removalListener(
              (RemovalNotification<GroupKey, NextObjective> notification) -> {
                if (notification.getCause() == RemovalCause.EXPIRED) {
                  fail(notification.getValue(), ObjectiveError.GROUPINSTALLATIONFAILED);
                }
              })
          .build();

  groupService.addListener(new InnerGroupListener());
}
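// A cautionary sketch (hypothetical names) for the expiry-as-timeout pattern
// above: CacheBuilder caches perform expiry maintenance lazily, during reads
// and writes, so on an idle cache the removal listener may fire well after
// the 20s deadline. A periodic Cache.cleanUp() call forces timely delivery:
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalNotification;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class PendingOpsSketch {
  private final Cache<String, Runnable> pending =
      CacheBuilder.newBuilder()
          .expireAfterWrite(20, TimeUnit.SECONDS)
          .removalListener(
              (RemovalNotification<String, Runnable> n) ->
                  System.out.println("timed out: " + n.getKey()))
          .build();

  private final ScheduledExecutorService janitor =
      Executors.newSingleThreadScheduledExecutor();

  PendingOpsSketch() {
    // Trigger expiry processing even when no reads or writes arrive.
    janitor.scheduleAtFixedRate(pending::cleanUp, 1, 1, TimeUnit.SECONDS);
  }
}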
private BuildRequest(
    String commandName,
    final OptionsProvider options,
    final OptionsProvider startupOptions,
    List<String> targets,
    OutErr outErr,
    UUID id,
    long startTimeMillis) {
  this.commandName = commandName;
  this.optionsDescription = OptionsUtils.asShellEscapedString(options);
  this.outErr = outErr;
  this.targets = targets;
  this.id = id;
  this.startTimeMillis = startTimeMillis;
  this.optionsCache =
      CacheBuilder.newBuilder()
          .build(
              new CacheLoader<Class<? extends OptionsBase>, Optional<OptionsBase>>() {
                @Override
                public Optional<OptionsBase> load(Class<? extends OptionsBase> key)
                    throws Exception {
                  OptionsBase result = options.getOptions(key);
                  if (result == null && startupOptions != null) {
                    result = startupOptions.getOptions(key);
                  }
                  return Optional.fromNullable(result);
                }
              });

  for (Class<? extends OptionsBase> optionsClass : MANDATORY_OPTIONS) {
    Preconditions.checkNotNull(getOptions(optionsClass));
  }
}
@GwtIncompatible("removalListener") public void testRemovalNotification_clear() throws InterruptedException { // If a clear() happens while a computation is pending, we should not get a removal // notification. final AtomicBoolean shouldWait = new AtomicBoolean(false); final CountDownLatch computingLatch = new CountDownLatch(1); CacheLoader<String, String> computingFunction = new CacheLoader<String, String>() { @Override public String load(String key) throws InterruptedException { if (shouldWait.get()) { computingLatch.await(); } return key; } }; QueuingRemovalListener<String, String> listener = queuingRemovalListener(); final LoadingCache<String, String> cache = CacheBuilder.newBuilder() .concurrencyLevel(1) .removalListener(listener) .build(computingFunction); // seed the map, so its segment's count > 0 cache.getUnchecked("a"); shouldWait.set(true); final CountDownLatch computationStarted = new CountDownLatch(1); final CountDownLatch computationComplete = new CountDownLatch(1); new Thread( new Runnable() { @Override public void run() { computationStarted.countDown(); cache.getUnchecked("b"); computationComplete.countDown(); } }) .start(); // wait for the computingEntry to be created computationStarted.await(); cache.invalidateAll(); // let the computation proceed computingLatch.countDown(); // don't check cache.size() until we know the get("b") call is complete computationComplete.await(); // At this point, the listener should be holding the seed value (a -> a), and the map should // contain the computed value (b -> b), since the clear() happened before the computation // completed. assertEquals(1, listener.size()); RemovalNotification<String, String> notification = listener.remove(); assertEquals("a", notification.getKey()); assertEquals("a", notification.getValue()); assertEquals(1, cache.size()); assertEquals("b", cache.getUnchecked("b")); }
/** Created by rg on 2015/7/15. */
public class CommonCallableCache {
  private static final Cache<String, Object> cache =
      CacheBuilder.newBuilder().maximumSize(1000).build();

  public static Object get(final String key) {
    Object value = null;
    try {
      value =
          cache.get(
              key,
              new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                  return "from cache :" + key;
                }
              });
    } catch (ExecutionException e) {
      e.printStackTrace();
    }
    return value;
  }

  public static void put(String key, Object value) {
    try {
      cache.put(key, value);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
@GwtIncompatible("CacheTesting") public void testSizingDefaults() { LoadingCache<?, ?> cache = CacheBuilder.newBuilder().build(identityLoader()); LocalCache<?, ?> map = CacheTesting.toLocalCache(cache); assertEquals(4, map.segments.length); // concurrency level assertEquals(4, map.segments[0].table.length()); // capacity / conc level }
public void testTimeToIdleAndToLive() {
  CacheBuilder.newBuilder()
      .expireAfterWrite(1, NANOSECONDS)
      .expireAfterAccess(1, NANOSECONDS)
      .build(identityLoader());
  // well, it didn't blow up.
}
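// A small sketch (illustrative names and durations) of what the test above
// asserts: expireAfterWrite and expireAfterAccess may be combined on one
// builder, and the two policies compose -- an entry is evicted as soon as
// either limit is exceeded.
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;

class DualExpirySketch {
  private final Cache<String, byte[]> sessions =
      CacheBuilder.newBuilder()
          .expireAfterWrite(8, TimeUnit.HOURS) // cap on total lifetime
          .expireAfterAccess(30, TimeUnit.MINUTES) // cap on idle time
          .build();
}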
private LoadingCache<DatasourceMonth, DataFileWriter<GenericRecord>> createWritersCache() {
  CacheLoader<DatasourceMonth, DataFileWriter<GenericRecord>> loader =
      new CacheLoader<DatasourceMonth, DataFileWriter<GenericRecord>>() {
        @Override
        public DataFileWriter<GenericRecord> load(DatasourceMonth datasourceMonth)
            throws Exception {
          return AvroWriterBolt.this.openHDFSFile(datasourceMonth);
        }
      };

  // A synchronous removal listener should be enough in principle
  RemovalListener<DatasourceMonth, DataFileWriter<GenericRecord>> removalListener =
      new RemovalListener<DatasourceMonth, DataFileWriter<GenericRecord>>() {
        @Override
        public void onRemoval(
            RemovalNotification<DatasourceMonth, DataFileWriter<GenericRecord>> removal) {
          try {
            LOGGER.info("Closing file for datasource {}", removal.getKey().datasource());
            removal.getValue().close();
          } catch (IOException ioe) {
            LOGGER.error(
                "Error closing file for datasource {}: {}",
                removal.getKey().datasource(),
                ioe.getMessage());
            throw new RuntimeException(ioe);
          }
        }
      };

  return CacheBuilder.newBuilder()
      .expireAfterAccess(CACHE_EXPIRATION_TIME, TimeUnit.MINUTES)
      .removalListener(removalListener)
      .build(loader);
}
public PointSetDatastore(
    Integer maxCacheSize,
    String s3CredentialsFilename,
    Boolean workOffline,
    String pointsetBucket) {
  super();

  // allow the data store to work offline with cached data and skip S3 connection
  this.workOffline = workOffline;

  this.pointsetBucket = pointsetBucket;

  if (!this.workOffline) {
    if (s3CredentialsFilename != null) {
      AWSCredentials creds =
          new ProfileCredentialsProvider(s3CredentialsFilename, "default").getCredentials();
      s3 = new AmazonS3Client(creds);
    } else {
      // default credentials providers, e.g. IAM role
      s3 = new AmazonS3Client();
    }
  }

  // set up the cache
  this.pointSets =
      CacheBuilder.newBuilder()
          .maximumSize(maxCacheSize)
          .build(new S3PointSetLoader(workOffline, s3, pointsetBucket));
}
TopLevelItemsCache() {
  cache =
      CacheBuilder.newBuilder()
          .initialCapacity(INITIAL_CAPACITY)
          .expireAfterAccess(EVICT_IN_SECONDS, TimeUnit.SECONDS)
          .maximumSize(MAX_ENTRIES)
          .softValues()
          .removalListener(
              new RemovalListener<LazyTopLevelItem.Key, TopLevelItem>() {
                @Override
                public void onRemoval(
                    RemovalNotification<LazyTopLevelItem.Key, TopLevelItem> notification) {
                  // System.out.println("*** Removed from cache " + notification.getKey().name );
                }
              })
          .build(
              new CacheLoader<LazyTopLevelItem.Key, TopLevelItem>() {
                @Override
                public TopLevelItem load(LazyTopLevelItem.Key key) throws Exception {
                  TopLevelItem item = (TopLevelItem) key.configFile.read();
                  item.onLoad(key.parent, key.name);
                  return item;
                }
              });
}
public TemplateBuilder createTemplateBuilder() {
  final Supplier<Set<? extends Image>> images =
      Suppliers.<Set<? extends Image>>ofInstance(
          ImmutableSet.of(
              new ImageBuilder()
                  .providerId("ebs-image-provider")
                  .name("image")
                  .id("us-east-1/bogus-image")
                  .location(jcloudsDomainLocation)
                  .userMetadata(ImmutableMap.of("rootDeviceType", RootDeviceType.EBS.value()))
                  .operatingSystem(
                      new OperatingSystem(
                          OsFamily.UBUNTU,
                          null,
                          "1.0",
                          VirtualizationType.PARAVIRTUAL.value(),
                          "ubuntu",
                          true))
                  .description("description")
                  .version("1.0")
                  .defaultCredentials(LoginCredentials.builder().user("root").build())
                  .status(Image.Status.AVAILABLE)
                  .build()));
  ImmutableMap<RegionAndName, Image> imageMap =
      (ImmutableMap<RegionAndName, Image>) ImagesToRegionAndIdMap.imagesToMap(images.get());
  Supplier<LoadingCache<RegionAndName, ? extends Image>> imageCache =
      Suppliers.<LoadingCache<RegionAndName, ? extends Image>>ofInstance(
          CacheBuilder.newBuilder()
              .<RegionAndName, Image>build(CacheLoader.from(Functions.forMap(imageMap))));
  JcloudsStubTemplateBuilder jcloudsStubTemplateBuilder = new JcloudsStubTemplateBuilder();
  return jcloudsStubTemplateBuilder.newTemplateBuilder(images, imageCache);
}
@Override
public <K, V, C extends Configuration<K, V>> Cache<K, V> newCache(
    final String cacheName, final C configuration) {
  CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
  if (configuration instanceof CompleteConfiguration) {
    configureCacheBuilder((CompleteConfiguration) configuration, cacheBuilder);
  }
  return new WrappedCache<K, V>(cacheBuilder.<K, V>build()) {
    @Override
    public String getName() {
      return cacheName;
    }

    @Override
    public CacheManager getCacheManager() {
      return GuavaCacheManager.this;
    }

    @Override
    public void close() {
      if (!isClosed()) {
        super.close();
        destroyCache(cacheName);
      }
    }

    @Override
    public <T extends Configuration<K, V>> T getConfiguration(Class<T> clazz) {
      return Constants.unwrap(configuration, clazz);
    }
  };
}
/**
 * Constructor for CacheDataSource.
 *
 * @param src DataSource
 */
public CacheDataSource(DataSource src) {
  source = src;
  executorService =
      MoreExecutors.listeningDecorator(
          Executors.newCachedThreadPool(
              new ThreadFactoryBuilder()
                  .setDaemon(true)
                  .setNameFormat("AuthMe-CacheLoader")
                  .build()));
  cachedAuths =
      CacheBuilder.newBuilder()
          .refreshAfterWrite(5, TimeUnit.MINUTES)
          .expireAfterAccess(15, TimeUnit.MINUTES)
          .build(
              new CacheLoader<String, Optional<PlayerAuth>>() {
                @Override
                public Optional<PlayerAuth> load(String key) {
                  return Optional.fromNullable(source.getAuth(key));
                }

                @Override
                public ListenableFuture<Optional<PlayerAuth>> reload(
                    final String key, Optional<PlayerAuth> oldValue) {
                  return executorService.submit(
                      new Callable<Optional<PlayerAuth>>() {
                        @Override
                        public Optional<PlayerAuth> call() {
                          ConsoleLogger.debug("REFRESH " + key);
                          return load(key);
                        }
                      });
                }
              });
}
protected CacheBuilder<Object, Object> createCacheBuilder(
    Duration expireAfterWriteDuration, Duration refreshAfterWriteDuration) {
  return CacheBuilder.newBuilder()
      .expireAfterWrite(expireAfterWriteDuration.getValue(), expireAfterWriteDuration.getUnit())
      .refreshAfterWrite(
          refreshAfterWriteDuration.getValue(), refreshAfterWriteDuration.getUnit());
}
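// A sketch (hypothetical loader and durations) of how the pair of settings
// above typically behaves: once refreshAfterWrite elapses, the next read
// triggers a reload while other readers keep receiving the stale value --
// asynchronously only if CacheLoader.reload is overridden, as in the
// CacheDataSource snippet earlier; once expireAfterWrite elapses, reads
// block until a fresh value is loaded.
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;

class RefreshingCacheSketch {
  private final LoadingCache<String, String> config =
      CacheBuilder.newBuilder()
          .refreshAfterWrite(1, TimeUnit.MINUTES) // reload lazily, serve stale meanwhile
          .expireAfterWrite(10, TimeUnit.MINUTES) // hard deadline for staleness
          .build(
              new CacheLoader<String, String>() {
                @Override
                public String load(String key) {
                  return "value-for-" + key; // stand-in for a remote fetch
                }
              });
}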
/**
 * Default constructor. Specify the size of the blocks, number of blocks, and the SlabCache this
 * cache will be assigned to.
 *
 * @param blockSize the size of each block, in bytes
 * @param numBlocks the number of blocks of blockSize this cache will hold
 * @param master the SlabCache this SingleSizeCache is assigned to
 */
public SingleSizeCache(int blockSize, int numBlocks, SlabItemActionWatcher master) {
  this.blockSize = blockSize;
  this.numBlocks = numBlocks;
  backingStore = new Slab(blockSize, numBlocks);
  this.stats = new CacheStats();
  this.actionWatcher = master;
  this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
  this.timeSinceLastAccess = new AtomicLong();

  // This evictionListener is called whenever the cache automatically evicts something.
  RemovalListener<BlockCacheKey, CacheablePair> listener =
      new RemovalListener<BlockCacheKey, CacheablePair>() {
        @Override
        public void onRemoval(RemovalNotification<BlockCacheKey, CacheablePair> notification) {
          if (!notification.wasEvicted()) {
            // Only process removals by eviction, not by replacement or explicit removal.
            return;
          }
          CacheablePair value = notification.getValue();
          timeSinceLastAccess.set(System.nanoTime() - value.recentlyAccessed.get());
          stats.evict();
          doEviction(notification.getKey(), value);
        }
      };

  backingMap =
      CacheBuilder.newBuilder()
          .maximumSize(numBlocks - 1)
          .removalListener(listener)
          .<BlockCacheKey, CacheablePair>build()
          .asMap();
}
/**
 * Identifies a portlet definition.
 *
 * @author Eric Dalquist
 * @version $Revision$
 */
class PortletDefinitionIdImpl extends AbstractObjectId implements IPortletDefinitionId {
  private static final long serialVersionUID = 1L;

  private static final LoadingCache<Long, IPortletDefinitionId> ID_CACHE =
      CacheBuilder.newBuilder()
          .maximumSize(1000)
          .softValues()
          .build(
              new CacheLoader<Long, IPortletDefinitionId>() {
                @Override
                public IPortletDefinitionId load(Long key) throws Exception {
                  return new PortletDefinitionIdImpl(key);
                }
              });

  public static IPortletDefinitionId create(long portletDefinitionId) {
    return ID_CACHE.getUnchecked(portletDefinitionId);
  }

  private final long longId;

  private PortletDefinitionIdImpl(Long portletDefinitionId) {
    super(portletDefinitionId.toString());
    this.longId = portletDefinitionId;
  }

  @Override
  public long getLongId() {
    return this.longId;
  }
}
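// A closing sketch (illustrative names) contrasting the two LoadingCache read
// styles used across these snippets: get() surfaces loader failures as a
// checked ExecutionException, while getUnchecked() wraps them in an unchecked
// UncheckedExecutionException and is idiomatic when the loader cannot fail,
// as in PortletDefinitionIdImpl above.
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.ExecutionException;

class ReadStyleSketch {
  private final LoadingCache<Long, String> ids =
      CacheBuilder.newBuilder()
          .maximumSize(1000)
          .build(
              new CacheLoader<Long, String>() {
                @Override
                public String load(Long key) {
                  return Long.toString(key); // cannot fail
                }
              });

  String checkedRead(Long key) throws ExecutionException {
    return ids.get(key); // caller must handle loader failures
  }

  String uncheckedRead(Long key) {
    return ids.getUnchecked(key); // safe here: the loader never throws
  }
}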