/**
 * Creates {@code numberOfNodes} clustered cache managers whose transport is tagged with
 * distinct machine/rack/site ids (making each node topology-aware), and returns their
 * default caches. Indexing is always enabled; the directory provider is either RAM-based
 * or the Infinispan directory, depending on {@code isRamDirectoryProvider}.
 *
 * @param isRamDirectoryProvider true for the in-memory "ram" provider, false for the
 *        "infinispan" provider (which also enables in-memory state transfer when clustered)
 * @return one started default cache per created node
 */
public static List createTopologyAwareCacheNodes(
    int numberOfNodes,
    CacheMode cacheMode,
    boolean transactional,
    boolean indexLocalOnly,
    boolean isRamDirectoryProvider) {
  ConfigurationBuilder cacheConfig =
      AbstractCacheTest.getDefaultClusteredCacheConfig(cacheMode, transactional);
  cacheConfig.indexing().enable().indexLocalOnly(indexLocalOnly);
  if (!isRamDirectoryProvider) {
    // Store the Lucene index inside Infinispan itself; exclusive index use must be off
    // because several nodes share the same index caches.
    cacheConfig
        .indexing()
        .addProperty(
            "hibernate.search.default.indexmanager",
            "org.infinispan.query.indexmanager.InfinispanIndexManager")
        .addProperty("default.directory_provider", "infinispan")
        .addProperty("hibernate.search.default.exclusive_index_use", "false")
        .addProperty("lucene_version", "LUCENE_36");
    if (cacheMode.isClustered()) {
      cacheConfig.clustering().stateTransfer().fetchInMemoryState(true);
    }
  } else {
    cacheConfig
        .indexing()
        .addProperty("default.directory_provider", "ram")
        .addProperty("lucene_version", "LUCENE_CURRENT");
  }
  List nodeCaches = new ArrayList();
  for (int node = 0; node < numberOfNodes; node++) {
    // Give every node a unique topology position so topology-aware consistent hashing
    // has something to distinguish them by.
    GlobalConfigurationBuilder globalBuilder = GlobalConfigurationBuilder.defaultClusteredBuilder();
    globalBuilder
        .transport()
        .machineId("a" + node)
        .rackId("b" + node)
        .siteId("test" + node);
    EmbeddedCacheManager manager =
        TestCacheManagerFactory.createClusteredCacheManager(globalBuilder, cacheConfig);
    nodeCaches.add(manager.getCache());
  }
  return nodeCaches;
}
/**
 * Returns a canonical default {@link Configuration} for the requested {@link CacheMode}.
 *
 * <p>On first use this lazily builds (under the class lock, hence {@code synchronized})
 * a per-mode lookup table from the DEFAULTS holder: the holder's default configuration,
 * then every named configuration (keyed by its cache mode), and finally a synthesized
 * entry for any mode not covered, derived from the default configuration with only the
 * cache mode overridden. Subsequent calls just read the cached table.
 */
public static synchronized Configuration getDefaultConfiguration(CacheMode cacheMode) {
  if (defaults != null) {
    return defaults.get(cacheMode);
  }
  ConfigurationBuilderHolder holder = load(DEFAULTS);
  Configuration baseline = holder.getDefaultConfigurationBuilder().build();
  Map<CacheMode, Configuration> byMode = new EnumMap<>(CacheMode.class);
  byMode.put(baseline.clustering().cacheMode(), baseline);
  // Named configurations may override the entry for their own mode.
  for (ConfigurationBuilder named : holder.getNamedConfigurationBuilders().values()) {
    Configuration built = named.build();
    byMode.put(built.clustering().cacheMode(), built);
  }
  // Fill the gaps: any mode without an explicit configuration inherits the baseline
  // with just the cache mode switched.
  for (CacheMode mode : CacheMode.values()) {
    if (!byMode.containsKey(mode)) {
      byMode.put(
          mode, new ConfigurationBuilder().read(baseline).clustering().cacheMode(mode).build());
    }
  }
  defaults = byMode;
  return defaults.get(cacheMode);
}
@Override public Map<String, Number> call() throws Exception { Map<String, Number> map = new HashMap<>(); Stats stats = remoteCache.getStats(); map.put(AVERAGE_READ_TIME, stats.getAverageReadTime()); map.put(AVERAGE_WRITE_TIME, stats.getAverageWriteTime()); map.put(AVERAGE_REMOVE_TIME, stats.getAverageRemoveTime()); map.put(EVICTIONS, stats.getEvictions()); map.put(HITS, stats.getHits()); map.put(MISSES, stats.getMisses()); final CacheMode cacheMode = getCacheMode(remoteCache); // for replicated caches, we don't need to send the number of entries since it is the same in // all the nodes. if (cacheMode.isDistributed()) { map.put(NUMBER_OF_ENTRIES, stats.getCurrentNumberOfEntries() / numOwners()); } else if (!cacheMode.isReplicated()) { map.put(NUMBER_OF_ENTRIES, stats.getCurrentNumberOfEntries()); } map.put(STORES, stats.getStores()); map.put(REMOVE_HITS, stats.getRemoveHits()); map.put(REMOVE_MISSES, stats.getRemoveMisses()); map.put(TIME_SINCE_START, stats.getTimeSinceStart()); LockManager lockManager = remoteCache.getLockManager(); map.put(NUMBER_OF_LOCKS_HELD, lockManager.getNumberOfLocksHeld()); // number of locks available is not exposed through the LockManager interface map.put(NUMBER_OF_LOCKS_AVAILABLE, 0); // invalidations InvalidationInterceptor invalidationInterceptor = getFirstInterceptorWhichExtends(remoteCache, InvalidationInterceptor.class); if (invalidationInterceptor != null) { map.put(INVALIDATIONS, invalidationInterceptor.getInvalidations()); } else { map.put(INVALIDATIONS, 0); } // passivations PassivationManager pManager = remoteCache.getComponentRegistry().getComponent(PassivationManager.class); if (pManager != null) { map.put(PASSIVATIONS, pManager.getPassivations()); } else { map.put(PASSIVATIONS, 0); } // activations ActivationManager aManager = remoteCache.getComponentRegistry().getComponent(ActivationManager.class); if (pManager != null) { map.put(ACTIVATIONS, aManager.getActivationCount()); } else { map.put(ACTIVATIONS, 0); } // cache 
loaders ActivationInterceptor aInterceptor = getFirstInterceptorWhichExtends(remoteCache, ActivationInterceptor.class); if (aInterceptor != null) { map.put(CACHE_LOADER_LOADS, aInterceptor.getCacheLoaderLoads()); map.put(CACHE_LOADER_MISSES, aInterceptor.getCacheLoaderMisses()); } else { map.put(CACHE_LOADER_LOADS, 0); map.put(CACHE_LOADER_MISSES, 0); } // cache store CacheWriterInterceptor interceptor = getFirstInterceptorWhichExtends(remoteCache, CacheWriterInterceptor.class); if (interceptor != null) { map.put(CACHE_WRITER_STORES, interceptor.getWritesToTheStores()); } else { map.put(CACHE_WRITER_STORES, 0); } return map; }
/** * Adds the listener using the provided filter converter and class loader. The provided builder is * used to add additional configuration including (clustered, onlyPrimary & identifier) which can * be used after this method is completed to see what values were used in the addition of this * listener * * @param listener * @param filter * @param converter * @param classLoader * @param <C> * @return */ public <C> void addListener( Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, ClassLoader classLoader) { Listener l = testListenerClassValidity(listener.getClass()); UUID generatedId = UUID.randomUUID(); CacheMode cacheMode = config.clustering().cacheMode(); CacheInvocationBuilder builder = new CacheInvocationBuilder(); builder .setIncludeCurrentState(l.includeCurrentState()) .setClustered(l.clustered()) .setOnlyPrimary( l.clustered() ? (cacheMode.isDistributed() ? true : false) : l.primaryOnly()) .setFilter(filter) .setConverter(converter) .setIdentifier(generatedId) .setClassLoader(classLoader); boolean foundMethods = validateAndAddListenerInvocation(listener, builder); if (foundMethods && l.clustered()) { if (cacheMode.isInvalidation()) { throw new UnsupportedOperationException( "Cluster listeners cannot be used with Invalidation Caches!"); } else if (cacheMode.isDistributed()) { clusterListenerIDs.put(listener, generatedId); EmbeddedCacheManager manager = cache.getCacheManager(); Address ourAddress = manager.getAddress(); List<Address> members = manager.getMembers(); // If we are the only member don't even worry about sending listeners if (members != null && members.size() > 1) { DistributedExecutionCompletionService decs = new DistributedExecutionCompletionService(distExecutorService); if (log.isTraceEnabled()) { log.tracef( "Replicating cluster listener to other nodes %s for cluster listener with id %s", members, generatedId); } Callable callable = new ClusterListenerReplicateCallable( 
generatedId, ourAddress, filter, converter, l.sync()); for (Address member : members) { if (!member.equals(ourAddress)) { decs.submit(member, callable); } } for (int i = 0; i < members.size() - 1; ++i) { try { decs.take().get(); } catch (InterruptedException e) { throw new CacheListenerException(e); } catch (ExecutionException e) { throw new CacheListenerException(e); } } int extraCount = 0; // If anyone else joined since we sent these we have to send the listeners again, since // they may have queried // before the other nodes got the new listener List<Address> membersAfter = manager.getMembers(); for (Address member : membersAfter) { if (!members.contains(member) && !member.equals(ourAddress)) { if (log.isTraceEnabled()) { log.tracef( "Found additional node %s that joined during replication of cluster listener with id %s", member, generatedId); } extraCount++; decs.submit(member, callable); } } for (int i = 0; i < extraCount; ++i) { try { decs.take().get(); } catch (InterruptedException e) { throw new CacheListenerException(e); } catch (ExecutionException e) { throw new CacheListenerException(e); } } } } } // If we have a segment listener handler, it means we have to do initial state QueueingSegmentListener handler = segmentHandler.remove(generatedId); if (handler != null) { if (log.isTraceEnabled()) { log.tracef("Listener %s requests initial state for cache", generatedId); } try (CloseableIterator<CacheEntry<K, C>> iterator = entryRetriever.retrieveEntries( filter == null ? null : new CacheEventFilterAsKeyValueFilter(filter), converter == null ? 
null : new CacheEventConverterAsConverter(converter), null, handler)) { while (iterator.hasNext()) { CacheEntry<K, C> entry = iterator.next(); // Mark the key as processed and see if we had a concurrent update Object value = handler.markKeyAsProcessing(entry.getKey()); if (value == BaseQueueingSegmentListener.REMOVED) { // Don't process this value if we had a concurrent remove continue; } raiseEventForInitialTransfer(generatedId, entry, builder.isClustered()); handler.notifiedKey(entry.getKey()); } } Set<CacheEntry> entries = handler.findCreatedEntries(); for (CacheEntry entry : entries) { raiseEventForInitialTransfer(generatedId, entry, builder.isClustered()); } if (log.isTraceEnabled()) { log.tracef("Listener %s initial state for cache completed", generatedId); } handler.transferComplete(); } }
/**
 * Populates the given {@code ConfigurationBuilder} from the operation ModelNode.
 *
 * <p>Reads clustering mode, indexing, async replication queue, sync timeout, hashing,
 * L1, locking, transactions, batching, eviction, expiration and (at most one) cache
 * store out of the management model and translates each defined attribute into the
 * corresponding builder call. Undefined attributes are left at the builder's defaults.
 *
 * @param cache ModelNode representing cache configuration
 * @param builder ConfigurationBuilder object to add data to
 * @param dependencies accumulator for service dependencies contributed by the cache store
 */
void processModelNode(
    ModelNode cache, ConfigurationBuilder builder, List<Dependency<?>> dependencies) {
  String cacheName = cache.require(ModelKeys.NAME).asString();
  builder.classLoader(this.getClass().getClassLoader());
  // NAME and CACHE_MODE are required — require() throws if they are missing.
  builder
      .clustering()
      .cacheMode(CacheMode.valueOf(cache.require(ModelKeys.CACHE_MODE).asString()));
  if (cache.hasDefined(ModelKeys.INDEXING)) {
    Indexing indexing = Indexing.valueOf(cache.get(ModelKeys.INDEXING).asString());
    builder.indexing().enabled(indexing.isEnabled()).indexLocalOnly(indexing.isLocalOnly());
  }
  if (cache.hasDefined(ModelKeys.QUEUE_SIZE)) {
    // A queue size of 0 (or less) disables the replication queue entirely.
    int size = cache.get(ModelKeys.QUEUE_SIZE).asInt();
    builder.clustering().async().replQueueMaxElements(size).useReplQueue(size > 0);
  }
  if (cache.hasDefined(ModelKeys.QUEUE_FLUSH_INTERVAL)) {
    builder
        .clustering()
        .async()
        .replQueueInterval(cache.get(ModelKeys.QUEUE_FLUSH_INTERVAL).asLong());
  }
  if (cache.hasDefined(ModelKeys.REMOTE_TIMEOUT)) {
    builder.clustering().sync().replTimeout(cache.get(ModelKeys.REMOTE_TIMEOUT).asLong());
  }
  if (cache.hasDefined(ModelKeys.OWNERS)) {
    builder.clustering().hash().numOwners(cache.get(ModelKeys.OWNERS).asInt());
  }
  if (cache.hasDefined(ModelKeys.VIRTUAL_NODES)) {
    builder.clustering().hash().numVirtualNodes(cache.get(ModelKeys.VIRTUAL_NODES).asInt());
  }
  if (cache.hasDefined(ModelKeys.L1_LIFESPAN)) {
    // A non-positive lifespan is the model's way of expressing "L1 off".
    long lifespan = cache.get(ModelKeys.L1_LIFESPAN).asLong();
    if (lifespan > 0) {
      builder.clustering().l1().enable().lifespan(lifespan);
    } else {
      builder.clustering().l1().disable();
    }
  }
  // locking is a child resource
  if (cache.hasDefined(ModelKeys.LOCKING)
      && cache.get(ModelKeys.LOCKING, ModelKeys.LOCKING_NAME).isDefined()) {
    ModelNode locking = cache.get(ModelKeys.LOCKING, ModelKeys.LOCKING_NAME);
    if (locking.hasDefined(ModelKeys.ISOLATION)) {
      builder
          .locking()
          .isolationLevel(IsolationLevel.valueOf(locking.get(ModelKeys.ISOLATION).asString()));
    }
    if (locking.hasDefined(ModelKeys.STRIPING)) {
      builder.locking().useLockStriping(locking.get(ModelKeys.STRIPING).asBoolean());
    }
    if (locking.hasDefined(ModelKeys.ACQUIRE_TIMEOUT)) {
      builder.locking().lockAcquisitionTimeout(locking.get(ModelKeys.ACQUIRE_TIMEOUT).asLong());
    }
    if (locking.hasDefined(ModelKeys.CONCURRENCY_LEVEL)) {
      builder.locking().concurrencyLevel(locking.get(ModelKeys.CONCURRENCY_LEVEL).asInt());
    }
  }
  // Defaults when no transaction child resource is present.
  TransactionMode txMode = TransactionMode.NONE;
  LockingMode lockingMode = LockingMode.OPTIMISTIC;
  // locking is a child resource
  if (cache.hasDefined(ModelKeys.TRANSACTION)
      && cache.get(ModelKeys.TRANSACTION, ModelKeys.TRANSACTION_NAME).isDefined()) {
    ModelNode transaction = cache.get(ModelKeys.TRANSACTION, ModelKeys.TRANSACTION_NAME);
    if (transaction.hasDefined(ModelKeys.STOP_TIMEOUT)) {
      builder.transaction().cacheStopTimeout(transaction.get(ModelKeys.STOP_TIMEOUT).asLong());
    }
    if (transaction.hasDefined(ModelKeys.MODE)) {
      txMode = TransactionMode.valueOf(transaction.get(ModelKeys.MODE).asString());
    }
    if (transaction.hasDefined(ModelKeys.LOCKING)) {
      lockingMode = LockingMode.valueOf(transaction.get(ModelKeys.LOCKING).asString());
    }
  }
  // Synchronization (rather than full XA) is used whenever XA is not enabled.
  builder
      .transaction()
      .transactionMode(txMode.getMode())
      .lockingMode(lockingMode)
      .useSynchronization(!txMode.isXAEnabled())
      .recovery()
      .enabled(txMode.isRecoveryEnabled());
  if (txMode.isRecoveryEnabled()) {
    // Recovery requires both commit and rollback to be synchronous.
    builder.transaction().syncCommitPhase(true).syncRollbackPhase(true);
  }
  if (cache.hasDefined(ModelKeys.BATCHING)) {
    // Batching forces the cache into transactional mode regardless of txMode above.
    InvocationBatchingConfigurationBuilder batchingBuilder =
        builder
            .transaction()
            .transactionMode(org.infinispan.transaction.TransactionMode.TRANSACTIONAL)
            .invocationBatching();
    if (cache.get(ModelKeys.BATCHING).asBoolean()) {
      batchingBuilder.enable();
    } else {
      batchingBuilder.disable();
    }
  }
  // eviction is a child resource
  if (cache.hasDefined(ModelKeys.EVICTION)
      && cache.get(ModelKeys.EVICTION, ModelKeys.EVICTION_NAME).isDefined()) {
    ModelNode eviction = cache.get(ModelKeys.EVICTION, ModelKeys.EVICTION_NAME);
    if (eviction.hasDefined(ModelKeys.STRATEGY)) {
      builder
          .eviction()
          .strategy(EvictionStrategy.valueOf(eviction.get(ModelKeys.STRATEGY).asString()));
    }
    if (eviction.hasDefined(ModelKeys.MAX_ENTRIES)) {
      builder.eviction().maxEntries(eviction.get(ModelKeys.MAX_ENTRIES).asInt());
    }
  }
  // expiration is a child resource
  if (cache.hasDefined(ModelKeys.EXPIRATION)
      && cache.get(ModelKeys.EXPIRATION, ModelKeys.EXPIRATION_NAME).isDefined()) {
    ModelNode expiration = cache.get(ModelKeys.EXPIRATION, ModelKeys.EXPIRATION_NAME);
    if (expiration.hasDefined(ModelKeys.MAX_IDLE)) {
      builder.expiration().maxIdle(expiration.get(ModelKeys.MAX_IDLE).asLong());
    }
    if (expiration.hasDefined(ModelKeys.LIFESPAN)) {
      builder.expiration().lifespan(expiration.get(ModelKeys.LIFESPAN).asLong());
    }
    if (expiration.hasDefined(ModelKeys.INTERVAL)) {
      builder.expiration().wakeUpInterval(expiration.get(ModelKeys.INTERVAL).asLong());
    }
  }
  // At most one cache store is supported; findStoreKey locates which store type is defined.
  String storeKey = this.findStoreKey(cache);
  if (storeKey != null) {
    ModelNode store = this.getStoreModelNode(cache);
    // NOTE(review): defaults differ by attribute — shared/preload default to false while
    // passivation/fetch-state/purge default to true; presumably this mirrors the subsystem
    // schema defaults — confirm against the resource definition.
    builder
        .loaders()
        .shared(
            store.hasDefined(ModelKeys.SHARED) ? store.get(ModelKeys.SHARED).asBoolean() : false)
        .preload(
            store.hasDefined(ModelKeys.PRELOAD)
                ? store.get(ModelKeys.PRELOAD).asBoolean()
                : false)
        .passivation(
            store.hasDefined(ModelKeys.PASSIVATION)
                ? store.get(ModelKeys.PASSIVATION).asBoolean()
                : true);
    LoaderConfigurationBuilder storeBuilder =
        builder
            .loaders()
            .addCacheLoader()
            .fetchPersistentState(
                store.hasDefined(ModelKeys.FETCH_STATE)
                    ? store.get(ModelKeys.FETCH_STATE).asBoolean()
                    : true)
            .purgeOnStartup(
                store.hasDefined(ModelKeys.PURGE) ? store.get(ModelKeys.PURGE).asBoolean() : true)
            .purgeSynchronously(true);
    storeBuilder
        .singletonStore()
        .enabled(
            store.hasDefined(ModelKeys.SINGLETON)
                ? store.get(ModelKeys.SINGLETON).asBoolean()
                : false);
    // Delegates store-type-specific configuration and collects service dependencies.
    this.buildCacheStore(storeBuilder, cacheName, store, storeKey, dependencies);
  }
}