@Override public Object perform(InvocationContext ctx) throws Throwable { final boolean trace = log.isTraceEnabled(); LogFactory.pushNDC(cacheName, trace); try { switch (type) { case GET_TRANSACTIONS: return stateProvider.getTransactionsForSegments(getOrigin(), topologyId, segments); case START_STATE_TRANSFER: stateProvider.startOutboundTransfer(getOrigin(), topologyId, segments); // return a non-null value to ensure it will reach back to originator wrapped in a // SuccessfulResponse (a null would not be sent back) return true; case CANCEL_STATE_TRANSFER: stateProvider.cancelOutboundTransfer(getOrigin(), topologyId, segments); // originator does not care about the result, so we can return null return null; case GET_CACHE_LISTENERS: return stateProvider.getClusterListenersToInstall(); default: throw new CacheException("Unknown state request command type: " + type); } } finally { LogFactory.popNDC(trace); } }
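// The command above brackets its whole body with LogFactory.pushNDC(...)/LogFactory.popNDC(...).
// This is the classic nested-diagnostic-context pattern: tag every log line the thread emits with
// the cache name, and always pop in a finally block so the tag cannot leak to unrelated work on a
// pooled thread. A minimal sketch against the plain log4j 1.x NDC API (org.apache.log4j.NDC),
// independent of the Infinispan wrapper:
import org.apache.log4j.NDC;

public class NdcExample {
  public void perform(String cacheName) {
    NDC.push(cacheName);
    try {
      // ... do the work; every log line emitted from this thread now carries cacheName ...
    } finally {
      NDC.pop(); // must always run, or the stale context bleeds into the next task
    }
  }
}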
@Override public void run() { LogFactory.pushNDC(cacheName, trace); try { while (true) { try { Modification take = changesDeque.take(); if (take == QUIT_SIGNAL) { lastAsyncProcessorShutsDownExecutor = true; ensureMoreWorkIsHandled(); return; } else { handleSafely(take); } } catch (InterruptedException e) { log.asyncStoreCoordinatorInterrupted(e); return; } catch (Throwable t) { log.unexpectedErrorInAsyncStoreCoordinator(t); } } } finally { LogFactory.popNDC(trace); } }
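// The coordinator above shuts down via a "poison pill": a sentinel (QUIT_SIGNAL) is placed on the
// queue and recognized by identity, letting the consumer drain everything queued before it. A
// self-contained sketch of the same pattern; all names here are hypothetical, not Infinispan API:
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PoisonPillConsumer implements Runnable {
  private static final Runnable QUIT = () -> { };
  private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();

  public void submit(Runnable task) throws InterruptedException { queue.put(task); }

  public void shutdown() throws InterruptedException { queue.put(QUIT); }

  @Override
  public void run() {
    while (true) {
      try {
        Runnable task = queue.take();
        if (task == QUIT) return; // identity check, exactly like take == QUIT_SIGNAL above
        task.run();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      } catch (Throwable t) {
        // log and keep draining: one failing task must not kill the coordinator
      }
    }
  }
}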
public static class EntryCreatedInterceptor extends BaseCustomInterceptor { Log log = LogFactory.getLog(EntryCreatedInterceptor.class); final CountDownLatch latch; volatile boolean assertKeySet; private EntryCreatedInterceptor(CountDownLatch latch) { this.latch = latch; } @Override public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable { // First execute the operation itself Object ret = super.visitPutKeyValueCommand(ctx, command); assertKeySet = (cache.keySet().size() == 1); // After entry has been committed to the container log.info("Cache entry created, now check in different thread"); latch.countDown(); // Force a bit of delay in the listener TestingUtil.sleepThread(3000); return ret; } }
private static class HangingCacheManager extends AbstractDelegatingEmbeddedCacheManager { static Log log = LogFactory.getLog(HangingCacheManager.class); final CountDownLatch latch; public HangingCacheManager(EmbeddedCacheManager delegate, CountDownLatch latch) { super(delegate); this.latch = latch; } @Override public <K, V> Cache<K, V> getCache(String cacheName) { log.info("Retrieve cache from hanging cache manager"); // TODO: Hacky but it's the easiest thing to do - consider ByteMan // ByteMan apparently supports testng since 1.5.1 but no clear // example out there, with more time it should be considered. String threadName = Thread.currentThread().getName(); if (threadName.startsWith("HotRod")) { log.info("Thread is a HotRod server worker thread, so force wait"); try { // Wait a max of 3 minutes, otherwise the socket timeout won't kick in latch.await(180, TimeUnit.SECONDS); log.info("Wait finished, return the cache"); return super.getCache(cacheName); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new CacheException(e); } } return super.getCache(cacheName); } }
/** * CatchThrowableProxy is a wrapper around an interface that does not allow any exception to be * thrown when invoking methods on that interface. All exceptions are logged but not propagated to * the caller. */ static class CatchThrowableProxy implements java.lang.reflect.InvocationHandler { private static final Log log = LogFactory.getLog(CatchThrowableProxy.class); private final Object obj; public static Object newInstance(Object obj) { return java.lang.reflect.Proxy.newProxyInstance( obj.getClass().getClassLoader(), obj.getClass().getInterfaces(), new CatchThrowableProxy(obj)); } private CatchThrowableProxy(Object obj) { this.obj = obj; } public Object invoke(Object proxy, Method m, Object[] args) throws Throwable { Object result = null; try { result = m.invoke(obj, args); } catch (Throwable t) { log.ignoringException(m.getName(), t.getMessage(), t.getCause()); } return result; } }
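// A hypothetical usage sketch for CatchThrowableProxy: wrap a listener so a throwing
// implementation cannot break its caller. The Listener interface is invented for illustration.
// Note that the proxy returns null when the target throws, so it is only safe for interfaces
// whose methods return void or reference types (a primitive return would unbox null and fail).
public class CatchThrowableProxyExample {
  interface Listener {
    void onEvent(String event);
  }

  public static void main(String[] args) {
    Listener unsafe = new Listener() {
      @Override
      public void onEvent(String event) {
        throw new IllegalStateException("boom");
      }
    };
    Listener safe = (Listener) CatchThrowableProxy.newInstance(unsafe);
    safe.onEvent("ignored"); // the exception is logged by the proxy, not propagated
  }
}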
public static class CustomPojo implements Serializable { static final Log log = LogFactory.getLog(CustomPojo.class); private String name; public CustomPojo(String name) { this.name = name; } @Override public boolean equals(Object obj) { if (obj == null) { log.debug("null -> false"); return false; } log.debug(obj.getClass()); if (getClass() != obj.getClass()) { log.debug("class not same -> false"); return false; } final CustomPojo other = (CustomPojo) obj; return this.name.equals(other.name); } @Override public int hashCode() { return name.hashCode(); } }
/** * // TODO: Document this * * @author Pedro Ruivo * @since 4.0 */ public class TotalOrderVersionedCommitContextEntries extends NonVersionedCommitContextEntries { private final Log log = LogFactory.getLog(TotalOrderVersionedCommitContextEntries.class); private VersionGenerator versionGenerator; @Inject public final void injectVersionGenerator(VersionGenerator versionGenerator) { this.versionGenerator = versionGenerator; } @Override protected Log getLog() { return log; } @Override protected void commitContextEntry( CacheEntry entry, InvocationContext ctx, boolean skipOwnershipCheck) { if (ctx.isInTxScope()) { ClusteredRepeatableReadEntry clusterMvccEntry = (ClusteredRepeatableReadEntry) entry; EntryVersion existingVersion = clusterMvccEntry.getVersion(); EntryVersion newVersion; if (existingVersion == null) { newVersion = versionGenerator.generateNew(); } else { newVersion = versionGenerator.increment((IncrementableEntryVersion) existingVersion); } commitEntry(entry, newVersion, skipOwnershipCheck); } else { // This could be a state transfer call! commitEntry(entry, entry.getVersion(), skipOwnershipCheck); } } }
/** * {@link CacheResult} interceptor implementation. This interceptor uses the algorithm * described in JSR-107. * * <p>When a method annotated with {@link CacheResult} is invoked the following must occur. * * <ol> * <li>Generate a key based on InvocationContext using the specified {@linkplain * CacheKeyGenerator}. * <li>Use this key to look up the entry in the cache. * <li>If an entry is found return it as the result and do not call the annotated method. * <li>If no entry is found invoke the method. * <li>Use the result to populate the cache with this key/result pair. * </ol> * * There is a skipGet attribute which, if set to true, will cause the method body to always be invoked * and the return value put into the cache. The cache is not checked for the key before method body * invocation, skipping steps 2 and 3 from the list above. This can be used for annotating methods * that do a cache.put() with no other consequences. * * @author Kevin Pollet <*****@*****.**> (C) 2011 SERLI */ @Interceptor public class CacheResultInterceptor implements Serializable { private static final long serialVersionUID = 5275055951121834315L; private static final Log log = LogFactory.getLog(CacheResultInterceptor.class, Log.class); private final CacheResolver cacheResolver; private final CacheKeyInvocationContextFactory contextFactory; @Inject public CacheResultInterceptor( CacheResolver cacheResolver, CacheKeyInvocationContextFactory contextFactory) { this.cacheResolver = cacheResolver; this.contextFactory = contextFactory; } @AroundInvoke public Object cacheResult(InvocationContext invocationContext) throws Exception { if (log.isTraceEnabled()) { log.tracef("Interception of method named '%s'", invocationContext.getMethod().getName()); } final CacheKeyInvocationContext<CacheResult> cacheKeyInvocationContext = contextFactory.getCacheKeyInvocationContext(invocationContext); final CacheKeyGenerator cacheKeyGenerator = cacheKeyInvocationContext .unwrap(CacheKeyInvocationContextImpl.class) .getCacheKeyGenerator(); final CacheResult cacheResult = cacheKeyInvocationContext.getCacheAnnotation(); final CacheKey cacheKey = cacheKeyGenerator.generateCacheKey(cacheKeyInvocationContext); final Cache<CacheKey, Object> cache = cacheResolver.resolveCache(cacheKeyInvocationContext); Object result = null; if (!cacheResult.skipGet()) { result = cache.get(cacheKey); if (log.isTraceEnabled()) { log.tracef( "Entry with value '%s' has been found in cache '%s' with key '%s'", result, cache.getName(), cacheKey); } } if (result == null) { result = invocationContext.proceed(); if (result != null) { cache.put(cacheKey, result); if (log.isTraceEnabled()) { log.tracef( "Value '%s' cached in cache '%s' with key '%s'", result, cache.getName(), cacheKey); } } } return result; } }
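// A minimal JSR-107 usage sketch for the interceptor above: the first call with a given argument
// misses the cache and runs the method body; later calls with the same generated key are answered
// from the cache. Class, method, and cache names are illustrative, not taken from the source.
import javax.cache.annotation.CacheResult;

public class QuoteService {
  @CacheResult(cacheName = "quotes")
  public String findQuote(String symbol) {
    // reached only on a cache miss (or on every call when skipGet = true)
    return expensiveLookup(symbol);
  }

  private String expensiveLookup(String symbol) {
    return "quote-for-" + symbol;
  }
}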
/** * @author [email protected] * @since 4.0 */ public class EvictCommand extends RemoveCommand implements LocalCommand { private static final Log log = LogFactory.getLog(EvictCommand.class); public EvictCommand( Object key, CacheNotifier notifier, Set<Flag> flags, CommandInvocationId commandInvocationId) { super(key, null, notifier, flags, null, commandInvocationId); } @Override public Object acceptVisitor(InvocationContext ctx, Visitor visitor) throws Throwable { return visitor.visitEvictCommand(ctx, this); } @Override public Object perform(InvocationContext ctx) throws Throwable { if (key == null) { throw new NullPointerException("Key is null!!"); } super.perform(ctx); return null; } @Override public void notify( InvocationContext ctx, Object value, Metadata previousMetadata, boolean isPre) { // Eviction has no notion of pre/post event since 4.2.0.ALPHA4. // EvictionManagerImpl.onEntryEviction() triggers both pre and post events // with non-null values, so we should do the same here as an ugly workaround. if (!isPre) { if (log.isTraceEnabled()) log.tracef("Notify eviction listeners for key=%", key); notifier.notifyCacheEntryEvicted(key, value, ctx, this); } } @Override public byte getCommandId() { return -1; // these are not meant for replication! } @Override public String toString() { return new StringBuilder() .append("EvictCommand{key=") .append(key) .append(", value=") .append(value) .append(", flags=") .append(flags) .append("}") .toString(); } }
/** * Interceptor that allows for waiting for a command to be invoked, blocking that command and * subsequently allowing that command to be released. * * @author William Burns * @since 6.0 */ public class BlockingInterceptor extends CommandInterceptor { private static final Log log = LogFactory.getLog(BlockingInterceptor.class); private final CyclicBarrier barrier; private final Class<? extends VisitableCommand> commandClass; private final boolean blockAfter; private final boolean originLocalOnly; private final AtomicBoolean suspended = new AtomicBoolean(); public BlockingInterceptor( CyclicBarrier barrier, Class<? extends VisitableCommand> commandClass, boolean blockAfter, boolean originLocalOnly) { this.barrier = barrier; this.commandClass = commandClass; this.blockAfter = blockAfter; this.originLocalOnly = originLocalOnly; } public void suspend(boolean s) { this.suspended.set(s); } private void blockIfNeeded(InvocationContext ctx, VisitableCommand command) throws BrokenBarrierException, InterruptedException { if (suspended.get()) { log.tracef("Suspended, not blocking command %s", command); return; } if (commandClass.equals(command.getClass()) && (!originLocalOnly || ctx.isOriginLocal())) { log.tracef("Blocking command %s (%s invocation)", command, blockAfter ? "after" : "before"); // The first await syncs with the main test thread barrier.await(); // Now we actually block until the main test thread lets us go barrier.await(); log.tracef("Command %s released", command); } else { log.tracef("Not blocking command %s", command); } } @Override protected Object handleDefault(InvocationContext ctx, VisitableCommand command) throws Throwable { try { if (!blockAfter) { blockIfNeeded(ctx, command); } return super.handleDefault(ctx, command); } finally { if (blockAfter) { blockIfNeeded(ctx, command); } } } }
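// A hypothetical test sketch for BlockingInterceptor: park a PutKeyValueCommand after it
// executes, make assertions while it is blocked, then release it. The barrier has two parties
// (test thread and command thread); fork(...) stands in for whatever async helper the test
// suite provides, so it is an assumption here.
public void testBlockedPut() throws Exception {
  CyclicBarrier barrier = new CyclicBarrier(2);
  BlockingInterceptor blocker =
      new BlockingInterceptor(barrier, PutKeyValueCommand.class, true, false);
  cache.getAdvancedCache().addInterceptor(blocker, 1);

  Future<Object> put = fork(() -> cache.put("k", "v"));
  barrier.await(10, TimeUnit.SECONDS); // first await: the command is now parked
  // ... assert whatever must hold while the command is blocked ...
  barrier.await(10, TimeUnit.SECONDS); // second await: release the command
  put.get(10, TimeUnit.SECONDS);
  blocker.suspend(true); // let any further commands pass through
}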
private static int detectVersion() { Log log = LogFactory.getLog(LuceneVersionDetector.class, Log.class); int version = 3; try { Class.forName( "org.apache.lucene.store.IOContext", true, LuceneVersionDetector.class.getClassLoader()); version = 4; } catch (ClassNotFoundException e) { // IOContext only exists in Lucene 4: fall back to version 3 } log.detectedLuceneVersion(version); return version; }
/** * A per-entry lock container for ReentrantLocks * * @author Manik Surtani * @since 4.0 */ public class ReentrantPerEntryLockContainer extends AbstractPerEntryLockContainer<VisibleOwnerRefCountingReentrantLock> { private static final Log log = LogFactory.getLog(ReentrantPerEntryLockContainer.class); @Override protected Log getLog() { return log; } public ReentrantPerEntryLockContainer(int concurrencyLevel, Equivalence<Object> keyEquivalence) { super(concurrencyLevel, keyEquivalence); } @Override protected VisibleOwnerRefCountingReentrantLock newLock() { return new VisibleOwnerRefCountingReentrantLock(); } @Override public boolean ownsLock(Object key, Object ignored) { ReentrantLock l = getLockFromMap(key); return l != null && l.isHeldByCurrentThread(); } @Override public boolean isLocked(Object key) { ReentrantLock l = getLockFromMap(key); return l != null && l.isLocked(); } private ReentrantLock getLockFromMap(Object key) { return locks.get(key); } @Override protected void unlock(VisibleOwnerRefCountingReentrantLock l, Object unused) { l.unlock(); } @Override protected boolean tryLock( VisibleOwnerRefCountingReentrantLock lock, long timeout, TimeUnit unit, Object unused) throws InterruptedException { return lock.tryLock(timeout, unit); } @Override protected void lock(VisibleOwnerRefCountingReentrantLock lock, Object lockOwner) { lock.lock(); } }
/** * LuceneUserThread: base class for threads that perform activities on the index, such as * searching, adding to the index, and deleting. * * @author Sanne Grinovero * @since 4.0 */ public abstract class LuceneUserThread implements Runnable { private static final Log log = LogFactory.getLog(LuceneUserThread.class); protected final Directory directory; protected final SharedState state; LuceneUserThread(Directory dir, SharedState state) { this.directory = dir; this.state = state; } @Override public final void run() { try { state.waitForStart(); } catch (InterruptedException e1) { state.errorManage(e1); return; } try { beforeLoop(); } catch (IOException e) { log.error("unexpected error", e); state.errorManage(e); } while (!state.needToQuit()) { try { testLoop(); } catch (Exception e) { log.error("unexpected error", e); state.errorManage(e); } } try { cleanup(); } catch (IOException e) { log.error("unexpected error", e); state.errorManage(e); } } protected void beforeLoop() throws IOException { // defaults to no operation } protected abstract void testLoop() throws IOException; protected void cleanup() throws IOException { // defaults to no operation } }
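// A minimal concrete subclass sketch: each testLoop() iteration runs one query against the shared
// Directory. Lucene 3-style APIs are assumed (IndexReader.open on a Directory, IndexSearcher over
// a reader); SharedState is the class already referenced by the base class above.
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;

public class SearchingThread extends LuceneUserThread {

  SearchingThread(Directory dir, SharedState state) {
    super(dir, state);
  }

  @Override
  protected void testLoop() throws IOException {
    IndexReader reader = IndexReader.open(directory);
    try {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs docs = searcher.search(new TermQuery(new Term("filename", "readme")), 10);
      // inspect docs.totalHits, update counters on the shared state, etc.
    } finally {
      reader.close();
    }
  }
}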
@Override public Object perform(InvocationContext ctx) throws Throwable { final boolean trace = log.isTraceEnabled(); LogFactory.pushNDC(cacheName, trace); stateTransferManager.waitForJoinToStart(); try { switch (type) { case APPLY_STATE: stateTransferManager.applyState(state, sender, viewId); return null; case APPLY_LOCKS: stateTransferManager.applyLocks(locks, sender, viewId); return null; default: throw new CacheException("Unknown rehash control command type " + type); } } catch (Throwable t) { log.exceptionHandlingCommand(this, t); return null; } finally { LogFactory.popNDC(trace); } }
public void run() { LogFactory.pushNDC(cacheName, trace); try { clearAllReadLock.lock(); try { innerRun(); } catch (Throwable t) { runAgainAfterWaiting = false; log.unexpectedErrorInAsyncProcessor(t); } finally { clearAllReadLock.unlock(); } if (runAgainAfterWaiting) { try { Thread.sleep(10); } catch (InterruptedException e) { // interrupted: skip the rest of the sleep, but still make sure all data gets stored } ensureMoreWorkIsHandled(); } } finally { LogFactory.popNDC(trace); } }
public final class DistributedTaskLifecycleService { private static final Log log = LogFactory.getLog(DistributedTaskLifecycleService.class); private static DistributedTaskLifecycleService service; private ServiceLoader<DistributedTaskLifecycle> loader; private DistributedTaskLifecycleService() { loader = ServiceLoader.load(DistributedTaskLifecycle.class); } public static synchronized DistributedTaskLifecycleService getInstance() { if (service == null) { service = new DistributedTaskLifecycleService(); } return service; } public <T> void onPreExecute(Callable<T> task) { try { Iterator<DistributedTaskLifecycle> i = loader.iterator(); while (i.hasNext()) { DistributedTaskLifecycle cl = i.next(); cl.onPreExecute(task); } } catch (ServiceConfigurationError serviceError) { log.errorReadingProperties( new IOException( "Could not properly load and instantiate DistributedTaskLifecycle service ", serviceError)); } } public <T> void onPostExecute(Callable<T> task) { try { Iterator<DistributedTaskLifecycle> i = loader.iterator(); while (i.hasNext()) { DistributedTaskLifecycle cl = i.next(); cl.onPostExecute(task); } } catch (ServiceConfigurationError serviceError) { log.errorReadingProperties( new IOException( "Could not properly load and instantiate DistributedTaskLifecycle service ", serviceError)); } } }
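// Lifecycle hooks are discovered through java.util.ServiceLoader, so providing one means
// implementing the interface and listing the implementation class in a provider-configuration
// file under META-INF/services/ named after the interface's fully qualified name. A hypothetical
// implementation (signatures follow the calls made by the service above):
import java.util.concurrent.Callable;

public class LoggingTaskLifecycle implements DistributedTaskLifecycle {
  @Override
  public <T> void onPreExecute(Callable<T> task) {
    System.out.println("About to execute " + task);
  }

  @Override
  public <T> void onPostExecute(Callable<T> task) {
    System.out.println("Finished executing " + task);
  }
}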
/** * DistributedLazyIterator. * * <p>Lazily iterates over a distributed query * * @author Israel Lacerra <*****@*****.**> * @since 5.1 */ public class DistributedLazyIterator<E> extends DistributedIterator<E> { private final UUID queryId; private final ExecutorService asyncExecutor; private final ClusteredQueryInvoker invoker; private static final Log log = LogFactory.getLog(DistributedLazyIterator.class); public DistributedLazyIterator( Sort sort, int fetchSize, int resultSize, int maxResults, int firstResult, UUID id, HashMap<UUID, ClusteredTopDocs> topDocsResponses, ExecutorService asyncExecutor, AdvancedCache<?, ?> cache) { super(sort, fetchSize, resultSize, maxResults, firstResult, topDocsResponses, cache); this.queryId = id; this.asyncExecutor = asyncExecutor; this.invoker = new ClusteredQueryInvoker(cache, asyncExecutor); } @Override public void close() { ClusteredQueryCommand killQuery = ClusteredQueryCommand.destroyLazyQuery(cache, queryId); try { invoker.broadcast(killQuery); } catch (Exception e) { log.error("Could not close the distributed iterator", e); } } @Override protected E fetchValue(int scoreIndex, ClusteredTopDocs topDoc) { Object value = null; try { value = invoker.getValue(scoreIndex, topDoc.getNodeAddress(), queryId); } catch (Exception e) { log.error("Error while trying to fetch the next value remotely", e); } return (E) value; } }
public static class PojoValue implements Externalizable { static AtomicBoolean holdUp = new AtomicBoolean(); Log log = LogFactory.getLog(PojoValue.class); volatile int value; public PojoValue() {} public PojoValue(int value) { this.value = value; } @Override public void writeExternal(ObjectOutput out) throws IOException { if (!holdUp.get()) { log.debug("In streaming..."); holdUp.compareAndSet(false, true); log.debug("Holding up..."); TestingUtil.sleepThread(1000); // Sleep for 1 second to hold up state transfer } out.writeInt(value); } @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { value = in.readInt(); } @Override public int hashCode() { return value + 31; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; PojoValue pojo = (PojoValue) o; return value == pojo.value; } }
/** * Rpc to obtain all in-doubt prepared transactions stored on remote nodes. A transaction is in * doubt if it is prepared and the node where it started has crashed. * * @author [email protected] * @since 5.0 */ public class GetInDoubtTransactionsCommand extends RecoveryCommand { private static final Log log = LogFactory.getLog(GetInDoubtTransactionsCommand.class); public static final int COMMAND_ID = 21; private GetInDoubtTransactionsCommand() { super(null); // For command id uniqueness test } public GetInDoubtTransactionsCommand(String cacheName) { super(cacheName); } @Override public List<Xid> perform(InvocationContext ctx) throws Throwable { List<Xid> localInDoubtTransactions = recoveryManager.getInDoubtTransactions(); log.tracef("Returning result %s", localInDoubtTransactions); return localInDoubtTransactions; } @Override public byte getCommandId() { return COMMAND_ID; } @Override public Object[] getParameters() { return Util.EMPTY_OBJECT_ARRAY; } @Override public void setParameters(int commandId, Object[] parameters) { if (commandId != COMMAND_ID) throw new IllegalStateException("Expected " + COMMAND_ID + " and received " + commandId); // No parameters } @Override public String toString() { return getClass().getSimpleName() + " { cacheName = " + cacheName + "}"; } }
/** * Rpc to obtain all in-doubt prepared transactions stored on remote nodes. A transaction is in * doubt if it is prepared and the node where it started has crashed. * * @author [email protected] * @since 5.0 */ public class GetInDoubtTransactionsCommand extends RecoveryCommand { private static Log log = LogFactory.getLog(GetInDoubtTransactionsCommand.class); public static final int COMMAND_ID = Ids.GET_IN_DOUBT_TX_COMMAND; public GetInDoubtTransactionsCommand() {} public GetInDoubtTransactionsCommand(String cacheName) { this.cacheName = cacheName; } @Override public List<Xid> perform(InvocationContext ctx) throws Throwable { List<Xid> localInDoubtTransactions = recoveryManager.getLocalInDoubtTransactions(); if (log.isTraceEnabled()) log.tracef("Returning result %s", localInDoubtTransactions); return localInDoubtTransactions; } @Override public byte getCommandId() { return COMMAND_ID; } @Override public Object[] getParameters() { return new Object[] {cacheName}; } @Override public void setParameters(int commandId, Object[] parameters) { if (commandId != COMMAND_ID) throw new IllegalStateException("Expected " + COMMAND_ID + " and received " + commandId); cacheName = (String) parameters[0]; } @Override public String toString() { return getClass().getSimpleName() + " { cacheName = " + cacheName + "}"; } }
/** * This class is an interceptor that will index data only if it has come from a local source. * * <p>Currently, this is enabled by setting the system property "infinispan.query.indexLocalOnly" * to "true". * * @author Navin Surtani * @since 4.0 */ public class LocalQueryInterceptor extends QueryInterceptor { private static final Log log = LogFactory.getLog(LocalQueryInterceptor.class, Log.class); public LocalQueryInterceptor(SearchFactoryIntegrator searchFactory) { super(searchFactory); } @Override protected Log getLog() { return log; } @Override protected boolean shouldModifyIndexes(FlagAffectedCommand command, InvocationContext ctx) { // will index only local updates that were not flagged with SKIP_INDEXING and are not caused // internally by state transfer return ctx.isOriginLocal() && !command.hasFlag(Flag.SKIP_INDEXING) && !command.hasFlag(Flag.PUT_FOR_STATE_TRANSFER); } }
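// As the javadoc above notes, local-only indexing is switched on with a system property; it has
// to be set before the cache manager starts, e.g. in a test bootstrap:
System.setProperty("infinispan.query.indexLocalOnly", "true");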
/** * An extension of {@link ReadCommittedEntry} that provides Repeatable Read semantics * * @author Manik Surtani (<a href="mailto:[email protected]">[email protected]</a>) * @since 4.0 */ public class RepeatableReadEntry extends ReadCommittedEntry { private static final Log log = LogFactory.getLog(RepeatableReadEntry.class); public RepeatableReadEntry(Object key, Object value, long lifespan) { super(key, value, lifespan); } @Override public void copyForUpdate(DataContainer container, boolean writeSkewCheck) { if (isChanged()) return; // already copied // mark entry as changed. setChanged(); if (writeSkewCheck) { // check for write skew. InternalCacheEntry ice = container.get(key); Object actualValue = ice == null ? null : ice.getValue(); // Note that this identity-check is intentional. We don't *want* to call actualValue.equals() // since that defeats the purpose. // the implicit "versioning" we have in R_R creates a new wrapper "value" instance for every // update. if (actualValue != null && actualValue != value) { String errormsg = new StringBuilder() .append("Detected write skew on key [") .append(getKey()) .append("]. Another process has changed the entry since we last read it!") .toString(); if (log.isWarnEnabled()) log.warn(errormsg + ". Unable to copy entry for update."); throw new CacheException(errormsg); } } // make a backup copy oldValue = value; } }
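// A sketch of the write-skew scenario the check above guards against, assuming a transactional
// cache with repeatable read and writeSkewCheck enabled (the configuration and the tm handle are
// test-style assumptions). tx1 reads the entry, a concurrent writer changes it, and tx1's own
// write is then refused because the container no longer holds the value instance tx1 read.
public void testWriteSkewDetected() throws Exception {
  tm.begin();
  cache.get("k"); // tx1 reads and wraps the entry
  Transaction tx1 = tm.suspend();

  cache.put("k", "changed-outside"); // concurrent update, outside tx1

  tm.resume(tx1);
  try {
    cache.put("k", "tx1-value"); // copyForUpdate runs its write skew check and fails
    tm.commit();
  } catch (CacheException expected) {
    // "Detected write skew on key [k]. Another process has changed the entry since we last read it!"
    tm.rollback();
  }
}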
/** * @author Mircea Markus * @since 5.2 */ public class BaseBackupInterceptor extends CommandInterceptor { protected BackupSender backupSender; protected TransactionTable txTable; private static final Log log = LogFactory.getLog(BaseBackupInterceptor.class); @Inject void init(BackupSender sender, TransactionTable txTable) { this.backupSender = sender; this.txTable = txTable; } protected boolean isTxFromRemoteSite(GlobalTransaction gtx) { LocalTransaction remoteTx = txTable.getLocalTransaction(gtx); return remoteTx != null && remoteTx.isFromRemoteSite(); } protected boolean shouldInvokeRemoteTxCommand(TxInvocationContext ctx) { // ISPN-2362: For backups, we should only replicate to the remote site if there are // modifications to replay. boolean shouldBackupRemotely = ctx.isOriginLocal() && ctx.hasModifications() && !ctx.getCacheTransaction().isFromStateTransfer(); getLog().tracef("Should backup remotely? %s", shouldBackupRemotely); return shouldBackupRemotely; } protected final boolean skipXSiteBackup(FlagAffectedCommand command) { return command.hasFlag(Flag.SKIP_XSITE_BACKUP); } @Override protected Log getLog() { return log; } }
/** * Parent tests for both transactional and read-only tests are defined in this class. * * @author Galder Zamarreño * @since 4.1 */ public abstract class AbstractFunctionalTestCase extends SingleNodeTestCase { static final Log log = LogFactory.getLog(AbstractFunctionalTestCase.class); @Test public void testEmptySecondLevelCacheEntry() throws Exception { sessionFactory().getCache().evictCollectionRegion(Item.class.getName() + ".items"); Statistics stats = sessionFactory().getStatistics(); stats.clear(); SecondLevelCacheStatistics statistics = stats.getSecondLevelCacheStatistics(Item.class.getName() + ".items"); Map cacheEntries = statistics.getEntries(); assertEquals(0, cacheEntries.size()); } @Test public void testInsertDeleteEntity() throws Exception { final Statistics stats = sessionFactory().getStatistics(); stats.clear(); final Item item = new Item("chris", "Chris's Item"); withTx( tm, new Callable<Void>() { @Override public Void call() throws Exception { Session s = openSession(); s.getTransaction().begin(); s.persist(item); s.getTransaction().commit(); s.close(); return null; } }); log.info("Entry persisted, let's load and delete it."); withTx( tm, new Callable<Void>() { @Override public Void call() throws Exception { Session s = openSession(); s.getTransaction().begin(); Item found = (Item) s.load(Item.class, item.getId()); log.info(stats.toString()); assertEquals(item.getDescription(), found.getDescription()); assertEquals(0, stats.getSecondLevelCacheMissCount()); assertEquals(1, stats.getSecondLevelCacheHitCount()); s.delete(found); s.getTransaction().commit(); s.close(); return null; } }); } @Test public void testInsertClearCacheDeleteEntity() throws Exception { final Statistics stats = sessionFactory().getStatistics(); stats.clear(); final Item item = new Item("chris", "Chris's Item"); withTx( tm, new Callable<Void>() { @Override public Void call() throws Exception { Session s = openSession(); s.getTransaction().begin(); s.persist(item); s.getTransaction().commit(); assertEquals(0, stats.getSecondLevelCacheMissCount()); assertEquals(0, stats.getSecondLevelCacheHitCount()); assertEquals(1, stats.getSecondLevelCachePutCount()); s.close(); return null; } }); log.info("Entry persisted, let's load and delete it."); cleanupCache(); withTx( tm, new Callable<Void>() { @Override public Void call() throws Exception { Session s = openSession(); s.getTransaction().begin(); Item found = (Item) s.load(Item.class, item.getId()); log.info(stats.toString()); assertEquals(item.getDescription(), found.getDescription()); assertEquals(1, stats.getSecondLevelCacheMissCount()); assertEquals(0, stats.getSecondLevelCacheHitCount()); assertEquals(2, stats.getSecondLevelCachePutCount()); s.delete(found); s.getTransaction().commit(); s.close(); return null; } }); } }
/** * Tests the interceptor chain and surrounding logic * * @author Manik Surtani */ @Test(groups = "functional", testName = "loaders.CacheLoaderFunctionalTest") public class CacheLoaderFunctionalTest extends AbstractInfinispanTest { private static final Log log = LogFactory.getLog(CacheLoaderFunctionalTest.class); Cache cache; CacheStore store; TransactionManager tm; Configuration cfg; EmbeddedCacheManager cm; long lifespan = 60000000; // very large lifespan so nothing actually expires @BeforeTest public void setUp() { cfg = new Configuration() .fluent() .loaders() .addCacheLoader( new DummyInMemoryCacheStore.Cfg() .storeName(this.getClass().getName())) // in order to use the same store .transaction() .transactionMode(TransactionMode.TRANSACTIONAL) .build(); cm = TestCacheManagerFactory.createCacheManager(cfg); cache = cm.getCache(); store = TestingUtil.extractComponent(cache, CacheLoaderManager.class).getCacheStore(); tm = TestingUtil.getTransactionManager(cache); } @AfterTest public void tearDown() { TestingUtil.killCacheManagers(cm); cache = null; cm = null; cfg = null; tm = null; store = null; } @AfterMethod public void afterMethod() throws CacheLoaderException { if (cache != null) cache.clear(); if (store != null) store.clear(); } private void assertInCacheAndStore(Object key, Object value) throws CacheLoaderException { assertInCacheAndStore(key, value, -1); } private void assertInCacheAndStore(Object key, Object value, long lifespanMillis) throws CacheLoaderException { assertInCacheAndStore(cache, store, key, value, lifespanMillis); } private void assertInCacheAndStore(Cache cache, CacheStore store, Object key, Object value) throws CacheLoaderException { assertInCacheAndStore(cache, store, key, value, -1); } private void assertInCacheAndStore( Cache cache, CacheStore store, Object key, Object value, long lifespanMillis) throws CacheLoaderException { InternalCacheEntry se = cache.getAdvancedCache().getDataContainer().get(key, null); testStoredEntry(se, value, lifespanMillis, "Cache", key); se = store.load(key); testStoredEntry(se, value, lifespanMillis, "Store", key); } private void testStoredEntry( InternalCacheEntry entry, Object expectedValue, long expectedLifespan, String src, Object key) { assert entry != null : src + " entry for key " + key + " should NOT be null"; assert entry.getValue().equals(expectedValue) : src + " should contain value " + expectedValue + " under key " + entry.getKey() + " but was " + entry.getValue() + ". Entry is " + entry; assert entry.getLifespan() == expectedLifespan : src + " expected lifespan for key " + key + " to be " + expectedLifespan + " but was " + entry.getLifespan() + ". Entry is " + entry; } private void assertNotInCacheAndStore(Cache cache, CacheStore store, Object... keys) throws CacheLoaderException { for (Object key : keys) { assert !cache.getAdvancedCache().getDataContainer().containsKey(key, null) : "Cache should not contain key " + key; assert !store.containsKey(key) : "Store should not contain key " + key; } } private void assertNotInCacheAndStore(Object... keys) throws CacheLoaderException { assertNotInCacheAndStore(cache, store, keys); } private void assertInStoreNotInCache(Object... keys) throws CacheLoaderException { assertInStoreNotInCache(cache, store, keys); } private void assertInStoreNotInCache(Cache cache, CacheStore store, Object... 
keys) throws CacheLoaderException { for (Object key : keys) { assert !cache.getAdvancedCache().getDataContainer().containsKey(key, null) : "Cache should not contain key " + key; assert store.containsKey(key) : "Store should contain key " + key; } } private void assertInCacheAndNotInStore(Object... keys) throws CacheLoaderException { assertInCacheAndNotInStore(cache, store, keys); } private void assertInCacheAndNotInStore(Cache cache, CacheStore store, Object... keys) throws CacheLoaderException { for (Object key : keys) { assert cache.getAdvancedCache().getDataContainer().containsKey(key, null) : "Cache should contain key " + key; assert !store.containsKey(key) : "Store should not contain key " + key; } } public void testStoreAndRetrieve() throws CacheLoaderException { assertNotInCacheAndStore("k1", "k2", "k3", "k4", "k5", "k6", "k7"); cache.put("k1", "v1"); cache.put("k2", "v2", lifespan, MILLISECONDS); cache.putAll(Collections.singletonMap("k3", "v3")); cache.putAll(Collections.singletonMap("k4", "v4"), lifespan, MILLISECONDS); cache.putIfAbsent("k5", "v5"); cache.putIfAbsent("k6", "v6", lifespan, MILLISECONDS); cache.putIfAbsent("k5", "v5-SHOULD-NOT-PUT"); cache.putIfAbsent("k6", "v6-SHOULD-NOT-PUT", lifespan, MILLISECONDS); cache.putForExternalRead("k7", "v7"); cache.putForExternalRead("k7", "v7-SHOULD-NOT-PUT"); for (int i = 1; i < 8; i++) { // even numbers have lifespans if (i % 2 == 1) assertInCacheAndStore("k" + i, "v" + i); else assertInCacheAndStore("k" + i, "v" + i, lifespan); } assert !cache.remove("k1", "some rubbish"); for (int i = 1; i < 8; i++) { // even numbers have lifespans if (i % 2 == 1) assertInCacheAndStore("k" + i, "v" + i); else assertInCacheAndStore("k" + i, "v" + i, lifespan); } log.info("cache.get(\"k1\") = " + cache.get("k1")); assert cache.remove("k1", "v1"); log.info("cache.get(\"k1\") = " + cache.get("k1")); assert cache.remove("k2").equals("v2"); assertNotInCacheAndStore("k1", "k2"); for (int i = 3; i < 8; i++) { // even numbers have lifespans if (i % 2 == 1) assertInCacheAndStore("k" + i, "v" + i); else assertInCacheAndStore("k" + i, "v" + i, lifespan); } cache.clear(); assertNotInCacheAndStore("k1", "k2", "k3", "k4", "k5", "k6", "k7"); } public void testReplaceMethods() throws CacheLoaderException { assertNotInCacheAndStore("k1", "k2", "k3", "k4"); cache.replace("k1", "v1-SHOULD-NOT-STORE"); assertNoLocks(cache); cache.replace("k2", "v2-SHOULD-NOT-STORE", lifespan, MILLISECONDS); assertNoLocks(cache); assertNotInCacheAndStore("k1", "k2", "k3", "k4"); cache.put("k1", "v1"); assertNoLocks(cache); cache.put("k2", "v2"); assertNoLocks(cache); cache.put("k3", "v3"); assertNoLocks(cache); cache.put("k4", "v4"); assertNoLocks(cache); for (int i = 1; i < 5; i++) assertInCacheAndStore("k" + i, "v" + i); cache.replace("k1", "v1-SHOULD-NOT-STORE", "v1-STILL-SHOULD-NOT-STORE"); assertNoLocks(cache); cache.replace("k2", "v2-SHOULD-NOT-STORE", "v2-STILL-SHOULD-NOT-STORE", lifespan, MILLISECONDS); assertNoLocks(cache); for (int i = 1; i < 5; i++) assertInCacheAndStore("k" + i, "v" + i); cache.replace("k1", "v1-REPLACED"); assertNoLocks(cache); cache.replace("k2", "v2-REPLACED", lifespan, MILLISECONDS); assertInCacheAndStore("k2", "v2-REPLACED", lifespan); assertNoLocks(cache); cache.replace("k3", "v3", "v3-REPLACED"); assertNoLocks(cache); cache.replace("k4", "v4", "v4-REPLACED", lifespan, MILLISECONDS); assertNoLocks(cache); for (int i = 1; i < 5; i++) { // even numbers have lifespans if (i % 2 == 1) assertInCacheAndStore("k" + i, "v" + i + "-REPLACED"); else 
assertInCacheAndStore("k" + i, "v" + i + "-REPLACED", lifespan); } assertNoLocks(cache); } public void testLoading() throws CacheLoaderException { assertNotInCacheAndStore("k1", "k2", "k3", "k4"); for (int i = 1; i < 5; i++) store.store(TestInternalCacheEntryFactory.create("k" + i, "v" + i)); for (int i = 1; i < 5; i++) assert cache.get("k" + i).equals("v" + i); // make sure we have no stale locks!! assertNoLocks(cache); for (int i = 1; i < 5; i++) cache.evict("k" + i); // make sure we have no stale locks!! assertNoLocks(cache); assert cache.putIfAbsent("k1", "v1-SHOULD-NOT-STORE").equals("v1"); assert cache.remove("k2").equals("v2"); assert cache.replace("k3", "v3-REPLACED").equals("v3"); assert cache.replace("k4", "v4", "v4-REPLACED"); // make sure we have no stale locks!! assertNoLocks(cache); assert cache.size() == 3 : "Expected the cache to contain 3 elements but contained " + cache.size(); for (int i = 1; i < 5; i++) cache.evict("k" + i); // make sure we have no stale locks!! assertNoLocks(cache); assert cache.isEmpty(); // cache size ops will not trigger a load cache.clear(); // this should propagate to the loader though assertNotInCacheAndStore("k1", "k2", "k3", "k4"); // make sure we have no stale locks!! assertNoLocks(cache); } public void testPreloading() throws CacheLoaderException { Configuration preloadingCfg = cfg.clone(); preloadingCfg.getCacheLoaderManagerConfig().setPreload(true); ((DummyInMemoryCacheStore.Cfg) preloadingCfg.getCacheLoaderManagerConfig().getFirstCacheLoaderConfig()) .setStoreName("preloadingCache"); cm.defineConfiguration("preloadingCache", preloadingCfg); Cache preloadingCache = cm.getCache("preloadingCache"); CacheStore preloadingStore = TestingUtil.extractComponent(preloadingCache, CacheLoaderManager.class).getCacheStore(); assert preloadingCache.getConfiguration().getCacheLoaderManagerConfig().isPreload(); assertNotInCacheAndStore(preloadingCache, preloadingStore, "k1", "k2", "k3", "k4"); preloadingCache.put("k1", "v1"); preloadingCache.put("k2", "v2", lifespan, MILLISECONDS); preloadingCache.put("k3", "v3"); preloadingCache.put("k4", "v4", lifespan, MILLISECONDS); for (int i = 1; i < 5; i++) { if (i % 2 == 1) assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i); else assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i, lifespan); } DataContainer c = preloadingCache.getAdvancedCache().getDataContainer(); assert c.size(null) == 4; preloadingCache.stop(); assert c.size(null) == 0; preloadingCache.start(); assert preloadingCache.getConfiguration().getCacheLoaderManagerConfig().isPreload(); c = preloadingCache.getAdvancedCache().getDataContainer(); assert c.size(null) == 4; for (int i = 1; i < 5; i++) { if (i % 2 == 1) assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i); else assertInCacheAndStore(preloadingCache, preloadingStore, "k" + i, "v" + i, lifespan); } } public void testPurgeOnStartup() throws CacheLoaderException { Configuration purgingCfg = cfg.clone(); CacheStoreConfig firstCacheLoaderConfig = (CacheStoreConfig) purgingCfg.getCacheLoaderManagerConfig().getFirstCacheLoaderConfig(); firstCacheLoaderConfig.setPurgeOnStartup(true); ((DummyInMemoryCacheStore.Cfg) purgingCfg.getCacheLoaderManagerConfig().getFirstCacheLoaderConfig()) .setStoreName("purgingCache"); cm.defineConfiguration("purgingCache", purgingCfg); Cache purgingCache = cm.getCache("purgingCache"); CacheStore purgingStore = TestingUtil.extractComponent(purgingCache, CacheLoaderManager.class).getCacheStore(); 
assertNotInCacheAndStore(purgingCache, purgingStore, "k1", "k2", "k3", "k4"); purgingCache.put("k1", "v1"); purgingCache.put("k2", "v2", lifespan, MILLISECONDS); purgingCache.put("k3", "v3"); purgingCache.put("k4", "v4", lifespan, MILLISECONDS); for (int i = 1; i < 5; i++) { if (i % 2 == 1) assertInCacheAndStore(purgingCache, purgingStore, "k" + i, "v" + i); else assertInCacheAndStore(purgingCache, purgingStore, "k" + i, "v" + i, lifespan); } DataContainer c = purgingCache.getAdvancedCache().getDataContainer(); assert c.size(null) == 4; purgingCache.stop(); assert c.size(null) == 0; purgingCache.start(); c = purgingCache.getAdvancedCache().getDataContainer(); assert c.size(null) == 0; assertNotInCacheAndStore(purgingCache, purgingStore, "k1", "k2", "k3", "k4"); } public void testTransactionalWrites() throws Exception { assert cache.getStatus() == ComponentStatus.RUNNING; assertNotInCacheAndStore("k1", "k2"); tm.begin(); cache.put("k1", "v1"); cache.put("k2", "v2", lifespan, MILLISECONDS); Transaction t = tm.suspend(); assertNotInCacheAndStore("k1", "k2"); tm.resume(t); tm.commit(); assertInCacheAndStore("k1", "v1"); assertInCacheAndStore("k2", "v2", lifespan); tm.begin(); cache.clear(); t = tm.suspend(); assertInCacheAndStore("k1", "v1"); assertInCacheAndStore("k2", "v2", lifespan); tm.resume(t); tm.commit(); assertNotInCacheAndStore("k1", "k2"); tm.begin(); cache.put("k1", "v1"); cache.put("k2", "v2", lifespan, MILLISECONDS); t = tm.suspend(); assertNotInCacheAndStore("k1", "k2"); tm.resume(t); tm.rollback(); assertNotInCacheAndStore("k1", "k2"); cache.put("k1", "v1"); cache.put("k2", "v2", lifespan, MILLISECONDS); assertInCacheAndStore("k1", "v1"); assertInCacheAndStore("k2", "v2", lifespan); tm.begin(); cache.clear(); t = tm.suspend(); assertInCacheAndStore("k1", "v1"); assertInCacheAndStore("k2", "v2", lifespan); tm.resume(t); tm.rollback(); assertInCacheAndStore("k1", "v1"); assertInCacheAndStore("k2", "v2", lifespan); } public void testTransactionalReplace(Method m) throws Exception { assert cache.getStatus() == ComponentStatus.RUNNING; assertNotInCacheAndStore(k(m, 1)); assertNotInCacheAndStore(k(m, 2)); cache.put(k(m, 2), v(m)); tm.begin(); cache.put(k(m, 1), v(m, 1)); cache.replace(k(m, 2), v(m, 1)); Transaction t = tm.suspend(); assertNotInCacheAndStore(k(m, 1)); assertInCacheAndStore(k(m, 2), v(m)); tm.resume(t); tm.commit(); assertInCacheAndStore(k(m, 1), v(m, 1)); assertInCacheAndStore(k(m, 2), v(m, 1)); } public void testEvictAndRemove() throws CacheLoaderException { assertNotInCacheAndStore("k1", "k2"); cache.put("k1", "v1"); cache.put("k2", "v2", lifespan, MILLISECONDS); cache.evict("k1"); cache.evict("k2"); assert "v1".equals(cache.remove("k1")); assert "v2".equals(cache.remove("k2")); } public void testLoadingToMemory() throws CacheLoaderException { assertNotInCacheAndStore("k1", "k2"); store.store(TestInternalCacheEntryFactory.create("k1", "v1")); store.store(TestInternalCacheEntryFactory.create("k2", "v2")); assertInStoreNotInCache("k1", "k2"); assert "v1".equals(cache.get("k1")); assert "v2".equals(cache.get("k2")); assertInCacheAndStore("k1", "v1"); assertInCacheAndStore("k2", "v2"); store.remove("k1"); store.remove("k2"); assertInCacheAndNotInStore("k1", "k2"); assert "v1".equals(cache.get("k1")); assert "v2".equals(cache.get("k2")); } public void testSkipLocking(Method m) { String name = m.getName(); AdvancedCache advancedCache = cache.getAdvancedCache(); advancedCache.put("k-" + name, "v-" + name); advancedCache.withFlags(Flag.SKIP_LOCKING).put("k-" + name, "v2-" + 
name); } public void testDuplicatePersistence(Method m) throws Exception { String key = "k-" + m.getName(); String value = "v-" + m.getName(); cache.put(key, value); assert value.equals(cache.get(key)); cache.stop(); cache.start(); tm.begin(); cache.containsKey(key); // Necessary call to force locks being acquired in advance cache.getAdvancedCache().withFlags(Flag.FORCE_WRITE_LOCK).get(key); cache.put(key, value); tm.commit(); assert value.equals(cache.get(key)); } public void testGetCacheLoadersFromConfigAfterStart() { cache.getConfiguration().getCacheLoaders(); cache.getConfiguration().getCacheLoaders(); } }
/** * @author [email protected] * @author Galder Zamarreño * @author Sanne Grinovero <*****@*****.**> (C) 2011 Red Hat Inc. * @since 4.0 */ public class CommandsFactoryImpl implements CommandsFactory { private static final Log log = LogFactory.getLog(CommandsFactoryImpl.class); private static final boolean trace = log.isTraceEnabled(); private DataContainer dataContainer; private CacheNotifier<Object, Object> notifier; private Cache<Object, Object> cache; private String cacheName; private boolean totalOrderProtocol; private InterceptorChain interceptorChain; private DistributionManager distributionManager; private InvocationContextFactory icf; private TransactionTable txTable; private Configuration configuration; private RecoveryManager recoveryManager; private StateProvider stateProvider; private StateConsumer stateConsumer; private LockManager lockManager; private InternalEntryFactory entryFactory; private MapReduceManager mapReduceManager; private StateTransferManager stateTransferManager; private BackupSender backupSender; private CancellationService cancellationService; private XSiteStateProvider xSiteStateProvider; private XSiteStateConsumer xSiteStateConsumer; private XSiteStateTransferManager xSiteStateTransferManager; private EntryRetriever entryRetriever; private GroupManager groupManager; private LocalStreamManager localStreamManager; private ClusterStreamManager clusterStreamManager; private ClusteringDependentLogic clusteringDependentLogic; private TimeService timeService; private Map<Byte, ModuleCommandInitializer> moduleCommandInitializers; private ExternalizerTable externalizerTable; @Inject public void setupDependencies( DataContainer container, CacheNotifier<Object, Object> notifier, Cache<Object, Object> cache, InterceptorChain interceptorChain, DistributionManager distributionManager, InvocationContextFactory icf, TransactionTable txTable, Configuration configuration, @ComponentName(KnownComponentNames.MODULE_COMMAND_INITIALIZERS) Map<Byte, ModuleCommandInitializer> moduleCommandInitializers, RecoveryManager recoveryManager, StateProvider stateProvider, StateConsumer stateConsumer, LockManager lockManager, InternalEntryFactory entryFactory, MapReduceManager mapReduceManager, StateTransferManager stm, BackupSender backupSender, CancellationService cancellationService, TimeService timeService, XSiteStateProvider xSiteStateProvider, XSiteStateConsumer xSiteStateConsumer, XSiteStateTransferManager xSiteStateTransferManager, EntryRetriever entryRetriever, GroupManager groupManager, PartitionHandlingManager partitionHandlingManager, LocalStreamManager localStreamManager, ClusterStreamManager clusterStreamManager, ClusteringDependentLogic clusteringDependentLogic, ExternalizerTable externalizerTable) { this.dataContainer = container; this.notifier = notifier; this.cache = cache; this.interceptorChain = interceptorChain; this.distributionManager = distributionManager; this.icf = icf; this.txTable = txTable; this.configuration = configuration; this.moduleCommandInitializers = moduleCommandInitializers; this.recoveryManager = recoveryManager; this.stateProvider = stateProvider; this.stateConsumer = stateConsumer; this.lockManager = lockManager; this.entryFactory = entryFactory; this.mapReduceManager = mapReduceManager; this.stateTransferManager = stm; this.backupSender = backupSender; this.cancellationService = cancellationService; this.xSiteStateConsumer = xSiteStateConsumer; this.xSiteStateProvider = xSiteStateProvider; this.xSiteStateTransferManager = 
xSiteStateTransferManager; this.entryRetriever = entryRetriever; this.groupManager = groupManager; this.localStreamManager = localStreamManager; this.clusterStreamManager = clusterStreamManager; this.clusteringDependentLogic = clusteringDependentLogic; this.timeService = timeService; this.externalizerTable = externalizerTable; } @Start(priority = 1) // needs to happen early on public void start() { cacheName = cache.getName(); this.totalOrderProtocol = configuration.transaction().transactionProtocol().isTotalOrder(); } @Override public PutKeyValueCommand buildPutKeyValueCommand( Object key, Object value, Metadata metadata, Set<Flag> flags) { return new PutKeyValueCommand( key, value, false, notifier, metadata, flags, configuration.dataContainer().valueEquivalence(), generateUUID()); } @Override public RemoveCommand buildRemoveCommand(Object key, Object value, Set<Flag> flags) { return new RemoveCommand( key, value, notifier, flags, configuration.dataContainer().valueEquivalence(), generateUUID()); } @Override public InvalidateCommand buildInvalidateCommand(Set<Flag> flags, Object... keys) { return new InvalidateCommand(notifier, flags, generateUUID(), keys); } @Override public InvalidateCommand buildInvalidateFromL1Command(Set<Flag> flags, Collection<Object> keys) { return new InvalidateL1Command( dataContainer, distributionManager, notifier, flags, keys, generateUUID()); } @Override public InvalidateCommand buildInvalidateFromL1Command( Address origin, Set<Flag> flags, Collection<Object> keys) { return new InvalidateL1Command( origin, dataContainer, distributionManager, notifier, flags, keys, generateUUID()); } @Override public RemoveExpiredCommand buildRemoveExpiredCommand(Object key, Object value, Long lifespan) { return new RemoveExpiredCommand( key, value, lifespan, notifier, configuration.dataContainer().valueEquivalence(), timeService, generateUUID()); } @Override public ReplaceCommand buildReplaceCommand( Object key, Object oldValue, Object newValue, Metadata metadata, Set<Flag> flags) { return new ReplaceCommand( key, oldValue, newValue, notifier, metadata, flags, configuration.dataContainer().valueEquivalence(), generateUUID()); } @Override public SizeCommand buildSizeCommand(Set<Flag> flags) { return new SizeCommand(cache, flags); } @Override public KeySetCommand buildKeySetCommand(Set<Flag> flags) { return new KeySetCommand(cache, flags); } @Override public EntrySetCommand buildEntrySetCommand(Set<Flag> flags) { return new EntrySetCommand(cache, flags); } @Override public GetKeyValueCommand buildGetKeyValueCommand(Object key, Set<Flag> flags) { return new GetKeyValueCommand(key, flags); } @Override public GetAllCommand buildGetAllCommand( Collection<?> keys, Set<Flag> flags, boolean returnEntries) { return new GetAllCommand(keys, flags, returnEntries, entryFactory); } @Override public PutMapCommand buildPutMapCommand(Map<?, ?> map, Metadata metadata, Set<Flag> flags) { return new PutMapCommand(map, notifier, metadata, flags, generateUUID()); } @Override public ClearCommand buildClearCommand(Set<Flag> flags) { return new ClearCommand(notifier, dataContainer, flags); } @Override public EvictCommand buildEvictCommand(Object key, Set<Flag> flags) { return new EvictCommand(key, notifier, flags, generateUUID(), entryFactory); } @Override public PrepareCommand buildPrepareCommand( GlobalTransaction gtx, List<WriteCommand> modifications, boolean onePhaseCommit) { return totalOrderProtocol ? 
new TotalOrderNonVersionedPrepareCommand(cacheName, gtx, modifications) : new PrepareCommand(cacheName, gtx, modifications, onePhaseCommit); } @Override public VersionedPrepareCommand buildVersionedPrepareCommand( GlobalTransaction gtx, List<WriteCommand> modifications, boolean onePhase) { return totalOrderProtocol ? new TotalOrderVersionedPrepareCommand(cacheName, gtx, modifications, onePhase) : new VersionedPrepareCommand(cacheName, gtx, modifications, onePhase); } @Override public CommitCommand buildCommitCommand(GlobalTransaction gtx) { return totalOrderProtocol ? new TotalOrderCommitCommand(cacheName, gtx) : new CommitCommand(cacheName, gtx); } @Override public VersionedCommitCommand buildVersionedCommitCommand(GlobalTransaction gtx) { return totalOrderProtocol ? new TotalOrderVersionedCommitCommand(cacheName, gtx) : new VersionedCommitCommand(cacheName, gtx); } @Override public RollbackCommand buildRollbackCommand(GlobalTransaction gtx) { return totalOrderProtocol ? new TotalOrderRollbackCommand(cacheName, gtx) : new RollbackCommand(cacheName, gtx); } @Override public MultipleRpcCommand buildReplicateCommand(List<ReplicableCommand> toReplicate) { return new MultipleRpcCommand(toReplicate, cacheName); } @Override public SingleRpcCommand buildSingleRpcCommand(ReplicableCommand call) { return new SingleRpcCommand(cacheName, call); } @Override public ClusteredGetCommand buildClusteredGetCommand( Object key, Set<Flag> flags, boolean acquireRemoteLock, GlobalTransaction gtx) { return new ClusteredGetCommand( key, cacheName, flags, acquireRemoteLock, gtx, configuration.dataContainer().keyEquivalence()); } /** @param isRemote true if the command is deserialized and is executed remote. */ @Override public void initializeReplicableCommand(ReplicableCommand c, boolean isRemote) { if (c == null) return; switch (c.getCommandId()) { case PutKeyValueCommand.COMMAND_ID: ((PutKeyValueCommand) c).init(notifier, configuration); break; case ReplaceCommand.COMMAND_ID: ((ReplaceCommand) c).init(notifier, configuration); break; case PutMapCommand.COMMAND_ID: ((PutMapCommand) c).init(notifier); break; case RemoveCommand.COMMAND_ID: ((RemoveCommand) c).init(notifier, configuration); break; case MultipleRpcCommand.COMMAND_ID: MultipleRpcCommand rc = (MultipleRpcCommand) c; rc.init(interceptorChain, icf); if (rc.getCommands() != null) for (ReplicableCommand nested : rc.getCommands()) { initializeReplicableCommand(nested, false); } break; case SingleRpcCommand.COMMAND_ID: SingleRpcCommand src = (SingleRpcCommand) c; src.init(interceptorChain, icf); if (src.getCommand() != null) initializeReplicableCommand(src.getCommand(), false); break; case InvalidateCommand.COMMAND_ID: InvalidateCommand ic = (InvalidateCommand) c; ic.init(notifier, configuration); break; case InvalidateL1Command.COMMAND_ID: InvalidateL1Command ilc = (InvalidateL1Command) c; ilc.init(configuration, distributionManager, notifier, dataContainer); break; case PrepareCommand.COMMAND_ID: case VersionedPrepareCommand.COMMAND_ID: case TotalOrderNonVersionedPrepareCommand.COMMAND_ID: case TotalOrderVersionedPrepareCommand.COMMAND_ID: PrepareCommand pc = (PrepareCommand) c; pc.init(interceptorChain, icf, txTable); pc.initialize(notifier, recoveryManager); if (pc.getModifications() != null) for (ReplicableCommand nested : pc.getModifications()) { initializeReplicableCommand(nested, false); } pc.markTransactionAsRemote(isRemote); if (configuration.deadlockDetection().enabled() && isRemote) { DldGlobalTransaction transaction = (DldGlobalTransaction) 
pc.getGlobalTransaction(); transaction.setLocksHeldAtOrigin(pc.getAffectedKeys()); } break; case CommitCommand.COMMAND_ID: case VersionedCommitCommand.COMMAND_ID: case TotalOrderCommitCommand.COMMAND_ID: case TotalOrderVersionedCommitCommand.COMMAND_ID: CommitCommand commitCommand = (CommitCommand) c; commitCommand.init(interceptorChain, icf, txTable); commitCommand.markTransactionAsRemote(isRemote); break; case RollbackCommand.COMMAND_ID: case TotalOrderRollbackCommand.COMMAND_ID: RollbackCommand rollbackCommand = (RollbackCommand) c; rollbackCommand.init(interceptorChain, icf, txTable); rollbackCommand.markTransactionAsRemote(isRemote); break; case ClearCommand.COMMAND_ID: ClearCommand cc = (ClearCommand) c; cc.init(notifier, dataContainer); break; case ClusteredGetCommand.COMMAND_ID: ClusteredGetCommand clusteredGetCommand = (ClusteredGetCommand) c; clusteredGetCommand.initialize( icf, this, entryFactory, interceptorChain, distributionManager, txTable, configuration.dataContainer().keyEquivalence()); break; case LockControlCommand.COMMAND_ID: LockControlCommand lcc = (LockControlCommand) c; lcc.init(interceptorChain, icf, txTable); lcc.markTransactionAsRemote(isRemote); if (configuration.deadlockDetection().enabled() && isRemote) { DldGlobalTransaction gtx = (DldGlobalTransaction) lcc.getGlobalTransaction(); RemoteTransaction transaction = txTable.getRemoteTransaction(gtx); if (transaction != null) { if (!configuration.clustering().cacheMode().isDistributed()) { Set<Object> keys = txTable.getLockedKeysForRemoteTransaction(gtx); GlobalTransaction gtx2 = transaction.getGlobalTransaction(); ((DldGlobalTransaction) gtx2).setLocksHeldAtOrigin(keys); gtx.setLocksHeldAtOrigin(keys); } else { GlobalTransaction gtx2 = transaction.getGlobalTransaction(); ((DldGlobalTransaction) gtx2).setLocksHeldAtOrigin(gtx.getLocksHeldAtOrigin()); } } } break; case StateRequestCommand.COMMAND_ID: ((StateRequestCommand) c).init(stateProvider); break; case StateResponseCommand.COMMAND_ID: ((StateResponseCommand) c).init(stateConsumer); break; case GetInDoubtTransactionsCommand.COMMAND_ID: GetInDoubtTransactionsCommand gptx = (GetInDoubtTransactionsCommand) c; gptx.init(recoveryManager); break; case TxCompletionNotificationCommand.COMMAND_ID: TxCompletionNotificationCommand ftx = (TxCompletionNotificationCommand) c; ftx.init(txTable, lockManager, recoveryManager, stateTransferManager); break; case MapCombineCommand.COMMAND_ID: MapCombineCommand mrc = (MapCombineCommand) c; mrc.init(mapReduceManager); break; case ReduceCommand.COMMAND_ID: ReduceCommand reduceCommand = (ReduceCommand) c; reduceCommand.init(mapReduceManager); break; case DistributedExecuteCommand.COMMAND_ID: DistributedExecuteCommand dec = (DistributedExecuteCommand) c; dec.init(cache); break; case GetInDoubtTxInfoCommand.COMMAND_ID: GetInDoubtTxInfoCommand gidTxInfoCommand = (GetInDoubtTxInfoCommand) c; gidTxInfoCommand.init(recoveryManager); break; case CompleteTransactionCommand.COMMAND_ID: CompleteTransactionCommand ccc = (CompleteTransactionCommand) c; ccc.init(recoveryManager); break; case ApplyDeltaCommand.COMMAND_ID: break; case CreateCacheCommand.COMMAND_ID: CreateCacheCommand createCacheCommand = (CreateCacheCommand) c; createCacheCommand.init(cache.getCacheManager()); break; case XSiteAdminCommand.COMMAND_ID: XSiteAdminCommand xSiteAdminCommand = (XSiteAdminCommand) c; xSiteAdminCommand.init(backupSender); break; case CancelCommand.COMMAND_ID: CancelCommand cancelCommand = (CancelCommand) c; cancelCommand.init(cancellationService); break; 
case XSiteStateTransferControlCommand.COMMAND_ID: XSiteStateTransferControlCommand xSiteStateTransferControlCommand = (XSiteStateTransferControlCommand) c; xSiteStateTransferControlCommand.initialize( xSiteStateProvider, xSiteStateConsumer, xSiteStateTransferManager); break; case XSiteStatePushCommand.COMMAND_ID: XSiteStatePushCommand xSiteStatePushCommand = (XSiteStatePushCommand) c; xSiteStatePushCommand.initialize(xSiteStateConsumer); break; case EntryRequestCommand.COMMAND_ID: EntryRequestCommand entryRequestCommand = (EntryRequestCommand) c; entryRequestCommand.init(entryRetriever); break; case EntryResponseCommand.COMMAND_ID: EntryResponseCommand entryResponseCommand = (EntryResponseCommand) c; entryResponseCommand.init(entryRetriever); break; case GetKeysInGroupCommand.COMMAND_ID: GetKeysInGroupCommand getKeysInGroupCommand = (GetKeysInGroupCommand) c; getKeysInGroupCommand.setGroupManager(groupManager); break; case ClusteredGetAllCommand.COMMAND_ID: ClusteredGetAllCommand clusteredGetAllCommand = (ClusteredGetAllCommand) c; clusteredGetAllCommand.init( icf, this, entryFactory, interceptorChain, txTable, configuration.dataContainer().keyEquivalence()); break; case StreamRequestCommand.COMMAND_ID: StreamRequestCommand streamRequestCommand = (StreamRequestCommand) c; streamRequestCommand.inject(localStreamManager); break; case StreamResponseCommand.COMMAND_ID: StreamResponseCommand streamResponseCommand = (StreamResponseCommand) c; streamResponseCommand.inject(clusterStreamManager); break; case StreamSegmentResponseCommand.COMMAND_ID: StreamSegmentResponseCommand streamSegmentResponseCommand = (StreamSegmentResponseCommand) c; streamSegmentResponseCommand.inject(clusterStreamManager); break; case RemoveExpiredCommand.COMMAND_ID: RemoveExpiredCommand removeExpiredCommand = (RemoveExpiredCommand) c; removeExpiredCommand.init(notifier, configuration); break; default: ModuleCommandInitializer mci = moduleCommandInitializers.get(c.getCommandId()); if (mci != null) { mci.initializeReplicableCommand(c, isRemote); } else { if (trace) log.tracef("Nothing to initialize for command: %s", c); } } } @Override public LockControlCommand buildLockControlCommand( Collection<?> keys, Set<Flag> flags, GlobalTransaction gtx) { return new LockControlCommand(keys, cacheName, flags, gtx); } @Override public LockControlCommand buildLockControlCommand( Object key, Set<Flag> flags, GlobalTransaction gtx) { return new LockControlCommand(key, cacheName, flags, gtx); } @Override public LockControlCommand buildLockControlCommand(Collection<?> keys, Set<Flag> flags) { return new LockControlCommand(keys, cacheName, flags, null); } @Override public StateRequestCommand buildStateRequestCommand( StateRequestCommand.Type subtype, Address sender, int viewId, Set<Integer> segments) { return new StateRequestCommand(cacheName, subtype, sender, viewId, segments); } @Override public StateResponseCommand buildStateResponseCommand( Address sender, int topologyId, Collection<StateChunk> stateChunks) { return new StateResponseCommand(cacheName, sender, topologyId, stateChunks); } @Override public String getCacheName() { return cacheName; } @Override public GetInDoubtTransactionsCommand buildGetInDoubtTransactionsCommand() { return new GetInDoubtTransactionsCommand(cacheName); } @Override public TxCompletionNotificationCommand buildTxCompletionNotificationCommand( Xid xid, GlobalTransaction globalTransaction) { return new TxCompletionNotificationCommand(xid, globalTransaction, cacheName); } @Override public 
TxCompletionNotificationCommand buildTxCompletionNotificationCommand(long internalId) { return new TxCompletionNotificationCommand(internalId, cacheName); } @Override public <T> DistributedExecuteCommand<T> buildDistributedExecuteCommand( Callable<T> callable, Address sender, Collection keys) { return new DistributedExecuteCommand<T>(cacheName, keys, callable); } @Override public <KIn, VIn, KOut, VOut> MapCombineCommand<KIn, VIn, KOut, VOut> buildMapCombineCommand( String taskId, Mapper<KIn, VIn, KOut, VOut> m, Reducer<KOut, VOut> r, Collection<KIn> keys) { return new MapCombineCommand<KIn, VIn, KOut, VOut>(taskId, m, r, cacheName, keys); } @Override public GetInDoubtTxInfoCommand buildGetInDoubtTxInfoCommand() { return new GetInDoubtTxInfoCommand(cacheName); } @Override public CompleteTransactionCommand buildCompleteTransactionCommand(Xid xid, boolean commit) { return new CompleteTransactionCommand(cacheName, xid, commit); } @Override public ApplyDeltaCommand buildApplyDeltaCommand( Object deltaAwareValueKey, Delta delta, Collection keys) { return new ApplyDeltaCommand(deltaAwareValueKey, delta, keys, generateUUID()); } @Override public CreateCacheCommand buildCreateCacheCommand( String cacheNameToCreate, String cacheConfigurationName) { return new CreateCacheCommand(cacheName, cacheNameToCreate, cacheConfigurationName); } @Override public CreateCacheCommand buildCreateCacheCommand( String cacheNameToCreate, String cacheConfigurationName, int size) { return new CreateCacheCommand(cacheName, cacheNameToCreate, cacheConfigurationName, size); } @Override public <KOut, VOut> ReduceCommand<KOut, VOut> buildReduceCommand( String taskId, String destinationCache, Reducer<KOut, VOut> r, Collection<KOut> keys) { return new ReduceCommand<KOut, VOut>(taskId, r, destinationCache, keys); } @Override public CancelCommand buildCancelCommandCommand(UUID commandUUID) { return new CancelCommand(cacheName, commandUUID); } @Override public XSiteStateTransferControlCommand buildXSiteStateTransferControlCommand( StateTransferControl control, String siteName) { return new XSiteStateTransferControlCommand(cacheName, control, siteName); } @Override public XSiteAdminCommand buildXSiteAdminCommand( String siteName, AdminOperation op, Integer afterFailures, Long minTimeToWait) { return new XSiteAdminCommand(cacheName, siteName, op, afterFailures, minTimeToWait); } @Override public XSiteStatePushCommand buildXSiteStatePushCommand(XSiteState[] chunk, long timeoutMillis) { return new XSiteStatePushCommand(cacheName, chunk, timeoutMillis); } @Override public SingleXSiteRpcCommand buildSingleXSiteRpcCommand(VisitableCommand command) { return new SingleXSiteRpcCommand(cacheName, command); } @Override public <K, V, C> EntryRequestCommand<K, V, C> buildEntryRequestCommand( UUID identifier, Set<Integer> segments, Set<K> keysToFilter, KeyValueFilter<? super K, ? super V> filter, Converter<? super K, ?
super V, C> converter, Set<Flag> flags) { return new EntryRequestCommand<K, V, C>( cacheName, identifier, cache.getCacheManager().getAddress(), segments, keysToFilter, filter, converter, flags); } @Override public <K, C> EntryResponseCommand<K, C> buildEntryResponseCommand( UUID identifier, Set<Integer> completedSegments, Set<Integer> inDoubtSegments, Collection<CacheEntry<K, C>> values, CacheException e) { return new EntryResponseCommand<>( cache.getCacheManager().getAddress(), cacheName, identifier, completedSegments, inDoubtSegments, values, e); } @Override public GetKeysInGroupCommand buildGetKeysInGroupCommand(Set<Flag> flags, String groupName) { return new GetKeysInGroupCommand(flags, groupName).setGroupManager(groupManager); } @Override public <K> StreamRequestCommand<K> buildStreamRequestCommand( Object id, boolean parallelStream, StreamRequestCommand.Type type, Set<Integer> segments, Set<K> keys, Set<K> excludedKeys, boolean includeLoader, Object terminalOperation) { return new StreamRequestCommand<>( cacheName, cache.getCacheManager().getAddress(), id, parallelStream, type, segments, keys, excludedKeys, includeLoader, terminalOperation); } @Override public <R> StreamResponseCommand<R> buildStreamResponseCommand( Object identifier, boolean complete, Set<Integer> lostSegments, R response) { if (lostSegments.isEmpty()) { return new StreamResponseCommand<>( cacheName, cache.getCacheManager().getAddress(), identifier, complete, response); } else { return new StreamSegmentResponseCommand<>( cacheName, cache.getCacheManager().getAddress(), identifier, complete, response, lostSegments); } } @Override public GetCacheEntryCommand buildGetCacheEntryCommand(Object key, Set<Flag> explicitFlags) { return new GetCacheEntryCommand(key, explicitFlags, entryFactory); } @Override public ClusteredGetAllCommand buildClusteredGetAllCommand( List<?> keys, Set<Flag> flags, GlobalTransaction gtx) { return new ClusteredGetAllCommand( cacheName, keys, flags, gtx, configuration.dataContainer().keyEquivalence()); } private CommandInvocationId generateUUID() { return CommandInvocationId.generateId(clusteringDependentLogic.getAddress()); } @Override public <K, V, R> ReadOnlyKeyCommand<K, V, R> buildReadOnlyKeyCommand( K key, Function<ReadEntryView<K, V>, R> f) { return new ReadOnlyKeyCommand<>(key, f); } @Override public <K, V, R> ReadOnlyManyCommand<K, V, R> buildReadOnlyManyCommand( Set<? extends K> keys, Function<ReadEntryView<K, V>, R> f) { return new ReadOnlyManyCommand<>(keys, f); } @Override public <K, V, R> ReadWriteKeyValueCommand<K, V, R> buildReadWriteKeyValueCommand( K key, V value, BiFunction<V, ReadWriteEntryView<K, V>, R> f, Params params) { return new ReadWriteKeyValueCommand<>( key, value, f, generateUUID(), getValueMatcher(f), params); } @Override public <K, V, R> ReadWriteKeyCommand<K, V, R> buildReadWriteKeyCommand( K key, Function<ReadWriteEntryView<K, V>, R> f, Params params) { return new ReadWriteKeyCommand<>(key, f, generateUUID(), getValueMatcher(f), params); } @Override public <K, V, R> ReadWriteManyCommand<K, V, R> buildReadWriteManyCommand( Set<? extends K> keys, Function<ReadWriteEntryView<K, V>, R> f) { return new ReadWriteManyCommand<>(keys, f); } @Override public <K, V, R> ReadWriteManyEntriesCommand<K, V, R> buildReadWriteManyEntriesCommand( Map<? extends K, ? 
extends V> entries, BiFunction<V, ReadWriteEntryView<K, V>, R> f) { return new ReadWriteManyEntriesCommand<>(entries, f); } @Override public <K, V> WriteOnlyKeyCommand<K, V> buildWriteOnlyKeyCommand( K key, Consumer<WriteEntryView<V>> f, Params params) { return new WriteOnlyKeyCommand<>(key, f, generateUUID(), getValueMatcher(f), params); } @Override public <K, V> WriteOnlyKeyValueCommand<K, V> buildWriteOnlyKeyValueCommand( K key, V value, BiConsumer<V, WriteEntryView<V>> f, Params params) { return new WriteOnlyKeyValueCommand<>( key, value, f, generateUUID(), getValueMatcher(f), params); } @Override public <K, V> WriteOnlyManyCommand<K, V> buildWriteOnlyManyCommand( Set<? extends K> keys, Consumer<WriteEntryView<V>> f) { return new WriteOnlyManyCommand<>(keys, f); } @Override public <K, V> WriteOnlyManyEntriesCommand<K, V> buildWriteOnlyManyEntriesCommand( Map<? extends K, ? extends V> entries, BiConsumer<V, WriteEntryView<V>> f, Params params) { WriteOnlyManyEntriesCommand<K, V> cmd = new WriteOnlyManyEntriesCommand<>(entries, f); cmd.setParams(params); return cmd; } private ValueMatcher getValueMatcher(Object o) { SerializeFunctionWith ann = o.getClass().getAnnotation(SerializeFunctionWith.class); if (ann != null) return ValueMatcher.valueOf(ann.valueMatcher().toString()); Externalizer ext = externalizerTable.getExternalizer(o); if (ext != null && ext instanceof LambdaExternalizer) return ValueMatcher.valueOf(((LambdaExternalizer) ext).valueMatcher(o).toString()); return ValueMatcher.MATCH_ALWAYS; } }
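/*
 * Editor's sketch (illustrative, not part of the source): the factory above follows a
 * two-sided pattern -- the build* methods create fully wired commands locally, while
 * initializeReplicableCommand re-injects cache-scoped components into commands that arrived
 * from remote nodes as bare data. A minimal, self-contained model of that
 * dispatch-by-command-id shape; every name in it is hypothetical.
 */
interface ReplicableCommandSketch {
   byte getCommandId();
   Object perform() throws Throwable;
}

class PingCommandSketch implements ReplicableCommandSketch {
   static final byte COMMAND_ID = 1;
   private transient String localNode; // cache-scoped state, re-injected on the receiver
   void init(String localNode) { this.localNode = localNode; }
   @Override public byte getCommandId() { return COMMAND_ID; }
   @Override public Object perform() { return "pong from " + localNode; }
}

class CommandInitializerSketch {
   private final String localNode;
   CommandInitializerSketch(String localNode) { this.localNode = localNode; }

   // Mirrors the switch above: dispatch on the command id, cast, inject dependencies.
   void initializeReplicableCommand(ReplicableCommandSketch c) {
      switch (c.getCommandId()) {
         case PingCommandSketch.COMMAND_ID:
            ((PingCommandSketch) c).init(localNode);
            break;
         default:
            break; // unknown ids: nothing to initialize, as in the default branch above
      }
   }
}
// Usage (hypothetical): new CommandInitializerSketch("node-A").initializeReplicableCommand(cmd); cmd.perform();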
/** * RhqPluginXmlGenerator. Generates the rhq-plugin.xml descriptor from Infinispan's MBean-annotated classes. * * @author Galder Zamarreño * @since 4.0 */ public class RhqPluginXmlGenerator { private static final Log log = LogFactory.getLog(RhqPluginXmlGenerator.class); private static ClassPool classPool; private static String cp; public static void main(String[] args) throws Exception { cp = System.getProperty("java.class.path"); start(null); } public static boolean validOptions(String[][] options, DocErrorReporter reporter) { for (String[] option : options) { if (option[0].equals("-classpath")) cp = option[1]; } return true; } public static boolean start(RootDoc rootDoc) throws Exception { List<Class<?>> mbeanIspnClasses = getMBeanClasses(); List<Class<?>> globalClasses = new ArrayList<Class<?>>(); List<Class<?>> namedCacheClasses = new ArrayList<Class<?>>(); for (Class<?> clazz : mbeanIspnClasses) { Scope scope = clazz.getAnnotation(Scope.class); if (scope != null && scope.value() == Scopes.GLOBAL) { debug("Add as global class " + clazz); globalClasses.add(clazz); } else { debug("Add as named cache class " + clazz); namedCacheClasses.add(clazz); } } // Init the Javassist class pool. classPool = ClassPool.getDefault(); classPool.insertClassPath(new ClassClassPath(RhqPluginXmlGenerator.class)); PluginGen pg = new PluginGen(); Props root = new Props(); root.setPluginName("Infinispan"); root.setPluginDescription("Supports management and monitoring of Infinispan"); root.setName("Infinispan Cache Manager"); root.setPkg("org.infinispan.rhq"); root.setDependsOnJmxPlugin(true); root.setDiscoveryClass("CacheManagerDiscovery"); root.setComponentClass("CacheManagerComponent"); root.setSingleton(false); root.setCategory(ResourceCategory.SERVICE); Set<TypeKey> servers = new HashSet<TypeKey>(); servers.add(new TypeKey("JMX Server", "JMX")); servers.add(new TypeKey("JBossAS Server", "JBossAS")); servers.add(new TypeKey("JBossAS Server", "JBossAS5")); root.setRunsInsides(servers); populateMetricsAndOperations(globalClasses, root, false); Props cache = new Props(); cache.setName("Infinispan Cache"); cache.setPkg("org.infinispan.rhq"); cache.setDependsOnJmxPlugin(true); cache.setDiscoveryClass("CacheDiscovery"); cache.setComponentClass("CacheComponent"); cache.setSingleton(false); cache.setCategory(ResourceCategory.SERVICE); populateMetricsAndOperations(namedCacheClasses, cache, true); root.getChildren().add(cache); String metaInfDir = "../../../src/main/resources/META-INF"; new File(metaInfDir).mkdirs(); String targetMetaInfDir = "../../../target/classes/META-INF"; new File(targetMetaInfDir).mkdirs(); pg.createFile(root, "descriptor", "rhq-plugin.xml", metaInfDir); copyFile( new File(metaInfDir + "/rhq-plugin.xml"), new File(targetMetaInfDir + "/rhq-plugin.xml")); return true; } private static void copyFile(File in, File out) throws IOException { FileChannel inCh = new FileInputStream(in).getChannel(); FileChannel outCh = new FileOutputStream(out).getChannel(); try { inCh.transferTo(0, inCh.size(), outCh); } finally { inCh.close(); outCh.close(); } } private static List<Class<?>> getMBeanClasses() throws IOException { try { return ClassFinder.withAnnotationDeclared(ClassFinder.infinispanClasses(cp), MBean.class); } catch (Exception e) { IOException ioe = new IOException("Unable to get Infinispan classes"); ioe.initCause(e); throw ioe; } } private static void populateMetricsAndOperations( List<Class<?>> classes, Props props, boolean withNamePrefix) throws Exception { props.setHasOperations(true); 
props.setHasMetrics(true); for (Class<?> clazz : classes) { MBean mbean = clazz.getAnnotation(MBean.class); String prefix = withNamePrefix ? mbean.objectName() + '.' : ""; CtClass ctClass = classPool.get(clazz.getName()); CtMethod[] ctMethods = ctClass.getMethods(); for (CtMethod ctMethod : ctMethods) { ManagedAttribute managedAttr = (ManagedAttribute) ctMethod.getAnnotation(ManagedAttribute.class); ManagedOperation managedOp = (ManagedOperation) ctMethod.getAnnotation(ManagedOperation.class); Metric rhqMetric = (Metric) ctMethod.getAnnotation(Metric.class); if (rhqMetric != null) { debug("Metric annotation found " + rhqMetric); // Property and description resolution are the reason why annotation scanning is done // here: both are calculated from either the method name or the Managed* annotations, // and so only the Infinispan side knows about them. String property = prefix + getPropertyFromBeanConvention(ctMethod); if (!rhqMetric.property().isEmpty()) { property = prefix + rhqMetric.property(); } MetricProps metric = new MetricProps(property); String displayName = withNamePrefix ? "[" + mbean.objectName() + "] " + rhqMetric.displayName() : rhqMetric.displayName(); metric.setDisplayName(displayName); metric.setDisplayType(rhqMetric.displayType()); metric.setDataType(rhqMetric.dataType()); metric.setUnits(rhqMetric.units()); if (managedAttr != null) { debug("Metric has ManagedAttribute annotation " + managedAttr); metric.setDescription(managedAttr.description()); } else if (managedOp != null) { debug("Metric has ManagedOperation annotation " + managedOp); metric.setDescription(managedOp.description()); } else { log.debug( "Metric has no managed annotations, so take the description from the display name."); metric.setDescription(rhqMetric.displayName()); } props.getMetrics().add(metric); } Operation rhqOperation = (Operation) ctMethod.getAnnotation(Operation.class); if (rhqOperation != null) { debug("Operation annotation found " + rhqOperation); String name; if (!rhqOperation.name().isEmpty()) { name = prefix + rhqOperation.name(); } else { name = prefix + ctMethod.getName(); } OperationProps operation = new OperationProps(name); String displayName = withNamePrefix ? 
"[" + mbean.objectName() + "] " + rhqOperation.displayName() : rhqOperation.displayName(); operation.setDisplayName(displayName); if (managedAttr != null) { debug("Operation has ManagedAttribute annotation " + managedAttr); operation.setDescription(managedAttr.description()); } else if (managedOp != null) { debug("Operation has ManagedOperation annotation " + managedOp); operation.setDescription(managedOp.description()); } else { debug( "Operation has no managed annotations, so take the description from the display name."); operation.setDescription(rhqOperation.displayName()); } Object[][] paramAnnotations = ctMethod.getParameterAnnotations(); int i = 0; for (Object[] paramAnnotationsInEach : paramAnnotations) { boolean hadParameter = false; for (Object annot : paramAnnotationsInEach) { debug("Parameter annotation " + annot); if (annot instanceof Parameter) { Parameter param = (Parameter) annot; SimpleProperty prop = new SimpleProperty(param.name()); prop.setDescription(param.description()); operation.getParams().add(prop); hadParameter = true; } } if (!hadParameter) { operation.getParams().add(new SimpleProperty("p" + i++)); } } CtClass returnType = ctMethod.getReturnType(); if (!returnType.equals(CtClass.voidType)) { if (!returnType.equals(Void.TYPE)) { SimpleProperty prop = new SimpleProperty("operationResult"); operation.setResult(prop); } } props.getOperations().add(operation); } } CtField[] ctFields = ctClass.getDeclaredFields(); for (CtField ctField : ctFields) { debug("Inspecting field " + ctField); Metric rhqMetric = (Metric) ctField.getAnnotation(Metric.class); if (rhqMetric != null) { debug("Field " + ctField + " contains Metric annotation " + rhqMetric); String property; if (!rhqMetric.property().isEmpty()) { property = prefix + rhqMetric.property(); } else { property = prefix + getPropertyFromBeanConvention(ctField); } MetricProps metric = new MetricProps(property); String displayName = withNamePrefix ? "[" + mbean.objectName() + "] " + rhqMetric.displayName() : rhqMetric.displayName(); metric.setDisplayName(displayName); metric.setDisplayType(rhqMetric.displayType()); metric.setDataType(rhqMetric.dataType()); metric.setUnits(rhqMetric.units()); ManagedAttribute managedAttr = (ManagedAttribute) ctField.getAnnotation(ManagedAttribute.class); if (managedAttr != null) { debug("Metric has ManagedAttribute annotation " + managedAttr); metric.setDescription(managedAttr.description()); } else { log.debug( "Metric has no managed annotations, so take the description from the display name."); metric.setDescription(rhqMetric.displayName()); } props.getMetrics().add(metric); } } } } private static String getPropertyFromBeanConvention(CtMethod ctMethod) { String getterOrSetter = ctMethod.getName(); if (getterOrSetter.startsWith("get") || getterOrSetter.startsWith("set")) { String withoutGet = getterOrSetter.substring(4); // not specifically Bean convention, but this is what is bound in JMX. return Character.toUpperCase(getterOrSetter.charAt(3)) + withoutGet; } else if (getterOrSetter.startsWith("is")) { String withoutIs = getterOrSetter.substring(3); return Character.toUpperCase(getterOrSetter.charAt(2)) + withoutIs; } return getterOrSetter; } private static String getPropertyFromBeanConvention(CtField ctField) { String fieldName = ctField.getName(); String withoutFirstChar = fieldName.substring(1); return Character.toUpperCase(fieldName.charAt(0)) + withoutFirstChar; } private static void debug(Object o) { // if (log.isDebugEnabled()) log.debug(o); // System.out.println(o); } }
/** * Helper class that handles all notifications to registered listeners. * * @author Manik Surtani (manik AT infinispan DOT org) * @author [email protected] * @since 4.0 */ public final class CacheNotifierImpl extends AbstractListenerImpl implements CacheNotifier, ClassLoaderAwareListenable { private static final Log log = LogFactory.getLog(CacheNotifierImpl.class); private static final Map<Class<? extends Annotation>, Class<?>> allowedListeners = new HashMap<Class<? extends Annotation>, Class<?>>(16); static { allowedListeners.put(CacheEntryCreated.class, CacheEntryCreatedEvent.class); allowedListeners.put(CacheEntryRemoved.class, CacheEntryRemovedEvent.class); allowedListeners.put(CacheEntryVisited.class, CacheEntryVisitedEvent.class); allowedListeners.put(CacheEntryModified.class, CacheEntryModifiedEvent.class); allowedListeners.put(CacheEntryActivated.class, CacheEntryActivatedEvent.class); allowedListeners.put(CacheEntryPassivated.class, CacheEntryPassivatedEvent.class); allowedListeners.put(CacheEntryLoaded.class, CacheEntryLoadedEvent.class); allowedListeners.put(CacheEntriesEvicted.class, CacheEntriesEvictedEvent.class); allowedListeners.put(TransactionRegistered.class, TransactionRegisteredEvent.class); allowedListeners.put(TransactionCompleted.class, TransactionCompletedEvent.class); allowedListeners.put(CacheEntryInvalidated.class, CacheEntryInvalidatedEvent.class); allowedListeners.put(DataRehashed.class, DataRehashedEvent.class); allowedListeners.put(TopologyChanged.class, TopologyChangedEvent.class); // For backward compat allowedListeners.put(CacheEntryEvicted.class, CacheEntryEvictedEvent.class); } final List<ListenerInvocation> cacheEntryCreatedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntryRemovedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntryVisitedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntryModifiedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntryActivatedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntryPassivatedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntryLoadedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntryInvalidatedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> cacheEntriesEvictedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> transactionRegisteredListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> transactionCompletedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> dataRehashedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); final List<ListenerInvocation> topologyChangedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); // For backward compat final List<ListenerInvocation> cacheEntryEvictedListeners = new CopyOnWriteArrayList<ListenerInvocation>(); private Cache<Object, Object> cache; public CacheNotifierImpl() { listenersMap.put(CacheEntryCreated.class, cacheEntryCreatedListeners); listenersMap.put(CacheEntryRemoved.class, cacheEntryRemovedListeners); listenersMap.put(CacheEntryVisited.class, cacheEntryVisitedListeners); listenersMap.put(CacheEntryModified.class, cacheEntryModifiedListeners); 
listenersMap.put(CacheEntryActivated.class, cacheEntryActivatedListeners); listenersMap.put(CacheEntryPassivated.class, cacheEntryPassivatedListeners); listenersMap.put(CacheEntryLoaded.class, cacheEntryLoadedListeners); listenersMap.put(CacheEntriesEvicted.class, cacheEntriesEvictedListeners); listenersMap.put(TransactionRegistered.class, transactionRegisteredListeners); listenersMap.put(TransactionCompleted.class, transactionCompletedListeners); listenersMap.put(CacheEntryInvalidated.class, cacheEntryInvalidatedListeners); listenersMap.put(DataRehashed.class, dataRehashedListeners); listenersMap.put(TopologyChanged.class, topologyChangedListeners); // For backward compat listenersMap.put(CacheEntryEvicted.class, cacheEntryEvictedListeners); } @Inject void injectDependencies(Cache<Object, Object> cache) { this.cache = cache; } @Override protected Log getLog() { return log; } @Override protected Map<Class<? extends Annotation>, Class<?>> getAllowedMethodAnnotations() { return allowedListeners; } @Override public void notifyCacheEntryCreated( Object key, Object value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (!cacheEntryCreatedListeners.isEmpty()) { boolean originLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_CREATED); e.setOriginLocal(originLocal); // The value is set on created events as well, so that the behaviour of // CacheEntryModifiedEvent.getValue() when isPre=false is not broken. e.setValue(value); e.setPre(pre); e.setKey(key); setTx(ctx, e); for (ListenerInvocation listener : cacheEntryCreatedListeners) listener.invoke(e); } } @Override public void notifyCacheEntryModified( Object key, Object value, boolean created, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (!cacheEntryModifiedListeners.isEmpty()) { boolean originLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_MODIFIED); e.setOriginLocal(originLocal); e.setValue(value); e.setPre(pre); e.setKey(key); // Even though CacheEntryCreatedEvent.getValue() has been added, to avoid breaking old // behaviour and to make it easy to comply with the JSR-107 TCK, it's necessary to find // out whether a modification is the result of a cache entry being created. This is // needed because in JSR-107 a modification event is fired only when an existing entry // is updated, and only one event is fired, so it must be raised when isPre=false. 
e.setCreated(created); setTx(ctx, e); for (ListenerInvocation listener : cacheEntryModifiedListeners) listener.invoke(e); } } @Override public void notifyCacheEntryRemoved( Object key, Object value, Object oldValue, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryRemovedListeners)) { boolean originLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_REMOVED); e.setOriginLocal(originLocal); e.setValue(value); e.setOldValue(oldValue); e.setPre(pre); e.setKey(key); setTx(ctx, e); for (ListenerInvocation listener : cacheEntryRemovedListeners) listener.invoke(e); } } @Override public void notifyCacheEntryVisited( Object key, Object value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryVisitedListeners)) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_VISITED); e.setPre(pre); e.setKey(key); e.setValue(value); setTx(ctx, e); for (ListenerInvocation listener : cacheEntryVisitedListeners) listener.invoke(e); } } @Override public void notifyCacheEntriesEvicted( Collection<InternalCacheEntry> entries, InvocationContext ctx, FlagAffectedCommand command) { if (!entries.isEmpty()) { if (isNotificationAllowed(command, cacheEntriesEvictedListeners)) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED); Map<Object, Object> evictedKeysAndValues = transformCollectionToMap( entries, new InfinispanCollections.MapMakerFunction<Object, Object, InternalCacheEntry>() { @Override public Map.Entry<Object, Object> transform(final InternalCacheEntry input) { return new Map.Entry<Object, Object>() { @Override public Object getKey() { return input.getKey(); } @Override public Object getValue() { return input.getValue(); } @Override public Object setValue(Object value) { throw new UnsupportedOperationException(); } }; } }); e.setEntries(evictedKeysAndValues); for (ListenerInvocation listener : cacheEntriesEvictedListeners) listener.invoke(e); } // For backward compat if (isNotificationAllowed(command, cacheEntryEvictedListeners)) { for (InternalCacheEntry ice : entries) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED); e.setKey(ice.getKey()); e.setValue(ice.getValue()); for (ListenerInvocation listener : cacheEntryEvictedListeners) listener.invoke(e); } } } } @Override public void notifyCacheEntryEvicted( Object key, Object value, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntriesEvictedListeners)) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED); e.setEntries(Collections.singletonMap(key, value)); for (ListenerInvocation listener : cacheEntriesEvictedListeners) listener.invoke(e); } // For backward compat if (isNotificationAllowed(command, cacheEntryEvictedListeners)) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_EVICTED); e.setKey(key); e.setValue(value); for (ListenerInvocation listener : cacheEntryEvictedListeners) listener.invoke(e); } } @Override public void notifyCacheEntryInvalidated( final Object key, Object value, final boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryInvalidatedListeners)) { final boolean originLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_INVALIDATED); e.setOriginLocal(originLocal); e.setPre(pre); 
e.setKey(key); e.setValue(value); setTx(ctx, e); for (ListenerInvocation listener : cacheEntryInvalidatedListeners) listener.invoke(e); } } @Override public void notifyCacheEntryLoaded( Object key, Object value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryLoadedListeners)) { boolean originLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_LOADED); e.setOriginLocal(originLocal); e.setPre(pre); e.setKey(key); e.setValue(value); setTx(ctx, e); for (ListenerInvocation listener : cacheEntryLoadedListeners) listener.invoke(e); } } @Override public void notifyCacheEntryActivated( Object key, Object value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryActivatedListeners)) { boolean originLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_ACTIVATED); e.setOriginLocal(originLocal); e.setPre(pre); e.setKey(key); e.setValue(value); setTx(ctx, e); for (ListenerInvocation listener : cacheEntryActivatedListeners) listener.invoke(e); } } private void setTx(InvocationContext ctx, EventImpl<Object, Object> e) { if (ctx != null && ctx.isInTxScope()) { GlobalTransaction tx = ((TxInvocationContext) ctx).getGlobalTransaction(); e.setTransactionId(tx); } } @Override public void notifyCacheEntryPassivated( Object key, Object value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryPassivatedListeners)) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, CACHE_ENTRY_PASSIVATED); e.setPre(pre); e.setKey(key); e.setValue(value); for (ListenerInvocation listener : cacheEntryPassivatedListeners) listener.invoke(e); } } @Override public void notifyTransactionCompleted( GlobalTransaction transaction, boolean successful, InvocationContext ctx) { if (!transactionCompletedListeners.isEmpty()) { boolean isOriginLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, TRANSACTION_COMPLETED); e.setOriginLocal(isOriginLocal); e.setTransactionId(transaction); e.setTransactionSuccessful(successful); for (ListenerInvocation listener : transactionCompletedListeners) listener.invoke(e); } } @Override public void notifyTransactionRegistered( GlobalTransaction globalTransaction, InvocationContext ctx) { if (!transactionRegisteredListeners.isEmpty()) { boolean isOriginLocal = ctx.isOriginLocal(); EventImpl<Object, Object> e = EventImpl.createEvent(cache, TRANSACTION_REGISTERED); e.setOriginLocal(isOriginLocal); e.setTransactionId(globalTransaction); for (ListenerInvocation listener : transactionRegisteredListeners) listener.invoke(e); } } @Override public void notifyDataRehashed( ConsistentHash oldCH, ConsistentHash newCH, int newTopologyId, boolean pre) { if (!dataRehashedListeners.isEmpty()) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, DATA_REHASHED); e.setPre(pre); e.setConsistentHashAtStart(oldCH); e.setConsistentHashAtEnd(newCH); e.setNewTopologyId(newTopologyId); for (ListenerInvocation listener : dataRehashedListeners) listener.invoke(e); } } @Override public void notifyTopologyChanged( ConsistentHash oldConsistentHash, ConsistentHash newConsistentHash, int newTopologyId, boolean pre) { if (!topologyChangedListeners.isEmpty()) { EventImpl<Object, Object> e = EventImpl.createEvent(cache, TOPOLOGY_CHANGED); e.setPre(pre); e.setConsistentHashAtStart(oldConsistentHash); 
e.setConsistentHashAtEnd(newConsistentHash); e.setNewTopologyId(newTopologyId); for (ListenerInvocation listener : topologyChangedListeners) listener.invoke(e); } } public boolean isNotificationAllowed( FlagAffectedCommand cmd, List<ListenerInvocation> listeners) { return (cmd == null || !cmd.hasFlag(Flag.SKIP_LISTENER_NOTIFICATION)) && !listeners.isEmpty(); } }
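/*
 * Editor's sketch (illustrative, not part of the source): CacheNotifierImpl delivers the
 * events above to listeners registered through Infinispan's annotation-based listener API.
 * A minimal listener that notifyCacheEntryCreated would drive; note that entry listeners
 * are invoked twice per write, once with isPre()=true before the change and once with
 * isPre()=false after it. The class and method names here are hypothetical.
 */
@Listener
class CreatedLoggerSketch {
   @CacheEntryCreated
   public void entryCreated(CacheEntryCreatedEvent<Object, Object> event) {
      if (!event.isPre()) { // only react once the entry is actually in the container
         System.out.println("created " + event.getKey() + " = " + event.getValue());
      }
   }
}
// Hypothetical registration against some cache: cache.addListener(new CreatedLoggerSketch());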
/** * Contains all the logic for manipulating the table, including creating it if needed, and data * access operations such as inserting and selecting. Used by JDBC-based cache persistence. * * @author [email protected] */ public class TableManipulation implements Cloneable { private static final Log log = LogFactory.getLog(TableManipulation.class, Log.class); private static final boolean trace = log.isTraceEnabled(); public static final int DEFAULT_FETCH_SIZE = 100; public static final int DEFAULT_BATCH_SIZE = 128; private String identifierQuoteString; private String cacheName; TableManipulationConfiguration config; /* * Manages table creation and destruction during start up/shutdown. */ private ConnectionFactory connectionFactory; /* Cache the sql for managing data */ private String insertRowSql; private String updateRowSql; private String selectRowSql; private String selectIdRowSql; private String deleteRowSql; private String loadAllRowsSql; private String countRowsSql; private String loadAllNonExpiredRowsSql; private String deleteAllRows; private String selectExpiredRowsSql; private String deleteExpiredRowsSql; private String loadSomeRowsSql; private DatabaseType databaseType; private String loadAllKeysBinarySql; private String loadAllKeysStringSql; private TableName tableName; public TableManipulation(TableManipulationConfiguration config, DatabaseType databaseType) { this.config = config; this.databaseType = databaseType; } public TableManipulation() {} public boolean tableExists(Connection connection, TableName tableName) throws PersistenceException { if (tableName == null) { throw new NullPointerException("table name is mandatory"); } ResultSet rs = null; try { // Make sure that (even if the user has extended permissions) only tables in the current // schema are checked: the schema is explicitly set to the current user's so that only // that user's tables are requested. DatabaseMetaData metaData = connection.getMetaData(); String schemaPattern = tableName.getSchema(); if (schemaPattern == null) { switch (getDialect()) { case ORACLE: schemaPattern = metaData.getUserName(); break; default: } } rs = metaData.getTables(null, schemaPattern, tableName.getName(), new String[] {"TABLE"}); return rs.next(); } catch (SQLException e) { if (trace) log.tracef(e, "SQLException occurred while checking the table %s", tableName); return false; } finally { JdbcUtil.safeClose(rs); } } public void createTable(Connection conn) throws PersistenceException { // removed CONSTRAINT clause as this causes problems with some databases, like Informix. 
assertMandatoryElementsPresent(); String createTableDdl = "CREATE TABLE " + getTableName() + "(" + config.idColumnName() + " " + config.idColumnType() + " NOT NULL, " + config.dataColumnName() + " " + config.dataColumnType() + ", " + config.timestampColumnName() + " " + config.timestampColumnType() + ", PRIMARY KEY (" + config.idColumnName() + "))"; if (trace) { log.tracef("Creating table with the following DDL: '%s'.", createTableDdl); } executeUpdateSql(conn, createTableDdl); } private void assertMandatoryElementsPresent() throws PersistenceException { assertNotNull(cacheName, "cacheName needed in order to create table"); } private void assertNotNull(String value, String message) throws PersistenceException { if (value == null || value.trim().length() == 0) { throw new PersistenceException(message); } } private void executeUpdateSql(Connection conn, String sql) throws PersistenceException { Statement statement = null; try { statement = conn.createStatement(); statement.executeUpdate(sql); } catch (SQLException e) { log.errorCreatingTable(sql, e); throw new PersistenceException(e); } finally { JdbcUtil.safeClose(statement); } } public void dropTable(Connection conn) throws PersistenceException { String dropTableDdl = "DROP TABLE " + getTableName(); String clearTable = "DELETE FROM " + getTableName(); executeUpdateSql(conn, clearTable); if (trace) { log.tracef("Dropping table with the following DDL '%s'", dropTableDdl); } executeUpdateSql(conn, dropTableDdl); } public void start(ConnectionFactory connectionFactory) throws PersistenceException { this.connectionFactory = connectionFactory; if (config.createOnStart()) { Connection conn = null; try { conn = this.connectionFactory.getConnection(); if (!tableExists(conn, getTableName())) { createTable(conn); } } finally { this.connectionFactory.releaseConnection(conn); } } } public void stop() throws PersistenceException { if (config.dropOnExit()) { Connection conn = null; try { conn = connectionFactory.getConnection(); dropTable(conn); } finally { connectionFactory.releaseConnection(conn); } } } public String getInsertRowSql() { if (insertRowSql == null) { insertRowSql = "INSERT INTO " + getTableName() + " (" + config.dataColumnName() + ", " + config.timestampColumnName() + ", " + config.idColumnName() + ") VALUES(?,?,?)"; } return insertRowSql; } public String getUpdateRowSql() { if (updateRowSql == null) { switch (getDialect()) { case SYBASE: updateRowSql = "UPDATE " + getTableName() + " SET " + config.dataColumnName() + " = ? , " + config.timestampColumnName() + "=? WHERE " + config.idColumnName() + " = convert(" + config.idColumnType() + "," + "?)"; break; case POSTGRES: updateRowSql = "UPDATE " + getTableName() + " SET " + config.dataColumnName() + " = ? , " + config.timestampColumnName() + "=? WHERE " + config.idColumnName() + " = cast(? as " + config.idColumnType() + ")"; break; default: updateRowSql = "UPDATE " + getTableName() + " SET " + config.dataColumnName() + " = ? , " + config.timestampColumnName() + "=? 
WHERE " + config.idColumnName() + " = ?"; break; } } return updateRowSql; } public String getSelectRowSql() { if (selectRowSql == null) { switch (getDialect()) { case SYBASE: selectRowSql = "SELECT " + config.idColumnName() + ", " + config.dataColumnName() + " FROM " + getTableName() + " WHERE " + config.idColumnName() + " = convert(" + config.idColumnType() + "," + "?)"; break; case POSTGRES: selectRowSql = "SELECT " + config.idColumnName() + ", " + config.dataColumnName() + " FROM " + getTableName() + " WHERE " + config.idColumnName() + " = cast(? as " + config.idColumnType() + ")"; break; default: selectRowSql = "SELECT " + config.idColumnName() + ", " + config.dataColumnName() + " FROM " + getTableName() + " WHERE " + config.idColumnName() + " = ?"; break; } } return selectRowSql; } public String getSelectIdRowSql() { if (selectIdRowSql == null) { switch (getDialect()) { case SYBASE: selectIdRowSql = "SELECT " + config.idColumnName() + " FROM " + getTableName() + " WHERE " + config.idColumnName() + " = convert(" + config.idColumnType() + "," + "?)"; break; case POSTGRES: selectIdRowSql = "SELECT " + config.idColumnName() + " FROM " + getTableName() + " WHERE " + config.idColumnName() + " = cast(? as " + config.idColumnType() + ")"; break; default: selectIdRowSql = "SELECT " + config.idColumnName() + " FROM " + getTableName() + " WHERE " + config.idColumnName() + " = ?"; break; } } return selectIdRowSql; } public String getCountRowsSql() { if (countRowsSql == null) { countRowsSql = "SELECT COUNT(*) FROM " + getTableName(); } return countRowsSql; } public String getDeleteRowSql() { if (deleteRowSql == null) { switch (getDialect()) { case SYBASE: deleteRowSql = "DELETE FROM " + getTableName() + " WHERE " + config.idColumnName() + " = convert(" + config.idColumnType() + "," + "?)"; break; case POSTGRES: deleteRowSql = "DELETE FROM " + getTableName() + " WHERE " + config.idColumnName() + " = cast(? as " + config.idColumnType() + ")"; break; default: deleteRowSql = "DELETE FROM " + getTableName() + " WHERE " + config.idColumnName() + " = ?"; break; } } return deleteRowSql; } public String getLoadNonExpiredAllRowsSql() { if (loadAllNonExpiredRowsSql == null) { loadAllNonExpiredRowsSql = "SELECT " + config.dataColumnName() + "," + config.idColumnName() + ", " + config.timestampColumnName() + " FROM " + getTableName() + " WHERE " + config.timestampColumnName() + " > ? OR " + config.timestampColumnName() + " < 0"; } return loadAllNonExpiredRowsSql; } public String getLoadAllRowsSql() { if (loadAllRowsSql == null) { loadAllRowsSql = "SELECT " + config.dataColumnName() + "," + config.idColumnName() + " FROM " + getTableName(); } return loadAllRowsSql; } public String getDeleteAllRowsSql() { if (deleteAllRows == null) { deleteAllRows = "DELETE FROM " + getTableName(); } return deleteAllRows; } public String getSelectExpiredRowsSql() { if (selectExpiredRowsSql == null) { selectExpiredRowsSql = getLoadAllRowsSql() + " WHERE " + config.timestampColumnName() + "< ?"; } return selectExpiredRowsSql; } public String getDeleteExpiredRowsSql() { if (deleteExpiredRowsSql == null) { deleteExpiredRowsSql = "DELETE FROM " + getTableName() + " WHERE " + config.timestampColumnName() + "< ? 
AND " + config.timestampColumnName() + "> 0"; } return deleteExpiredRowsSql; } @Override public TableManipulation clone() { try { return (TableManipulation) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException(e); } } public TableName getTableName() { if (tableName == null) { tableName = new TableName(getIdentifierQuoteString(), config.tableNamePrefix(), cacheName); } return tableName; } public boolean tableExists(Connection connection) throws PersistenceException { return tableExists(connection, getTableName()); } public void setCacheName(String cacheName) { this.cacheName = cacheName; tableName = null; } public boolean isVariableLimitSupported() { DatabaseType type = getDialect(); return !(type == DatabaseType.DB2 || type == DatabaseType.DB2_390 || type == DatabaseType.SYBASE); } public String getLoadSomeRowsSql() { if (loadSomeRowsSql == null) { // this stuff is going to be database specific!! // see // http://stackoverflow.com/questions/595123/is-there-an-ansi-sql-alternative-to-the-mysql-limit-keyword switch (getDialect()) { case ORACLE: loadSomeRowsSql = String.format( "SELECT %s, %s FROM (SELECT %s, %s FROM %s) WHERE ROWNUM <= ?", config.dataColumnName(), config.idColumnName(), config.dataColumnName(), config.idColumnName(), getTableName()); break; case DB2: case DB2_390: case DERBY: loadSomeRowsSql = String.format( "SELECT %s, %s FROM %s FETCH FIRST ? ROWS ONLY", config.dataColumnName(), config.idColumnName(), getTableName()); break; case INFORMIX: case INTERBASE: case FIREBIRD: loadSomeRowsSql = String.format( "SELECT FIRST ? %s, %s FROM %s", config.dataColumnName(), config.idColumnName(), getTableName()); break; case SQL_SERVER: loadSomeRowsSql = String.format( "SELECT TOP (?) %s, %s FROM %s", config.dataColumnName(), config.idColumnName(), getTableName()); break; case ACCESS: case HSQL: case SYBASE: loadSomeRowsSql = String.format( "SELECT TOP ? %s, %s FROM %s", config.dataColumnName(), config.idColumnName(), getTableName()); break; default: // the MySQL-style LIMIT clause (works for PostgreSQL too) loadSomeRowsSql = String.format( "SELECT %s, %s FROM %s LIMIT ?", config.dataColumnName(), config.idColumnName(), getTableName()); break; } } return loadSomeRowsSql; } public String getLoadAllKeysBinarySql() { if (loadAllKeysBinarySql == null) { loadAllKeysBinarySql = String.format("SELECT %s FROM %s", config.dataColumnName(), getTableName()); } return loadAllKeysBinarySql; } public String getLoadAllKeysStringSql() { if (loadAllKeysStringSql == null) { loadAllKeysStringSql = String.format("SELECT %s FROM %s", config.idColumnName(), getTableName()); } return loadAllKeysStringSql; } /** * For DB queries the fetch size will be set on {@link java.sql.ResultSet#setFetchSize(int)}. This * is optional parameter, if not specified will be defaulted to {@link #DEFAULT_FETCH_SIZE}. */ public int getFetchSize() { return getDialect() == DatabaseType.MYSQL ? Integer.MIN_VALUE : config.fetchSize(); } /** * When doing repetitive DB inserts this will be batched according to this parameter. This is an * optional parameter, and if it is not specified it will be defaulted to {@link * #DEFAULT_BATCH_SIZE}. Guaranteed to be a power of two. */ public int getBatchSize() { return config.batchSize(); } private DatabaseType getDialect() { if (databaseType == null) { // need to guess from the database type! 
Connection connection = null; try { connection = connectionFactory.getConnection(); String dbProduct = connection.getMetaData().getDatabaseProductName(); databaseType = guessDialect(dbProduct); } catch (Exception e) { log.debug("Unable to guess dialect from JDBC metadata.", e); } finally { connectionFactory.releaseConnection(connection); } if (databaseType == null) { log.debug( "Unable to detect database dialect using connection metadata. Attempting to guess based on the driver name."); try { connection = connectionFactory.getConnection(); // use the connection we just acquired; fetching another one here would leak it String dbProduct = connection.getMetaData().getDriverName(); databaseType = guessDialect(dbProduct); } catch (Exception e) { log.debug("Unable to guess database dialect from JDBC driver name.", e); } finally { connectionFactory.releaseConnection(connection); } } if (databaseType == null) { throw new CacheConfigurationException( "Unable to detect database dialect from JDBC driver name or connection metadata. Please provide this manually using the 'dialect' property in your configuration. Supported database dialect strings are " + Arrays.toString(DatabaseType.values())); } else { log.debugf( "Guessing database dialect as '%s'. If this is incorrect, please specify the correct dialect using the 'dialect' attribute in your configuration. Supported database dialect strings are %s", databaseType, Arrays.toString(DatabaseType.values())); } } return databaseType; } private DatabaseType guessDialect(String name) { DatabaseType type = null; if (name != null) { String lowerName = name.toLowerCase(); if (lowerName.contains("mysql")) { type = DatabaseType.MYSQL; } else if (lowerName.contains("postgres")) { type = DatabaseType.POSTGRES; } else if (lowerName.contains("derby")) { type = DatabaseType.DERBY; } else if (lowerName.contains("hsql") || lowerName.contains("hypersonic")) { type = DatabaseType.HSQL; } else if (lowerName.contains("h2")) { type = DatabaseType.H2; } else if (lowerName.contains("sqlite")) { type = DatabaseType.SQLITE; } else if (lowerName.contains("db2")) { type = DatabaseType.DB2; } else if (lowerName.contains("informix")) { type = DatabaseType.INFORMIX; } else if (lowerName.contains("interbase")) { type = DatabaseType.INTERBASE; } else if (lowerName.contains("firebird")) { type = DatabaseType.FIREBIRD; } else if (lowerName.contains("sqlserver") || lowerName.contains("microsoft")) { type = DatabaseType.SQL_SERVER; } else if (lowerName.contains("access")) { type = DatabaseType.ACCESS; } else if (lowerName.contains("oracle")) { type = DatabaseType.ORACLE; } else if (lowerName.contains("adaptive")) { type = DatabaseType.SYBASE; } } return type; } public String getIdentifierQuoteString() { if (identifierQuoteString == null) { switch (getDialect()) { case MYSQL: identifierQuoteString = "`"; break; default: identifierQuoteString = "\""; break; } } return identifierQuoteString; } }
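/*
 * Editor's sketch (illustrative, not part of the source): tableExists above probes
 * DatabaseMetaData.getTables instead of issuing a trial SELECT. The same probe in a
 * self-contained form; the JDBC URL and table name are examples and assume an H2 driver on
 * the classpath.
 */
class TableProbeSketch {
   static boolean tableExists(java.sql.Connection connection, String schema, String table)
         throws java.sql.SQLException {
      java.sql.DatabaseMetaData metaData = connection.getMetaData();
      java.sql.ResultSet rs = metaData.getTables(null, schema, table, new String[] {"TABLE"});
      try {
         return rs.next(); // a first row means the table exists
      } finally {
         rs.close();
      }
   }

   public static void main(String[] args) throws java.sql.SQLException {
      java.sql.Connection c = java.sql.DriverManager.getConnection("jdbc:h2:mem:test");
      try {
         c.createStatement().executeUpdate("CREATE TABLE ISPN_STORE(ID VARCHAR(255) PRIMARY KEY)");
         System.out.println(tableExists(c, null, "ISPN_STORE")); // true
         System.out.println(tableExists(c, null, "NO_SUCH_TABLE")); // false
      } finally {
         c.close();
      }
   }
}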
/** * Repository for {@link RemoteTransaction} and {@link * org.infinispan.transaction.xa.TransactionXaAdapter}s (locally originated transactions). * * @author [email protected] * @author Galder Zamarreño * @since 4.0 */ @Listener(sync = false) public class TransactionTable { public static final int CACHE_STOPPED_VIEW_ID = -1; private static final Log log = LogFactory.getLog(TransactionTable.class); private ConcurrentMap<Transaction, LocalTransaction> localTransactions; private ConcurrentMap<GlobalTransaction, RemoteTransaction> remoteTransactions; private final StaleTransactionCleanupService cleanupService = new StaleTransactionCleanupService(this); protected Configuration configuration; protected InvocationContextContainer icc; protected TransactionCoordinator txCoordinator; protected TransactionFactory txFactory; protected RpcManager rpcManager; protected CommandsFactory commandsFactory; private InterceptorChain invoker; private CacheNotifier notifier; private EmbeddedCacheManager cm; private TransactionSynchronizationRegistry transactionSynchronizationRegistry; protected ClusteringDependentLogic clusteringLogic; protected boolean clustered = false; private Lock minViewRecalculationLock; /** * minTxViewId is the minimum view ID across all ongoing local and remote transactions. It is not * updated on transaction creation, only on removal, because a newly created transaction cannot * have a bigger view ID than the current one. */ private volatile int minTxViewId = CACHE_STOPPED_VIEW_ID; private volatile int currentViewId = CACHE_STOPPED_VIEW_ID; private String cacheName; @Inject public void initialize( RpcManager rpcManager, Configuration configuration, InvocationContextContainer icc, InterceptorChain invoker, CacheNotifier notifier, TransactionFactory gtf, EmbeddedCacheManager cm, TransactionCoordinator txCoordinator, TransactionSynchronizationRegistry transactionSynchronizationRegistry, CommandsFactory commandsFactory, ClusteringDependentLogic clusteringDependentLogic, Cache cache) { this.rpcManager = rpcManager; this.configuration = configuration; this.icc = icc; this.invoker = invoker; this.notifier = notifier; this.txFactory = gtf; this.cm = cm; this.txCoordinator = txCoordinator; this.transactionSynchronizationRegistry = transactionSynchronizationRegistry; this.commandsFactory = commandsFactory; this.clusteringLogic = clusteringDependentLogic; this.cacheName = cache.getName(); } @Start private void start() { final int concurrencyLevel = configuration.locking().concurrencyLevel(); localTransactions = ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel); if (configuration.clustering().cacheMode().isClustered()) { minViewRecalculationLock = new ReentrantLock(); // Only initialize this if we are clustered. 
remoteTransactions = ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel); cleanupService.start(cacheName, rpcManager, invoker); cm.addListener(cleanupService); cm.addListener(this); notifier.addListener(cleanupService); minTxViewId = rpcManager.getTransport().getViewId(); currentViewId = minTxViewId; log.debugf("Min view id set to %s", minTxViewId); clustered = true; } } @Stop private void stop() { if (clustered) { notifier.removeListener(cleanupService); cm.removeListener(cleanupService); cleanupService.stop(); cm.removeListener(this); currentViewId = CACHE_STOPPED_VIEW_ID; // indicate that the cache has stopped } shutDownGracefully(); } public Set<Object> getLockedKeysForRemoteTransaction(GlobalTransaction gtx) { RemoteTransaction transaction = remoteTransactions.get(gtx); if (transaction == null) return emptySet(); return transaction.getLockedKeys(); } public void remoteTransactionPrepared(GlobalTransaction gtx) { // do nothing } public void localTransactionPrepared(LocalTransaction localTransaction) { // nothing, only used by recovery } public void enlist(Transaction transaction, LocalTransaction localTransaction) { if (!localTransaction.isEnlisted()) { SynchronizationAdapter sync = new SynchronizationAdapter( localTransaction, txCoordinator, commandsFactory, rpcManager, this, clusteringLogic, configuration); if (transactionSynchronizationRegistry != null) { try { transactionSynchronizationRegistry.registerInterposedSynchronization(sync); } catch (Exception e) { log.failedSynchronizationRegistration(e); throw new CacheException(e); } } else { try { transaction.registerSynchronization(sync); } catch (Exception e) { log.failedSynchronizationRegistration(e); throw new CacheException(e); } } ((SyncLocalTransaction) localTransaction).setEnlisted(true); } } public void failureCompletingTransaction(Transaction tx) { final LocalTransaction localTransaction = localTransactions.get(tx); if (localTransaction != null) { removeLocalTransaction(localTransaction); } } /** * Returns true if the given transaction is already registered with the transaction table. * * @param tx the transaction to check; if it is null, false is returned */ public boolean containsLocalTx(Transaction tx) { return tx != null && localTransactions.containsKey(tx); } public int getMinViewId() { return minTxViewId; } protected void updateStateOnNodesLeaving(Collection<Address> leavers) { Set<GlobalTransaction> toKill = new HashSet<GlobalTransaction>(); for (GlobalTransaction gt : remoteTransactions.keySet()) { if (leavers.contains(gt.getAddress())) toKill.add(gt); } if (toKill.isEmpty()) log.tracef( "No global transactions pertain to originator(s) %s who have left the cluster.", leavers); else log.tracef( "%s global transactions pertain to leavers list %s and need to be killed", toKill.size(), leavers); for (GlobalTransaction gtx : toKill) { log.tracef("Killing remote transaction originating on leaver %s", gtx); RollbackCommand rc = new RollbackCommand(cacheName, gtx); rc.init(invoker, icc, TransactionTable.this); try { rc.perform(null); log.tracef("Rollback of transaction %s complete.", gtx); } catch (Throwable e) { log.unableToRollbackGlobalTx(gtx, e); } } log.trace("Completed cleaning transactions originating on leavers"); } /** * Returns the {@link RemoteTransaction} associated with the supplied transaction id. Returns null * if no such association exists. 
*/ public RemoteTransaction getRemoteTransaction(GlobalTransaction txId) { return remoteTransactions.get(txId); } public void remoteTransactionRollback(GlobalTransaction gtx) { final RemoteTransaction remove = removeRemoteTransaction(gtx); log.tracef("Removed remote transaction %s? %b", gtx, remove); } /** * Creates and registers a {@link RemoteTransaction}, optionally with modifications. Returns the * created transaction. * * @throws IllegalStateException if an attempt to create a {@link RemoteTransaction} for an * already registered id is made. */ public RemoteTransaction createRemoteTransaction( GlobalTransaction globalTx, WriteCommand[] modifications) { RemoteTransaction remoteTransaction = modifications == null ? txFactory.newRemoteTransaction(globalTx, currentViewId) : txFactory.newRemoteTransaction(modifications, globalTx, currentViewId); registerRemoteTransaction(globalTx, remoteTransaction); return remoteTransaction; } private void registerRemoteTransaction(GlobalTransaction gtx, RemoteTransaction rtx) { RemoteTransaction transaction = remoteTransactions.put(gtx, rtx); if (transaction != null) { log.remoteTxAlreadyRegistered(); throw new IllegalStateException( "A remote transaction with the given id was already registered!"); } log.tracef("Created and registered remote transaction %s", rtx); } /** * Returns the {@link org.infinispan.transaction.xa.TransactionXaAdapter} corresponding to the * supplied transaction. If none exists, one will be created first. */ public LocalTransaction getOrCreateLocalTransaction( Transaction transaction, TxInvocationContext ctx) { LocalTransaction current = localTransactions.get(transaction); if (current == null) { Address localAddress = rpcManager != null ? rpcManager.getTransport().getAddress() : null; GlobalTransaction tx = txFactory.newGlobalTransaction(localAddress, false); current = txFactory.newLocalTransaction( transaction, tx, ctx.isImplicitTransaction(), currentViewId); log.tracef("Created a new local transaction: %s", current); localTransactions.put(transaction, current); notifier.notifyTransactionRegistered(tx, ctx); } return current; } /** * Removes the {@link org.infinispan.transaction.xa.TransactionXaAdapter} corresponding to the * given tx. Returns true if such a tx exists. */ public boolean removeLocalTransaction(LocalTransaction localTransaction) { return localTransaction != null && (removeLocalTransactionInternal(localTransaction.getTransaction()) != null); } public LocalTransaction removeLocalTransaction(Transaction tx) { return removeLocalTransactionInternal(tx); } protected final LocalTransaction removeLocalTransactionInternal(Transaction tx) { LocalTransaction removed = localTransactions.remove(tx); releaseResources(removed); return removed; } private void releaseResources(CacheTransaction cacheTransaction) { if (cacheTransaction != null) { if (clustered) { recalculateMinViewIdIfNeeded(cacheTransaction); } log.tracef("Removed %s from transaction table.", cacheTransaction); cacheTransaction.notifyOnTransactionFinished(); } } /** Removes the {@link RemoteTransaction} corresponding to the given tx. 
/**
 * Removes the {@link RemoteTransaction} corresponding to the given tx.
 */
public void remoteTransactionCommitted(GlobalTransaction gtx) {
   if (Configurations.isSecondPhaseAsync(configuration)) {
      removeRemoteTransaction(gtx);
   }
}

public final RemoteTransaction removeRemoteTransaction(GlobalTransaction txId) {
   RemoteTransaction removed = remoteTransactions.remove(txId);
   releaseResources(removed);
   return removed;
}

public int getRemoteTxCount() {
   return remoteTransactions.size();
}

public int getLocalTxCount() {
   return localTransactions.size();
}

public LocalTransaction getLocalTransaction(Transaction tx) {
   return localTransactions.get(tx);
}

public boolean containRemoteTx(GlobalTransaction globalTransaction) {
   return remoteTransactions.containsKey(globalTransaction);
}

public Collection<RemoteTransaction> getRemoteTransactions() {
   return remoteTransactions.values();
}

protected final LocalTransaction getLocalTx(Transaction tx) {
   return localTransactions.get(tx);
}

public final Collection<LocalTransaction> getLocalTransactions() {
   return localTransactions.values();
}

protected final void recalculateMinViewIdIfNeeded(CacheTransaction removedTransaction) {
   if (removedTransaction == null) throw new IllegalArgumentException("Transaction cannot be null!");
   if (currentViewId != CACHE_STOPPED_VIEW_ID) {
      // Assume that we only get here if we are clustered.
      int removedTransactionViewId = removedTransaction.getViewId();
      if (removedTransactionViewId < minTxViewId) {
         log.tracef("A transaction has a view ID (%s) that is smaller than the smallest transaction view ID (%s) this node knows about! This can happen if a concurrent thread recalculates the minimum view ID after the current transaction has been removed from the transaction table.",
               removedTransactionViewId, minTxViewId);
      } else if (removedTransactionViewId == minTxViewId && removedTransactionViewId < currentViewId) {
         // We only need to recalculate the minimum view ID if the transaction being completed
         // carried the smallest known transaction view ID, and that view ID is not the
         // current one.
         calculateMinViewId(removedTransactionViewId);
      }
   }
}

@ViewChanged
public void recalculateMinViewIdOnTopologyChange(ViewChangedEvent vce) {
   // Don't do anything if this cache is not clustered - view changes are global.
   if (clustered) {
      log.debugf("View changed, recalculating minViewId");
      currentViewId = vce.getViewId();
      calculateMinViewId(-1);
   }
}
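// Informal invariant maintained by the two methods above: once recalculation settles,
// minTxViewId is never greater than the view ID of any transaction still registered.
// A hypothetical sanity check, for illustration only:
//
//    for (CacheTransaction ct : getLocalTransactions())
//       assert ct.getViewId() >= getMinViewId();
//    for (CacheTransaction ct : getRemoteTransactions())
//       assert ct.getViewId() >= getMinViewId();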
/**
 * Calculates the minimum view ID known by the current node. This method is only used in a
 * clustered cache, and is invoked either when a view change is detected or when a transaction
 * whose view ID differs from the current view ID is removed.
 *
 * <p>This method is guarded by minViewRecalculationLock to prevent concurrent updates to the
 * minimum view ID field.
 *
 * @param idOfRemovedTransaction the view ID associated with the transaction that triggered this
 *        recalculation, or -1 if triggered by a view change event.
 */
@GuardedBy("minViewRecalculationLock")
private void calculateMinViewId(int idOfRemovedTransaction) {
   minViewRecalculationLock.lock();
   try {
      // Check again, now under the lock, that recalculation is really needed: the removed
      // transaction must have carried the smallest known view ID and that ID must be older
      // than the current view.
      if (idOfRemovedTransaction == -1
            || (idOfRemovedTransaction == minTxViewId && idOfRemovedTransaction < currentViewId)) {
         int minViewIdFound = currentViewId;

         for (CacheTransaction ct : localTransactions.values()) {
            int viewId = ct.getViewId();
            if (viewId < minViewIdFound) minViewIdFound = viewId;
         }
         for (CacheTransaction ct : remoteTransactions.values()) {
            int viewId = ct.getViewId();
            if (viewId < minViewIdFound) minViewIdFound = viewId;
         }

         if (minViewIdFound > minTxViewId) {
            log.tracef("Changing minimum view ID from %s to %s", minTxViewId, minViewIdFound);
            minTxViewId = minViewIdFound;
         } else {
            log.tracef("Minimum view ID still is %s; nothing to change", minViewIdFound);
         }
      }
   } finally {
      minViewRecalculationLock.unlock();
   }
}

private boolean areTxsOnGoing() {
   return !localTransactions.isEmpty()
         || (remoteTransactions != null && !remoteTransactions.isEmpty());
}

private void shutDownGracefully() {
   if (log.isDebugEnabled())
      log.debugf("Waiting up to %s for on-going transactions to finish.",
            Util.prettyPrintTime(configuration.transaction().cacheStopTimeout(), TimeUnit.MILLISECONDS));
   long failTime = currentMillisFromNanotime() + configuration.transaction().cacheStopTimeout();
   boolean txsOnGoing = areTxsOnGoing();
   while (txsOnGoing && currentMillisFromNanotime() < failTime) {
      try {
         Thread.sleep(30);
         txsOnGoing = areTxsOnGoing();
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         if (clustered) {
            log.debugf("Interrupted waiting for on-going transactions to finish. %s local transactions and %s remote transactions",
                  localTransactions.size(), remoteTransactions.size());
         } else {
            log.debugf("Interrupted waiting for %s on-going transactions to finish.",
                  localTransactions.size());
         }
      }
   }

   if (txsOnGoing) {
      log.unfinishedTransactionsRemain(
            localTransactions == null ? 0 : localTransactions.size(),
            remoteTransactions == null ? 0 : remoteTransactions.size());
   } else {
      log.debug("All transactions terminated");
   }
}
}
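/**
 * Minimal, self-contained sketch (not Infinispan code) of the minimum-view-ID tracking scheme
 * used by the transaction table above: every live transaction is stamped with the view ID it
 * started in, and the tracked minimum only needs recalculating when the removed transaction
 * carried the current minimum and that minimum is older than the cluster's view. All names are
 * illustrative; for brevity the sketch assumes at most one live transaction per view ID.
 */
class MinViewIdTrackerSketch {

   private final java.util.concurrent.locks.ReentrantLock recalculationLock =
         new java.util.concurrent.locks.ReentrantLock();
   private final java.util.Set<Integer> liveTxViewIds = java.util.Collections.newSetFromMap(
         new java.util.concurrent.ConcurrentHashMap<Integer, Boolean>());
   private volatile int currentViewId;
   private volatile int minViewId;

   MinViewIdTrackerSketch(int initialViewId) {
      currentViewId = initialViewId;
      minViewId = initialViewId;
   }

   /** Called when a transaction starts; it records the view it was created in. */
   void onTransactionStarted(int txViewId) {
      liveTxViewIds.add(txViewId);
   }

   /** Called when a transaction completes; mirrors recalculateMinViewIdIfNeeded. */
   void onTransactionRemoved(int txViewId) {
      liveTxViewIds.remove(txViewId);
      // Only recalculate when the removed tx could have been holding the minimum down.
      if (txViewId == minViewId && txViewId < currentViewId) {
         recalculate();
      }
   }

   /** Called on every cluster view change; mirrors the @ViewChanged listener. */
   void onViewChanged(int newViewId) {
      currentViewId = newViewId;
      recalculate();
   }

   int getMinViewId() {
      return minViewId;
   }

   private void recalculate() {
      recalculationLock.lock();
      try {
         int min = currentViewId;
         for (int viewId : liveTxViewIds) {
            if (viewId < min) min = viewId;
         }
         // The minimum only ever moves forward; a stale concurrent recalculation must not
         // push it backwards.
         if (min > minViewId) minViewId = min;
      } finally {
         recalculationLock.unlock();
      }
   }
}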