private void logFileOverride() {
   URL propsUrl = Thread.currentThread().getContextClassLoader().getResource("c3p0.properties");
   URL xmlUrl = Thread.currentThread().getContextClassLoader().getResource("c3p0-config.xml");
   if (log.isInfoEnabled() && propsUrl != null) {
      log.info("Found 'c3p0.properties' in classpath: " + propsUrl);
   }
   if (log.isInfoEnabled() && xmlUrl != null) {
      log.info("Found 'c3p0-config.xml' in classpath: " + xmlUrl);
   }
}
public void testInitialStateTransferInDifferentThread(Method m) throws Exception {
   testCount++;
   log.info(m.getName() + " start - " + testCount);
   CacheContainer cm1 = null, cm2 = null, cm30 = null;
   try {
      Cache<Object, Object> cache1 = null, cache2 = null, cache3 = null;
      cm1 = createCacheManager(tmpDirectory1);
      cache1 = cm1.getCache(cacheName);
      writeInitialData(cache1);

      cm2 = createCacheManager(tmpDirectory2);
      cache2 = cm2.getCache(cacheName);
      cache1.put("delay", new StateTransferFunctionalTest.DelayTransfer());

      // Pause to give caches time to see each other
      TestingUtil.blockUntilViewsReceived(60000, cache1, cache2);
      verifyInitialData(cache2);

      final CacheContainer cm3 = createCacheManager(tmpDirectory3);
      cm30 = cm3;

      // Start the third cache from a dedicated, named thread so state transfer
      // happens off the test thread.
      Future<Void> f1 = Executors.newSingleThreadExecutor(new ThreadFactory() {
         public Thread newThread(Runnable r) {
            return new Thread(r, "CacheStarter-Cache3");
         }
      }).submit(new Callable<Void>() {
         public Void call() throws Exception {
            cm3.getCache(cacheName);
            return null;
         }
      });
      f1.get();

      cache3 = cm3.getCache(cacheName);

      TestingUtil.blockUntilViewsReceived(120000, cache1, cache2, cache3);
      verifyInitialData(cache3);
      log.info(m.getName() + " end - " + testCount);
   } finally {
      if (cm1 != null) cm1.stop();
      if (cm2 != null) cm2.stop();
      if (cm30 != null) cm30.stop();
   }
}
private void addEntityCheckCache(SessionFactoryImplementor sessionFactory) throws Exception {
   Item item = new Item("chris", "Chris's Item");
   beginTx();
   try {
      Session s = sessionFactory.openSession();
      s.getTransaction().begin();
      s.persist(item);
      s.getTransaction().commit();
      s.close();
   } catch (Exception e) {
      setRollbackOnlyTx(e);
   } finally {
      commitOrRollbackTx();
   }

   beginTx();
   try {
      Session s = sessionFactory.openSession();
      Item found = (Item) s.load(Item.class, item.getId());
      Statistics stats = sessionFactory.getStatistics();
      log.info(stats.toString());
      assertEquals(item.getDescription(), found.getDescription());
      assertEquals(0, stats.getSecondLevelCacheMissCount());
      assertEquals(1, stats.getSecondLevelCacheHitCount());
      s.delete(found);
      s.close();
   } catch (Exception e) {
      setRollbackOnlyTx(e);
   } finally {
      commitOrRollbackTx();
   }
}
private void internalCleanup() throws Exception {
   if (sessionFactory != null) {
      tm.begin();
      try {
         Session session = sessionFactory.getCurrentSession();
         Query query = session.createQuery("select account from Account as account");
         List accts = query.list();
         if (accts != null) {
            for (Iterator it = accts.iterator(); it.hasNext(); ) {
               try {
                  Object acct = it.next();
                  log.info("Removing " + acct);
                  session.delete(acct);
               } catch (Exception ignored) {
               }
            }
         }
         tm.commit();
      } catch (Exception e) {
         tm.rollback();
         throw e;
      }
   }
}
protected void testRemoveServers() throws InterruptedException {
   log.info("** before calling stop");
   caches.get(2).getCacheManager().stop();
   caches.remove(2);
   waitForClusterToResize();
   assertEquals(2, caches.size());
   assertUnaffected();
}
public synchronized void onTestSkipped(ITestResult arg0) {
   System.out.println(getThreadId() + " Test " + getTestDesc(arg0) + " skipped.");
   log.info(" Test " + getTestDesc(arg0) + " skipped.");
   if (arg0.getThrowable() != null)
      log.error("Test skipped : " + arg0.getThrowable(), arg0.getThrowable());
   skipped.incrementAndGet();
   printStatus();
}
public void testForFailure() {
   int num = 1000;
   for (int i = 0; i < num; i++) {
      BigObject bigObject = createBigObject(i, "prefix");
      cache.put(i, bigObject);
      c0.put(i, bigObject);
   }

   for (int i = 0; i < num; i++) {
      assert c0.get(i) instanceof BigObject;
      assert c1.get(i) instanceof BigObject;
      assert c2.get(i) instanceof BigObject;
      assert c3.get(i) instanceof BigObject;
   }

   log.info("Before stopping a cache!");
   Thread thread = new Thread() {
      @Override
      public void run() {
         log.info("About to stop " + c3.getAdvancedCache().getRpcManager().getAddress());
         c3.stop();
         c3.getCacheManager().stop();
         log.info("Cache stopped async!");
      }
   };
   thread.start();

   int failureCount = 0;
   for (int i = 0; i < num; i++) {
      log.info("----Running a get on " + i);
      try {
         Object o = c0.get(i);
         assertValue(i, o);
      } catch (TimeoutException e) {
         log.error("Exception received", e);
         failureCount++;
      }
      try {
         assertValue(i, c1.get(i));
      } catch (TimeoutException e) {
         failureCount++;
      }
      try {
         assertValue(i, c2.get(i));
      } catch (TimeoutException e) {
         failureCount++;
      }
      if (i % 100 == 0) System.out.println("i = " + i);
   }
   log.info("failureCount = " + failureCount);

   log.info("Before stopping cache managers!");
   TestingUtil.killCacheManagers(manager(2));
   log.info("2 killed");
   TestingUtil.killCacheManagers(manager(1));
   log.info("1 killed");
   TestingUtil.killCacheManagers(manager(0));
   log.info("0 killed");
}
public void testNonSerializable() throws Exception {
   try {
      marshaller.objectToByteBuffer(new Object());
   } catch (NotSerializableException e) {
      log.info("Log exception for output format verification", e);
      TraceInformation inf = (TraceInformation) e.getCause();
      assert inf.toString().contains("in object java.lang.Object@");
   }
}
public void testStoreAndRetrieve() throws CacheLoaderException {
   assertNotInCacheAndStore("k1", "k2", "k3", "k4", "k5", "k6", "k7");

   cache.put("k1", "v1");
   cache.put("k2", "v2", lifespan, MILLISECONDS);
   cache.putAll(Collections.singletonMap("k3", "v3"));
   cache.putAll(Collections.singletonMap("k4", "v4"), lifespan, MILLISECONDS);
   cache.putIfAbsent("k5", "v5");
   cache.putIfAbsent("k6", "v6", lifespan, MILLISECONDS);
   cache.putIfAbsent("k5", "v5-SHOULD-NOT-PUT");
   cache.putIfAbsent("k6", "v6-SHOULD-NOT-PUT", lifespan, MILLISECONDS);
   cache.putForExternalRead("k7", "v7");
   cache.putForExternalRead("k7", "v7-SHOULD-NOT-PUT");

   for (int i = 1; i < 8; i++) {
      // even numbers have lifespans
      if (i % 2 == 1) assertInCacheAndStore("k" + i, "v" + i);
      else assertInCacheAndStore("k" + i, "v" + i, lifespan);
   }

   assert !cache.remove("k1", "some rubbish");

   for (int i = 1; i < 8; i++) {
      // even numbers have lifespans
      if (i % 2 == 1) assertInCacheAndStore("k" + i, "v" + i);
      else assertInCacheAndStore("k" + i, "v" + i, lifespan);
   }

   log.info("cache.get(\"k1\") = " + cache.get("k1"));
   assert cache.remove("k1", "v1");
   log.info("cache.get(\"k1\") = " + cache.get("k1"));
   assert cache.remove("k2").equals("v2");

   assertNotInCacheAndStore("k1", "k2");

   for (int i = 3; i < 8; i++) {
      // even numbers have lifespans
      if (i % 2 == 1) assertInCacheAndStore("k" + i, "v" + i);
      else assertInCacheAndStore("k" + i, "v" + i, lifespan);
   }

   cache.clear();
   assertNotInCacheAndStore("k1", "k2", "k3", "k4", "k5", "k6", "k7");
}
@Override
public void stop() {
   try {
      emfRegistry.closeEntityManagerFactory(configuration.persistenceUnitName());
   } catch (Exception e) {
      throw new JpaStoreException("Exceptions occurred while stopping store", e);
   } finally {
      log.info("JPA Store stopped, stats: " + stats);
   }
}
public void testNestedNonSerializable() throws Exception {
   PutKeyValueCommand cmd =
         new PutKeyValueCommand("k", new Object(), false, null, 0, 0, Collections.<Flag>emptySet());
   try {
      marshaller.objectToByteBuffer(cmd);
   } catch (NotSerializableException e) {
      log.info("Log exception for output format verification", e);
      TraceInformation inf = (TraceInformation) e.getCause();
      assert inf.toString().contains("in object java.lang.Object@");
      assert inf.toString().contains("in object org.infinispan.commands.write.PutKeyValueCommand@");
   }
}
@Override
public <K, V> Cache<K, V> getCache(String cacheName) {
   log.info("Retrieve cache from hanging cache manager");
   // TODO: Hacky, but it's the easiest thing to do - consider ByteMan.
   // ByteMan apparently supports TestNG since 1.5.1, but there's no clear
   // example out there; with more time it should be considered.
   String threadName = Thread.currentThread().getName();
   if (threadName.startsWith("HotRod")) {
      log.info("Thread is a HotRod server worker thread, so force wait");
      try {
         // Wait a max of 3 minutes, otherwise the socket timeout's not working
         latch.await(180, TimeUnit.SECONDS);
         log.info("Wait finished, return the cache");
         return super.getCache();
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         throw new CacheException(e);
      }
   }
   return super.getCache(cacheName);
}
public void testErrorUnmarshalling() throws Exception {
   Pojo pojo = new PojoWhichFailsOnUnmarshalling();
   byte[] bytes = marshaller.objectToByteBuffer(pojo);
   try {
      marshaller.objectFromByteBuffer(bytes);
   } catch (IOException e) {
      log.info("Log exception for output format verification", e);
      TraceInformation inf = (TraceInformation) e.getCause();
      assert inf.toString().contains(
            "in object of type org.infinispan.marshall.VersionAwareMarshallerTest$PojoWhichFailsOnUnmarshalling");
   }
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command)
      throws Throwable {
   // First execute the operation itself
   Object ret = super.visitPutKeyValueCommand(ctx, command);
   // After entry has been committed to the container
   assertKeySet = (cache.keySet().size() == 1);
   log.info("Cache entry created, now check in different thread");
   latch.countDown();
   // Force a bit of delay in the listener
   TestingUtil.sleepThread(3000);
   return ret;
}
public void testInitialStateTransfer() throws Exception {
   testCount++;
   log.info("testInitialStateTransfer start - " + testCount);
   CacheContainer cm1 = null, cm2 = null;
   try {
      Cache<Object, Object> cache1, cache2;
      cm1 = createCacheManager(tmpDirectory1);
      cache1 = cm1.getCache(cacheName);
      writeInitialData(cache1);

      cm2 = createCacheManager(tmpDirectory2);
      cache2 = cm2.getCache(cacheName);

      // Pause to give caches time to see each other
      TestingUtil.blockUntilViewsReceived(60000, cache1, cache2);
      verifyInitialData(cache2);
      log.info("testInitialStateTransfer end - " + testCount);
   } finally {
      if (cm1 != null) cm1.stop();
      if (cm2 != null) cm2.stop();
   }
}
@Override
protected void createCacheManagers() throws Throwable {
   Configuration config = getDefaultClusteredConfig(Configuration.CacheMode.DIST_SYNC);
   config.setL1CacheEnabled(false);
   config.setNumOwners(3);
   config.setUseLockStriping(false);
   createCluster(config, 4);
   c0 = cache(0);
   c1 = cache(1);
   c2 = cache(2);
   c3 = cache(3);
   waitForClusterToForm();
   log.info("Rehash is complete!");
   cache = new HashMap<Integer, BigObject>();
}
protected void testShutdownOwnManager() {
   log.info("**** here it starts");
   caches.get(0).getCacheManager().stop();
   caches.remove(0);
   assertEquals(1, caches.size());

   TestingUtil.blockUntilViewsReceived(10000, false, caches.toArray(new Cache[0]));
   assertEquals(1, topology().size());

   eventually(new Condition() {
      @Override
      public boolean isSatisfied() throws Exception {
         return !keyAffinityService.isStarted();
      }
   });
}
@Test
public void testInsertClearCacheDeleteEntity() throws Exception {
   final Statistics stats = sessionFactory().getStatistics();
   stats.clear();

   final Item item = new Item("chris", "Chris's Item");
   withTx(tm, new Callable<Void>() {
      @Override
      public Void call() throws Exception {
         Session s = openSession();
         s.getTransaction().begin();
         s.persist(item);
         s.getTransaction().commit();
         assertEquals(0, stats.getSecondLevelCacheMissCount());
         assertEquals(0, stats.getSecondLevelCacheHitCount());
         assertEquals(1, stats.getSecondLevelCachePutCount());
         s.close();
         return null;
      }
   });

   log.info("Entry persisted, let's load and delete it.");

   cleanupCache();
   withTx(tm, new Callable<Void>() {
      @Override
      public Void call() throws Exception {
         Session s = openSession();
         s.getTransaction().begin();
         Item found = (Item) s.load(Item.class, item.getId());
         log.info(stats.toString());
         assertEquals(item.getDescription(), found.getDescription());
         assertEquals(1, stats.getSecondLevelCacheMissCount());
         assertEquals(0, stats.getSecondLevelCacheHitCount());
         assertEquals(2, stats.getSecondLevelCachePutCount());
         s.delete(found);
         s.getTransaction().commit();
         s.close();
         return null;
      }
   });
}
public void testReplace() throws Exception {
   cache(0).put("myKey", "myValue");

   // add an interceptor on second node that will block REPLACE commands right after
   // EntryWrappingInterceptor until we are ready
   final CountDownLatch replaceStartedLatch = new CountDownLatch(1);
   final CountDownLatch replaceProceedLatch = new CountDownLatch(1);
   boolean isVersioningEnabled = cache(0).getCacheConfiguration().versioning().enabled();
   cacheConfigBuilder.customInterceptors().addInterceptor()
         .after(isVersioningEnabled
               ? VersionedEntryWrappingInterceptor.class
               : EntryWrappingInterceptor.class)
         .interceptor(new CommandInterceptor() {
            @Override
            protected Object handleDefault(InvocationContext ctx, VisitableCommand cmd)
                  throws Throwable {
               if (cmd instanceof ReplaceCommand) {
                  // signal we encounter a REPLACE
                  replaceStartedLatch.countDown();
                  // wait until it is ok to continue with REPLACE
                  if (!replaceProceedLatch.await(15, TimeUnit.SECONDS)) {
                     throw new TimeoutException();
                  }
               }
               return super.handleDefault(ctx, cmd);
            }
         });

   // do not allow coordinator to send topology updates to node B
   final ClusterTopologyManager ctm0 =
         TestingUtil.extractGlobalComponent(manager(0), ClusterTopologyManager.class);
   ctm0.setRebalancingEnabled(false);

   log.info("Adding a new node ..");
   addClusterEnabledCacheManager(cacheConfigBuilder);
   log.info("Added a new node");

   // node B is not a member yet and rebalance has not started yet
   CacheTopology cacheTopology =
         advancedCache(1).getComponentRegistry().getStateTransferManager().getCacheTopology();
   assertNull(cacheTopology.getPendingCH());
   assertTrue(cacheTopology.getMembers().contains(address(0)));
   assertFalse(cacheTopology.getMembers().contains(address(1)));
   assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));

   // no keys should be present on node B yet because state transfer is blocked
   assertTrue(cache(1).keySet().isEmpty());

   // initiate a REPLACE
   Future<Object> getFuture = fork(new Callable<Object>() {
      @Override
      public Object call() throws Exception {
         try {
            return cache(1).replace("myKey", "newValue");
         } catch (Exception e) {
            log.errorf(e, "REPLACE failed: %s", e.getMessage());
            throw e;
         }
      }
   });

   // wait for REPLACE command on node B to reach beyond *EntryWrappingInterceptor,
   // where it will block; the value seen so far is null
   if (!replaceStartedLatch.await(15, TimeUnit.SECONDS)) {
      throw new TimeoutException();
   }

   // paranoia, yes the value is still missing from data container
   assertTrue(cache(1).keySet().isEmpty());

   // allow rebalance to start
   ctm0.setRebalancingEnabled(true);

   // wait for state transfer to end
   TestingUtil.waitForRehashToComplete(cache(0), cache(1));

   // the state should be already transferred now
   assertEquals(1, cache(1).keySet().size());

   // allow REPLACE to continue
   replaceProceedLatch.countDown();

   Object oldVal = getFuture.get(15, TimeUnit.SECONDS);
   assertNotNull(oldVal);
   assertEquals("myValue", oldVal);

   assertEquals("newValue", cache(0).get("myKey"));
   assertEquals("newValue", cache(1).get("myKey"));
}
private void info(String str) {
   log.info(" [" + c + "] " + str);
}
public void testGet() throws Exception {
   cache(0).put("myKey", "myValue");

   // add an interceptor on node B that will block state transfer until we are ready
   final CountDownLatch applyStateProceedLatch = new CountDownLatch(1);
   final CountDownLatch applyStateStartedLatch = new CountDownLatch(1);
   cacheConfigBuilder.customInterceptors().addInterceptor()
         .before(InvocationContextInterceptor.class)
         .interceptor(new CommandInterceptor() {
            @Override
            protected Object handleDefault(InvocationContext ctx, VisitableCommand cmd)
                  throws Throwable {
               // if this 'put' command is caused by state transfer we block until GET begins
               if (cmd instanceof PutKeyValueCommand
                     && ((PutKeyValueCommand) cmd).hasFlag(Flag.PUT_FOR_STATE_TRANSFER)) {
                  // signal we encounter a state transfer PUT
                  applyStateStartedLatch.countDown();
                  // wait until it is ok to apply state
                  if (!applyStateProceedLatch.await(15, TimeUnit.SECONDS)) {
                     throw new TimeoutException();
                  }
               }
               return super.handleDefault(ctx, cmd);
            }
         });

   // add an interceptor on node B that will block GET commands until we are ready
   final CountDownLatch getKeyStartedLatch = new CountDownLatch(1);
   final CountDownLatch getKeyProceedLatch = new CountDownLatch(1);
   cacheConfigBuilder.customInterceptors().addInterceptor()
         .before(CallInterceptor.class)
         .interceptor(new CommandInterceptor() {
            @Override
            protected Object handleDefault(InvocationContext ctx, VisitableCommand cmd)
                  throws Throwable {
               if (cmd instanceof GetKeyValueCommand) {
                  // signal we encounter a GET
                  getKeyStartedLatch.countDown();
                  // wait until it is ok to continue with GET
                  if (!getKeyProceedLatch.await(15, TimeUnit.SECONDS)) {
                     throw new TimeoutException();
                  }
               }
               return super.handleDefault(ctx, cmd);
            }
         });

   log.info("Adding a new node ..");
   addClusterEnabledCacheManager(cacheConfigBuilder);
   log.info("Added a new node");

   // state transfer is blocked, no keys should be present on node B yet
   assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

   // wait for state transfer on node B to progress to the point where data segments are
   // about to be applied
   if (!applyStateStartedLatch.await(15, TimeUnit.SECONDS)) {
      throw new TimeoutException();
   }

   // state transfer is blocked, no keys should be present on node B yet
   assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

   // initiate a GET
   Future<Object> getFuture = fork(new Callable<Object>() {
      @Override
      public Object call() {
         return cache(1).get("myKey");
      }
   });

   // wait for GET command on node B to reach beyond *DistributionInterceptor, where it will
   // block; the value seen so far is null
   if (!getKeyStartedLatch.await(15, TimeUnit.SECONDS)) {
      throw new TimeoutException();
   }

   // allow state transfer to apply state
   applyStateProceedLatch.countDown();

   // wait for state transfer to end
   TestingUtil.waitForRehashToComplete(cache(0), cache(1));

   assertEquals(1, cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().size());

   // allow GET to continue
   getKeyProceedLatch.countDown();

   Object value = getFuture.get(15, TimeUnit.SECONDS);
   assertEquals("myValue", value);
}
public void onTestStart(ITestResult res) {
   log.info("Starting test " + getTestDesc(res));
   addOomLoggingSupport();
   threadTestClass.set(res.getTestClass());
}
@Test
@FailureExpectedWithNewMetamodel(
      message = "ForeignKeyHelper.java:140 fails with org.hibernate.cfg.NotYetImplementedException: "
            + "No support yet for referenced join columns unless they correspond with columns bound for an attribute binding.")
public void testPersistAndLoadUnderJta() throws Exception {
   Item item;
   SessionFactory sessionFactory = buildSessionFactory();
   try {
      UserTransaction ut = (UserTransaction) ctx.lookup("UserTransaction");
      ut.begin();
      try {
         Session session = sessionFactory.openSession();
         session.getTransaction().begin();
         item = new Item("anItem", "An item owned by someone");
         session.persist(item);
         session.getTransaction().commit();
         session.close();
      } catch (Exception e) {
         ut.setRollbackOnly();
         throw e;
      } finally {
         if (ut.getStatus() == Status.STATUS_ACTIVE) ut.commit();
         else ut.rollback();
      }

      ut = (UserTransaction) ctx.lookup("UserTransaction");
      ut.begin();
      try {
         Session session = sessionFactory.openSession();
         session.getTransaction().begin();
         Item found = (Item) session.load(Item.class, item.getId());
         Statistics stats = session.getSessionFactory().getStatistics();
         log.info(stats.toString());
         assertEquals(item.getDescription(), found.getDescription());
         assertEquals(0, stats.getSecondLevelCacheMissCount());
         assertEquals(1, stats.getSecondLevelCacheHitCount());
         session.delete(found);
         session.getTransaction().commit();
         session.close();
      } catch (Exception e) {
         ut.setRollbackOnly();
         throw e;
      } finally {
         if (ut.getStatus() == Status.STATUS_ACTIVE) ut.commit();
         else ut.rollback();
      }

      ut = (UserTransaction) ctx.lookup("UserTransaction");
      ut.begin();
      try {
         Session session = sessionFactory.openSession();
         session.getTransaction().begin();
         assertNull(session.get(Item.class, item.getId()));
         session.getTransaction().commit();
         session.close();
      } catch (Exception e) {
         ut.setRollbackOnly();
         throw e;
      } finally {
         if (ut.getStatus() == Status.STATUS_ACTIVE) ut.commit();
         else ut.rollback();
      }
   } finally {
      if (sessionFactory != null) sessionFactory.close();
   }
}
public synchronized void onTestSuccess(ITestResult arg0) {
   System.out.println(getThreadId() + " Test " + getTestDesc(arg0) + " succeeded.");
   log.info("Test succeeded " + getTestDesc(arg0) + ".");
   succeeded.incrementAndGet();
   printStatus();
}