/**
 * Tests that we are in {@link GemFireHealth#OKAY_HEALTH okay} health if the hit ratio dips
 * below the threshold.
 */
public void testCheckHitRatio() throws CacheException {
  Cache cache = CacheFactory.create(this.system);
  // CachePerfStats stats = ((GemFireCache) cache).getCachePerfStats();

  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setCacheLoader(new CacheLoader() {
    public Object load(LoaderHelper helper) throws CacheLoaderException {
      return "Loaded";
    }

    public void close() {}
  });

  RegionAttributes attrs = factory.create();
  Region region = cache.createRegion(this.getName(), attrs);

  GemFireHealthConfig config = new GemFireHealthConfigImpl(null);
  config.setMinHitRatio(0.5);

  CacheHealthEvaluator eval =
      new CacheHealthEvaluator(config, this.system.getDistributionManager());
  List status = new ArrayList();
  eval.evaluate(status);
  assertEquals(0, status.size());

  region.get("One");
  region.get("One");
  region.get("One");

  status = new ArrayList();
  eval.evaluate(status);
  assertEquals(0, status.size());

  for (int i = 0; i < 50; i++) {
    region.get("Miss " + i);
  }

  status = new ArrayList();
  eval.evaluate(status);

  AbstractHealthEvaluator.HealthStatus ill =
      (AbstractHealthEvaluator.HealthStatus) status.get(0);
  assertEquals(GemFireHealth.OKAY_HEALTH, ill.getHealthCode());
  String s = "The hit ratio of this Cache";
  assertTrue(ill.getDiagnosis().indexOf(s) != -1);
}
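// A rough sketch of the arithmetic behind the test above, assuming the evaluator computes the
// hit ratio as (gets - loads) / gets over the sampling interval (the exact formula lives in
// CacheHealthEvaluator and is not shown here). The class and variable names below are
// illustrative, not part of the product or the test.
public class HitRatioSketch {
  public static void main(String[] args) {
    long gets = 3 + 50;   // Region.get() calls issued after the first evaluation
    long loads = 1 + 50;  // misses that went to the CacheLoader: "One" once, plus 50 "Miss" keys
    double hitRatio = (gets - loads) / (double) gets; // 2 / 53 ≈ 0.038
    System.out.println("hit ratio " + hitRatio + " is below the 0.5 threshold => OKAY_HEALTH");
  }
}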
/**
 * Do a get on a key in region REGION_NAME. Keys to get are specified in keyIntervals.
 *
 * @return true if all keys to have a get performed have been completed.
 */
protected boolean get() {
  SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
  long nextKey = sc.incrementAndRead(CQUtilBB.LASTKEY_GET);
  if (!keyIntervals.keyInRange(KeyIntervals.GET, nextKey)) {
    Log.getLogWriter().info("All gets completed; returning from get");
    return true;
  }
  Object key = NameFactory.getObjectNameForCounter(nextKey);
  Log.getLogWriter().info("Getting " + key);
  try {
    Object existingValue = aRegion.get(key);
    Log.getLogWriter()
        .info("Done getting " + key + ", num remaining: "
            + (keyIntervals.getLastKey(KeyIntervals.GET) - nextKey));
    if (existingValue == null)
      throw new TestException("Get of key " + key + " returned unexpected " + existingValue);
  } catch (TimeoutException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  } catch (CacheLoaderException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
  return (nextKey >= keyIntervals.getLastKey(KeyIntervals.GET));
}
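// The get() above relies on a blackboard shared counter to hand each worker thread a unique key
// index, stopping once the index leaves the configured interval. Below is a simplified,
// hypothetical stand-in for that pattern: AtomicLong replaces the CQUtilBB shared counter and
// the FIRST_KEY/LAST_KEY constants replace KeyIntervals; none of these names come from the
// test framework.
import java.util.concurrent.atomic.AtomicLong;

class KeyClaimSketch {
  static final AtomicLong LASTKEY_GET = new AtomicLong(0); // shared across worker threads
  static final long FIRST_KEY = 1, LAST_KEY = 100;

  /** @return true once every key in [FIRST_KEY, LAST_KEY] has been claimed. */
  static boolean claimAndProcessOneKey() {
    long nextKey = LASTKEY_GET.incrementAndGet(); // analogous to incrementAndRead(...)
    if (nextKey < FIRST_KEY || nextKey > LAST_KEY) {
      return true; // nothing left in this thread's range
    }
    System.out.println("processing Object_" + nextKey); // stand-in for aRegion.get(key)
    return nextKey >= LAST_KEY;
  }
}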
/**
 * Update an existing key in region REGION_NAME. The keys to update are specified in
 * keyIntervals.
 *
 * @return true if all keys to be updated have been completed.
 */
protected boolean updateExistingKey() {
  long nextKey =
      CQUtilBB.getBB().getSharedCounters().incrementAndRead(CQUtilBB.LASTKEY_UPDATE_EXISTING_KEY);
  if (!keyIntervals.keyInRange(KeyIntervals.UPDATE_EXISTING_KEY, nextKey)) {
    Log.getLogWriter().info("All existing keys updated; returning from updateExistingKey");
    return true;
  }
  Object key = NameFactory.getObjectNameForCounter(nextKey);
  QueryObject existingValue = (QueryObject) aRegion.get(key);
  if (existingValue == null)
    throw new TestException("Get of key " + key + " returned unexpected " + existingValue);
  QueryObject newValue = existingValue.modifyWithNewInstance(QueryObject.NEGATE, 0, true);
  newValue.extra = key; // encode the key in the object for later validation
  if (existingValue.aPrimitiveLong < 0)
    throw new TestException(
        "Trying to update a key which was already updated: " + existingValue.toStringFull());
  Log.getLogWriter()
      .info("Updating existing key " + key + " with value " + TestHelper.toString(newValue));
  aRegion.put(key, newValue);
  Log.getLogWriter()
      .info("Done updating existing key " + key + " with value " + TestHelper.toString(newValue)
          + ", num remaining: "
          + (keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY) - nextKey));
  return (nextKey >= keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY));
}
/**
 * Tests that a local writer receives a modified version of the callback argument on an update.
 */
public void testLocalUpdateModifiedCallbackArgument() throws CacheException {
  final String name = this.getUniqueName();
  final Object key = "KEY";
  final Object value = "VALUE";
  final Object one = "ONE";
  final Object two = "TWO";

  TestCacheLoader loader = new TestCacheLoader() {
    public Object load2(LoaderHelper helper) throws CacheLoaderException {
      Object[] array = (Object[]) helper.getArgument();
      assertEquals(one, array[0]);
      array[0] = two;
      return value;
    }
  };

  TestCacheWriter writer = new TestCacheWriter() {
    public void beforeCreate2(EntryEvent event) throws CacheWriterException {}

    public void beforeUpdate2(EntryEvent event) throws CacheWriterException {
      Object[] array = (Object[]) event.getCallbackArgument();
      assertEquals(two, array[0]);
    }
  };

  AttributesFactory factory = new AttributesFactory(getRegionAttributes());
  factory.setCacheLoader(loader);
  factory.setCacheWriter(writer);
  Region region = createRegion(name, factory.create());

  region.create(key, null);
  assertFalse(loader.wasInvoked());
  assertTrue(writer.wasInvoked());

  Object[] array = new Object[] {one};
  assertEquals(value, region.get(key, array));
  assertTrue(loader.wasInvoked());
  assertTrue(writer.wasInvoked());
}
private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
    throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    byte[] row = Bytes.toBytes("" + i);
    Get get = new Get(row);
    for (byte[] family : families) {
      get.addColumn(family, qf);
    }
    Result result = newReg.get(get);
    Cell[] raw = result.rawCells();
    assertEquals(families.length, result.size());
    for (int j = 0; j < families.length; j++) {
      assertTrue(CellUtil.matchingRow(raw[j], row));
      assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
      assertTrue(CellUtil.matchingQualifier(raw[j], qf));
    }
  }
}
/**
 * Tests that if a <code>CacheLoader</code> for a local region invokes {@link
 * LoaderHelper#netSearch}, a {@link CacheLoaderException} is thrown.
 */
public void testLocalLoaderNetSearch() throws CacheException {
  assertEquals(Scope.LOCAL, getRegionAttributes().getScope());

  final String name = this.getUniqueName();
  final Object key = this.getUniqueName();

  TestCacheLoader loader = new TestCacheLoader() {
    public Object load2(LoaderHelper helper) throws CacheLoaderException {
      try {
        helper.netSearch(true);
      } catch (TimeoutException ex) {
        fail("Why did I timeout?", ex);
      }
      return null;
    }
  };

  AttributesFactory factory = new AttributesFactory(getRegionAttributes());
  factory.setCacheLoader(loader);
  Region region = createRegion(name, factory.create());
  assertEquals(Scope.LOCAL, region.getAttributes().getScope());

  try {
    region.get(key);
    fail("Should have thrown a CacheLoaderException");
  } catch (CacheLoaderException ex) {
    String expected =
        com.gemstone.gemfire.internal.cache.LoaderHelperImpl.NET_SEARCH_LOCAL.toLocalizedString();
    String message = ex.getMessage();
    assertTrue("Unexpected message \"" + message + "\"", message.indexOf(expected) != -1);
  }
}
/**
 * Verify the contents of the region, taking into account the keys that were destroyed,
 * invalidated, etc. (as specified in keyIntervals). Throws an error if any problems are
 * detected. This must be called repeatedly by the same thread until
 * StopSchedulingTaskOnClientOrder is thrown.
 */
public void verifyRegionContents() {
  final long LOG_INTERVAL_MILLIS = 10000;
  // we already completed this check once; we can't do it again without reinitializing the
  // verify state variables
  if (verifyRegionContentsCompleted) {
    throw new TestException(
        "Test configuration problem; already verified region contents, "
            + "cannot call this task again without resetting batch variables");
  }

  // iterate keys
  long lastLogTime = System.currentTimeMillis();
  long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
  long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;
  long startTime = System.currentTimeMillis();
  long size = aRegion.size();
  boolean first = true;
  int numKeysToCheck = keyIntervals.getNumKeys() + numNewKeys;
  while (verifyRegionContentsIndex < numKeysToCheck) {
    verifyRegionContentsIndex++;
    if (first) {
      Log.getLogWriter()
          .info("In verifyRegionContents, region has " + size
              + " keys; starting verify at verifyRegionContentsIndex " + verifyRegionContentsIndex
              + "; verifying key names with indexes through (and including) " + numKeysToCheck);
      first = false;
    }

    // check region size the first time through the loop to avoid it being called
    // multiple times when this is batched
    if (verifyRegionContentsIndex == 1) {
      if (totalNumKeys != size) {
        String tmpStr = "Expected region size to be " + totalNumKeys + ", but it is size " + size;
        Log.getLogWriter().info(tmpStr);
        verifyRegionContentsErrStr.append(tmpStr + "\n");
      }
    }

    Object key = NameFactory.getObjectNameForCounter(verifyRegionContentsIndex);
    try {
      if (((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.NONE))
              && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.NONE)))
          || ((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.GET))
              && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.GET)))) {
        // this key was untouched after its creation
        checkContainsKey(key, true, "key was untouched");
        checkContainsValueForKey(key, true, "key was untouched");
        Object value = aRegion.get(key);
        checkValue(key, value);
      } else if ((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.INVALIDATE))
          && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.INVALIDATE))) {
        checkContainsKey(key, true, "key was invalidated");
        checkContainsValueForKey(key, false, "key was invalidated");
      } else if ((verifyRegionContentsIndex
              >= keyIntervals.getFirstKey(KeyIntervals.LOCAL_INVALIDATE))
          && (verifyRegionContentsIndex
              <= keyIntervals.getLastKey(KeyIntervals.LOCAL_INVALIDATE))) {
        // this key was locally invalidated
        checkContainsKey(key, true, "key was locally invalidated");
        checkContainsValueForKey(key, true, "key was locally invalidated");
        Object value = aRegion.get(key);
        checkValue(key, value);
      } else if ((verifyRegionContentsIndex >= keyIntervals.getFirstKey(KeyIntervals.DESTROY))
          && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.DESTROY))) {
        // this key was destroyed
        checkContainsKey(key, false, "key was destroyed");
        checkContainsValueForKey(key, false, "key was destroyed");
      } else if ((verifyRegionContentsIndex
              >= keyIntervals.getFirstKey(KeyIntervals.LOCAL_DESTROY))
          && (verifyRegionContentsIndex <= keyIntervals.getLastKey(KeyIntervals.LOCAL_DESTROY))) {
        // this key was locally destroyed
        checkContainsKey(key, true, "key was locally destroyed");
        checkContainsValueForKey(key, true, "key was locally destroyed");
        Object value = aRegion.get(key);
        checkValue(key, value);
      } else if ((verifyRegionContentsIndex
              >= keyIntervals.getFirstKey(KeyIntervals.UPDATE_EXISTING_KEY))
          && (verifyRegionContentsIndex
              <= keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY))) {
        // this key was updated
        checkContainsKey(key, true, "key was updated");
        checkContainsValueForKey(key, true, "key was updated");
        Object value = aRegion.get(key);
        checkUpdatedValue(key, value);
      } else if (verifyRegionContentsIndex > keyIntervals.getNumKeys()) {
        // key was newly added
        checkContainsKey(key, true, "key was new");
        checkContainsValueForKey(key, true, "key was new");
        Object value = aRegion.get(key);
        checkValue(key, value);
      }
    } catch (TestException e) {
      Log.getLogWriter().info(TestHelper.getStackTrace(e));
      verifyRegionContentsErrStr.append(e.getMessage() + "\n");
    }

    if (System.currentTimeMillis() - lastLogTime > LOG_INTERVAL_MILLIS) {
      Log.getLogWriter()
          .info("Verified key " + verifyRegionContentsIndex + " out of " + totalNumKeys);
      lastLogTime = System.currentTimeMillis();
    }

    if (System.currentTimeMillis() - startTime >= minTaskGranularityMS) {
      Log.getLogWriter()
          .info("In HydraTask_verifyRegionContents, returning before completing verify "
              + "because of task granularity (this task must be batched to complete); "
              + "last key verified is " + key);
      return; // task is batched; we are done with this batch
    }
  }

  verifyRegionContentsCompleted = true;
  if (verifyRegionContentsErrStr.length() > 0) {
    throw new TestException(verifyRegionContentsErrStr.toString());
  }
  String aStr = "In HydraTask_verifyRegionContents, verified " + verifyRegionContentsIndex
      + " keys/values";
  Log.getLogWriter().info(aStr);
  throw new StopSchedulingTaskOnClientOrder(aStr);
}
/**
 * Check that distributed ops that originate in a PROXY are correctly distributed to non-proxy
 * regions.
 */
private void distributedOps(DataPolicy dp, InterestPolicy ip) throws CacheException {
  initOtherId();
  AttributesFactory af = new AttributesFactory();
  af.setDataPolicy(dp);
  af.setSubscriptionAttributes(new SubscriptionAttributes(ip));
  af.setScope(Scope.DISTRIBUTED_ACK);
  Region r = createRootRegion("ProxyDUnitTest", af.create());

  doCreateOtherVm();

  r.put("putkey", "putvalue1");
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check put") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(true, r.containsKey("putkey"));
          assertEquals("putvalue1", r.getEntry("putkey").getValue());
          r.put("putkey", "putvalue2");
        }
      });
  assertEquals(false, r.containsKey("putkey"));
  assertEquals("putvalue2", r.get("putkey")); // netsearch

  r.invalidate("putkey");
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check invalidate") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(true, r.containsKey("putkey"));
          assertEquals(null, r.getEntry("putkey").getValue());
        }
      });
  assertEquals(null, r.get("putkey")); // invalid so total miss

  r.destroy("putkey");
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check destroy") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(false, r.containsKey("putkey"));
        }
      });
  assertEquals(null, r.get("putkey")); // total miss

  r.create("createKey", "createValue1");
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check create") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(true, r.containsKey("createKey"));
          assertEquals("createValue1", r.getEntry("createKey").getValue());
        }
      });

  {
    Map m = new HashMap();
    m.put("putAllKey1", "putAllValue1");
    m.put("putAllKey2", "putAllValue2");
    r.putAll(m, "putAllCallback");
  }
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check putAll") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(true, r.containsKey("putAllKey1"));
          assertEquals("putAllValue1", r.getEntry("putAllKey1").getValue());
          assertEquals(true, r.containsKey("putAllKey2"));
          assertEquals("putAllValue2", r.getEntry("putAllKey2").getValue());
        }
      });

  r.clear();
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check clear") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(0, r.size());
        }
      });

  getOtherVm()
      .invoke(new CacheSerializableRunnable("install CacheWriter") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          AttributesMutator am = r.getAttributesMutator();
          CacheWriter cw = new CacheWriterAdapter() {
            public void beforeCreate(EntryEvent event) throws CacheWriterException {
              throw new CacheWriterException("expected");
            }
          };
          am.setCacheWriter(cw);
        }
      });
  try {
    r.put("putkey", "putvalue");
    fail("expected CacheWriterException");
  } catch (CacheWriterException expected) {
  }
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check clear") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(0, r.size());
        }
      });

  assertEquals(null, r.get("loadkey")); // total miss
  getOtherVm()
      .invoke(new CacheSerializableRunnable("install CacheLoader") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          AttributesMutator am = r.getAttributesMutator();
          am.setCacheWriter(null); // clear cache writer
          CacheLoader cl = new CacheLoader() {
            public Object load(LoaderHelper helper) throws CacheLoaderException {
              if (helper.getKey().equals("loadkey")) {
                return "loadvalue";
              } else if (helper.getKey().equals("loadexception")) {
                throw new CacheLoaderException("expected");
              } else {
                return null;
              }
            }

            public void close() {}
          };
          am.setCacheLoader(cl);
        }
      });
  assertEquals("loadvalue", r.get("loadkey")); // net load
  assertEquals(null, r.get("foobar")); // total miss
  try {
    r.get("loadexception");
    fail("expected CacheLoaderException");
  } catch (CacheLoaderException expected) {
  }

  r.destroyRegion();
  getOtherVm()
      .invoke(new CacheSerializableRunnable("check destroyRegion") {
        public void run2() throws CacheException {
          Region r = getRootRegion("ProxyDUnitTest");
          assertEquals(null, r);
        }
      });
}
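// A caller of distributedOps would presumably look like the following; the test method name and
// the DataPolicy/InterestPolicy combination are illustrative only, since the surrounding test
// class is not shown here.
public void testDistributedOpsFromEmptyRegion() throws CacheException {
  // A region with DataPolicy.EMPTY holds no data locally, so every op must be distributed.
  distributedOps(DataPolicy.EMPTY, InterestPolicy.ALL);
}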
/**
 * Tests that we are in {@link GemFireHealth#OKAY_HEALTH okay} health if cache loads take too
 * long.
 *
 * @see CacheHealthEvaluator#checkLoadTime
 */
public void testCheckLoadTime() throws CacheException {
  Cache cache = CacheFactory.create(this.system);
  CachePerfStats stats = ((GemFireCacheImpl) cache).getCachePerfStats();

  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setCacheLoader(new CacheLoader() {
    public Object load(LoaderHelper helper) throws CacheLoaderException {
      return "Loaded";
    }

    public void close() {}
  });

  RegionAttributes attrs = factory.create();
  Region region = cache.createRegion(this.getName(), attrs);

  GemFireHealthConfig config = new GemFireHealthConfigImpl(null);
  config.setMaxLoadTime(100);

  CacheHealthEvaluator eval =
      new CacheHealthEvaluator(config, this.system.getDistributionManager());
  for (int i = 0; i < 10; i++) {
    region.get("Test1 " + i);
  }
  long firstLoadTime = stats.getLoadTime();
  long firstLoadsCompleted = stats.getLoadsCompleted();
  assertTrue(firstLoadTime >= 0);
  assertTrue(firstLoadsCompleted > 0);

  // First time should always be empty
  List status = new ArrayList();
  eval.evaluate(status);
  assertEquals(0, status.size());

  config = new GemFireHealthConfigImpl(null);
  config.setMaxLoadTime(10);
  eval = new CacheHealthEvaluator(config, this.system.getDistributionManager());
  eval.evaluate(status);

  long start = System.currentTimeMillis();
  for (int i = 0; i < 100; i++) {
    region.get("Test2 " + i);
  }
  assertTrue(System.currentTimeMillis() - start < 1000);
  long secondLoadTime = stats.getLoadTime();
  long secondLoadsCompleted = stats.getLoadsCompleted();
  assertTrue("firstLoadTime=" + firstLoadTime + ", secondLoadTime=" + secondLoadTime,
      secondLoadTime >= firstLoadTime);
  assertTrue(secondLoadsCompleted > firstLoadsCompleted);

  // Average should be less than 10 milliseconds
  status = new ArrayList();
  eval.evaluate(status);
  assertEquals(0, status.size());

  region.getAttributesMutator().setCacheLoader(new CacheLoader() {
    public Object load(LoaderHelper helper) throws CacheLoaderException {
      try {
        Thread.sleep(20);
      } catch (InterruptedException ex) {
        fail("Why was I interrupted?");
      }
      return "Loaded";
    }

    public void close() {}
  });
  for (int i = 0; i < 50; i++) {
    region.get("Test3 " + i);
  }
  long thirdLoadTime = stats.getLoadTime();
  long thirdLoadsCompleted = stats.getLoadsCompleted();
  assertTrue(thirdLoadTime > secondLoadTime);
  assertTrue(thirdLoadsCompleted > secondLoadsCompleted);

  status = new ArrayList();
  eval.evaluate(status);
  assertEquals(1, status.size());

  AbstractHealthEvaluator.HealthStatus ill =
      (AbstractHealthEvaluator.HealthStatus) status.get(0);
  assertEquals(GemFireHealth.OKAY_HEALTH, ill.getHealthCode());
  String s = "The average duration of a Cache load";
  assertTrue(ill.getDiagnosis().indexOf(s) != -1);
}
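// A rough sketch of why the final evaluation above reports OKAY_HEALTH, assuming the evaluator
// compares the average load duration in the interval against MaxLoadTime (the real computation
// is in CacheHealthEvaluator.checkLoadTime, and the stat's internal time unit may differ).
public class LoadTimeSketch {
  public static void main(String[] args) {
    long loadsInInterval = 50;  // the "Test3" gets; each loader call sleeps ~20 ms
    double avgLoadMillis = (50 * 20.0) / loadsInInterval; // ≈ 20 ms per load
    long maxLoadTimeMillis = 10; // threshold set via config.setMaxLoadTime(10)
    System.out.println("average " + avgLoadMillis + " ms exceeds the " + maxLoadTimeMillis
        + " ms threshold => OKAY_HEALTH expected");
  }
}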