/**
 * Destroy a key in region REGION_NAME. The keys to destroy are specified in keyIntervals.
 *
 * @return true if all keys to be destroyed have been completed.
 */
protected boolean destroy() {
  SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
  long nextKey = sc.incrementAndRead(CQUtilBB.LASTKEY_DESTROY);
  if (!keyIntervals.keyInRange(KeyIntervals.DESTROY, nextKey)) {
    Log.getLogWriter().info("All destroys completed; returning from destroy");
    return true;
  }
  Object key = NameFactory.getObjectNameForCounter(nextKey);
  Log.getLogWriter().info("Destroying " + key);
  checkContainsValueForKey(key, true, "before destroy");
  try {
    aRegion.destroy(key);
    Log.getLogWriter()
        .info(
            "Done Destroying "
                + key
                + ", num remaining: "
                + (keyIntervals.getLastKey(KeyIntervals.DESTROY) - nextKey));
  } catch (CacheWriterException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  } catch (TimeoutException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  } catch (EntryNotFoundException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
  return (nextKey >= keyIntervals.getLastKey(KeyIntervals.DESTROY));
}
/**
 * Invalidate a key in region REGION_NAME. The keys to invalidate are specified in keyIntervals.
 *
 * @return true if all keys to be invalidated have been completed.
 */
protected boolean invalidate() {
  SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
  long nextKey = sc.incrementAndRead(CQUtilBB.LASTKEY_INVALIDATE);
  if (!keyIntervals.keyInRange(KeyIntervals.INVALIDATE, nextKey)) {
    Log.getLogWriter().info("All existing keys invalidated; returning from invalidate");
    return true;
  }
  Object key = NameFactory.getObjectNameForCounter(nextKey);
  Log.getLogWriter().info("Invalidating " + key);
  checkContainsValueForKey(key, true, "before invalidate");
  try {
    aRegion.invalidate(key);
    Log.getLogWriter()
        .info(
            "Done invalidating "
                + key
                + ", num remaining: "
                + (keyIntervals.getLastKey(KeyIntervals.INVALIDATE) - nextKey));
  } catch (TimeoutException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  } catch (EntryNotFoundException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
  return (nextKey >= keyIntervals.getLastKey(KeyIntervals.INVALIDATE));
}
/**
 * Task to do random operations that expect exceptions caused by VMs being stopped with a nice
 * kill (HA).
 *
 * @param testInstance The RecoveryTest instance.
 */
public static void doOperationsHA(RecoveryTest testInstance) throws Exception {
  long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
  long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;
  try {
    testInstance.doOperations(minTaskGranularityMS);
  } catch (CacheClosedException e) {
    boolean thisVMReceivedNiceKill = StopStartVMs.niceKillInProgress();
    if (thisVMReceivedNiceKill) {
      Log.getLogWriter().info("Caught expected exception " + e + "; continuing test");
    } else {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  } catch (DistributedSystemDisconnectedException e) {
    boolean thisVMReceivedNiceKill = StopStartVMs.niceKillInProgress();
    if (thisVMReceivedNiceKill) {
      Log.getLogWriter().info("Caught expected exception " + e + "; continuing test");
    } else {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  } catch (RegionDestroyedException e) {
    boolean thisVMReceivedNiceKill = StopStartVMs.niceKillInProgress();
    if (thisVMReceivedNiceKill) {
      Log.getLogWriter().info("Caught expected exception " + e + "; continuing test");
    } else {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  }
}
/**
 * Do a get on a key in region REGION_NAME. Keys to get are specified in keyIntervals.
 *
 * @return true if all keys to have a get performed have been completed.
 */
protected boolean get() {
  SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
  long nextKey = sc.incrementAndRead(CQUtilBB.LASTKEY_GET);
  if (!keyIntervals.keyInRange(KeyIntervals.GET, nextKey)) {
    Log.getLogWriter().info("All gets completed; returning from get");
    return true;
  }
  Object key = NameFactory.getObjectNameForCounter(nextKey);
  Log.getLogWriter().info("Getting " + key);
  try {
    Object existingValue = aRegion.get(key);
    Log.getLogWriter()
        .info(
            "Done getting "
                + key
                + ", num remaining: "
                + (keyIntervals.getLastKey(KeyIntervals.GET) - nextKey));
    if (existingValue == null)
      throw new TestException("Get of key " + key + " returned unexpected " + existingValue);
  } catch (TimeoutException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  } catch (CacheLoaderException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
  return (nextKey >= keyIntervals.getLastKey(KeyIntervals.GET));
}
/**
 * Update an existing key in region REGION_NAME. The keys to update are specified in keyIntervals.
 *
 * @return true if all keys to be updated have been completed.
 */
protected boolean updateExistingKey() {
  long nextKey =
      CQUtilBB.getBB().getSharedCounters().incrementAndRead(CQUtilBB.LASTKEY_UPDATE_EXISTING_KEY);
  if (!keyIntervals.keyInRange(KeyIntervals.UPDATE_EXISTING_KEY, nextKey)) {
    Log.getLogWriter().info("All existing keys updated; returning from updateExistingKey");
    return true;
  }
  Object key = NameFactory.getObjectNameForCounter(nextKey);
  QueryObject existingValue = (QueryObject) aRegion.get(key);
  if (existingValue == null)
    throw new TestException("Get of key " + key + " returned unexpected " + existingValue);
  QueryObject newValue = existingValue.modifyWithNewInstance(QueryObject.NEGATE, 0, true);
  newValue.extra = key; // encode the key in the object for later validation
  if (existingValue.aPrimitiveLong < 0)
    throw new TestException(
        "Trying to update a key which was already updated: " + existingValue.toStringFull());
  Log.getLogWriter()
      .info("Updating existing key " + key + " with value " + TestHelper.toString(newValue));
  aRegion.put(key, newValue);
  Log.getLogWriter()
      .info(
          "Done updating existing key "
              + key
              + " with value "
              + TestHelper.toString(newValue)
              + ", num remaining: "
              + (keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY) - nextKey));
  return (nextKey >= keyIntervals.getLastKey(KeyIntervals.UPDATE_EXISTING_KEY));
}
/** Start all cqs running for this VM, and create a CQHistory instance for each CQ. */
private void startCQsWithHistory() {
  initializeQueryService();
  CqAttributesFactory cqFac = new CqAttributesFactory();
  cqFac.addCqListener(new CQHistoryListener());
  cqFac.addCqListener(new CQGatherListener());
  CqAttributes cqAttr = cqFac.create();
  Iterator it = queryMap.keySet().iterator();
  while (it.hasNext()) {
    String queryName = (String) (it.next());
    String query = (String) (queryMap.get(queryName));
    try {
      CqQuery cq = qService.newCq(queryName, query, cqAttr);
      CQHistory history = new CQHistory(cq.getName());
      CQHistoryListener.recordHistory(history);
      Log.getLogWriter()
          .info(
              "Creating CQ with name " + queryName + ": " + query + ", cq attributes: " + cqAttr);
      Log.getLogWriter().info("Calling executeWithInitialResults on " + cq);
      CqResults rs = cq.executeWithInitialResults();
      SelectResults sr = CQUtil.getSelectResults(rs);
      if (sr == null) {
        throw new TestException(
            "For cq "
                + cq
                + " with name "
                + cq.getName()
                + " executeWithInitialResults returned "
                + sr);
      }
      Log.getLogWriter()
          .info(
              "Done calling executeWithInitialResults on "
                  + cq
                  + " with name "
                  + queryName
                  + ", select results size is "
                  + sr.size());
      history.setSelectResults(sr);
      logNumOps();
      // log the select results
      List srList = sr.asList();
      StringBuffer aStr = new StringBuffer();
      aStr.append("SelectResults returned from " + queryName + " is\n");
      for (int i = 0; i < srList.size(); i++) {
        aStr.append(srList.get(i) + "\n");
      }
      Log.getLogWriter().info(aStr.toString());
    } catch (CqExistsException e) {
      throw new TestException(TestHelper.getStackTrace(e));
    } catch (RegionNotFoundException e) {
      throw new TestException(TestHelper.getStackTrace(e));
    } catch (CqException e) {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  }
}
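// A minimal sketch (not from the original source) of the kind of entries startCQsWithHistory
// expects in queryMap: a CQ name mapped to an OQL SELECT over the test region. The method name,
// region path, and field name below are illustrative assumptions only.
private void initQueryMapSketch() {
  queryMap.put("positiveLongs", "select * from /QueryRegion qo where qo.aPrimitiveLong > 0");
  queryMap.put("negativeLongs", "select * from /QueryRegion qo where qo.aPrimitiveLong < 0");
}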
public void doDDLOp(Connection dConn, Connection gConn) {
  boolean success = false;
  int maxNumOfTries = 1;
  ArrayList<SQLException> exList = new ArrayList<SQLException>();
  String tableName = tableNames[SQLTest.random.nextInt(tableNames.length)];
  String routineName = (hasRoutine) ? getRoutineNames(dConn, gConn) : null;
  StringBuffer aStr = new StringBuffer();
  // grant or revoke
  String act = action[SQLTest.random.nextInt(action.length)];
  aStr.append(act);
  // rest of the authorization stmt
  if (routineName == null) aStr.append(getAuthForTables(tableName, act));
  else
    aStr.append(
        SQLTest.random.nextBoolean()
            ? getAuthForTables(tableName, act)
            : getAuthForRoutines(routineName, act));
  if (dConn != null) {
    try {
      success = applySecurityToDerby(dConn, aStr.toString(), exList); // insert to derby table
      int count = 0;
      while (!success) {
        if (count >= maxNumOfTries) {
          Log.getLogWriter()
              .info("Could not get the lock to finish grant/revoke stmt, abort this operation");
          return;
        }
        exList.clear();
        success =
            applySecurityToDerby(dConn, aStr.toString(), exList); // retry insert to derby table
        count++;
      }
      applySecurityToGFE(gConn, aStr.toString(), exList); // insert to gfe table
      SQLHelper.handleMissedSQLException(exList);
      gConn.commit();
      dConn.commit();
    } catch (SQLException se) {
      SQLHelper.printSQLException(se);
      throw new TestException("execute security statement fails " + TestHelper.getStackTrace(se));
    } // for verification
  } else {
    try {
      applySecurityToGFE(gConn, aStr.toString()); // insert to gfe table
      gConn.commit();
    } catch (SQLException se) {
      SQLHelper.printSQLException(se);
      throw new TestException("execute security statement fails " + TestHelper.getStackTrace(se));
    }
  } // no verification
}
public static void handleOpsException(Exception e) {
  if ((e instanceof CacheClosedException)
      || (e instanceof DistributedSystemDisconnectedException)) {
    boolean thisVMReceivedNiceKill = StopStartVMs.niceKillInProgress();
    if (thisVMReceivedNiceKill) {
      Log.getLogWriter().info("Caught expected exception " + e + "; continuing test");
    } else {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  } else {
    throw new TestException(TestHelper.getStackTrace(e));
  }
}
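// A minimal usage sketch (not part of the original test code): a task could funnel all exceptions
// from its operations through handleOpsException so that exceptions caused by a nice kill are
// tolerated and anything else still fails the test. The method name doOperationsTolerant and the
// minTaskGranularityMS pattern (borrowed from the other tasks in this class) are assumptions.
public static void doOperationsTolerant(RecoveryTest testInstance) throws Exception {
  long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
  long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;
  try {
    testInstance.doOperations(minTaskGranularityMS);
  } catch (Exception e) {
    handleOpsException(e); // rethrows as TestException unless a nice kill is in progress
  }
}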
/** Create an xml file for the current cache and all its regions. */
public static synchronized void createXmlFile() {
  String bbKey = null;
  String clientName = RemoteTestModule.getMyClientName();
  String fileName = RecoveryTest.getXmlFileName(RemoteTestModule.getMyVmid());
  File aFile = new File(fileName);
  if (aFile.exists()) {
    return;
  }
  if (clientName.indexOf("oldVersion") >= 0) {
    bbKey = RecoveryTest.oldVersionXmlKey;
  } else if (clientName.indexOf("newVersion") >= 0) {
    bbKey = RecoveryTest.newVersionXmlKey;
  }
  DeclarativeGenerator.createDeclarativeXml(fileName, RecoveryTest.theCache, false, true);
  if (bbKey != null) {
    File xmlFile = new File(fileName);
    String xmlFilePath;
    try {
      xmlFilePath = xmlFile.getCanonicalPath();
    } catch (IOException e) {
      throw new TestException(TestHelper.getStackTrace(e));
    }
    RecoveryBB.getBB().getSharedMap().put(bbKey, xmlFilePath);
  }
}
/**
 * Forces recovery by revoking the members the system is waiting for. It is the responsibility of
 * the caller to know that the correct members are being waited for since this will revoke all
 * waiting members.
 *
 * @param doValidation If true, then validate the missing disk stores (used for serial tests or
 *     tests that are in a silent phase), if false then don't do validation (used for concurrent
 *     tests).
 */
public static void forceRecovery(boolean doValidation) {
  AdminDistributedSystem adminDS = AdminHelper.getAdminDistributedSystem();
  Set<PersistentID> waitingForMembers;
  try {
    waitingForMembers = adminDS.getMissingPersistentMembers();
    if (doValidation) {
      verifyMissingDiskStoresCommandLineTool(waitingForMembers);
    }
    for (PersistentID persistId : waitingForMembers) {
      boolean revokeWithCommandLineTool = TestConfig.tab().getRandGen().nextBoolean();
      // workaround for bug 42432; when this bug is fixed remove the following if
      if (HostHelper.isWindows()) {
        revokeWithCommandLineTool = false; // force usage of API
      }
      if (CliHelperPrms.getUseCli()) {
        revokeWithCommandLineTool = true;
      }
      if (revokeWithCommandLineTool) { // revoke with command line tool
        PersistenceUtil.runRevokeMissingDiskStore(persistId);
      } else {
        Log.getLogWriter().info("Revoking PersistentID " + persistId);
        adminDS.revokePersistentMember(persistId.getUUID());
      }
    }
  } catch (AdminException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
}
/** Check for the proper number of afterRemoteRegionCrashEvents. */
public static void checkAfterRemoteRegionCrashEvents() {
  long numCrashEvents =
      ListenerBB.getBB().getSharedCounters().read(ListenerBB.numAfterRemoteRegionCrashEvents);
  long numVMsInDS = SplitBrainBB.getBB().getSharedCounters().read(SplitBrainBB.NumVMsInDS);
  long numVMsStopped = SplitBrainBB.getBB().getSharedCounters().read(SplitBrainBB.NumVMsStopped);
  // the number of expected crash events is: each vm that did not receive a forced disconnect gets
  // an event for the vm(s) that did receive a forced disconnect, plus the vm(s) that received that
  // forced disconnect get an event for each of the surviving vms that have that region defined.
  // lynn - waiting for specification as to how many crash events to expect
  // long numExpectedCrashEvents = 2 * (numVMsInDS - numVMsStopped);
  long numExpectedCrashEvents = numVMsInDS - numVMsStopped;
  Log.getLogWriter()
      .info(
          "numVMsInDS: "
              + numVMsInDS
              + ", numVMsStopped: "
              + numVMsStopped
              + ", numExpectedCrashEvents: "
              + numExpectedCrashEvents);
  TestHelper.waitForCounter(
      ListenerBB.getBB(),
      "ListenerBB.numAfterRemoteRegionCrashEvents",
      ListenerBB.numAfterRemoteRegionCrashEvents,
      numExpectedCrashEvents,
      true,
      -1,
      2000);
}
public static void deleteTxhistory(String tableName, int pk1, String type) throws SQLException {
  Connection conn = getDefaultConnection();
  int pk2 = type.equalsIgnoreCase("sell") ? SELL : BUY;
  String deletgfxdTxhistory =
      "update trade.monitor set deleteCount = deleteCount + 1 "
          + "where tname = ? and pk1 = ? and pk2 = ?";
  PreparedStatement ps = conn.prepareStatement(deletgfxdTxhistory);
  ps.setString(1, tableName);
  ps.setInt(2, pk1);
  ps.setInt(3, pk2);
  Log.getLogWriter()
      .info(deletgfxdTxhistory + " for " + tableName + " and pk1 " + pk1 + " and pk2 " + pk2);
  try {
    ps.execute();
  } catch (SQLException se) {
    if (se.getSQLState().equals("X0Z02")) {
      throw new TestException(
          "Got unexpected conflict exception in trigger " + TestHelper.getStackTrace(se));
    } else throw se;
  }
  closeConnection(conn);
}
public static void updatePortfolio(String tableName, int pk1, int pk2) throws SQLException {
  Connection conn = getDefaultConnection();
  String updatgfxdPortfolio =
      "update trade.monitor set updateCount = updateCount + 1 "
          + "where tname = ? and pk1 = ? and pk2 = ?";
  PreparedStatement ps = conn.prepareStatement(updatgfxdPortfolio);
  ps.setString(1, tableName);
  ps.setInt(2, pk1);
  ps.setInt(3, pk2);
  Log.getLogWriter()
      .info(updatgfxdPortfolio + " for " + tableName + " and pk1 " + pk1 + " and pk2 " + pk2);
  try {
    ps.execute();
  } catch (SQLException se) {
    if (se.getSQLState().equals("X0Z02")) {
      throw new TestException(
          "Got unexpected conflict exception in trigger " + TestHelper.getStackTrace(se));
    } else throw se;
  }
  closeConnection(conn);
}
/**
 * Do operations on the REGION_NAME's keys using keyIntervals to specify which keys get which
 * operations. This will return when all operations in all intervals have completed.
 *
 * @param availableOps Bits which are true correspond to the operations that should be executed.
 */
public void doOps(BitSet availableOps) {
  boolean useTransactions = getInitialImage.InitImagePrms.useTransactions();
  while (availableOps.cardinality() != 0) {
    int whichOp = getOp(availableOps, operations.length);
    boolean doneWithOps = false;
    if (useTransactions) {
      TxHelper.begin();
    }
    switch (whichOp) {
      case ADD_NEW_KEY:
        doneWithOps = addNewKey();
        break;
      case INVALIDATE:
        doneWithOps = invalidate();
        break;
      case DESTROY:
        doneWithOps = destroy();
        break;
      case UPDATE_EXISTING_KEY:
        doneWithOps = updateExistingKey();
        break;
      case GET:
        doneWithOps = get();
        break;
      case LOCAL_INVALIDATE:
        doneWithOps = localInvalidate();
        break;
      case LOCAL_DESTROY:
        doneWithOps = localDestroy();
        break;
      default:
        throw new TestException("Unknown operation " + whichOp);
    }
    if (useTransactions) {
      try {
        TxHelper.commit();
      } catch (CommitConflictException e) {
        // currently not expecting any conflicts ...
        throw new TestException(
            "Unexpected CommitConflictException " + TestHelper.getStackTrace(e));
      }
    }
    if (doneWithOps) {
      Log.getLogWriter().info("Done with operation " + whichOp);
      availableOps.clear(whichOp);
    }
    if (sleepBetweenOps) {
      Log.getLogWriter().info("Sleeping between ops for " + SLEEP_BETWEEN_OPS_MILLIS + " millis");
      MasterController.sleepForMs(SLEEP_BETWEEN_OPS_MILLIS);
    }
  }
}
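// A minimal sketch (assumed, not from the original source) of how a caller might drive doOps:
// set a bit for each operation the task should perform, then let doOps clear bits as each key
// interval completes. The constants are the same ones used in the switch above; the wrapper
// method name doSelectedOps is hypothetical.
protected void doSelectedOps() {
  BitSet availableOps = new BitSet(operations.length);
  availableOps.set(ADD_NEW_KEY);
  availableOps.set(UPDATE_EXISTING_KEY);
  availableOps.set(INVALIDATE);
  availableOps.set(DESTROY);
  availableOps.set(GET);
  doOps(availableOps); // returns once every selected interval reports doneWithOps
}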
public static void insertSingleKeyTable(String tableName, int pk1) throws SQLException {
  Connection conn = getDefaultConnection();
  PreparedStatement ps = conn.prepareStatement(insertsql);
  ps.setString(1, tableName);
  ps.setInt(2, pk1);
  ps.setInt(3, -1);
  ps.setInt(4, 1);
  ps.setInt(5, 0);
  ps.setInt(6, 0);
  Log.getLogWriter()
      .info("insert into trade.monitor values(" + tableName + ", " + pk1 + ", -1, 1, 0, 0 )");
  try {
    ps.execute();
  } catch (SQLException se) {
    if (se.getSQLState().equals("X0Z02")) {
      throw new TestException(
          "Got unexpected conflict exception in trigger " + TestHelper.getStackTrace(se));
    } else throw se;
  }
  closeConnection(conn);
}
// for portfolio and txhistory
private static void newlyInsertedTable(Connection conn, String tableName, int pk1, int pk2)
    throws SQLException {
  PreparedStatement ps = conn.prepareStatement(insertsql);
  ps.setString(1, tableName);
  ps.setInt(2, pk1);
  ps.setInt(3, pk2);
  ps.setInt(4, 1);
  ps.setInt(5, 0);
  ps.setInt(6, 0);
  Log.getLogWriter()
      .info(
          "insert into trade.monitor values("
              + tableName
              + ", "
              + pk1
              + ", "
              + pk2
              + ", 1, 0, 0 )");
  try {
    ps.execute();
  } catch (SQLException se) {
    if (se.getSQLState().equals("X0Z02")) {
      throw new TestException(
          "Got unexpected conflict exception in trigger " + TestHelper.getStackTrace(se));
    } else throw se;
  }
}
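// For reference, a sketch of the trade.monitor layout these helpers appear to assume,
// reconstructed from the insert/update statements above rather than taken from the original DDL:
// tname identifies the monitored table, pk1/pk2 the affected key, and the three counters track
// how many inserts, updates, and deletes the triggers have observed. Column types and the
// primary key are assumptions.
private static final String CREATE_MONITOR_SKETCH =
    "create table trade.monitor (tname varchar(30), pk1 int, pk2 int, "
        + "insertCount int, updateCount int, deleteCount int, primary key (tname, pk1, pk2))";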
/**
 * During a shutDownAll we are disconnected from the DS and exceptions can occur. This method is
 * called when we know we are undergoing a shutDownAll and allows certain expected exceptions.
 *
 * @param e The exception that occurred during shutDownAll.
 */
public static void handleOpsExceptionDuringShutDownAll(Exception e) {
  if ((e instanceof CacheClosedException)
      || (e instanceof DistributedSystemDisconnectedException)) {
    Log.getLogWriter()
        .info("Caught expected " + e + " during shutDownAllMembers; continuing test");
  } else {
    throw new TestException(TestHelper.getStackTrace(e));
  }
}
public void reduce(GFKey key, Iterable<PEIWritable> values, Context context)
    throws IOException, InterruptedException {
  // For a particular key, process all records and output what we would have expected in this
  // concKnownKeys test. Note that we either
  //   1. do a single create
  //   2. create + update
  //   3. create + destroy
  // Look at all ops and output either
  //   1. create
  //   2. create (with value from update)
  //   3. do nothing (overall result is destroy, so do not create the entry in the gemfire
  //      validation region)
  String keyStr = (String) key.getKey();
  ValueHolder updateValue = null;
  ValueHolder createValue = null;
  boolean destroyed = false;
  System.out.println("KnownKeysMRv2.reduce() invoked with " + keyStr);
  for (PEIWritable value : values) {
    PersistedEventImpl event = value.getEvent();
    Operation op = event.getOperation();
    ValueHolder vh = null;
    if (op.isDestroy()) {
      destroyed = true;
    } else {
      try {
        vh = (ValueHolder) event.getDeserializedValue();
      } catch (ClassNotFoundException e) {
        System.out.println(
            "KnownKeysMRv2.reduce() caught " + e + " : " + TestHelper.getStackTrace(e));
      }
      if (op.isUpdate()) {
        updateValue = vh;
      } else {
        createValue = vh;
      }
    }
    System.out.println(
        "KnownKeysMRv2.reduce() record: "
            + op.toString()
            + ": key = "
            + keyStr
            + " and op "
            + op.toString());
  }
  if (!destroyed) {
    if (updateValue != null) {
      context.write(key.getKey(), updateValue);
    } else {
      context.write(key.getKey(), createValue);
    }
  }
}
public void assignSelectToMe(Connection dConn, Connection gConn, int tid) {
  boolean success = false;
  int maxNumOfTries = 1;
  ArrayList<SQLException> exList = new ArrayList<SQLException>();
  // for load test
  String sql = "grant select on trade.buyorders to thr_" + tid;
  if (dConn != null) {
    try {
      success = applySecurityToDerby(dConn, sql, exList); // insert to derby table
      int count = 0;
      while (!success) {
        if (count >= maxNumOfTries) {
          Log.getLogWriter()
              .info("Could not get the lock to finish grant/revoke stmt, abort this operation");
          return;
        }
        exList.clear();
        success = applySecurityToDerby(dConn, sql, exList); // retry insert to derby table
        count++;
      }
      applySecurityToGFE(gConn, sql, exList); // insert to gfe table
      SQLHelper.handleMissedSQLException(exList);
      gConn.commit();
      dConn.commit();
    } catch (SQLException se) {
      SQLHelper.printSQLException(se);
      throw new TestException("execute security statement fails " + TestHelper.getStackTrace(se));
    } // for verification
  } else {
    try {
      applySecurityToGFE(gConn, sql); // insert to gfe table
      gConn.commit();
    } catch (SQLException se) {
      SQLHelper.printSQLException(se);
      throw new TestException("execute security statement fails " + TestHelper.getStackTrace(se));
    }
  } // no verification
}
/** Performs a backup. */
protected static void performBackup() {
  long execution = RecoveryBB.getBB().getSharedCounters().read(RecoveryBB.executionNumber);
  File backupDir = createBackupDir(execution);
  File baselineDir = (execution > 1 ? getBackupDir(execution - 1) : null);
  try {
    BackupStatus status =
        AdminHelper.getAdminDistributedSystem().backupAllMembers(backupDir, baselineDir);
  } catch (AdminException e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
}
// Identity mapper (log and write out processed key/value pairs, the value is the
// PersistedEventImpl)
public void map(GFKey key, PersistedEventImpl value, Context context)
    throws IOException, InterruptedException {
  String keyStr = (String) key.getKey();
  Operation op = value.getOperation();
  ValueHolder entryValue = null;
  System.out.println("map method invoked with " + keyStr + " " + op.toString());
  try {
    entryValue = (ValueHolder) value.getDeserializedValue();
  } catch (ClassNotFoundException e) {
    System.out.println("KnownKeysMRv2.map() caught " + e + " : " + TestHelper.getStackTrace(e));
  }
  context.write(key, new PEIWritable(value));
}
/**
 * Task to do random operations that expect exceptions from shutDownAllMembers.
 *
 * @param testInstance The RecoveryTest instance.
 */
public static void doOperationsShutDownAll(RecoveryTest testInstance) throws Exception {
  long minTaskGranularitySec = TestConfig.tab().longAt(TestHelperPrms.minTaskGranularitySec);
  long minTaskGranularityMS = minTaskGranularitySec * TestHelper.SEC_MILLI_FACTOR;
  try {
    testInstance.doOperations(minTaskGranularityMS);
  } catch (CacheClosedException e) {
    boolean shutDownAllInProgress =
        (Boolean) RecoveryBB.getBB().getSharedMap().get(RecoveryBB.shutDownAllKey);
    if (shutDownAllInProgress) {
      Log.getLogWriter().info("Caught expected exception " + e + "; continuing test");
    } else {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  } catch (DistributedSystemDisconnectedException e) {
    boolean shutDownAllInProgress =
        (Boolean) RecoveryBB.getBB().getSharedMap().get(RecoveryBB.shutDownAllKey);
    if (shutDownAllInProgress) {
      Log.getLogWriter().info("Caught expected exception " + e + "; continuing test");
    } else {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  }
}
/**
 * Create a region with the given region description name.
 *
 * @param regDescriptName The name of a region description.
 */
protected void initializeRegion(String regDescriptName) {
  CacheHelper.createCache("cache1");
  String key = VmIDStr + RemoteTestModule.getMyVmid();
  String xmlFile = key + ".xml";
  try {
    CacheHelper.generateCacheXmlFile("cache1", regDescriptName, xmlFile);
  } catch (HydraRuntimeException e) {
    if (e.toString().indexOf("Cache XML file was already created") >= 0) {
      // this can occur when reinitializing after a stop-start because
      // the cache xml file is written during the first init tasks
    } else {
      throw new TestException(TestHelper.getStackTrace(e));
    }
  }
  aRegion = RegionHelper.createRegion(regDescriptName);
}
/**
 * Check that the value of the given key is expected as an updated value. Throw an error if any
 * problems.
 *
 * @param key The key to check.
 * @param value The value for the key.
 */
protected void checkUpdatedValue(Object key, Object value) {
  if (value instanceof QueryObject) {
    QueryObject qo = (QueryObject) value;
    long keyCounter = NameFactory.getCounterForName(key);
    if (qo.aPrimitiveLong > 0) { // this value has not been updated; updates are negative
      throw new TestException(
          "Expected QueryObject for key "
              + key
              + " to contain negative values (indicating it was updated), but the value for this key is "
              + qo.toStringFull());
    }
  } else {
    throw new TestException(
        "Expected value " + TestHelper.toString(value) + " to be a QueryObject");
  }
}
protected void initializeQueryService() {
  try {
    String usingPool = TestConfig.tab().stringAt(CQUtilPrms.QueryServiceUsingPool, "false");
    boolean queryServiceUsingPool = Boolean.valueOf(usingPool).booleanValue();
    if (queryServiceUsingPool) {
      Pool pool = PoolHelper.createPool(CQUtilPrms.getQueryServicePoolName());
      qService = pool.getQueryService();
      Log.getLogWriter()
          .info("Initializing QueryService using Pool. PoolName: " + pool.getName());
    } else {
      qService = CacheHelper.getCache().getQueryService();
      Log.getLogWriter().info("Initializing QueryService using Cache.");
    }
  } catch (Exception e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
}
/**
 * Check that the value of the given key is expected for this test. Throw an error if any
 * problems.
 *
 * @param key The key to check.
 * @param value The value for the key.
 */
protected void checkValue(Object key, Object value) {
  if (value instanceof QueryObject) {
    QueryObject qo = (QueryObject) value;
    long keyCounter = NameFactory.getCounterForName(key);
    if (keyCounter != qo.aPrimitiveLong) {
      // just pick one field from the QueryObject to test; use aPrimitiveLong
      throw new TestException(
          "Inconsistent QueryObject for key " + key + ":" + qo.toStringFull());
    }
  } else {
    throw new TestException(
        "For key "
            + key
            + ", expected value "
            + TestHelper.toString(value)
            + " to be a QueryObject");
  }
}
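// A minimal validation sketch (assumed, not from the original source) showing how the two
// checkers above might be applied together: keys in the update interval are expected to hold
// negated values, all other surviving keys their original values. The method name
// verifyRegionContents is hypothetical, and invalidated entries are simply skipped here.
protected void verifyRegionContents() {
  for (Object key : aRegion.keySet()) {
    Object value = aRegion.get(key);
    if (value == null) {
      continue; // invalidated entry; not checked in this sketch
    }
    long counter = NameFactory.getCounterForName(key);
    if (keyIntervals.keyInRange(KeyIntervals.UPDATE_EXISTING_KEY, counter)) {
      checkUpdatedValue(key, value);
    } else {
      checkValue(key, value);
    }
  }
}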
/**
 * Load a region with keys and values. The number of keys and values is specified by the total
 * number of keys in keyIntervals. This can be invoked by several threads to accomplish the work.
 */
public void loadRegion() {
  final long LOG_INTERVAL_MILLIS = 10000;
  int numKeysToCreate = keyIntervals.getNumKeys();
  long lastLogTime = System.currentTimeMillis();
  long startTime = System.currentTimeMillis();
  SharedCounters sc = CQUtilBB.getBB().getSharedCounters();
  do {
    long shouldAddCount =
        CQUtilBB.getBB().getSharedCounters().incrementAndRead(CQUtilBB.SHOULD_ADD_COUNT);
    if (shouldAddCount > numKeysToCreate) {
      String aStr =
          "In loadRegion, shouldAddCount is "
              + shouldAddCount
              + ", numOriginalKeysCreated is "
              + sc.read(CQUtilBB.NUM_ORIGINAL_KEYS_CREATED)
              + ", numKeysToCreate is "
              + numKeysToCreate
              + ", region size is "
              + aRegion.size();
      Log.getLogWriter().info(aStr);
      NameBB.getBB().printSharedCounters();
      throw new StopSchedulingTaskOnClientOrder(aStr);
    }
    Object key = NameFactory.getNextPositiveObjectName();
    QueryObject value = getValueToAdd(key);
    value.extra = key;
    Log.getLogWriter().info("Creating with put, key " + key + ", value " + value.toStringFull());
    aRegion.put(key, value);
    sc.increment(CQUtilBB.NUM_ORIGINAL_KEYS_CREATED);
    if (System.currentTimeMillis() - lastLogTime > LOG_INTERVAL_MILLIS) {
      Log.getLogWriter()
          .info(
              "Added "
                  + NameFactory.getPositiveNameCounter()
                  + " out of "
                  + numKeysToCreate
                  + " entries into "
                  + TestHelper.regionToString(aRegion, false));
      lastLogTime = System.currentTimeMillis();
    }
  } while ((minTaskGranularitySec == -1)
      || (System.currentTimeMillis() - startTime < minTaskGranularityMS));
}
public static void updateSingleKeyTable(String tableName, int pk1) throws SQLException {
  Connection conn = getDefaultConnection();
  PreparedStatement ps = conn.prepareStatement(updatgfxd);
  ps.setString(1, tableName);
  ps.setInt(2, pk1);
  Log.getLogWriter().info(updatgfxd + " for " + tableName + " and pk1 " + pk1);
  try {
    ps.execute();
  } catch (SQLException se) {
    if (se.getSQLState().equals("X0Z02")) {
      throw new TestException(
          "Got unexpected conflict exception in trigger " + TestHelper.getStackTrace(se));
    } else throw se;
  }
  closeConnection(conn);
}
/**
 * Creates a new key/value in the given region by creating a new key within the range and a random
 * value.
 *
 * @param aRegion The region to create the new key in.
 * @param exists Not used in this overridden method; this test wants to use unique keys even on
 *     creates, so we don't do anything different here based on the value of exists.
 * @return An instance of Operation describing the create operation.
 */
@Override
public Operation createEntry(Region aRegion, boolean exists) {
  int lower = ((Integer) (lowerKeyRange.get())).intValue();
  int upper = ((Integer) (upperKeyRange.get())).intValue();
  long keyIndex = TestConfig.tab().getRandGen().nextInt(lower, upper);
  long startKeyIndex = keyIndex;
  Object key = NameFactory.getObjectNameForCounter(keyIndex);
  boolean containsKey = aRegion.containsKey(key);
  while (containsKey) { // looking for a key that does not exist
    keyIndex++; // go to the next key
    if (keyIndex > upper) keyIndex = lower;
    if (keyIndex == startKeyIndex) { // considered all keys
      return null;
    }
    key = NameFactory.getObjectNameForCounter(keyIndex);
    containsKey = aRegion.containsKey(key);
  }
  BaseValueHolder vh = new ValueHolder(key, randomValues, new Integer(modValInitializer++));
  try {
    Log.getLogWriter()
        .info(
            "createEntryKeyRange: putting key "
                + key
                + ", object "
                + vh.toString()
                + " in region "
                + aRegion.getFullPath());
    aRegion.put(key, vh);
    Log.getLogWriter()
        .info(
            "createEntryKeyRange: done putting key "
                + key
                + ", object "
                + vh.toString()
                + " in region "
                + aRegion.getFullPath());
  } catch (Exception e) {
    throw new TestException(TestHelper.getStackTrace(e));
  }
  return new Operation(aRegion.getFullPath(), key, Operation.ENTRY_CREATE, null, vh.modVal);
}
// for portfolio and txhistory
private static void reInsertedTable(Connection conn, String tableName, int pk1, int pk2)
    throws SQLException {
  String reinsertsql =
      "update trade.monitor set insertCount = insertCount + 1 "
          + "where tname = ? and pk1 = ? and pk2 = ?";
  PreparedStatement ps = conn.prepareStatement(reinsertsql);
  ps.setString(1, tableName);
  ps.setInt(2, pk1);
  ps.setInt(3, pk2);
  Log.getLogWriter()
      .info(reinsertsql + " for " + tableName + " and pk1 " + pk1 + " and pk2 " + pk2);
  try {
    ps.execute();
  } catch (SQLException se) {
    if (se.getSQLState().equals("X0Z02")) {
      throw new TestException(
          "Got unexpected conflict exception in trigger " + TestHelper.getStackTrace(se));
    } else throw se;
  }
}