public static junit.framework.Test suite() { VoltServerConfig config = null; MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestSqlAggregateSuite.class); VoltProjectBuilder project = new VoltProjectBuilder("aggregate"); project.addSchema(Insert.class.getResource("aggregate-sql-ddl.sql")); project.addTablePartitionInfo("P1", "ID"); project.addProcedures(PROCEDURES); config = new LocalSingleProcessServer("sqlaggregate-onesite.jar", 1, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); // ADHOC sql still returns double the number of modified rows // config = new LocalSingleProcessServer("sqlaggregate-twosites.jar", 2, // BackendTarget.NATIVE_EE_JNI); // config.compile(project); // builder.addServerConfig(config); // config = new LocalSingleProcessServer("sqlaggregate-hsql.jar", 1, // BackendTarget.HSQLDB_BACKEND); // config.compile(project); // builder.addServerConfig(config); // Cluster config = new LocalCluster("sqlaggregate-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); return builder; }
public static junit.framework.Test suite() { VoltServerConfig config = null; MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestLimitOffsetSuite.class); VoltProjectBuilder project = new VoltProjectBuilder(); project.addSchema(TestLimitOffsetSuite.class.getResource("testlimitoffset-ddl.sql")); project.addPartitionInfo("A", "PKEY"); project.addStmtProcedure("InsertA", "INSERT INTO A VALUES(?, ?);"); project.addStmtProcedure("InsertB", "INSERT INTO B VALUES(?, ?);"); project.addStmtProcedure("LimitAPKEY", "SELECT * FROM A ORDER BY PKEY LIMIT ? OFFSET ?;"); project.addStmtProcedure("LimitBPKEY", "SELECT * FROM B ORDER BY PKEY LIMIT ? OFFSET ?;"); project.addStmtProcedure("LimitAI", "SELECT * FROM A ORDER BY I LIMIT ? OFFSET ?;"); project.addStmtProcedure("LimitBI", "SELECT * FROM B ORDER BY I LIMIT ? OFFSET ?;"); // local config = new LocalCluster("testlimitoffset-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI); if (!config.compile(project)) fail(); builder.addServerConfig(config); // Cluster config = new LocalCluster("testlimitoffset-cluster.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI); if (!config.compile(project)) fail(); builder.addServerConfig(config); // HSQL for baseline config = new LocalCluster("testlimitoffset-hsql.jar", 1, 1, 0, BackendTarget.HSQLDB_BACKEND); if (!config.compile(project)) fail(); builder.addServerConfig(config); return builder; }
/**
 * Build the set of server configurations TestWindowFunctionSuite runs against.
 *
 * @return The TestSuite containing all the tests to be run.
 */
public static junit.framework.Test suite() {
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestWindowFunctionSuite.class);
    try {
        // Build one single-site and one three-site JNI configuration; each gets
        // its own project with the shared schema applied.
        for (int siteCount : new int[] {1, 3}) {
            VoltProjectBuilder project = new VoltProjectBuilder();
            VoltServerConfig config =
                new LocalCluster("test-windowed-rank.jar", siteCount, 1, 0, BackendTarget.NATIVE_EE_JNI);
            setupSchema(project);
            boolean compiled = config.compile(project);
            assertTrue(compiled);
            builder.addServerConfig(config);
        }
    } catch (IOException excp) {
        fail();
    }
    return builder;
}
/** * Build a list of the tests that will be run when TestTPCCSuite gets run by JUnit. Use helper * classes that are part of the RegressionSuite framework. This particular class runs all tests on * the the local JNI backend with both one and two partition configurations, as well as on the * hsql backend. * * @return The TestSuite containing all the tests to be run. */ public static Test suite() { VoltServerConfig config = null; // the suite made here will all be using the tests from this class MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestExplainCommandSuite.class); // build up a project builder for the workload VoltProjectBuilder project = new VoltProjectBuilder(); project.addSchema(TestExplainCommandSuite.class.getResource("testExplainCommand-ddl.sql")); project.addPartitionInfo("t1", "PKEY"); project.addPartitionInfo("t2", "PKEY"); project.addPartitionInfo("t3", "PKEY"); project.setUseDDLSchema(true); boolean success; ///////////////////////////////////////////////////////////// // CONFIG #1: 1 Local Site/Partitions running on JNI backend ///////////////////////////////////////////////////////////// // get a server config for the native backend with one sites/partitions config = new LocalCluster("testExplainCommand-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI); // build the jarfile success = config.compile(project); assert (success); // add this config to the set of tests to run builder.addServerConfig(config); return builder; }
/** * Build a list of the tests that will be run when TestTPCCSuite gets run by JUnit. Use helper * classes that are part of the RegressionSuite framework. This particular class runs all tests on * the the local JNI backend with both one and two partition configurations, as well as on the * hsql backend. * * @return The TestSuite containing all the tests to be run. */ public static Test suite() { // the suite made here will all be using the tests from this class MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestSneakyExecutionOrderSuite.class); // build up a project builder for the workload VoltProjectBuilder project = new VoltProjectBuilder("sneaky"); project.addSchema(MultiPartition.class.getResource("sneaky-ddl.sql")); project.addTablePartitionInfo("P1", "P"); project.addProcedures(PROCEDURES); ///////////////////////////////////////////////////////////// // CONFIG #1: 1 Local Site/Partition running on HSQL backend ///////////////////////////////////////////////////////////// // VoltServerConfig config = new LocalCluster("sneaky.jar", 2, 2, BackendTarget.NATIVE_EE_JNI); VoltServerConfig config = new LocalSingleProcessServer("sneaky-twosites.jar", 2, BackendTarget.NATIVE_EE_JNI); boolean success = config.compile(project); assert (success); builder.addServerConfig(config); // Cluster config = new LocalCluster("sneaky-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); return builder; }
// // Build a list of the tests to be run. Use the regression suite // helpers to allow multiple backends. // JUnit magic that uses the regression suite helper classes. // public static Test suite() { VoltServerConfig config = null; MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestSystemProcedureSuite.class); // Not really using TPCC functionality but need a database. // The testLoadMultipartitionTable procedure assumes partitioning // on warehouse id. TPCCProjectBuilder project = new TPCCProjectBuilder(); project.addDefaultSchema(); project.addDefaultPartitioning(); project.addProcedures(PROCEDURES); project.addStmtProcedure( "InsertNewOrder", "INSERT INTO NEW_ORDER VALUES (?, ?, ?);", "NEW_ORDER.NO_W_ID: 2"); config = new LocalSingleProcessServer("sysproc-twosites.jar", 2, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); /* * Add a cluster configuration for sysprocs too */ config = new LocalCluster("sysproc-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); return builder; }
public static junit.framework.Test suite() { VoltServerConfig config = null; MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestIndexMemoryOwnershipSuite.class); VoltProjectBuilder project = new VoltProjectBuilder(); project.addSchema( TestIndexMemoryOwnershipSuite.class.getResource("testindexmemoryownership-ddl.sql")); project.addPartitionInfo("t1", "a"); project.addStmtProcedure("InsertT1", "insert into t1 values (?, ?, ?);", "t1.a:0"); project.addStmtProcedure("UpdateT1c", "update t1 set c = ? where a = ?;", "t1.a:1"); project.addStmtProcedure("UpdateT1b", "update t1 set b = ? where a = ?;", "t1.a:1"); project.addStmtProcedure("DeleteT1", "delete from t1 where c = ?;"); project.addStmtProcedure("LookupT1b", "select * from t1 where b = ?;"); project.addStmtProcedure("MVLookup", "select * from mv where b = ? and a = ?;", "t1.a:1"); project.addStmtProcedure("MVAll", "select * from mv;"); boolean success; // JNI config = new LocalCluster("updatememoryownership.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI); success = config.compile(project); assertTrue(success); builder.addServerConfig(config); return builder; }
public static junit.framework.Test suite() { MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestFastCombineSuite.class); VoltProjectBuilder project = new VoltProjectBuilder("fastcomb"); VoltServerConfig config = null; // Schema + Table Partitions project.addSchema(TestFastAggregateSuite.class.getResource("testorderby-ddl.sql")); project.addTablePartitionInfo("O1", "PKEY"); project.addProcedures(PROCEDURES); // Single Statement Procedures project.addStmtProcedure("TestSelect", "SELECT PKEY FROM O1 WHERE A_INT=?"); // CLUSTER CONFIG #1 // One site with two partitions running in this JVM config = new LocalSingleProcessServer("fastcomb-twoPart.jar", 2, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); /** * // CLUSTER CONFIG #2 // Two sites, each with two partitions running in separate JVMs config = * new LocalCluster("fastcomb-twoSiteTwoPart.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI); * config.compile(project); builder.addServerConfig(config); */ return builder; }
public Client getClient(long timeout, ClientAuthHashScheme scheme, boolean useAdmin) throws IOException { final Random r = new Random(); String listener = null; if (useAdmin) { listener = m_config.getAdminAddress(r.nextInt(m_config.getListenerCount())); } else { listener = m_config.getListenerAddress(r.nextInt(m_config.getListenerCount())); } ClientConfig config = new ClientConfigForTest(m_username, m_password, scheme); config.setConnectionResponseTimeout(timeout); config.setProcedureCallTimeout(timeout); final Client client = ClientFactory.createClient(config); // Use the port generated by LocalCluster if applicable try { client.createConnection(listener); } // retry once catch (ConnectException e) { if (useAdmin) { listener = m_config.getAdminAddress(r.nextInt(m_config.getListenerCount())); } else { listener = m_config.getListenerAddress(r.nextInt(m_config.getListenerCount())); } client.createConnection(listener); } m_clients.add(client); return client; }
public static junit.framework.Test suite() { VoltServerConfig config = null; MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestReplicationSuite.class); VoltProjectBuilder project = new VoltProjectBuilder("replication"); project.addSchema(SelectEmptyTable.class.getResource("replication-ddl.sql")); project.addTablePartitionInfo("P1", "ID"); project.addStmtProcedure("InsertSinglePart", "INSERT INTO P1 VALUES (?, ?, ?, ?);", "P1.ID: 0"); project.addStmtProcedure( "UpdateSinglePart", "UPDATE P1 SET P1.NUM = ? WHERE P1.ID = ?", "P1.ID: 0"); project.addStmtProcedure("SelectSinglePart", "SELECT * FROM P1 WHERE P1.ID = ?", "P1.ID: 0"); project.addStmtProcedure("InsertMultiPart", "INSERT INTO P1 VALUES (?, ?, ?, ?);"); project.addStmtProcedure("SelectMultiPart", "SELECT * FROM P1"); project.addStmtProcedure("UpdateMultiPart", "UPDATE P1 SET P1.NUM = ?"); project.addStmtProcedure("InsertMultiPartRepl", "INSERT INTO R1 VALUES (?, ?, ?, ?);"); project.addStmtProcedure("SelectMultiPartRepl", "SELECT * FROM R1"); project.addStmtProcedure("UpdateMultiPartRepl", "UPDATE R1 SET R1.NUM = ?"); project.addProcedures(PROCEDURES); ///////////////////////////////////////////////////////////// // CLUSTER, two hosts, each with two sites, replication of 1 ///////////////////////////////////////////////////////////// config = new LocalCluster(PREFIX + "-1-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); ///////////////////////////////////////////////////////////// // CLUSTER, 3 hosts, each with two sites, replication of 1 ///////////////////////////////////////////////////////////// config = new LocalCluster(PREFIX + "-offset-cluster.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); ///////////////////////////////////////////////////////////// // CLUSTER, 3 hosts, each with one site, replication of 1 ///////////////////////////////////////////////////////////// 
config = new LocalSingleProcessServer(PREFIX + "-odd-local.jar", 3, BackendTarget.NATIVE_EE_JNI); config.compile(project); builder.addServerConfig(config); // CLUSTER, four hosts, each with three sites, replication of 2 // config = new LocalCluster("replication-2-cluster.jar", 3, 4, // 2, BackendTarget.NATIVE_EE_JNI); // config.compile(project); // builder.addServerConfig(config); return builder; }
/**
 * Build the set of server configurations TestGiantDeleteSuite runs against.
 *
 * @return The TestSuite containing all the tests to be run.
 */
public static junit.framework.Test suite() {
    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestGiantDeleteSuite.class);

    VoltProjectBuilder project = new VoltProjectBuilder();
    project.addSchema(Insert.class.getResource("giant-delete-ddl.sql"));
    project.addProcedures(PROCEDURES);
    project.addStmtProcedure("Delete", "DELETE FROM ASSET WHERE ASSET_ID > -1;");

    // NOTE(review): the jar is named "onesite" but this config uses 2 sites — confirm intent.
    config = new LocalCluster("giantdelete-onesite.jar", 2, 1, 0, BackendTarget.NATIVE_EE_JNI);
    // Check the compile result (it was ignored before) so a broken catalog
    // fails the suite instead of silently using a stale jar.
    assertTrue(config.compile(project));
    builder.addServerConfig(config);

    return builder;
}
/**
 * Open an authenticated, blocking SocketChannel to a randomly chosen listener address.
 *
 * @param noTearDown when true, the channel is NOT registered for automatic cleanup
 *     in tearDown(); the caller owns its lifetime
 * @return the authenticated channel, configured for blocking I/O
 * @throws IOException on connection or authentication failure
 */
public SocketChannel getClientChannel(final boolean noTearDown) throws IOException {
    // Choose one listener address at random from the configured set.
    final List<String> addresses = m_config.getListenerAddresses();
    final String chosen = addresses.get(new Random().nextInt(addresses.size()));

    final byte[] hashedPassword = ConnectionUtil.getHashedPassword(m_password);
    final HostAndPort hostAndPort = HostAndPort.fromString(chosen);
    final int port = hostAndPort.hasPort() ? hostAndPort.getPort() : Constants.DEFAULT_PORT;

    // Element [0] of the returned array is the authenticated channel.
    final SocketChannel channel =
        (SocketChannel)
            ConnectionUtil.getAuthenticatedConnection(
                hostAndPort.getHostText(),
                m_username,
                hashedPassword,
                port,
                null,
                ClientAuthHashScheme.getByUnencodedLength(hashedPassword.length))[0];
    channel.configureBlocking(true);

    // Track the channel for teardown unless the caller opted out.
    if (!noTearDown) {
        synchronized (m_clientChannels) {
            m_clientChannels.add(channel);
        }
    }
    return channel;
}
/**
 * Last-chance cleanup: stop externally spawned server processes when this object is
 * garbage collected.
 *
 * <p>NOTE(review): finalizers are deprecated and GC timing is unpredictable; normal
 * cleanup is expected to happen in tearDown() — this is only a safety net.
 */
@Override
public void finalize() throws Throwable {
    try {
        shutDownExternal();
    } finally {
        // Always chain to the superclass finalizer, even if shutdown throws.
        super.finalize();
    }
}
/** * Get a VoltClient instance connected to a specific server driven by the VoltServerConfig * instance. Find the server by the config's HostId. * * @return A VoltClient instance connected to the server driven by the VoltServerConfig instance. */ public Client getClientToHostId(int hostId, long timeout) throws IOException { final String listener = m_config.getListenerAddress(hostId); ClientConfig config = new ClientConfigForTest(m_username, m_password); config.setConnectionResponseTimeout(timeout); config.setProcedureCallTimeout(timeout); final Client client = ClientFactory.createClient(config); try { client.createConnection(listener); } // retry once catch (ConnectException e) { client.createConnection(listener); } m_clients.add(client); return client; }
/**
 * JUnit special method called to shutdown the test. This instance will stop the VoltDB server
 * using the VoltServerConfig instance provided.
 */
@Override
public void tearDown() throws Exception {
    // Stop the server first; clients and raw channels are closed afterwards.
    m_config.shutDown();
    for (final Client c : m_clients) {
        c.close();
    }
    // Hold the channel-list lock while draining; getClientChannel() adds under the same lock.
    synchronized (m_clientChannels) {
        for (final SocketChannel sc : m_clientChannels) {
            try {
                ConnectionUtil.closeConnection(sc);
            } catch (final IOException e) {
                // Best-effort close: report and keep closing the remaining channels.
                e.printStackTrace();
            }
        }
        m_clientChannels.clear();
    }
    m_clients.clear();
}
public Client getFullyConnectedClient(long timeout) throws IOException { final List<String> listeners = m_config.getListenerAddresses(); final Random r = new Random(); ClientConfig config = new ClientConfigForTest(m_username, m_password); config.setConnectionResponseTimeout(timeout); config.setProcedureCallTimeout(timeout); final Client client = ClientFactory.createClient(config); for (String listener : listeners) { // Use the port generated by LocalCluster if applicable try { client.createConnection(listener); } // retry once catch (ConnectException e) { listener = listeners.get(r.nextInt(listeners.size())); client.createConnection(listener); } } m_clients.add(client); return client; }
/** * Build a list of the tests that will be run when TestTPCCSuite gets run by JUnit. Use helper * classes that are part of the RegressionSuite framework. This particular class runs all tests on * the the local JNI backend with both one and two partition configurations, as well as on the * hsql backend. * * @return The TestSuite containing all the tests to be run. */ public static Test suite() { VoltServerConfig config = null; // the suite made here will all be using the tests from this class MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestIndexCountSuite.class); // build up a project builder for the workload VoltProjectBuilder project = new VoltProjectBuilder(); project.addSchema(BatchedMultiPartitionTest.class.getResource("sqlindex-ddl.sql")); project.addProcedures(PROCEDURES); project.addPartitionInfo("TU1", "ID"); project.addPartitionInfo("TU2", "UNAME"); project.addPartitionInfo("TU3", "TEL"); project.addPartitionInfo("TU4", "UNAME"); project.addPartitionInfo("TM1", "ID"); project.addPartitionInfo("TM2", "UNAME"); project.addStmtProcedure("TU1_LT", "SELECT COUNT(*) FROM TU1 WHERE POINTS < ?"); project.addStmtProcedure("TU1_LET", "SELECT COUNT(*) FROM TU1 WHERE POINTS <= ?"); project.addStmtProcedure("TU1_GT", "SELECT COUNT(*) FROM TU1 WHERE POINTS > ?"); project.addStmtProcedure("TU1_GET", "SELECT COUNT(*) FROM TU1 WHERE POINTS >= ?"); project.addStmtProcedure("TU3_LT", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS < ?"); project.addStmtProcedure("TU3_LET", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS <= ?"); project.addStmtProcedure( "TU3_GT_LT", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS > ? AND POINTS < ?"); project.addStmtProcedure( "TU3_GT_LET", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS > ? AND POINTS <= ?"); project.addStmtProcedure( "TU3_GET_LT", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS >= ? AND POINTS < ?"); project.addStmtProcedure( "TU3_GET_LET", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? 
AND POINTS >= ? AND POINTS <= ?"); project.addStmtProcedure("TM1_LT", "SELECT COUNT(*) FROM TM1 WHERE POINTS < ?"); project.addStmtProcedure("TM1_LET", "SELECT COUNT(*) FROM TM1 WHERE POINTS <= ?"); project.addStmtProcedure("TM1_GT", "SELECT COUNT(*) FROM TM1 WHERE POINTS > ?"); project.addStmtProcedure("TM1_GET", "SELECT COUNT(*) FROM TM1 WHERE POINTS >= ?"); project.addStmtProcedure("TM2_LT", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS < ?"); project.addStmtProcedure("TM2_LET", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS <= ?"); project.addStmtProcedure( "TM2_GT_LT", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS > ? AND POINTS < ?"); project.addStmtProcedure( "TM2_GT_LET", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS > ? AND POINTS <= ?"); project.addStmtProcedure( "TM2_GET_LT", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS >= ? AND POINTS < ?"); project.addStmtProcedure( "TM2_GET_LET", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS >= ? AND POINTS <= ?"); boolean success; ///////////////////////////////////////////////////////////// // CONFIG #1: 1 Local Site/Partitions running on JNI backend ///////////////////////////////////////////////////////////// // get a server config for the native backend with one sites/partitions config = new LocalCluster("sqlCountingIndex-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI); // build the jarfile success = config.compile(project); assert (success); // add this config to the set of tests to run builder.addServerConfig(config); ///////////////////////////////////////////////////////////// // CONFIG #2: 1 Local Site/Partition running on HSQL backend ///////////////////////////////////////////////////////////// config = new LocalCluster("sqlCountingIndex-hsql.jar", 1, 1, 0, BackendTarget.HSQLDB_BACKEND); success = config.compile(project); assert (success); builder.addServerConfig(config); ///////////////////////////////////////////////////////////// // CONFIG #3: 2 Local 
Site/Partitions running on JNI backend ///////////////////////////////////////////////////////////// config = new LocalCluster("sql-twosites.jar", 2, 1, 0, BackendTarget.NATIVE_EE_JNI); success = config.compile(project); assert (success); builder.addServerConfig(config); return builder; }
/**
 * JUnit special method called to setup the test. This instance will start the VoltDB server using
 * the VoltServerConfig instance provided.
 */
@Override
public void setUp() throws Exception {
    // New tests means a new server thread that hasn't done a restore
    m_config.setCallingMethodName(m_methodName);
    // presumably 'true' requests a clean start with no prior-state restore —
    // confirm against VoltServerConfig.startUp()
    m_config.startUp(true);
}
@Override public String getName() { // munge the test name with the VoltServerConfig instance name return super.getName() + "-" + m_config.getName(); }
//
// Build a list of the tests to be run. Use the regression suite
// helpers to allow multiple backends.
// JUnit magic that uses the regression suite helper classes.
//
public static Test suite(Class classzz, boolean isCommandLogTest) throws IOException {
    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(classzz);

    // Not really using TPCC functionality but need a database.
    // The testLoadMultipartitionTable procedure assumes partitioning
    // on warehouse id.
    VoltProjectBuilder project = new VoltProjectBuilder();
    // Minimal TPCC-like schema declared inline rather than loaded from a DDL resource.
    project.addLiteralSchema(
        "CREATE TABLE WAREHOUSE (\n"
            + " W_ID SMALLINT DEFAULT '0' NOT NULL,\n"
            + " W_NAME VARCHAR(16) DEFAULT NULL,\n"
            + " W_STREET_1 VARCHAR(32) DEFAULT NULL,\n"
            + " W_STREET_2 VARCHAR(32) DEFAULT NULL,\n"
            + " W_CITY VARCHAR(32) DEFAULT NULL,\n"
            + " W_STATE VARCHAR(2) DEFAULT NULL,\n"
            + " W_ZIP VARCHAR(9) DEFAULT NULL,\n"
            + " W_TAX FLOAT DEFAULT NULL,\n"
            + " W_YTD FLOAT DEFAULT NULL,\n"
            + " CONSTRAINT W_PK_TREE PRIMARY KEY (W_ID)\n"
            + ");\n"
            + "CREATE TABLE ITEM (\n"
            + " I_ID INTEGER DEFAULT '0' NOT NULL,\n"
            + " I_IM_ID INTEGER DEFAULT NULL,\n"
            + " I_NAME VARCHAR(32) DEFAULT NULL,\n"
            + " I_PRICE FLOAT DEFAULT NULL,\n"
            + " I_DATA VARCHAR(64) DEFAULT NULL,\n"
            + " CONSTRAINT I_PK_TREE PRIMARY KEY (I_ID)\n"
            + ");\n"
            + "CREATE TABLE NEW_ORDER (\n"
            + " NO_W_ID SMALLINT DEFAULT '0' NOT NULL\n"
            + ");\n");
    project.addPartitionInfo("WAREHOUSE", "W_ID");
    project.addPartitionInfo("NEW_ORDER", "NO_W_ID");
    project.addProcedures(PROCEDURES);

    // Enable asynchronous logging for test of commandlog test
    // (command logging is a Pro-only feature, hence the isPro() gate).
    if (MiscUtils.isPro() && isCommandLogTest) {
        project.configureLogging(null, null, false, true, FSYNC_INTERVAL_GOLD, null, null);
    }

    /*
     * Create a cluster configuration.
     * Some of the sysproc results come back a little strange when applied to a cluster that is being
     * simulated through LocalCluster -- all the hosts have the same HOSTNAME, just different host ids.
     * So, these tests shouldn't rely on the usual uniqueness of host names in a cluster.
     */
    config =
        new LocalCluster(
            "statistics-cluster.jar",
            StatisticsTestSuiteBase.SITES,
            StatisticsTestSuiteBase.HOSTS,
            StatisticsTestSuiteBase.KFACTOR,
            BackendTarget.NATIVE_EE_JNI);
    ((LocalCluster) config).setHasLocalServer(hasLocalServer);
    if (MiscUtils.isPro() && isCommandLogTest) {
        // Tiny log segments force frequent segment rollover during the test.
        ((LocalCluster) config).setJavaProperty("LOG_SEGMENT_SIZE", "1");
        ((LocalCluster) config).setJavaProperty("LOG_SEGMENTS", "1");
    }
    boolean success = config.compile(project);
    assertTrue(success);
    builder.addServerConfig(config);
    return builder;
}
/**
 * Add a server configuration to the set of configurations we want these tests to run on.
 *
 * <p>A configuration may be filtered out by the VOLT_REGRESSIONS environment variable
 * (values containing "hsql", "local" and/or "cluster" select which kinds run) or by
 * memcheck builds (BUILD starting with "memcheck"). A filtered-out config returns
 * {@code true} — skipping is not a failure.
 *
 * @param config A Server Configuration to run this set of tests on.
 * @return true if the config was added (or deliberately skipped); false only on
 *     reflection errors while instantiating the test class
 */
public boolean addServerConfig(VoltServerConfig config) {
    final String enabled_configs = System.getenv().get("VOLT_REGRESSIONS");
    System.out.println("VOLT REGRESSIONS ENABLED: " + enabled_configs);
    // null or "all" means no filtering; otherwise only the listed kinds run.
    if (!(enabled_configs == null || enabled_configs.contentEquals("all"))) {
        if (config instanceof LocalCluster) {
            if (config.isHSQL() && !enabled_configs.contains("hsql")) {
                return true;
            }
            if ((config.getNodeCount() == 1) && !enabled_configs.contains("local")) {
                return true;
            }
            if ((config.getNodeCount() > 1) && !enabled_configs.contains("cluster")) {
                return true;
            }
        }
    }
    final String buildType = System.getenv().get("BUILD");
    if (buildType != null) {
        if (buildType.startsWith("memcheck")) {
            if (config instanceof LocalCluster) {
                LocalCluster lc = (LocalCluster) config;
                // don't run valgrind on multi-node clusters without embedded processes
                if ((lc.getNodeCount() > 1) || (lc.m_hasLocalServer == false)) {
                    return true;
                }
            }
            if (config.isHSQL()) {
                return true;
            }
        }
    }
    // get the constructor of the test class
    Constructor<?> cons = null;
    try {
        cons = m_testClass.getConstructor(String.class);
    } catch (Exception e) {
        e.printStackTrace();
        return false;
    }
    // get the set of test methods
    List<String> methods = getTestMethodNames(m_testClass);
    // add a test case instance for each method for the specified
    // server config
    for (String mname : methods) {
        RegressionSuite rs = null;
        try {
            rs = (RegressionSuite) cons.newInstance(mname);
        } catch (Exception e) {
            e.printStackTrace();
            return false;
        }
        rs.setConfig(config);
        super.addTest(rs);
    }
    return true;
}
/**
 * Delegates to the active server configuration.
 *
 * @return Is the underlying instance of VoltDB running HSQL?
 */
public boolean isHSQL() {
    return m_config.isHSQL();
}
/**
 * Delegates to the active server configuration.
 *
 * @return The number of logical partitions in this configuration
 */
public int getLogicalPartitionCount() {
    return m_config.getLogicalPartitionCount();
}
/**
 * Delegates to the active server configuration.
 *
 * @return Is the underlying instance of VoltDB running Valgrind with the IPC client?
 */
public boolean isValgrind() {
    return m_config.isValgrind();
}