Example #1
  public static junit.framework.Test suite() {
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestFastCombineSuite.class);
    VoltProjectBuilder project = new VoltProjectBuilder("fastcomb");
    VoltServerConfig config = null;

    // Schema + Table Partitions
    project.addSchema(TestFastAggregateSuite.class.getResource("testorderby-ddl.sql"));
    project.addTablePartitionInfo("O1", "PKEY");
    project.addProcedures(PROCEDURES);
    // Single Statement Procedures

    project.addStmtProcedure("TestSelect", "SELECT PKEY FROM O1 WHERE A_INT=?");

    // CLUSTER CONFIG #1
    // One site with two partitions running in this JVM
    config = new LocalSingleProcessServer("fastcomb-twoPart.jar", 2, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    /**
     * // CLUSTER CONFIG #2
     * // Two sites, each with two partitions running in separate JVMs
     * config = new LocalCluster("fastcomb-twoSiteTwoPart.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI);
     * config.compile(project);
     * builder.addServerConfig(config);
     */
    return builder;
  }
Example #2
  public static junit.framework.Test suite() {

    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestSqlAggregateSuite.class);

    VoltProjectBuilder project = new VoltProjectBuilder("aggregate");
    project.addSchema(Insert.class.getResource("aggregate-sql-ddl.sql"));
    project.addTablePartitionInfo("P1", "ID");
    project.addProcedures(PROCEDURES);

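    // CONFIG #1: one site in a single process on the JNI backend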
    config =
        new LocalSingleProcessServer("sqlaggregate-onesite.jar", 1, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    // ADHOC sql still returns double the number of modified rows
    // config = new LocalSingleProcessServer("sqlaggregate-twosites.jar", 2,
    // BackendTarget.NATIVE_EE_JNI);
    // config.compile(project);
    // builder.addServerConfig(config);

    // config = new LocalSingleProcessServer("sqlaggregate-hsql.jar", 1,
    // BackendTarget.HSQLDB_BACKEND);
    // config.compile(project);
    // builder.addServerConfig(config);

    // Cluster
    config = new LocalCluster("sqlaggregate-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    return builder;
  }
Example #3
  /**
   * Build a list of the tests that will be run when TestExplainCommandSuite gets run by JUnit. Use
   * helper classes that are part of the RegressionSuite framework. This particular class runs all
   * tests on the local JNI backend with a single site/partition configuration.
   *
   * @return The TestSuite containing all the tests to be run.
   */
  public static Test suite() {
    VoltServerConfig config = null;

    // the suite made here will all be using the tests from this class
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestExplainCommandSuite.class);

    // build up a project builder for the workload
    VoltProjectBuilder project = new VoltProjectBuilder();
    project.addSchema(TestExplainCommandSuite.class.getResource("testExplainCommand-ddl.sql"));
    project.addPartitionInfo("t1", "PKEY");
    project.addPartitionInfo("t2", "PKEY");
    project.addPartitionInfo("t3", "PKEY");
    project.setUseDDLSchema(true);

    boolean success;

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 1 Local Site/Partition running on JNI backend
    /////////////////////////////////////////////////////////////

    // get a server config for the native backend with one site/partition
    config =
        new LocalCluster("testExplainCommand-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);

    // build the jarfile
    success = config.compile(project);
    assert (success);

    // add this config to the set of tests to run
    builder.addServerConfig(config);

    return builder;
  }
Example #4
  //
  // Build a list of the tests to be run. Use the regression suite
  // helpers to allow multiple backends.
  // JUnit magic that uses the regression suite helper classes.
  //
  public static Test suite() {
    VoltServerConfig config = null;

    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestSystemProcedureSuite.class);

    // Not really using TPCC functionality but need a database.
    // The testLoadMultipartitionTable procedure assumes partitioning
    // on warehouse id.
    TPCCProjectBuilder project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addProcedures(PROCEDURES);
    project.addStmtProcedure(
        "InsertNewOrder", "INSERT INTO NEW_ORDER VALUES (?, ?, ?);", "NEW_ORDER.NO_W_ID: 2");

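    // Two sites in a single process on the JNI backend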
    config = new LocalSingleProcessServer("sysproc-twosites.jar", 2, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    /*
     * Add a cluster configuration for sysprocs too
     */
    config = new LocalCluster("sysproc-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    return builder;
  }
Example #5
  public static junit.framework.Test suite() {
    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestWindowFunctionSuite.class);
    boolean success = false;

    VoltProjectBuilder project;
    try {
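      // CONFIG #1: one host with one site on the JNI backend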
      project = new VoltProjectBuilder();
      config = new LocalCluster("test-windowed-rank.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);
      setupSchema(project);
      success = config.compile(project);
      assertTrue(success);
      builder.addServerConfig(config);

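      // CONFIG #2: one host with three sites on the JNI backend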
      project = new VoltProjectBuilder();
      config = new LocalCluster("test-windowed-rank.jar", 3, 1, 0, BackendTarget.NATIVE_EE_JNI);
      setupSchema(project);
      success = config.compile(project);
      assertTrue(success);
      builder.addServerConfig(config);
    } catch (IOException excp) {
      fail();
    }

    return builder;
  }
Example #6
  public static junit.framework.Test suite() {

    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder =
        new MultiConfigSuiteBuilder(TestIndexMemoryOwnershipSuite.class);

    VoltProjectBuilder project = new VoltProjectBuilder();
    project.addSchema(
        TestIndexMemoryOwnershipSuite.class.getResource("testindexmemoryownership-ddl.sql"));
    project.addPartitionInfo("t1", "a");
    project.addStmtProcedure("InsertT1", "insert into t1 values (?, ?, ?);", "t1.a:0");
    project.addStmtProcedure("UpdateT1c", "update t1 set c = ? where a = ?;", "t1.a:1");
    project.addStmtProcedure("UpdateT1b", "update t1 set b = ? where a = ?;", "t1.a:1");
    project.addStmtProcedure("DeleteT1", "delete from t1 where c = ?;");
    project.addStmtProcedure("LookupT1b", "select * from t1 where b = ?;");
    project.addStmtProcedure("MVLookup", "select * from mv where b = ? and a = ?;", "t1.a:1");
    project.addStmtProcedure("MVAll", "select * from mv;");

    boolean success;

    // JNI
    config = new LocalCluster("updatememoryownership.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);
    success = config.compile(project);
    assertTrue(success);
    builder.addServerConfig(config);

    return builder;
  }
Example #7
  /**
   * Build a list of the tests that will be run when TestSneakyExecutionOrderSuite gets run by
   * JUnit. Use helper classes that are part of the RegressionSuite framework. This particular class
   * runs all tests on the local JNI backend, both as a two-site single process and as a multi-host
   * cluster.
   *
   * @return The TestSuite containing all the tests to be run.
   */
  public static Test suite() {
    // the suite made here will all be using the tests from this class
    MultiConfigSuiteBuilder builder =
        new MultiConfigSuiteBuilder(TestSneakyExecutionOrderSuite.class);

    // build up a project builder for the workload
    VoltProjectBuilder project = new VoltProjectBuilder("sneaky");
    project.addSchema(MultiPartition.class.getResource("sneaky-ddl.sql"));
    project.addTablePartitionInfo("P1", "P");
    project.addProcedures(PROCEDURES);

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 2 Local Sites/Partitions running on JNI backend
    /////////////////////////////////////////////////////////////

    // VoltServerConfig config = new LocalCluster("sneaky.jar", 2, 2, BackendTarget.NATIVE_EE_JNI);
    VoltServerConfig config =
        new LocalSingleProcessServer("sneaky-twosites.jar", 2, BackendTarget.NATIVE_EE_JNI);
    boolean success = config.compile(project);
    assert (success);
    builder.addServerConfig(config);

    // Cluster
    config = new LocalCluster("sneaky-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    return builder;
  }
Example #8
  public static junit.framework.Test suite() {
    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestLimitOffsetSuite.class);
    VoltProjectBuilder project = new VoltProjectBuilder();

    project.addSchema(TestLimitOffsetSuite.class.getResource("testlimitoffset-ddl.sql"));
    project.addPartitionInfo("A", "PKEY");

    project.addStmtProcedure("InsertA", "INSERT INTO A VALUES(?, ?);");
    project.addStmtProcedure("InsertB", "INSERT INTO B VALUES(?, ?);");
    project.addStmtProcedure("LimitAPKEY", "SELECT * FROM A ORDER BY PKEY LIMIT ? OFFSET ?;");
    project.addStmtProcedure("LimitBPKEY", "SELECT * FROM B ORDER BY PKEY LIMIT ? OFFSET ?;");
    project.addStmtProcedure("LimitAI", "SELECT * FROM A ORDER BY I LIMIT ? OFFSET ?;");
    project.addStmtProcedure("LimitBI", "SELECT * FROM B ORDER BY I LIMIT ? OFFSET ?;");

    // local
    config = new LocalCluster("testlimitoffset-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);
    if (!config.compile(project)) fail();
    builder.addServerConfig(config);

    // Cluster
    config = new LocalCluster("testlimitoffset-cluster.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI);
    if (!config.compile(project)) fail();
    builder.addServerConfig(config);

    // HSQL for baseline
    config = new LocalCluster("testlimitoffset-hsql.jar", 1, 1, 0, BackendTarget.HSQLDB_BACKEND);
    if (!config.compile(project)) fail();
    builder.addServerConfig(config);
    return builder;
  }
Example #9
  public static junit.framework.Test suite() {
    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestReplicationSuite.class);

    VoltProjectBuilder project = new VoltProjectBuilder("replication");
    project.addSchema(SelectEmptyTable.class.getResource("replication-ddl.sql"));
    project.addTablePartitionInfo("P1", "ID");
    project.addStmtProcedure("InsertSinglePart", "INSERT INTO P1 VALUES (?, ?, ?, ?);", "P1.ID: 0");
    project.addStmtProcedure(
        "UpdateSinglePart", "UPDATE P1 SET P1.NUM = ? WHERE P1.ID = ?", "P1.ID: 0");
    project.addStmtProcedure("SelectSinglePart", "SELECT * FROM P1 WHERE P1.ID = ?", "P1.ID: 0");
    project.addStmtProcedure("InsertMultiPart", "INSERT INTO P1 VALUES (?, ?, ?, ?);");
    project.addStmtProcedure("SelectMultiPart", "SELECT * FROM P1");
    project.addStmtProcedure("UpdateMultiPart", "UPDATE P1 SET P1.NUM = ?");
    project.addStmtProcedure("InsertMultiPartRepl", "INSERT INTO R1 VALUES (?, ?, ?, ?);");
    project.addStmtProcedure("SelectMultiPartRepl", "SELECT * FROM R1");
    project.addStmtProcedure("UpdateMultiPartRepl", "UPDATE R1 SET R1.NUM = ?");
    project.addProcedures(PROCEDURES);

    /////////////////////////////////////////////////////////////
    // CLUSTER, two hosts, each with two sites, replication of 1
    /////////////////////////////////////////////////////////////
    config = new LocalCluster(PREFIX + "-1-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    /////////////////////////////////////////////////////////////
    // CLUSTER, 3 hosts, each with two sites, replication of 1
    /////////////////////////////////////////////////////////////
    config = new LocalCluster(PREFIX + "-offset-cluster.jar", 2, 3, 1, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    /////////////////////////////////////////////////////////////
    // LOCAL single process with three sites
    /////////////////////////////////////////////////////////////
    config =
        new LocalSingleProcessServer(PREFIX + "-odd-local.jar", 3, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    // CLUSTER, four hosts, each with three sites, replication of 2
    //        config = new LocalCluster("replication-2-cluster.jar", 3, 4,
    //                                  2, BackendTarget.NATIVE_EE_JNI);
    //        config.compile(project);
    //        builder.addServerConfig(config);

    return builder;
  }
Example #10
  public static Test suite() throws IOException {
    // the suite made here will all be using the tests from this class
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestJDBCConnectionFail.class);

    // build up a project builder for the workload
    VoltProjectBuilder project = getBuilderForTest();
    boolean success;
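    // CLUSTER: five hosts with four sites per host, replication factor taken from kfactor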
    m_config = new LocalCluster("decimal-default.jar", 4, 5, kfactor, BackendTarget.NATIVE_EE_JNI);
    m_config.setHasLocalServer(true);
    success = m_config.compile(project);
    assertTrue(success);

    // add this config to the set of tests to run
    builder.addServerConfig(m_config);
    return builder;
  }
Example #11
  public static junit.framework.Test suite() {

    VoltServerConfig config = null;
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestGiantDeleteSuite.class);

    VoltProjectBuilder project = new VoltProjectBuilder();
    project.addSchema(Insert.class.getResource("giant-delete-ddl.sql"));
    project.addProcedures(PROCEDURES);
    project.addStmtProcedure("Delete", "DELETE FROM ASSET WHERE ASSET_ID > -1;");

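    // One host with two sites on the JNI backend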
    config = new LocalCluster("giantdelete-onesite.jar", 2, 1, 0, BackendTarget.NATIVE_EE_JNI);
    config.compile(project);
    builder.addServerConfig(config);

    return builder;
  }
Example #12
  //
  // Build a list of the tests to be run. Use the regression suite
  // helpers to allow multiple backends.
  // JUnit magic that uses the regression suite helper classes.
  //
  public static Test suite(Class classzz, boolean isCommandLogTest) throws IOException {
    VoltServerConfig config = null;

    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(classzz);

    // Not really using TPCC functionality but need a database.
    // The testLoadMultipartitionTable procedure assumes partitioning
    // on warehouse id.
    VoltProjectBuilder project = new VoltProjectBuilder();
    project.addLiteralSchema(
        "CREATE TABLE WAREHOUSE (\n"
            + "  W_ID SMALLINT DEFAULT '0' NOT NULL,\n"
            + "  W_NAME VARCHAR(16) DEFAULT NULL,\n"
            + "  W_STREET_1 VARCHAR(32) DEFAULT NULL,\n"
            + "  W_STREET_2 VARCHAR(32) DEFAULT NULL,\n"
            + "  W_CITY VARCHAR(32) DEFAULT NULL,\n"
            + "  W_STATE VARCHAR(2) DEFAULT NULL,\n"
            + "  W_ZIP VARCHAR(9) DEFAULT NULL,\n"
            + "  W_TAX FLOAT DEFAULT NULL,\n"
            + "  W_YTD FLOAT DEFAULT NULL,\n"
            + "  CONSTRAINT W_PK_TREE PRIMARY KEY (W_ID)\n"
            + ");\n"
            + "CREATE TABLE ITEM (\n"
            + "  I_ID INTEGER DEFAULT '0' NOT NULL,\n"
            + "  I_IM_ID INTEGER DEFAULT NULL,\n"
            + "  I_NAME VARCHAR(32) DEFAULT NULL,\n"
            + "  I_PRICE FLOAT DEFAULT NULL,\n"
            + "  I_DATA VARCHAR(64) DEFAULT NULL,\n"
            + "  CONSTRAINT I_PK_TREE PRIMARY KEY (I_ID)\n"
            + ");\n"
            + "CREATE TABLE NEW_ORDER (\n"
            + "  NO_W_ID SMALLINT DEFAULT '0' NOT NULL\n"
            + ");\n");

    project.addPartitionInfo("WAREHOUSE", "W_ID");
    project.addPartitionInfo("NEW_ORDER", "NO_W_ID");
    project.addProcedures(PROCEDURES);

    // Enable asynchronous command logging for the command log test
    if (MiscUtils.isPro() && isCommandLogTest) {
      project.configureLogging(null, null, false, true, FSYNC_INTERVAL_GOLD, null, null);
    }

    /*
     * Create a cluster configuration.
     * Some of the sysproc results come back a little strange when applied to a cluster that is being
     * simulated through LocalCluster -- all the hosts have the same HOSTNAME, just different host ids.
     * So, these tests shouldn't rely on the usual uniqueness of host names in a cluster.
     */
    config =
        new LocalCluster(
            "statistics-cluster.jar",
            StatisticsTestSuiteBase.SITES,
            StatisticsTestSuiteBase.HOSTS,
            StatisticsTestSuiteBase.KFACTOR,
            BackendTarget.NATIVE_EE_JNI);
    ((LocalCluster) config).setHasLocalServer(hasLocalServer);

    if (MiscUtils.isPro() && isCommandLogTest) {
      ((LocalCluster) config).setJavaProperty("LOG_SEGMENT_SIZE", "1");
      ((LocalCluster) config).setJavaProperty("LOG_SEGMENTS", "1");
    }

    boolean success = config.compile(project);
    assertTrue(success);
    builder.addServerConfig(config);

    return builder;
  }
Example #13
  /**
   * Build a list of the tests that will be run when TestIndexCountSuite gets run by JUnit. Use
   * helper classes that are part of the RegressionSuite framework. This particular class runs all
   * tests on the local JNI backend with both one and two partition configurations, as well as on
   * the hsql backend.
   *
   * @return The TestSuite containing all the tests to be run.
   */
  public static Test suite() {
    VoltServerConfig config = null;

    // the suite made here will all be using the tests from this class
    MultiConfigSuiteBuilder builder = new MultiConfigSuiteBuilder(TestIndexCountSuite.class);

    // build up a project builder for the workload
    VoltProjectBuilder project = new VoltProjectBuilder();
    project.addSchema(BatchedMultiPartitionTest.class.getResource("sqlindex-ddl.sql"));
    project.addProcedures(PROCEDURES);
    project.addPartitionInfo("TU1", "ID");
    project.addPartitionInfo("TU2", "UNAME");
    project.addPartitionInfo("TU3", "TEL");
    project.addPartitionInfo("TU4", "UNAME");
    project.addPartitionInfo("TM1", "ID");
    project.addPartitionInfo("TM2", "UNAME");

    project.addStmtProcedure("TU1_LT", "SELECT COUNT(*) FROM TU1 WHERE POINTS < ?");
    project.addStmtProcedure("TU1_LET", "SELECT COUNT(*) FROM TU1 WHERE POINTS <= ?");
    project.addStmtProcedure("TU1_GT", "SELECT COUNT(*) FROM TU1 WHERE POINTS > ?");
    project.addStmtProcedure("TU1_GET", "SELECT COUNT(*) FROM TU1 WHERE POINTS >= ?");

    project.addStmtProcedure("TU3_LT", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS < ?");
    project.addStmtProcedure("TU3_LET", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS <= ?");
    project.addStmtProcedure(
        "TU3_GT_LT", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS > ? AND POINTS < ?");
    project.addStmtProcedure(
        "TU3_GT_LET", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS > ? AND POINTS <= ?");
    project.addStmtProcedure(
        "TU3_GET_LT", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS >= ? AND POINTS < ?");
    project.addStmtProcedure(
        "TU3_GET_LET", "SELECT COUNT(*) FROM TU3 WHERE TEL = ? AND POINTS >= ? AND POINTS <= ?");

    project.addStmtProcedure("TM1_LT", "SELECT COUNT(*) FROM TM1 WHERE POINTS < ?");
    project.addStmtProcedure("TM1_LET", "SELECT COUNT(*) FROM TM1 WHERE POINTS <= ?");
    project.addStmtProcedure("TM1_GT", "SELECT COUNT(*) FROM TM1 WHERE POINTS > ?");
    project.addStmtProcedure("TM1_GET", "SELECT COUNT(*) FROM TM1 WHERE POINTS >= ?");

    project.addStmtProcedure("TM2_LT", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS < ?");
    project.addStmtProcedure("TM2_LET", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS <= ?");
    project.addStmtProcedure(
        "TM2_GT_LT", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS > ? AND POINTS < ?");
    project.addStmtProcedure(
        "TM2_GT_LET", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS > ? AND POINTS <= ?");
    project.addStmtProcedure(
        "TM2_GET_LT", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS >= ? AND POINTS < ?");
    project.addStmtProcedure(
        "TM2_GET_LET", "SELECT COUNT(*) FROM TM2 WHERE UNAME = ? AND POINTS >= ? AND POINTS <= ?");
    boolean success;

    /////////////////////////////////////////////////////////////
    // CONFIG #1: 1 Local Site/Partition running on JNI backend
    /////////////////////////////////////////////////////////////

    // get a server config for the native backend with one site/partition
    config = new LocalCluster("sqlCountingIndex-onesite.jar", 1, 1, 0, BackendTarget.NATIVE_EE_JNI);

    // build the jarfile
    success = config.compile(project);
    assert (success);

    // add this config to the set of tests to run
    builder.addServerConfig(config);

    /////////////////////////////////////////////////////////////
    // CONFIG #2: 1 Local Site/Partition running on HSQL backend
    /////////////////////////////////////////////////////////////

    config = new LocalCluster("sqlCountingIndex-hsql.jar", 1, 1, 0, BackendTarget.HSQLDB_BACKEND);
    success = config.compile(project);
    assert (success);
    builder.addServerConfig(config);

    /////////////////////////////////////////////////////////////
    // CONFIG #3: 2 Local Sites/Partitions running on JNI backend
    /////////////////////////////////////////////////////////////
    config = new LocalCluster("sql-twosites.jar", 2, 1, 0, BackendTarget.NATIVE_EE_JNI);
    success = config.compile(project);
    assert (success);
    builder.addServerConfig(config);

    return builder;
  }