//
  // Build a list of the tests to be run. Use the regression suite
  // helpers to allow multiple backends.
  // JUnit magic that uses the regression suite helper classes.
  //
  public static Test suite() {
    MultiConfigSuiteBuilder suiteBuilder =
        new MultiConfigSuiteBuilder(TestSystemProcedureSuite.class);

    // The tests only need *a* database, so the stock TPC-C schema is reused.
    // testLoadMultipartitionTable relies on the default partitioning scheme
    // (partitioned on warehouse id).
    TPCCProjectBuilder tpccProject = new TPCCProjectBuilder();
    tpccProject.addDefaultSchema();
    tpccProject.addDefaultPartitioning();
    tpccProject.addProcedures(PROCEDURES);
    tpccProject.addStmtProcedure(
        "InsertNewOrder", "INSERT INTO NEW_ORDER VALUES (?, ?, ?);", "NEW_ORDER.NO_W_ID: 2");

    // Configuration 1: a single process hosting two sites.
    VoltServerConfig twoSiteConfig =
        new LocalSingleProcessServer("sysproc-twosites.jar", 2, BackendTarget.NATIVE_EE_JNI);
    twoSiteConfig.compile(tpccProject);
    suiteBuilder.addServerConfig(twoSiteConfig);

    // Configuration 2: a multi-host cluster, so the sysprocs are also
    // exercised across node boundaries.
    VoltServerConfig clusterConfig =
        new LocalCluster("sysproc-cluster.jar", 2, 2, 1, BackendTarget.NATIVE_EE_JNI);
    clusterConfig.compile(tpccProject);
    suiteBuilder.addServerConfig(clusterConfig);

    return suiteBuilder;
  }
  @BeforeClass
  public static void setUp() throws ClassNotFoundException, SQLException {
    // The compiled catalog jar is written into the build output directory.
    testjar = BuildDirectoryUtils.getBuildDirectoryPath() + File.separator + "jdbcdrivertest.jar";

    // Compile a minimal TPC-C catalog containing just the two procedures
    // these JDBC tests exercise.
    TPCCProjectBuilder builder = new TPCCProjectBuilder();
    builder.addDefaultSchema();
    builder.addDefaultPartitioning();
    builder.addProcedures(MultiSiteSelect.class, InsertNewOrder.class);
    builder.compile(testjar, 2, 0);
    pb = builder;

    // Boot the server and open the JDBC connection.
    startServer();
  }
  private static void startServer() throws ClassNotFoundException, SQLException {
    // Launch an in-process VoltDB against the compiled catalog and block
    // until it is ready to accept client connections.
    server = new ServerThread(testjar, pb.getPathToDeployment(), BackendTarget.NATIVE_EE_JNI);
    server.start();
    server.waitForInitialization();

    // Register the VoltDB JDBC driver, then connect to the local server.
    final String driverClass = "org.voltdb.jdbc.Driver";
    final String jdbcUrl = "jdbc:voltdb://localhost:21212";
    Class.forName(driverClass);
    conn = DriverManager.getConnection(jdbcUrl);
  }
// ---- Example #4 ----
 /**
  * Main setUp method for test cases. Given the ProjectType we will populate the static catalog
  * field members. The full_catalog flag is a hack to work around OutOfMemory issues with TPC-E.
  *
  * @param type which benchmark project to load the catalog for
  * @param fkeys whether to include foreign keys in the generated catalog
  * @param full_catalog whether to build the complete catalog (TPC-E only)
  * @throws Exception if catalog compilation or loading fails
  */
 protected void setUp(ProjectType type, boolean fkeys, boolean full_catalog) throws Exception {
   super.setUp();
   // True only on the very first invocation; later calls see a non-null Boolean.
   is_first = (is_first == null);
   this.last_type = type;
   // Per-type caches: reuse a previously-built catalog when available.
   catalog = project_catalogs.get(type);
   catalog_db = project_databases.get(type);
   p_estimator = project_p_estimators.get(type);
   if (catalog == null) {
     AbstractProjectBuilder projectBuilder = AbstractProjectBuilder.getProjectBuilder(type);
     if (ENABLE_JAR_REUSE) {
       // Fast path: load the catalog from a previously compiled jar on disk.
       File jar_path = projectBuilder.getJarPath(true);
       if (jar_path.exists()) {
         LOG.debug("LOAD CACHE JAR: " + jar_path.getAbsolutePath());
         catalog = CatalogUtil.loadCatalogFromJar(jar_path.getAbsolutePath());
       } else {
         LOG.debug("MISSING JAR: " + jar_path.getAbsolutePath());
       }
     }
     if (catalog == null) {
       // Slow path: compile the catalog from scratch for this project type.
       switch (type) {
         case TPCC:
           catalog = TPCCProjectBuilder.getTPCCSchemaCatalog(true);
           // ProcParameter mapping for TPCC is applied below, after the switch.
           break;
         case TPCE:
           // full_catalog works around OutOfMemory issues with the TPC-E schema.
           catalog = projectBuilder.createCatalog(fkeys, full_catalog);
           break;
         case TM1:
         case SEATS:
         case AUCTIONMARK:
         case MARKOV:
         case LOCALITY:
         case MAPREDUCE:
           catalog = projectBuilder.getFullCatalog(fkeys);
           break;
         default:
           assert (false) : "Invalid project type - " + type;
       } // SWITCH
     }
     // Update the ProcParameter mapping used in the TPCC catalog, regardless
     // of whether it came from the jar cache or was freshly compiled.
     if (type == ProjectType.TPCC)
       ParametersUtil.populateCatalog(
           CatalogUtil.getDatabase(catalog), ParametersUtil.getParameterMapping(type));
     this.init(type, catalog);
   }
 }
// ---- Example #5 ----
  /**
   * Builds a two-site, single-host execution fixture: compiles a small TPC-C
   * catalog, boots two native ExecutionEngine instances (one per site), caches
   * the plan fragments for the test procedures in the plan repository, and
   * seeds one NEW_ORDER row through each engine.
   *
   * NOTE(review): relies on static fields (catalog, cluster, site1/site2,
   * ee1/ee2, selectProc, selectStmt, selectTopFrag, selectBottomFrag,
   * insertFrag) declared elsewhere in this class.
   */
  @SuppressWarnings("deprecation")
  @Override
  public void setUp() throws IOException, InterruptedException {
    VoltDB.instance().readBuildInfo("Test");

    // compile a catalog
    String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
    String catalogJar = testDir + File.separator + JAR;

    TPCCProjectBuilder pb = new TPCCProjectBuilder();
    pb.addDefaultSchema();
    pb.addDefaultPartitioning();
    pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class);

    // compile with 2 sites and no replication (per the 2, 0 arguments)
    pb.compile(catalogJar, 2, 0);

    // load a catalog
    byte[] bytes = CatalogUtil.toBytes(new File(catalogJar));
    String serializedCatalog = CatalogUtil.loadCatalogFromJar(bytes, null);

    // create the catalog (that will be passed to the ClientInterface
    catalog = new Catalog();
    catalog.execute(serializedCatalog);

    // update the catalog with the data from the deployment file
    String pathToDeployment = pb.getPathToDeployment();
    assertTrue(CatalogUtil.compileDeploymentAndGetCRC(catalog, pathToDeployment, true) >= 0);

    // Look up the two test procedures in the compiled catalog.
    cluster = catalog.getClusters().get("cluster");
    CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures();
    Procedure insertProc = procedures.get("InsertNewOrder");
    assert (insertProc != null);
    selectProc = procedures.get("MultiSiteSelect");
    assert (selectProc != null);

    // Each EE needs its own thread for correct initialization.
    // The thread is joined immediately: it exists only so the engine's native
    // initialization runs on a dedicated thread, not for concurrency.
    final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>();
    final byte configBytes[] = LegacyHashinator.getConfigureBytes(2);
    Thread site1Thread =
        new Thread() {
          @Override
          public void run() {
            site1Reference.set(
                new ExecutionEngineJNI(
                    cluster.getRelativeIndex(),
                    1,
                    0,
                    0,
                    "",
                    100,
                    HashinatorType.LEGACY,
                    configBytes));
          }
        };
    site1Thread.start();
    site1Thread.join();

    // Second engine, same pattern, with its own site/partition ids.
    final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>();
    Thread site2Thread =
        new Thread() {
          @Override
          public void run() {
            site2Reference.set(
                new ExecutionEngineJNI(
                    cluster.getRelativeIndex(),
                    2,
                    1,
                    0,
                    "",
                    100,
                    HashinatorType.LEGACY,
                    configBytes));
          }
        };
    site2Thread.start();
    site2Thread.join();

    // create two EEs
    site1 = new ExecutionSite(0); // site 0
    ee1 = site1Reference.get();
    ee1.loadCatalog(0, catalog.serialize());
    site2 = new ExecutionSite(1); // site 1
    ee2 = site2Reference.get();
    ee2.loadCatalog(0, catalog.serialize());

    // cache some plan fragments
    selectStmt = selectProc.getStatements().get("selectAll");
    assert (selectStmt != null);
    int i = 0;
    // this kinda assumes the right order
    for (PlanFragment f : selectStmt.getFragments()) {
      if (i == 0) selectTopFrag = f;
      else selectBottomFrag = f;
      i++;
    }
    assert (selectTopFrag != null);
    assert (selectBottomFrag != null);

    // The top fragment is the one that consumes dependencies; if the
    // iteration order above guessed wrong, swap the two fragments.
    if (selectTopFrag.getHasdependencies() == false) {
      PlanFragment temp = selectTopFrag;
      selectTopFrag = selectBottomFrag;
      selectBottomFrag = temp;
    }

    // get the insert frag
    Statement insertStmt = insertProc.getStatements().get("insert");
    assert (insertStmt != null);

    // The insert statement is single-fragment, so the loop just captures
    // the last (only) fragment.
    for (PlanFragment f : insertStmt.getFragments()) insertFrag = f;

    // populate plan cache
    ActivePlanRepository.clear();
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(selectBottomFrag),
        Encoder.base64Decode(selectBottomFrag.getPlannodetree()));
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(selectTopFrag),
        Encoder.base64Decode(selectTopFrag.getPlannodetree()));
    ActivePlanRepository.addFragmentForTest(
        CatalogUtil.getUniqueIdForFragment(insertFrag),
        Encoder.base64Decode(insertFrag.getPlannodetree()));

    // insert some data
    ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L);

    // Row (1,1,1) goes through engine 2; expect exactly one modified tuple.
    VoltTable[] results =
        ee2.executePlanFragments(
            1,
            new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)},
            null,
            new ParameterSet[] {params},
            1,
            0,
            42,
            Long.MAX_VALUE);
    assert (results.length == 1);
    assert (results[0].asScalarLong() == 1L);

    params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L);

    // Row (2,2,2) goes through engine 1; again expect one modified tuple.
    results =
        ee1.executePlanFragments(
            1,
            new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)},
            null,
            new ParameterSet[] {params},
            2,
            1,
            42,
            Long.MAX_VALUE);
    assert (results.length == 1);
    assert (results[0].asScalarLong() == 1L);
  }