  /** Comparisons that can't get pushed down. */
  public void testComparison() throws SQLException {
    Statement st = createStatement();

    st.execute("CALL SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1)");

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1" + " --derby-properties index = t1_c1\n" + "where c1 = c1"),
        FULL_TABLE);
    RuntimeStatisticsParser rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1"));

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1" + " --derby-properties index = t1_c1\n" + "where c1 = c2"),
        FULL_TABLE);
    rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1"));

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1" + " --derby-properties index = t1_c1\n" + "where c1 + 1 = 1 + c1"),
        FULL_TABLE);
    rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1"));

    st.close();
  }
  /** Make sure the following fragments get treated as comments. */
  public void testFragmentsAsComments() throws SQLException {
    String[] frags = {
      "--d",
      "-- de",
      "-- der",
      "--derb",
      "--derby comment",
      "-- derby another comment",
      "--derby-",
      "--derby-p",
      "--derby-pr",
      "--derby-pro",
      "--derby-prop",
      "--derby-prope",
      "--derby-proper",
      "-- derby-propert",
      "-- derby-properti",
      "-- derby-propertie",
      "-- derby-propertiex"
    };

    Statement st = createStatement();

    for (int i = 0; i < frags.length; i++)
      JDBC.assertFullResultSet(st.executeQuery(frags[i] + "\n VALUES 1 "), new String[][] {{"1"}});

    st.close();
  }
  /**
   * Test that routine parameters and return types are handled correctly when a routine is created
   * by 10.4 in soft upgrade. 10.4 simplified the stored format of the types by ensuring the
   * catalog type was written. See DERBY-2917 for details.
   *
   * @throws SQLException
   */
  public void testRoutineParameters() throws SQLException {

    switch (getPhase()) {
      case PH_CREATE:
        break;

      case PH_SOFT_UPGRADE:
        Statement s = createStatement();
        s.execute(
            "CREATE FUNCTION TYPES_10_4"
                + "(A INTEGER) RETURNS CHAR(10) "
                + "LANGUAGE JAVA "
                + "PARAMETER STYLE JAVA "
                + "NO SQL "
                + "EXTERNAL NAME 'java.lang.Integer.toHexString'");
        // fall through to test it

      case PH_HARD_UPGRADE:
      case PH_POST_SOFT_UPGRADE:
        PreparedStatement ps = prepareStatement("VALUES TYPES_10_4(?)");
        ps.setInt(1, 48879);
        // Don't use the single value check method here
        // because we want to check the returned value
        // was converted to its correct type of CHAR(10)
        // (so no trimming of values)
        JDBC.assertFullResultSet(ps.executeQuery(), new Object[][] {{"beef      "}}, false);
        break;
    }
  }
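  /**
   * Fills the table {@code t} on the given server by connecting through a client data source and
   * inserting {@code _noTuplesToInsert} rows starting at {@code startVal}, committing every 10000
   * rows and finally calling {@code _verify}.
   */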
  void _fillTableOnServer(
      String serverHost, int serverPort, String dbPath, int startVal, int _noTuplesToInsert)
      throws Exception {
    ClientDataSourceInterface ds;

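    // Pick the JNDI-capable client data source when the VM supports JNDI;
    // otherwise fall back to the basic client data source, which does not require JNDI.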
    if (JDBC.vmSupportsJNDI()) {
      ds =
          (ClientDataSourceInterface)
              Class.forName("org.apache.derby.jdbc.ClientDataSource").newInstance();
    } else {
      ds =
          (ClientDataSourceInterface)
              Class.forName("org.apache.derby.jdbc.BasicClientDataSource40").newInstance();
    }

    ds.setDatabaseName(dbPath);
    ds.setServerName(serverHost);
    ds.setPortNumber(serverPort);
    ds.setConnectionAttributes(useEncryption(false));
    Connection conn = ds.getConnection();

    PreparedStatement ps = conn.prepareStatement("insert into t values (?,?,?)");
    for (int i = 0; i < _noTuplesToInsert; i++) {
      ps.setInt(1, (i + startVal));
      ps.setString(2, "dilldall" + (i + startVal));
      ps.setInt(3, (i + startVal) % (_noTuplesToInsert / 10));
      ps.execute();
      if ((i % 10000) == 0) conn.commit();
    }

    _verify(conn, startVal + _noTuplesToInsert);

    conn.close();
  }
  /** Verify that statements are dependent on specified index. */
  public void testDependenceOnIndex() throws SQLException {
    PreparedStatement ps =
        prepareStatement("select * from t1 " + "--derby-properties index = t1_c1");

    JDBC.assertFullResultSet(
        ps.executeQuery(),
        new String[][] {
          {"1", "1", "1"},
          {"2", "2", "2"},
          {"3", "3", "3"},
          {"4", "4", "4"},
        });

    Statement st = createStatement();
    st.executeUpdate("drop index t1_c1");

    assertStatementError("42Y46", ps);

    ps.close();

    // Re-create the index to avoid an exception when it is dropped in tearDown().
    st.executeUpdate("create index t1_c1 on t1(c1)");

    st.close();
  }
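  /**
   * Test optimizer overrides in joins: forcing indexes on both sides of a join, fixing the join
   * order with the joinOrder property, and forcing indexes in an outer join.
   */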
  public void testJoin() throws SQLException {
    Statement st = createStatement();

    st.execute("CALL SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1)");
    JDBC.assertFullResultSet(
        st.executeQuery(
            "select 1 from t1 a"
                + " --derby-properties index = t1_c1\n"
                + ",t2 b --derby-properties index = t2_c2"),
        new String[][] {
          {"1"}, {"1"}, {"1"}, {"1"},
          {"1"}, {"1"}, {"1"}, {"1"},
          {"1"}, {"1"}, {"1"}, {"1"},
          {"1"}, {"1"}, {"1"}, {"1"},
        });
    RuntimeStatisticsParser rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1"));
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T2", "T2_C2"));

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select 1 from "
                + " --derby-properties joinOrder=fixed\n"
                + "t1, t2 where t1.c1 = t2.c1"),
        new String[][] {
          {"1"}, {"1"}, {"1"}, {"1"},
        });

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1"
                + " --derby-properties index = t1_c1\n"
                + "left outer join t2 "
                + "--derby-properties index = t2_c2\n"
                + "on t1.c1 = t2.c1"),
        new String[][] {
          {"1", "1", "1", "1", "1", "1"},
          {"2", "2", "2", "2", "2", "2"},
          {"3", "3", "3", "3", "3", "3"},
          {"4", "4", "4", "4", "4", "4"},
        });
    rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1"));
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T2", "T2_C2"));

    st.close();
  }
  /**
   * Test that normalization of negative zero to positive zero works for doubles. In some JVMs this
   * used to give wrong results after runtime optimization. See DERBY-2447 and <a
   * href="http://bugs.sun.com/view_bug.do?bug_id=6833879">CR6833879</a> in Sun's bug database.
   */
  public void testNegativeZeroDoubleJvmBug() throws SQLException {
    PreparedStatement ps = prepareStatement("values -cast(? as double)");
    ps.setDouble(1, 0.0d);
    // Execute the statement many times so that the JVM is likely to
    // produce native, optimized code.
    for (int i = 0; i < 7000; i++) {
      JDBC.assertSingleValueResultSet(ps.executeQuery(), "0.0");
    }
  }
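  /**
   * Test null as an optimizer property value: index = null forces a table scan, constraint = null
   * is accepted, and joinStrategy = null is rejected with an error.
   */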
  public void testNullValue() throws SQLException {
    Statement st = createStatement();

    st.execute("CALL SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1)");

    JDBC.assertFullResultSet(
        st.executeQuery("select * from t1 " + "--derby-properties index = null"), FULL_TABLE);
    RuntimeStatisticsParser rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue("force table scan", rtsp.usedTableScan());

    JDBC.assertFullResultSet(
        st.executeQuery("select * from t1 " + "--derby-properties constraint = null"), FULL_TABLE);

    assertStatementError(
        "42Y56", st, "select * from t1 " + "--derby-properties joinStrategy = null");

    st.close();
  }
  /**
   * Tests that the cache throws out the least frequently used statement when it reaches its maximum
   * capacity, and that the thrown out statement is closed in the process.
   *
   * <p>Note: This test assumes things about the replacement policy.
   *
   * @throws SQLException if a JDBC operation fails
   */
  public void testEvictionFromCache() throws SQLException {
    // Initial setup.
    JDBCStatementCache cache = new JDBCStatementCache(2);
    final String sql1 = "values 1";
    final String sql2 = "values 2";
    final String sql3 = "values 3";
    // Create three physical prepared statements.
    java.sql.PreparedStatement ps1 = prepareStatement(sql1);
    java.sql.PreparedStatement ps2 = prepareStatement(sql2);
    java.sql.PreparedStatement ps3 = prepareStatement(sql3);
    // Insert the first two physical statements, then get their logical wrappers.
    StatementKey stmtKey1 = insertStatementIntoCache(cache, ps1, sql1);
    StatementKey stmtKey2 = insertStatementIntoCache(cache, ps2, sql2);
    LogicalStatementEntity logic1 = createLogicalStatementEntity(sql1, false, cache);
    LogicalStatementEntity logic2 = createLogicalStatementEntity(sql2, false, cache);
    // Insert the last physical statement and get the logical wrapper.
    StatementKey stmtKey3 = insertStatementIntoCache(cache, ps3, sql3);
    LogicalStatementEntity logic3 = createLogicalStatementEntity(sql3, false, cache);
    assertSame(ps1, logic1.getPhysPs());
    assertSame(ps2, logic2.getPhysPs());
    assertSame(ps3, logic3.getPhysPs());

    // Close the first two logical statements, putting them back into the cache.
    logic1.close();
    logic2.close();
    // Assert both of the statements are open.
    JDBC.assertSingleValueResultSet(ps1.executeQuery(), "1");
    JDBC.assertSingleValueResultSet(ps2.executeQuery(), "2");
    // Close the third statement. It should be cached, but since the cache
    // will exceed its maximum capacity, the first statement will be thrown
    // out and it should be closed in the process.
    logic3.close();
    JDBC.assertSingleValueResultSet(ps3.executeQuery(), "3");
    assertNull("ps1 still in the cache", cache.getCached(stmtKey1));
    try {
      ps1.executeQuery();
      fail("ps1 should have been closed by the cache");
    } catch (SQLException sqle) {
      assertSQLState("XJ012", sqle);
    }
    // Make sure the right statements are returned from the cache.
    assertSame(ps2, cache.getCached(stmtKey2));
    assertSame(ps3, cache.getCached(stmtKey3));
  }
  /**
   * Base suite of tests that will run in both embedded and client.
   *
   * @param name Name for the suite.
   */
  private static TestSuite baseSuite(String name) {
    TestSuite suite = new TestSuite(name);

    suite.addTestSuite(UpdateXXXTest.class);

    // requires java.math.BigDecimal
    if (JDBC.vmSupportsJDBC3()) suite.addTest(new UpdateXXXTest("jdbc2testUpdateBigDecimal"));

    return suite;
  }
  /** Test an index that includes the columns in the FOR UPDATE OF list. */
  public void testPropertyForUpdate() throws SQLException {
    Statement st = createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);

    JDBC.assertFullResultSet(
        st.executeQuery("select * from t1 " + "--derby-properties index = t1_c1\n" + "for update"),
        FULL_TABLE);

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 exposedname " + "--derby-properties index = t1_c1\n" + "for update"),
        FULL_TABLE);

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 " + "--derby-properties index = t1_c1\n" + "for update of c2, c1"),
        FULL_TABLE);

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 exposedname "
                + "--derby-properties index = t1_c1\n"
                + "for update of c2, c1"),
        FULL_TABLE);

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 " + "--derby-properties constraint = cons1\n" + "for update"),
        FULL_TABLE);

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 exposedname "
                + "--derby-properties constraint = cons1\n"
                + "for update"),
        FULL_TABLE);

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 "
                + "--derby-properties constraint = cons1\n"
                + "for update of c2, c1"),
        FULL_TABLE);

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 exposedname "
                + "--derby-properties constraint = cons1\n"
                + "for update of c2, c1"),
        FULL_TABLE);

    st.close();
  }
  /** Test case insensitivity, spelling sensitivity, and delimited index identifiers. */
  public void testSpell() throws SQLException {
    Statement st = createStatement();

    st.execute("CALL SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1)");

    // the token derby-properties is case insensitive.
    JDBC.assertFullResultSet(
        st.executeQuery("select * from t1" + " --DeRbY-pRoPeRtIeS index = t1_c1"), FULL_TABLE);
    RuntimeStatisticsParser rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1"));

    // Misspell derby-properties and make sure it gets treated
    // as a regular comment rather than an optimizer override.
    JDBC.assertFullResultSet(
        st.executeQuery("select * from t1 " + " --DeRbY-pRoPeRtIeAAAA index = t1_c1"), FULL_TABLE);
    rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(
        "not using t1_c1, but what derby thinks is best index.",
        rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1C2C3"));

    // "--DeRbY-pRoPeRtIeSAAAA index = t1_c1" is
    // treated as "--DeRbY-pRoPeRtIeS AAAA index = t1_c1"
    assertStatementError(
        "42Y44", st, "select * from t1 " + " --DeRbY-pRoPeRtIeSAAAA index = t1_c1");

    // -- force index, delimited identifier
    JDBC.assertFullResultSet(
        st.executeQuery("select * from t1 " + "--derby-properties index = \"t1_c2c1\""),
        FULL_TABLE);
    rtsp = SQLUtilities.getRuntimeStatisticsParser(st);
    assertTrue(rtsp.usedSpecificIndexForIndexScan("T1", "t1_c2c1"));

    // If the mixed-case spelling were not treated as an optimizer
    // override, the following test would fail.
    assertStatementError(
        "42Y46", st, "select * from t1 " + " --DeRbY-pRoPeRtIeS index = t1_notexisting");

    st.close();
  }
  /**
   * Creates a JUnit test suite containing all the tests (subsuites) in this package. The number of
   * tests included may depend on the environment in which this method is run.
   *
   * @return A test suite containing all tests in this package
   */
  public static Test suite() {

    TestSuite suite = new TestSuite("management");

    if (JDBC.vmSupportsJMX()) {
      suite.addTest(JMXTest.suite());
      suite.addTest(ManagementMBeanTest.suite());
      suite.addTest(InactiveManagementMBeanTest.suite());
      suite.addTest(VersionMBeanTest.suite());
      suite.addTest(JDBCMBeanTest.suite());
      suite.addTest(NetworkServerMBeanTest.suite());
      suite.addTest(CustomMBeanServerBuilderTest.suite());
    }

    return suite;
  }
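  /**
   * Test a routine whose DECIMAL(7,2) parameter and return value are mapped to
   * java.math.BigDecimal (per the routine name), checking that 12345.67 round-trips unchanged.
   */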
  public void test_decimal_BigDecimal_BigDecimal() throws Exception {
    //
    // On small device platforms, this raises an exception in the byte-code
    // compiler. See DERBY-3697.
    //
    if (JDBC.vmSupportsJSR169()) {
      return;
    }

    declareAndRunFunction(
        "decimal_BigDecimal_BigDecimal",
        "decimal( 7, 2 )",
        new String[] {"decimal( 7, 2 )"},
        "12345.67",
        "12345.67");
  }
  /** Verify that statements are dependent on specified constraint. */
  public void testDependenceOnConstraint() throws SQLException {
    PreparedStatement ps =
        prepareStatement("select * from t1 " + "--derby-properties constraint = cons1");

    JDBC.assertFullResultSet(ps.executeQuery(), FULL_TABLE);

    Statement st = createStatement();
    st.executeUpdate("alter table t1 drop constraint cons1");

    assertStatementError("42Y48", ps);

    // add cons1 to restore the test environment.
    st.executeUpdate("alter table t1 " + "add constraint cons1 primary key(c1, c2)");

    ps.close();

    st.close();
  }
  /**
   * Asserts that when the physical statement is to be cached, closing the logical statement closes
   * the logical one but not the physical one.
   *
   * @throws SQLException if a JDBC operation fails
   */
  public void testCloseWhenStatementShallBeCached() throws SQLException {
    // Initial setup.
    final String sql = "values 9";
    java.sql.PreparedStatement ps = prepareStatement(sql);
    JDBCStatementCache cache = new JDBCStatementCache(10);
    StatementKey stmtKey = insertStatementIntoCache(cache, ps, sql);
    LogicalStatementEntity logic = createLogicalStatementEntity(sql, false, cache);
    assertSame(ps, logic.getPhysPs());
    assertFalse(logic.isLogicalEntityClosed());

    // Close the statement, it should go into the cache.
    logic.close();
    assertTrue(logic.isLogicalEntityClosed());
    // Use the physical statement.
    java.sql.ResultSet rs = ps.executeQuery();
    JDBC.assertSingleValueResultSet(rs, "9");
    // Get the statement from the cache.
    assertSame(ps, cache.getCached(stmtKey));
  }
  /**
   * Tests that a statement equal to one in the cache is not cached when closing the logical
   * statement, and that the physical statement is closed.
   *
   * @throws SQLException if a JDBC operation fails
   */
  public void testCloseOnDuplicateStatement() throws SQLException {
    // Initial setup.
    final String sql = "values 7";
    java.sql.PreparedStatement ps = prepareStatement(sql);
    JDBCStatementCache cache = new JDBCStatementCache(10);
    StatementKey stmtKey = insertStatementIntoCache(cache, ps, sql);
    LogicalStatementEntity logic = createLogicalStatementEntity(sql, false, cache);
    assertSame(ps, logic.getPhysPs());
    assertFalse(logic.isLogicalEntityClosed());

    // Put a statement into the cache.
    // assertTrue(cache.cacheStatement(stmtKey, ps));
    // Create a second statement, equal to the first.
    java.sql.PreparedStatement psDupe = prepareStatement(sql);
    insertStatementIntoCache(cache, psDupe, sql);
    LogicalStatementEntity logicDupe = createLogicalStatementEntity(sql, false, cache);
    // Close the first logical entry, to put the physical statement back
    // into the cache.
    logic.close();
    // When we ask the logical entity to close the statement now, the
    // underlying physical prepared statement should actually be closed.
    logicDupe.close();
    assertTrue(logicDupe.isLogicalEntityClosed());
    // Since we are possibly running in a pre-JDBC 4 environment, try to do
    // something to provoke an exception.
    try {
      psDupe.execute();
      fail("Statement should have been closed and throw an exception");
    } catch (SQLException sqle) {
      assertSQLState("XJ012", sqle);
    }

    // The cached statement should still be open.
    java.sql.PreparedStatement psCached = cache.getCached(stmtKey);
    assertSame(ps, psCached);
    java.sql.ResultSet rs = psCached.executeQuery();
    JDBC.assertSingleValueResultSet(rs, "7");
  }
  /**
   * Check that even though the current schema is set to a user schema, metadata queries are run
   * with SYS as the compilation schema. See DERBY-2946. Test added in 10.4.
   *
   * @throws SQLException
   */
  public void testMetaDataQueryRunInSYScompilationSchema() throws SQLException {
    // This test is for databases with territory-based collation. That
    // feature was added in the 10.3 codeline, so there is no point in
    // doing any testing with pre-10.3 databases.
    if (!oldAtLeast(10, 3)) return;

    DataSource ds = JDBCDataSource.getDataSourceLogical("COLLATED_DB_10_3");

    switch (getPhase()) {
      case PH_CREATE:
        // Create the database if it was not already created. Note the
        // JDBC URL attributes.
        JDBCDataSource.setBeanProperty(
            ds, "ConnectionAttributes", "create=true;territory=no;collation=TERRITORY_BASED");
        ds.getConnection().close();
        break;

      case PH_SOFT_UPGRADE:
      case PH_POST_SOFT_UPGRADE:
      case PH_HARD_UPGRADE:
        Connection con = ds.getConnection();
        // First set the current schema to a user schema, then run a
        // metadata query and make sure that it runs fine. If it does (which
        // is the expected behavior), it means the metadata query is being
        // run with SYS as the compilation schema rather than the current
        // schema, which is APP.
        Statement s = con.createStatement();
        s.execute("SET SCHEMA APP");

        DatabaseMetaData dmd = con.getMetaData();
        ResultSet rs = dmd.getTables(null, "APP", null, null);
        JDBC.assertDrainResults(rs);
        s.close();
        break;
    }
  }
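  /** Test forcing the nested loop join strategy via the joinStrategy property on a self-join. */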
  public void testNestedLoopJoinStrategy() throws SQLException {
    Statement st = createStatement();

    st.execute("CALL SYSCS_UTIL.SYSCS_SET_RUNTIMESTATISTICS(1)");

    JDBC.assertFullResultSet(
        st.executeQuery(
            "select * from t1 a, t1 b "
                + "--derby-properties joinStrategy = nestedloop\n"
                + "where a.c1 = b.c1"),
        new String[][] {
          {"1", "1", "1", "1", "1", "1"},
          {"2", "2", "2", "2", "2", "2"},
          {"3", "3", "3", "3", "3", "3"},
          {"4", "4", "4", "4", "4", "4"},
        });
    //        RuntimeStatisticsParser rtsp =
    //            SQLUtilities.getRuntimeStatisticsParser(st);
    //        assertTrue(rtsp.usedHashJoin());
    //        assertTrue("not using t1_c1, but what derby thinks is best index.",
    //                rtsp.usedSpecificIndexForIndexScan("T1", "T1_C1C2C3"));

    st.close();
  }
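  /**
   * Tests legal and illegal forms of the CREATE DISKSTORE DDL statement, checking the expected
   * SQLSTATE for each failing form.
   */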
  public void testCreateDiskStoreDDLUT() throws Exception {
    // Test various legal and illegal forms of CREATE DISKSTORE.
    // Catch exceptions from illegal syntax.
    // Tests that are not yet fixed are marked FIXME.

    // Array of DDL text to execute and sqlstates to expect
    Object[][] CreateDiskStoreUT = {
      {"CREATE DISKSTORE \"GFXD-DD-DISKSTORE\"", "X0Y68"},
      {"CREATE DISKSTORE \"GFXD-DEFAULT-DISKSTORE\"", "X0Y68"},
      {"CREATE DISKSTORE DISKSTORE", null},
      {"CREATE DISKSTORE MYSTORE1", null},
      {"CREATE DISKSTORE MYSTORE1", "X0Y68"},
      {"CREATE DISKSTORE NULL", "42X01"},
      {"CREATE DISKSTORE ''", "42X01"},
      {"CREATE DISKSTORE \"*\"", "0A000"},
      {"CREATE DISKSTORE SYS.MYDISKSTORE", "42X01"},
      {"CREATE DISKSTORE QUEUESIZE QUEUESIZE 5", null},
      {"CREATE DISKSTORE QUEUESIZE2 QUEUESIZE 5 QUEUESIZE 99", "42Y49"},
      {"CREATE DISKSTORE BADSIZE ('MYDIR' -5000)", "42X44"},
      // FIXME { "CREATE DISKSTORE BADSIZE2 ('MYDIR' 0)", "42X44" },
      {"CREATE DISKSTORE BADSIZE3 ('MYDIR' +infinity)", "42X01"},
      {"CREATE DISKSTORE BADSIZE4 ('MYDIR' x'41')", "22018"},
      {"CREATE DISKSTORE BADSIZE5 ('MYDIR' 2147483648)", "22018"},
      {"CREATE DISKSTORE BADSIZE6 ('MYDIR' 2147483647)", null},
      {"CREATE DISKSTORE ML1 MAXLOGSIZE -5", "42X44"},
      // FIXME { "CREATE DISKSTORE ML1 MAXLOGSIZE 0", "42X44" },
      {"CREATE DISKSTORE ML3 MAXLOGSIZE 2147483647", null},
      {"CREATE DISKSTORE ML4 MAXLOGSIZE 'hello'", "42X01"},
      {"CREATE DISKSTORE AC1 AUTOCOMPACT true", null},
      {"CREATE DISKSTORE AC2 AUTOCOMPACT false", null},
      {"CREATE DISKSTORE AC3 AUTOCOMPACT 5", "42X01"},
      {"CREATE DISKSTORE AC4 AUTOCOMPACT maybe", "42X01"},
      {"CREATE DISKSTORE AC5 AUTOCOMPACT 'true'", "42X01"},
      {"CREATE DISKSTORE AC6 AUTOCOMPACT \"false\"", "42X01"},
      {"CREATE DISKSTORE AFC1 ALLOWFORCECOMPACTION true", null},
      {"CREATE DISKSTORE AFC2 ALLOWFORCECOMPACTION false", null},
      {"CREATE DISKSTORE AFC3 ALLOWFORCECOMPACTION 11", "42X01"},
      {"CREATE DISKSTORE AFC4 ALLOWFORCECOMPACTION maybe", "42X01"},
      {"CREATE DISKSTORE AFC5 ALLOWFORCECOMPACTION 'true'", "42X01"},
      {"CREATE DISKSTORE AFC6 ALLOWFORCECOMPACTION \"false\"", "42X01"},
      {
        "CREATE DISKSTORE AFC7 ALLOWFORCECOMPACTION true AUTOCOMPACT false", null
      }, // should probably be an error
      {"CREATE DISKSTORE CT1 COMPACTIONTHRESHOLD -5", "42X44"},
      {"CREATE DISKSTORE CT2 COMPACTIONTHRESHOLD 0", null},
      {"CREATE DISKSTORE CT3 COMPACTIONTHRESHOLD 100", null},
      {"CREATE DISKSTORE CT4 COMPACTIONTHRESHOLD 55.2", "42X20"},
      {"CREATE DISKSTORE CT5 COMPACTIONTHRESHOLD 175", "0A000"}, // replace with better sqlstate
      {"CREATE DISKSTORE CT6 COMPACTIONTHRESHOLD 'big'", "42X01"},
      {"CREATE DISKSTORE CT7 COMPACTIONTHRESHOLD zero", "42X01"},
      {
        "CREATE DISKSTORE CT8 COMPACTIONTHRESHOLD 50 AUTOCOMPACT false", null
      }, // should probably be an error
      {
        "CREATE DISKSTORE CT9 COMPACTIONTHRESHOLD 50 AUTOCOMPACT false ALLOWFORCECOMPACTION true",
        null
      }, // should probably be an error
      {"CREATE DISKSTORE TI1 TIMEINTERVAL -5", "42X44"},
      {"CREATE DISKSTORE TI2 TIMEINTERVAL 0", null},
      {"CREATE DISKSTORE TI3 TIMEINTERVAL 2147473648", null},
      {"CREATE DISKSTORE TI4 TIMEINTERVAL '4'", "42X01"},
      {"CREATE DISKSTORE TI5 TIMEINTERVAL five", "42X01"},
      {"CREATE DISKSTORE WB1 WRITEBUFFERSIZE -5", "42X44"},
      {"CREATE DISKSTORE WB2 WRITEBUFFERSIZE 0", null},
      {"CREATE DISKSTORE WB3 WRITEBUFFERSIZE 2147473648", null},
      {"CREATE DISKSTORE WB4 WRITEBUFFERSIZE 'awholelot'", "42X01"},
      {"CREATE DISKSTORE WB5 WRITEBUFFERSIZE many", "42X01"},
      {"CREATE DISKSTORE QS1 QUEUESIZE -5", "42X44"},
      {"CREATE DISKSTORE QS2 QUEUESIZE 0", null},
      {"CREATE DISKSTORE QS3 QUEUESIZE 2147473648", null},
      {"CREATE DISKSTORE QS4 QUEUESIZE '10'", "42X01"},
      {"CREATE DISKSTORE QS5 QUEUESIZE fifteen", "42X01"},
      {"CREATE DISKSTORE DIRTEST1 ('D1', 'D1')", "X0Z19"},
      {"CREATE DISKSTORE DIRTEST2 ('../D1', '../D1')", "X0Z19"},
      // FIXME { "CREATE DISKSTORE DIRTEST3 ('SUBDIR1', 'SUBDIR2', 'SUBDIR3')", "42X01" },
      // FIXME { "CREATE DISKSTORE DIRTEST4 ('/dev/nul')", "42X01" },   // or some other unwritable
      // directory
      {"CREATE DISKSTORE DIRTEST5 ('')", "0A000"},
      // FIXME { "CREATE DISKSTORE DIRTEST6 ('*')", "0A000" },
      // FIXME { "CREATE DISKSTORE DIRTEST7 ('?')", "0A000" },    // or any other illegal character
      {"CREATE DISKSTORE DIRTEST8 'DIR1' 700 TIMEINTERVAL 1000 'DIR2' 1000", null},
      {"CREATE DISKSTORE DIRTEST9 'DIR1' 0", "42X01"},
      {
        "CREATE DISKSTORE EVERYTHING1 'DIR1' 700 TIMEINTERVAL 1000 AUTOCOMPACT false QUEUESIZE 75 WRITEBUFFERSIZE 32767 COMPACTIONTHRESHOLD 25 MAXLOGSIZE 699 ALLOWFORCECOMPACTION false",
        null
      },
      {
        "CREATE DISKSTORE EVERYTHING2 TIMEINTERVAL 999 WRITEBUFFERSIZE 19776 QUEUESIZE 0 AUTOCOMPACT true COMPACTIONTHRESHOLD 55 ALLOWFORCECOMPACTION true MAXLOGSIZE 125000 ('EVERYTHING1' 500, 'EVERYTHING2' 1500, 'EVERYTHING3')",
        "X0Z33"
      },
      {
        "CREATE DISKSTORE EVERYTHING2 TIMEINTERVAL 999 WRITEBUFFERSIZE 19776 QUEUESIZE 0 AUTOCOMPACT true COMPACTIONTHRESHOLD 55 ALLOWFORCECOMPACTION true MAXLOGSIZE 500 ('EVERYTHING1' 500, 'EVERYTHING2' 1500, 'EVERYTHING3')",
        null
      }
    };

    Connection conn = TestUtil.getConnection();
    Statement stmt = conn.createStatement();
    // Go through the array: execute each String[0] and check the expected SQLSTATE [1].
    // This will fail on the first statement that succeeds where it shouldn't
    // or that throws an unexpected exception.
    JDBC.SQLUnitTestHelper(stmt, CreateDiskStoreUT);
    // TODO : verify columns in catalog
  }
  // This test is the as-is LangScript conversion, without any partitioning clauses
  public void testLangScript_SchemasTestNoPartitioning() throws Exception {
    // This is a JUnit conversion of the Derby Lang Schemas.sql script
    // without any GemFireXD extensions

    // Catch exceptions from illegal syntax.
    // Tests that are not yet fixed are marked FIXME.

    // Array of SQL text to execute and the SQLSTATEs to expect.
    // The first object is a String, the second is either
    // 1) null - the statement returns no rows and throws no exception
    // 2) a String - the statement throws an exception with the expected SQLSTATE
    // 3) a String array - the statement returns rows which must match the given
    //    result set (unordered); an empty result set is represented by a [0][0] array
    Object[][] Script_SchemasUT = {
      {"create table myschem.t(c int)", null},
      {"insert into t values (1)", "42X05"},
      {"insert into blah.t values (2)", "42Y07"},
      {"insert into blah.blah.t values (3)", "42X01"},
      {"insert into blah.blah.blah.t values (3)", "42X01"},
      {"create table mycat.myschem.s(c int)", "42X01"},
      {"create table myworld.mycat.myschem.s(c int)", "42X01"},
      {" create table myschem.s(c int)", null},
      {"insert into s values (1)", "42X05"},
      {"insert into honk.s values (2)", "42Y07"},
      {"insert into honk.blat.s values (3)", "42X01"},
      {"insert into loud.honk.blat.s values (4)", "42X01"},
      {"drop table xyzzy.t", "42Y07"},
      {"drop table goodness.gosh.s", "42X01"},
      {"drop table gosh.s", "42Y07"},
      {"create table mytab (i int)", null},
      {"create table APP.mytab2 (i int)", null},
      {"insert into mytab values 1,2,3", null},
      {"insert into APP.mytab2 values 1,2,3", null},
      {"select i, mytab.i from mytab", new String[][] {{"1", "1"}, {"2", "2"}, {"3", "3"}}},
      {"select APP.mytab2.i from APP.mytab2", new String[][] {{"1"}, {"2"}, {"3"}}},
      {"select APP.mytab2.i from mytab2", new String[][] {{"1"}, {"2"}, {"3"}}},
      {"select mytab2.i from APP.mytab2", new String[][] {{"1"}, {"2"}, {"3"}}},
      {"select m.i from APP.mytab2 m", new String[][] {{"1"}, {"2"}, {"3"}}},
      {"select nocatalogs.APP.mytab.i from mytab2", "42X04"},
      {"drop table mytab", null},
      {"drop table APP.mytab2", null},
      {"create schema app", "X0Y68"},
      {"create schema sys", "42939"},
      {"drop schema does_not_exist RESTRICT", "42Y07"},
      {"create schema app", "X0Y68"},
      {"create schema APP", "X0Y68"},
      {"create schema SYS", "42939"},
      {"create schema sysibm", "42939"},
      {"create schema syscat", "42939"},
      {"create schema sysfun", "42939"},
      {"create schema sysproc", "42939"},
      {"create schema sysstat", "42939"},
      {"create schema syscs_diag", "42939"},
      {"create schema syscs_util", "42939"},
      {"create schema nullid", "X0Y68"},
      {"create schema sqlj", "X0Y68"},
      {"create table syscat.foo1 (a int)", "42X62"},
      {"create table sysfun.foo2 (a int)", "42X62"},
      {"create table sysproc.foo3 (a int)", "42X62"},
      {"create table sysstat.foo4 (a int)", "42X62"},
      {"create table syscs_diag.foo6 (a int)", "42X62"},
      {"create table nullid.foo7 (a int)", "42X62"},
      {"create table sysibm.foo8 (a int)", "42X62"},
      {"create table sqlj.foo8 (a int)", "42X62"},
      {"create table syscs_util.foo9 (a int)", "42X62"},
      {"create table SYSCAT.foo1 (a int)", "42X62"},
      {"create table SYSFUN.foo2 (a int)", "42X62"},
      {"create table SYSPROC.foo3 (a int)", "42X62"},
      {"create table SYSSTAT.foo4 (a int)", "42X62"},
      {"create table SYSCS_DIAG.foo6 (a int)", "42X62"},
      {"create table SYSIBM.foo8 (a int)", "42X62"},
      {"create table SQLJ.foo8 (a int)", "42X62"},
      {"create table SYSCS_UTIL.foo9 (a int)", "42X62"},
      {
        "drop schema app RESTRICT", null
      }, // GemFireXD allows dropping default APP schema, Derby did not!
      {"drop schema APP RESTRICT", "42Y07"},
      {"drop schema sys RESTRICT", "42Y67"},
      {"drop schema SYS RESTRICT", "42Y67"},
      {"drop schema sysibm RESTRICT", "42Y67"},
      {"drop schema syscat RESTRICT", "42Y67"},
      {"drop schema sysfun RESTRICT", "42Y67"},
      {"drop schema sysproc RESTRICT", "42Y67"},
      {"drop schema sysstat RESTRICT", "42Y67"},
      {"drop schema syscs_diag RESTRICT", "42Y67"},
      {"drop schema syscs_util RESTRICT", "42Y67"},
      {"drop schema nullid RESTRICT", "42Y67"},
      {"drop schema sqlj RESTRICT", "42Y67"},
      {"create schema app", null},
      {"set schema app", null},
      {"create table test (a int)", null},
      {"set schema syscat", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema sysfun", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema sysproc", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema sysstat", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema sysstat", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema syscs_diag", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema syscs_util", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema nullid", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema sysibm", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema sqlj", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSCAT", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSFUN", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSPROC", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSSTAT", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSSTAT", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSCS_DIAG", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSCS_UTIL", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema NULLID", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SYSIBM", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema SQLJ", null},
      {"create table foo1 (a int)", "42X62"},
      {"create view foo1 as select * from app.test", "42X62"},
      {"set schema app", null},
      {"create table t1 (c1 int)", null},
      {
        "create trigger sysblah.trig1 after update of c1 on t1 for each row insert into t1 values 1",
        "42X62"
      },
      {
        "create procedure sysblah.dummy() language java external name 'NotReallyThere.NoMethod' parameter style java",
        "42X62"
      },
      {"drop table t1", null},
      {"create schema test", null},
      {"create schema test", "X0Y68"},
      {
        "select schemaname, authorizationid from sys.sysschemas 	where CAST(schemaname AS VARCHAR(128)) = 'TEST'",
        new String[][] {{"TEST", "APP"}}
      },
      {"set schema test", null},
      {"create table sampletab (c1 int constraint st_c1 check (c1 > 1), c2 char(20))", null},
      {"insert into sampletab values (1,'in schema: TEST')", "23513"},
      {"insert into sampletab values (2,'in schema: TEST')", null},
      {
        "select schemaname, tablename, descriptor from sys.sysschemas s, sys.sysconglomerates c , sys.systables t where CAST(t.tablename AS VARCHAR(128)) = 'SAMPLETAB' and s.schemaid = c.schemaid and c.tableid = t.tableid",
        new String[][] {{"TEST", "SAMPLETAB", null}}
      },
      {"create index ixsampletab on sampletab(c1)", null},
      {"create index ix2sampletab on test.sampletab(c1)", null},
      {"create view vsampletab as select * from sampletab", null},
      {"create view v2sampletab as select * from test.sampletab", null},
      {"set schema APP", null},
      {"create table sampletab (c1 int constraint st_c1 check(c1 > 1), c2 char(20))", null},
      {"insert into sampletab values (2,'in schema: APP')", null},
      {
        "select schemaname, tablename, descriptor as descr from sys.sysschemas s, sys.sysconglomerates c , sys.systables t where CAST(t.tablename AS VARCHAR(128)) = 'SAMPLETAB' and s.schemaid = c.schemaid and c.tableid = t.tableid order by schemaname, tablename",
        new String[][] {
          {"APP", "SAMPLETAB", null},
          {"TEST", "SAMPLETAB", "LOCALSORTEDMAP (1)"},
          {"TEST", "SAMPLETAB", null}
        }
      }, // GEMFIREXD uses sortedmap, not btree indexes
      {"select * from sampletab", new String[][] {{"2", "in schema: APP"}}},
      {"select * from test.sampletab", new String[][] {{"2", "in schema: TEST"}}},
      {"drop schema test RESTRICT", "X0Y54"},
      {"drop view test.vsampletab", null},
      {"drop view test.v2sampletab", null},
      {"drop index test.ixsampletab", null},
      {"drop index test.ix2sampletab", "42X65"},
      {"drop table sampletab", null},
      {"drop table test.sampletab", null},
      {"drop schema test RESTRICT", null},
      {"create schema x", null},
      {"set schema x", null},
      {"create view vx as select * from sys.sysschemas", null},
      {"drop schema x RESTRICT", "X0Y54"},
      {"drop view x.vx", null},
      {"create table x (x int)", null},
      {"drop schema x restrict", "X0Y54"},
      {"drop table x.x", null},
      {"drop schema x cascade", "42X01"},
      {"set schema app", null},
      {"drop schema x restrict", null},
      {"create schema test", null},
      {"set schema test", null},
      {"create table s (i int, s smallint, c char(30), vc char(30))", null},
      {"create table t (i int, s smallint, c char(30), vc char(30))", null},
      {"create table tt (ii int, ss smallint, cc char(30), vcvc char(30))", null},
      {"create table ttt (iii int, sss smallint, ccc char(30), vcvcvc char(30))", null},
      {"insert into s values (null, null, null, null)", null},
      {"insert into s values (0, 0, '0', '0')", null},
      {"insert into s values (1, 1, '1', '1')", null},
      {"insert into t values (null, null, null, null)", null},
      {"insert into t values (0, 0, '0', '0')", null},
      {"insert into t values (1, 1, '1', '1')", null},
      {"insert into t values (1, 1, '1', '1')", null},
      {"insert into tt values (null, null, null, null)", null},
      {"insert into tt values (0, 0, '0', '0')", null},
      {"insert into tt values (1, 1, '1', '1')", null},
      {"insert into tt values (1, 1, '1', '1')", null},
      {"insert into tt values (2, 2, '2', '2')", null},
      {"insert into ttt values (null, null, null, null)", null},
      {"insert into ttt values (11, 11, '11', '11')", null},
      {"insert into ttt values (11, 11, '11', '11')", null},
      {"insert into ttt values (22, 22, '22', '22')", null},
      {"set schema app", null},
      {"insert into test.t values (2, 2, '2', '2')", null},
      {"update test.t set s = 2 where i = 2", null},
      {"update test.t set s = 2 where test.t.i = 2", null},
      {"delete from test.t where i = 1", null},
      {
        "select * from test.t",
        new String[][] {{null, null, null, null}, {"0", "0", "0", "0"}, {"2", "2", "2", "2"}}
      },
      {"insert into test.t values (1, 1, '1', '1')", null},
      {"insert into test.t values (1, 1, '1', '1')", null},
      {
        "select * from test.t t1",
        new String[][] {
          {null, null, null, null},
          {"0", "0", "0", "0"},
          {"2", "2", "2", "2"},
          {"1", "1", "1", "1"},
          {"1", "1", "1", "1"}
        }
      },
      {
        "select * from test.s where exists (select test.s.* from test.t)",
        new String[][] {{null, null, null, null}, {"0", "0", "0", "0"}, {"1", "1", "1", "1"}}
      },
      {
        "DECLARE GLOBAL TEMPORARY TABLE SESSION.ISCT(c21 int) on commit delete rows not logged",
        null
      },
      {
        "select count(*) from SYS.SYSSCHEMAS WHERE CAST(SCHEMANAME AS VARCHAR(128)) = 'SESSION'",
        new String[][] {{"0"}}
      },
      {"drop table SESSION.ISCT", null},
      {"create schema SYSDJD", "42939"},
      {"drop schema SYSDJD restrict", "42Y07"}
    };

    // Do not use partitioning as default, use replicate
    // (Some results are expected to be different with partitioning)
    skipDefaultPartitioned = true;

    Connection conn = TestUtil.getConnection();
    Statement stmt = conn.createStatement();
    // Go through the array: execute each String[0] and check the expected SQLSTATE [1].
    // This will fail on the first statement that succeeds where it shouldn't
    // or that throws an unexpected exception.
    JDBC.SQLUnitTestHelper(stmt, Script_SchemasUT);
  }
  // This test is the as-is LangScript conversion, without any partitioning clauses
  public void testLangScript_SelectTestNoPartitioning() throws Exception {
    // This is a JUnit conversion of the Derby Lang Select.sql script
    // without any GemFireXD extensions

    // Catch exceptions from illegal syntax.
    // Tests that are not yet fixed are marked FIXME.

    // Array of SQL text to execute and the SQLSTATEs to expect.
    // The first object is a String, the second is either
    // 1) null - the statement returns no rows and throws no exception
    // 2) a String - the statement throws an exception with the expected SQLSTATE
    // 3) a String array - the statement returns rows which must match the given
    //    result set (unordered); an empty result set is represented by a [0][0] array
    Object[][] Script_SelectUT = {
      {"create table t(i int, s smallint)", null},
      {"insert into t (i,s) values (1956,475)", null},
      {"select i from t", new String[][] {{"1956"}}},
      {"select i,s from t", new String[][] {{"1956", "475"}}},
      {"select s,i from t", new String[][] {{"475", "1956"}}},
      {
        "select i,i,s,s,i,i from t", new String[][] {{"1956", "1956", "475", "475", "1956", "1956"}}
      },
      {"select 10 from t", new String[][] {{"10"}}},
      {"select t.i from t", new String[][] {{"1956"}}},
      {"select b.i from t b", new String[][] {{"1956"}}},
      {"select *, 10, i from t", "42X01"},
      {"select b.* from t b", new String[][] {{"1956", "475"}}},
      {"select t.* from t", new String[][] {{"1956", "475"}}},
      {"(select * from t)", new String[][] {{"1956", "475"}}}, // ?!
      {"select * from t where i", "42X19"},
      {"select asdf.* from t", "42X10"},
      {"drop table t", null},
      // FIXME
      // This large collection of tables should be partitioned, but the join makes
      // colocation very difficult. Use replicated for now.
      {"CREATE SCHEMA CONTENT", null},
      {
        "CREATE TABLE CONTENT.CONTENT (ID INTEGER NOT NULL, CREATOR VARCHAR(128) NOT NULL, CREATION_DATE DATE NOT NULL, URL VARCHAR(256) NOT NULL, TITLE VARCHAR(128) NOT NULL, DESCRIPTION VARCHAR(512) NOT NULL, HEIGHT INTEGER NOT NULL, WIDTH INTEGER NOT NULL) REPLICATE",
        null
      },
      {"ALTER TABLE CONTENT.CONTENT ADD CONSTRAINT CONTENT_ID PRIMARY KEY (ID)", null},
      {
        "CREATE TABLE CONTENT.STYLE (ID INTEGER NOT NULL,DESCRIPTION VARCHAR(128) NOT NULL) REPLICATE",
        null
      },
      {"ALTER TABLE CONTENT.STYLE ADD CONSTRAINT STYLE_ID PRIMARY KEY (ID)", null},
      {
        "CREATE TABLE CONTENT.CONTENT_STYLE  (CONTENT_ID INTEGER NOT NULL, STYLE_ID INTEGER NOT NULL) REPLICATE",
        null
      },
      {
        "ALTER TABLE CONTENT.CONTENT_STYLE ADD CONSTRAINT CONTENTSTYLEID PRIMARY KEY (CONTENT_ID, STYLE_ID)",
        null
      },
      {
        "CREATE TABLE CONTENT.KEYGEN (KEYVAL INTEGER NOT NULL, KEYNAME VARCHAR(256) NOT NULL) REPLICATE",
        null
      },
      {"ALTER TABLE CONTENT.KEYGEN  ADD CONSTRAINT PK_KEYGEN PRIMARY KEY (KEYNAME)", null},
      {
        "CREATE TABLE CONTENT.RATING  (ID INTEGER NOT NULL,RATING DOUBLE PRECISION NOT NULL,ENTRIES DOUBLE PRECISION NOT NULL) REPLICATE",
        null
      },
      {"ALTER TABLE CONTENT.RATING ADD CONSTRAINT PK_RATING PRIMARY KEY (ID)", null},
      {"INSERT INTO CONTENT.STYLE VALUES (1, 'BIRD')", null},
      {"INSERT INTO CONTENT.STYLE VALUES (2, 'CAR')", null},
      {"INSERT INTO CONTENT.STYLE VALUES (3, 'BUILDING')", null},
      {"INSERT INTO CONTENT.STYLE VALUES (4, 'PERSON')", null},
      {
        "INSERT INTO CONTENT.CONTENT values(1, 'djd', CURRENT DATE, 'http://url.1', 'title1', 'desc1', 100, 100)",
        null
      },
      {
        "INSERT INTO CONTENT.CONTENT values(2, 'djd', CURRENT DATE, 'http://url.2', 'title2', 'desc2', 100, 100)",
        null
      },
      {
        "INSERT INTO CONTENT.CONTENT values(3, 'djd', CURRENT DATE, 'http://url.3', 'title3', 'desc3', 100, 100)",
        null
      },
      {
        "INSERT INTO CONTENT.CONTENT values(4, 'djd', CURRENT DATE, 'http://url.4', 'title4', 'desc4', 100, 100)",
        null
      },
      {
        "INSERT INTO CONTENT.CONTENT values(5, 'djd', CURRENT DATE, 'http://url.5', 'title5', 'desc5', 100, 100)",
        null
      },
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(1,1)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(1,2)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(2,1)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(2,4)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(3,3)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(3,4)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(3,1)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(4,4)", null},
      {"INSERT INTO CONTENT.CONTENT_STYLE VALUES(5,1)", null},
      {"INSERT INTO CONTENT.RATING VALUES(1, 4.5, 1)", null},
      {"INSERT INTO CONTENT.RATING VALUES(2, 4.0, 1)", null},
      {"INSERT INTO CONTENT.RATING VALUES(3, 3.9, 1)", null},
      {"INSERT INTO CONTENT.RATING VALUES(4, 4.1, 1)", null},
      {"INSERT INTO CONTENT.RATING VALUES(5, 4.0, 1)", null},
      {
        "select S.DESCRIPTION, FAV.MAXRATE, C.TITLE, C.URL FROM CONTENT.RATING R, CONTENT.CONTENT C, CONTENT.STYLE S, CONTENT.CONTENT_STYLE CS, (select S.ID, max(rating) from CONTENT.RATING R, CONTENT.CONTENT C, CONTENT.STYLE S, CONTENT.CONTENT_STYLE CS group by S.ID) AS FAV(FID,MAXRATE) where R.ID = C.ID AND C.ID = CS.CONTENT_ID AND CS.STYLE_ID = FAV.FID AND FAV.FID = S.ID AND FAV.MAXRATE = R.RATING",
        new String[][] {
          {"BIRD", "4.5", "title1", "http://url.1"}, {"CAR", "4.5", "title1", "http://url.1"}
        }
      },
      {"drop table content.rating", null},
      {"drop table content.content_style", null},
      {"drop table content.content", null},
      {"drop table content.style", null},
      {"drop table content.keygen", null},
      {"drop schema content restrict", null}
    };

    // Start 1 client and 3 servers, use default partitioning
    startVMs(1, 3);

    Connection conn = TestUtil.getConnection();
    Statement stmt = conn.createStatement();
    // Go through the array: execute each String[0] and check the expected SQLSTATE [1].
    // This will fail on the first statement that succeeds where it shouldn't
    // or that throws an unexpected exception.
    JDBC.SQLUnitTestHelper(stmt, Script_SelectUT);
  }