  /**
   * Wrap the given ResultSetNode in a RowCountNode so that any OFFSET and/or FETCH FIRST clause is
   * enforced on top of the otherwise complete tree.
   *
   * @param resultSet The ResultSetNode to be wrapped
   * @param offset Any OFFSET row count, or null
   * @param fetchFirst Any FETCH FIRST row count, or null
   * @return The new RowCountNode sitting on top of resultSet
   * @exception StandardException Thrown on error
   */
  private ResultSetNode wrapRowCountNode(
      ResultSetNode resultSet, ValueNode offset, ValueNode fetchFirst) throws StandardException {

    ResultSetNode topRS = resultSet;
    ResultColumnList selectRCs = topRS.getResultColumns().copyListAndObjects();
    selectRCs.genVirtualColumnNodes(topRS, topRS.getResultColumns());

    return (RowCountNode)
        getNodeFactory()
            .getNode(
                C_NodeTypes.ROW_COUNT_NODE,
                topRS,
                selectRCs,
                offset,
                fetchFirst,
                getContextManager());
  }
  /**
   * Add any new ResultSetNodes that are necessary to the tree. We wait until after optimization to
   * do this in order to make it easier on the optimizer.
   *
   * @return (Potentially new) head of the ResultSetNode tree.
   * @exception StandardException Thrown on error
   */
  private ResultSetNode addNewNodes() throws StandardException {
    /* Only call addNewNodes() once */
    if (addNewNodesCalled) {
      return this;
    }

    addNewNodesCalled = true;

    ResultSetNode treeTop = this;

    if (orderByList != null) {
      // Generate an order by node on top of the intersect/except
      treeTop =
          (ResultSetNode)
              getNodeFactory()
                  .getNode(
                      C_NodeTypes.ORDER_BY_NODE,
                      treeTop,
                      orderByList,
                      tableProperties,
                      getContextManager());
    }

    if (offset != null || fetchFirst != null) {
      ResultColumnList newRcl = treeTop.getResultColumns().copyListAndObjects();
      newRcl.genVirtualColumnNodes(treeTop, treeTop.getResultColumns());

      treeTop =
          (ResultSetNode)
              getNodeFactory()
                  .getNode(
                      C_NodeTypes.ROW_COUNT_NODE,
                      treeTop,
                      newRcl,
                      offset,
                      fetchFirst,
                      Boolean.valueOf(hasJDBClimitClause),
                      getContextManager());
    }
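
    // For example (rough sketch): for a statement such as
    //   SELECT i FROM t1 INTERSECT SELECT i FROM t2 ORDER BY 1 FETCH FIRST 5 ROWS ONLY
    // treeTop at this point is a RowCountNode sitting on an OrderByNode, which in
    // turn sits on this intersect/except node.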

    return treeTop;
  } // end of addNewNodes
  /**
   * Flatten this FromSubquery into the outer query block. The steps in flattening are:
   *
   * <ul>
   *   <li>Mark all ResultColumns as redundant, so that they are "skipped over" at generate().
   *   <li>Append the wherePredicates to the outer list.
   *   <li>Return the fromList so that the caller will merge the two lists.
   * </ul>
   *
   * RESOLVE - FromSubquerys with subqueries are currently not flattenable. Some of them can be
   * flattened, however. We need to merge the subquery list when we relax this restriction.
   *
   * <p>NOTE: This method returns NULL when flattening RowResultSetNodes (the node for a VALUES
   * clause). The reason is that no reference is left to the RowResultSetNode after flattening is
   * done - the expressions point directly to the ValueNodes in the RowResultSetNode's
   * ResultColumnList.
   *
   * @param rcl The RCL from the outer query
   * @param outerPList PredicateList to append wherePredicates to.
   * @param sql The SubqueryList from the outer query
   * @param gbl The group by list, if any
   * @param havingClause The HAVING clause, if any
   * @return FromList The fromList from the underlying SelectNode.
   * @exception StandardException Thrown on error
   */
  public FromList flatten(
      ResultColumnList rcl,
      PredicateList outerPList,
      SubqueryList sql,
      GroupByList gbl,
      ValueNode havingClause)
      throws StandardException {
    FromList fromList = null;
    SelectNode selectNode;

    resultColumns.setRedundant();

    subquery.getResultColumns().setRedundant();
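
    /* For example (illustrative sketch): when flattening
     *   SELECT x FROM (SELECT a AS x FROM t WHERE a > 1) s
     * the subquery's FromList (containing t) is returned to the caller, the
     * subquery's where predicate (a > 1) is appended to outerPList, and the
     * outer references to s.x are remapped onto the underlying expression a.
     */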

    /*
     ** RESOLVE: Each type of result set should know how to remap itself.
     */
    if (subquery instanceof SelectNode) {
      selectNode = (SelectNode) subquery;
      fromList = selectNode.getFromList();

      // selectNode.getResultColumns().setRedundant();

      if (selectNode.getWherePredicates().size() > 0) {
        outerPList.destructiveAppend(selectNode.getWherePredicates());
      }

      if (selectNode.getWhereSubquerys().size() > 0) {
        sql.destructiveAppend(selectNode.getWhereSubquerys());
      }
    } else if (!(subquery instanceof RowResultSetNode)) {
      if (SanityManager.DEBUG) {
        SanityManager.THROWASSERT(
            "subquery expected to be either a SelectNode or a RowResultSetNode, but is a "
                + subquery.getClass().getName());
      }
    }

    /* Remap all ColumnReferences from the outer query to this node.
     * (We replace those ColumnReferences with clones of the matching
     * expression in the SELECT's RCL.)
     */
    rcl.remapColumnReferencesToExpressions();
    outerPList.remapColumnReferencesToExpressions();
    if (gbl != null) {
      gbl.remapColumnReferencesToExpressions();
    }

    if (havingClause != null) {
      havingClause.remapColumnReferencesToExpressions();
    }

    return fromList;
  }
  /**
   * Bind this CreateTableNode. This means doing any static error checking that can be done before
   * actually creating the base table or declaring the global temporary table. For example,
   * verifying that the TableElementList does not contain any duplicate column names.
   *
   * @exception StandardException Thrown on error
   */
  public void bindStatement() throws StandardException {
    DataDictionary dataDictionary = getDataDictionary();
    int numPrimaryKeys = 0;
    int numCheckConstraints = 0;
    int numReferenceConstraints = 0;
    int numUniqueConstraints = 0;
    int numGenerationClauses = 0;

    SchemaDescriptor sd =
        getSchemaDescriptor(tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE, true);

    if (queryExpression != null) {
      FromList fromList =
          (FromList)
              getNodeFactory()
                  .getNode(
                      C_NodeTypes.FROM_LIST,
                      getNodeFactory().doJoinOrderOptimization(),
                      getContextManager());

      CompilerContext cc = getCompilerContext();
      ProviderList prevAPL = cc.getCurrentAuxiliaryProviderList();
      ProviderList apl = new ProviderList();

      try {
        cc.setCurrentAuxiliaryProviderList(apl);
        cc.pushCurrentPrivType(Authorizer.SELECT_PRIV);

        /* Bind the tables in the queryExpression */
        queryExpression = queryExpression.bindNonVTITables(dataDictionary, fromList);
        queryExpression = queryExpression.bindVTITables(fromList);

        /* Bind the expressions under the resultSet */
        queryExpression.bindExpressions(fromList);

        /* Bind the query expression */
        queryExpression.bindResultColumns(fromList);

        /* Reject any untyped nulls in the RCL */
        /* e.g. CREATE TABLE t1 (x) AS VALUES NULL WITH NO DATA */
        queryExpression.bindUntypedNullsToResultColumns(null);
      } finally {
        cc.popCurrentPrivType();
        cc.setCurrentAuxiliaryProviderList(prevAPL);
      }

      /* If there is an RCL for the table definition then copy the
       * names to the queryExpression's RCL after verifying that
       * they both have the same size.
       */
      ResultColumnList qeRCL = queryExpression.getResultColumns();

      if (resultColumns != null) {
        if (resultColumns.size() != qeRCL.visibleSize()) {
          throw StandardException.newException(
              SQLState.LANG_TABLE_DEFINITION_R_C_L_MISMATCH, getFullName());
        }
        qeRCL.copyResultColumnNames(resultColumns);
      }

      int schemaCollationType = sd.getCollationType();

      /* Create table element list from columns in query expression */
      tableElementList = new TableElementList();
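
      /* For example (illustrative sketch): for
       *   CREATE TABLE t (x, y) AS SELECT a, b FROM s WITH NO DATA
       * the loop below turns each visible column of the query expression
       * into a ColumnDefinitionNode (x and y, typed from a and b).
       */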

      for (int index = 0; index < qeRCL.size(); index++) {
        ResultColumn rc = (ResultColumn) qeRCL.elementAt(index);
        if (rc.isGenerated()) {
          continue;
        }
        /* Raise error if column name is system generated. */
        if (rc.isNameGenerated()) {
          throw StandardException.newException(SQLState.LANG_TABLE_REQUIRES_COLUMN_NAMES);
        }

        DataTypeDescriptor dtd = rc.getExpression().getTypeServices();
        if ((dtd != null) && !dtd.isUserCreatableType()) {
          throw StandardException.newException(
              SQLState.LANG_INVALID_COLUMN_TYPE_CREATE_TABLE,
              dtd.getFullSQLTypeName(),
              rc.getName());
        }
        // DERBY-2879: CREATE TABLE AS <subquery> does not maintain the
        // collation for character types.
        // e.g. in a database with territory-based collation:
        // create table t as select tablename from sys.systables with no data;
        // Derby does not currently support a table's character columns
        // having a collation different from its schema's collation. In a
        // territory-based database, the statement above would give table
        // t's character columns a collation of UCS_BASIC, while t's
        // containing schema has territory-based collation. Since that is
        // not supported, we throw an exception below for such a statement
        // in a territory-based database.
        if (dtd.getTypeId().isStringTypeId() && dtd.getCollationType() != schemaCollationType) {
          throw StandardException.newException(
              SQLState.LANG_CAN_NOT_CREATE_TABLE,
              dtd.getCollationName(),
              DataTypeDescriptor.getCollationName(schemaCollationType));
        }

        ColumnDefinitionNode column =
            (ColumnDefinitionNode)
                getNodeFactory()
                    .getNode(
                        C_NodeTypes.COLUMN_DEFINITION_NODE,
                        rc.getName(),
                        null,
                        rc.getType(),
                        null,
                        getContextManager());
        tableElementList.addTableElement(column);
      }
    } else {
      // Set the collation type and collation derivation of all the
      // character type columns. Their collation type will be the same as
      // the collation of the schema they belong to, and their collation
      // derivation will be "implicit".
      // Earlier we did this in makeConstantAction, but that is a little
      // too late (DERBY-2955).
      // e.g.
      // CREATE TABLE STAFF9 (EMPNAME CHAR(20),
      //  CONSTRAINT STAFF9_EMPNAME CHECK (EMPNAME NOT LIKE 'T%'))
      // For the statement above, when run in a territory-based db, the
      // correct collation must be set in the bind phase of CREATE TABLE so
      // that when LIKE is handled in LikeEscapeOperatorNode, EMPNAME has
      // the correct collation; otherwise an exception is thrown because
      // 'T%' has territory-based collation while EMPNAME has the default
      // collation of UCS_BASIC.
      tableElementList.setCollationTypesOnCharacterStringColumns(
          getSchemaDescriptor(tableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE, true));
    }

    tableElementList.validate(this, dataDictionary, (TableDescriptor) null);

    /* Only 1012 columns allowed per table */
    if (tableElementList.countNumberOfColumns() > Limits.DB2_MAX_COLUMNS_IN_TABLE) {
      throw StandardException.newException(
          SQLState.LANG_TOO_MANY_COLUMNS_IN_TABLE_OR_VIEW,
          String.valueOf(tableElementList.countNumberOfColumns()),
          getRelativeName(),
          String.valueOf(Limits.DB2_MAX_COLUMNS_IN_TABLE));
    }

    numPrimaryKeys = tableElementList.countConstraints(DataDictionary.PRIMARYKEY_CONSTRAINT);

    /* Only 1 primary key allowed per table */
    if (numPrimaryKeys > 1) {
      throw StandardException.newException(
          SQLState.LANG_TOO_MANY_PRIMARY_KEY_CONSTRAINTS, getRelativeName());
    }

    /* Check the validity of all check constraints */
    numCheckConstraints = tableElementList.countConstraints(DataDictionary.CHECK_CONSTRAINT);

    numReferenceConstraints =
        tableElementList.countConstraints(DataDictionary.FOREIGNKEY_CONSTRAINT);

    numUniqueConstraints = tableElementList.countConstraints(DataDictionary.UNIQUE_CONSTRAINT);

    numGenerationClauses = tableElementList.countGenerationClauses();

    // temp tables can't have primary key or check or foreign key or unique constraints defined on
    // them
    if ((tableType == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE)
        && (numPrimaryKeys > 0
            || numCheckConstraints > 0
            || numReferenceConstraints > 0
            || numUniqueConstraints > 0))
      throw StandardException.newException(
          SQLState.LANG_NOT_ALLOWED_FOR_DECLARED_GLOBAL_TEMP_TABLE);

    // Each of these constraints is backed by an index, and a table can never
    // have more than 32767 indexes on it, which is why we perform this check.
    if ((numPrimaryKeys + numReferenceConstraints + numUniqueConstraints)
        > Limits.DB2_MAX_INDEXES_ON_TABLE) {
      throw StandardException.newException(
          SQLState.LANG_TOO_MANY_INDEXES_ON_TABLE,
          String.valueOf(numPrimaryKeys + numReferenceConstraints + numUniqueConstraints),
          getRelativeName(),
          String.valueOf(Limits.DB2_MAX_INDEXES_ON_TABLE));
    }

    if ((numCheckConstraints > 0) || (numGenerationClauses > 0) || (numReferenceConstraints > 0)) {
      /* In order to check the validity of the check constraints and
       * generation clauses
       * we must goober up a FromList containing a single table,
       * the table being created, with an RCL containing the
       * new columns and their types.  This will allow us to
       * bind the constraint definition trees against that
       * FromList.  When doing this, we verify that there are
       * no nodes which can return non-deterministic results.
       */
      FromList fromList = makeFromList(null, tableElementList, true);
      FormatableBitSet generatedColumns = new FormatableBitSet();
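
      /* For example (illustrative sketch): for
       *   CREATE TABLE t (a INT, b INT CHECK (b > a))
       * the check expression "b > a" is bound against this single-table
       * FromList so that its column references resolve to t's new columns.
       */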

      /* Now that we've finally goobered stuff up, bind and validate
       * the check constraints and generation clauses.
       */
      if (numGenerationClauses > 0) {
        tableElementList.bindAndValidateGenerationClauses(sd, fromList, generatedColumns, null);
      }
      if (numCheckConstraints > 0) {
        tableElementList.bindAndValidateCheckConstraints(fromList);
      }
      if (numReferenceConstraints > 0) {
        tableElementList.validateForeignKeysOnGenerationClauses(fromList, generatedColumns);
      }
    }

    if (numPrimaryKeys > 0) {
      tableElementList.validatePrimaryKeyNullability();
    }
  }
  /**
   * This overload variant of optimizeStatement is used by the subclass CursorNode, and also serves
   * as a helper for the no-arg variant.
   *
   * @param offset Any OFFSET row count, or null
   * @param fetchFirst Any FETCH FIRST row count, or null
   * @exception StandardException Thrown on error
   * @see DMLStatementNode#optimizeStatement()
   */
  protected void optimizeStatement(ValueNode offset, ValueNode fetchFirst)
      throws StandardException {
    resultSet = resultSet.preprocess(getCompilerContext().getNumTables(), null, (FromList) null);
    resultSet = resultSet.optimize(getDataDictionary(), null, 1.0d);

    resultSet = resultSet.modifyAccessPaths();

    // Any OFFSET/FETCH FIRST narrowing must be done *after* any rewrite of
    // the query tree (if not, underlying GROUP BY fails), but *before* the
    // final scroll insensitive result node set is added - that one needs
    // to sit on top - so now is the time.
    //
    // This example statement fails if we wrap *before* the optimization
    // above:
    //     select max(a) from t1 group by b fetch first row only
    //
    // A java.sql.ResultSet#previous on a scrollable result set will fail
    // if we don't wrap *after* the ScrollInsensitiveResultSetNode below.
    //
    // We only need to wrap the tree in a RowCountNode if at least one of
    // the clauses is present.

    if (offset != null || fetchFirst != null) {
      resultSet = wrapRowCountNode(resultSet, offset, fetchFirst);
    }
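
    // For example (rough sketch): for a scrollable cursor over
    //   SELECT a FROM t ORDER BY a OFFSET 2 ROWS
    // the final tree is a ScrollInsensitiveResultSetNode (added below) sitting on
    // the RowCountNode just added, which in turn wraps the optimized query tree.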

    /* If this is a cursor, then we need to generate a new
     * ResultSetNode on top of the tree to enable scrolling.
     */
    if (this instanceof CursorNode) {
      ResultColumnList siRCList;
      ResultColumnList childRCList;
      ResultSetNode siChild = resultSet;

      /* We get a shallow copy of the ResultColumnList and its
       * ResultColumns.  (Copy maintains ResultColumn.expression for now.)
       */
      siRCList = resultSet.getResultColumns();
      childRCList = siRCList.copyListAndObjects();
      resultSet.setResultColumns(childRCList);

      /* Replace ResultColumn.expression with new VirtualColumnNodes
       * in the ScrollInsensitiveResultSetNode's ResultColumnList.  (VirtualColumnNodes include
       * pointers to source ResultSetNode, this, and source ResultColumn.)
       */
      siRCList.genVirtualColumnNodes(resultSet, childRCList);
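
      /* Net effect (roughly): each column in siRCList now holds a
       * VirtualColumnNode pointing at the corresponding column in
       * childRCList, which still belongs to the underlying resultSet.
       */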

      /* Finally, we create the new ScrollInsensitiveResultSetNode */
      resultSet =
          (ResultSetNode)
              getNodeFactory()
                  .getNode(
                      C_NodeTypes.SCROLL_INSENSITIVE_RESULT_SET_NODE,
                      resultSet,
                      siRCList,
                      null,
                      getContextManager());
      // Propagate the referenced table map if it's already been created
      if (siChild.getReferencedTableMap() != null) {
        resultSet.setReferencedTableMap((JBitSet) siChild.getReferencedTableMap().clone());
      }
    }
  }
  /**
   * Bind the expressions in this FromSubquery. This means binding the sub-expressions, as well as
   * figuring out what the return type is for each expression.
   *
   * @exception StandardException Thrown on error
   */
  public void bindExpressions(FromList fromListParam) throws StandardException {
    FromList emptyFromList =
        (FromList)
            getNodeFactory()
                .getNode(
                    C_NodeTypes.FROM_LIST,
                    getNodeFactory().doJoinOrderOptimization(),
                    getContextManager());
    ResultColumnList derivedRCL = resultColumns;
    ResultColumnList subqueryRCL;
    FromList nestedFromList;

    /* From subqueries cannot be correlated, so we pass an empty FromList
     * to subquery.bindExpressions() and .bindResultColumns()
     */
    if (orderByList != null) {
      orderByList.pullUpOrderByColumns(subquery);
    }

    nestedFromList = emptyFromList;

    CompilerContext compilerContext = getCompilerContext();

    if (origCompilationSchema != null) {
      // View expansion needs the definition time schema
      compilerContext.pushCompilationSchema(origCompilationSchema);
    }

    try {
      subquery.bindExpressions(nestedFromList);
      subquery.bindResultColumns(nestedFromList);
    } finally {
      if (origCompilationSchema != null) {
        compilerContext.popCompilationSchema();
      }
    }

    if (orderByList != null) {
      orderByList.bindOrderByColumns(subquery);
    }

    bindOffsetFetch(offset, fetchFirst);

    /* NOTE: If the size of the derived column list is less than
     * the size of the subquery's RCL and the derived column list is marked
     * for allowing a size mismatch, then we have a select * view
     * on top of a table that has had columns added to it via alter table.
     * In this case, we trim out the columns that have been added to
     * the table since the view was created.
     */
    subqueryRCL = subquery.getResultColumns();
    if (resultColumns != null
        && resultColumns.getCountMismatchAllowed()
        && resultColumns.size() < subqueryRCL.size()) {
      for (int index = subqueryRCL.size() - 1; index >= resultColumns.size(); index--) {
        subqueryRCL.removeElementAt(index);
      }
    }
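
    /* For example (illustrative sketch): if a view was created as
     *   CREATE VIEW v AS SELECT * FROM t
     * and t later gained a column via ALTER TABLE ... ADD COLUMN, the extra
     * trailing column is trimmed above so that v still exposes only the
     * columns it had when it was created.
     */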

    /*
     * Create RCL based on subquery, adding a level of VCNs.
     */
    ResultColumnList newRcl = subqueryRCL.copyListAndObjects();
    newRcl.genVirtualColumnNodes(subquery, subquery.getResultColumns());
    resultColumns = newRcl;
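
    /* For example (illustrative sketch): for
     *   SELECT x, y FROM (SELECT a, b FROM t) AS s (x, y)
     * the derived column list (x, y) supplies the column names that are
     * propagated onto the new ResultColumnList below.
     */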

    /* Propagate the name info from the derived column list */
    if (derivedRCL != null) {
      resultColumns.propagateDCLInfo(derivedRCL, correlationName);
    }
  }