  /*
   ** Create an SPS that is used by the trigger.
   */
  private SPSDescriptor createSPS(
      LanguageConnectionContext lcc,
      DataDescriptorGenerator ddg,
      DataDictionary dd,
      TransactionController tc,
      UUID triggerId,
      SchemaDescriptor sd,
      UUID spsId,
      UUID compSchemaId,
      String text,
      boolean isWhen,
      TableDescriptor triggerTable)
      throws StandardException {
    if (text == null) {
      return null;
    }

    /*
     ** Note: the format of this string is very important.
     ** Don't change it arbitrarily -- see the SPS code.
     */
    String spsName =
        "TRIGGER"
            + (isWhen ? "WHEN_" : "ACTN_")
            + triggerId
            + "_"
            + triggerTable.getUUID().toString();
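    // For illustration only (hypothetical identifiers): the resulting name has
    // the form TRIGGERACTN_<trigger UUID>_<table UUID> for the trigger action,
    // or TRIGGERWHEN_<trigger UUID>_<table UUID> for a WHEN clause.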

    SPSDescriptor spsd =
        new SPSDescriptor(
            dd,
            spsName,
            (spsId == null) ? dd.getUUIDFactory().createUUID() : spsId,
            sd.getUUID(),
            compSchemaId == null ? lcc.getDefaultSchema().getUUID() : compSchemaId,
            SPSDescriptor.SPS_TYPE_TRIGGER,
            true, // it is valid
            text, // the text
            true); // no defaults

    /*
     ** Prepare the stored prepared statement
     ** and release the activation class -- we
     ** know we aren't going to execute the
     ** statement after creating it, so for now
     ** we are finished.
     */
    spsd.prepareAndRelease(lcc, triggerTable);

    dd.addSPSDescriptor(spsd, tc);

    return spsd;
  }
  /**
   * This is the guts of the Execution-time logic for DROP ROLE.
   *
   * @see org.apache.derby.iapi.sql.execute.ConstantAction#executeConstantAction
   */
  public void executeConstantAction(Activation activation) throws StandardException {
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    TransactionController tc = lcc.getTransactionExecute();

    /*
     ** Inform the data dictionary that we are about to write to it.
     ** There are several calls to data dictionary "get" methods here
     ** that might be done in "read" mode in the data dictionary, but
     ** it seemed safer to do this whole operation in "write" mode.
     **
     ** We tell the data dictionary we're done writing at the end of
     ** the transaction.
     */
    dd.startWriting(lcc);

    RoleGrantDescriptor rdDef = dd.getRoleDefinitionDescriptor(roleName);

    if (rdDef == null) {
      throw StandardException.newException(SQLState.ROLE_INVALID_SPECIFICATION, roleName);
    }

    // When a role is dropped, for every role in its grantee closure, we
    // call the REVOKE_ROLE action. It is used to invalidate dependent
    // objects (constraints, triggers and views).  Note that until
    // DERBY-1632 is fixed, we risk dropping objects not really dependent
    // on this role, but on some other role, just because that role inherits
    // from this one. See also RevokeRoleConstantAction.
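    //
    // For illustration only (hypothetical role names): after
    //   GRANT reader TO writer; GRANT writer TO admin;
    // the grantee closure of "reader" is {reader, writer, admin}, so dropping
    // "reader" sends a REVOKE_ROLE invalidation for each of those roles.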
    RoleClosureIterator rci =
        dd.createRoleClosureIterator(activation.getTransactionController(), roleName, false);

    String role;
    while ((role = rci.next()) != null) {
      RoleGrantDescriptor r = dd.getRoleDefinitionDescriptor(role);

      dd.getDependencyManager().invalidateFor(r, DependencyManager.REVOKE_ROLE, lcc);
    }

    rdDef.drop(lcc);

    /*
     * We dropped a role, now drop all dependents:
     * - role grants to this role
     * - grants of this role to other roles or users
     * - privilege grants to this role
     */
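    // For illustration only (hypothetical names), the three calls below
    // remove, respectively, rows recording
    //   GRANT other_role TO r    -- role grants to the dropped role r
    //   GRANT r TO some_user     -- grants of r to other roles or users
    //   GRANT SELECT ON t TO r   -- privilege grants to r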

    dd.dropRoleGrantsByGrantee(roleName, tc);
    dd.dropRoleGrantsByName(roleName, tc);
    dd.dropAllPermsByGrantee(roleName, tc);
  }
  /**
   * Make a ConstraintDescriptor out of a SYSCONSTRAINTS row
   *
   * @param row a SYSCONSTRAINTS row
   * @param parentTupleDescriptor Subconstraint descriptor with auxiliary info.
   * @param dd dataDictionary
   * @exception StandardException thrown on failure
   */
  public TupleDescriptor buildDescriptor(
      ExecRow row, TupleDescriptor parentTupleDescriptor, DataDictionary dd)
      throws StandardException {
    ConstraintDescriptor constraintDesc = null;

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(
          row.nColumns() == SYSCONSTRAINTS_COLUMN_COUNT,
          "Wrong number of columns for a SYSCONSTRAINTS row");
    }

    DataValueDescriptor col;
    ConglomerateDescriptor conglomDesc;
    DataDescriptorGenerator ddg;
    TableDescriptor td = null;
    int constraintIType = -1;
    int[] keyColumns = null;
    UUID constraintUUID;
    UUID schemaUUID;
    UUID tableUUID;
    UUID referencedConstraintId = null;
    SchemaDescriptor schema;
    String tableUUIDString;
    String constraintName;
    String constraintSType;
    String constraintStateStr;
    boolean constraintEnabled;
    int referenceCount;
    String constraintUUIDString;
    String schemaUUIDString;
    SubConstraintDescriptor scd;

    if (SanityManager.DEBUG) {
      if (!(parentTupleDescriptor instanceof SubConstraintDescriptor)) {
        SanityManager.THROWASSERT(
            "parentTupleDescriptor expected to be instanceof "
                + "SubConstraintDescriptor, not "
                + parentTupleDescriptor.getClass().getName());
      }
    }

    scd = (SubConstraintDescriptor) parentTupleDescriptor;

    ddg = dd.getDataDescriptorGenerator();

    /* 1st column is CONSTRAINTID (UUID - char(36)) */
    col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTID);
    constraintUUIDString = col.getString();
    constraintUUID = getUUIDFactory().recreateUUID(constraintUUIDString);

    /* 2nd column is TABLEID (UUID - char(36)) */
    col = row.getColumn(SYSCONSTRAINTS_TABLEID);
    tableUUIDString = col.getString();
    tableUUID = getUUIDFactory().recreateUUID(tableUUIDString);

    /* Get the TableDescriptor.
     * It may be cached in the SCD,
     * otherwise we need to go to the
     * DD.
     */
    if (scd != null) {
      td = scd.getTableDescriptor();
    }
    if (td == null) {
      td = dd.getTableDescriptor(tableUUID);
    }

    /* 3rd column is NAME (varchar(128)) */
    col = row.getColumn(SYSCONSTRAINTS_CONSTRAINTNAME);
    constraintName = col.getString();

    /* 4th column is TYPE (char(1)) */
    col = row.getColumn(SYSCONSTRAINTS_TYPE);
    constraintSType = col.getString();
    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(constraintSType.length() == 1, "Fourth column type incorrect");
    }

    boolean typeSet = false;
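    /* The 'P', 'U' and 'F' cases below fall through on purpose: all three
     * constraint types are backed by an index, so they share the
     * conglomerate/key-column lookup coded in the 'F' arm; typeSet simply
     * keeps a later case from overwriting the type chosen by an earlier one.
     */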
    switch (constraintSType.charAt(0)) {
      case 'P':
        constraintIType = DataDictionary.PRIMARYKEY_CONSTRAINT;
        typeSet = true;
        // fall through

      case 'U':
        if (!typeSet) {
          constraintIType = DataDictionary.UNIQUE_CONSTRAINT;
          typeSet = true;
        }
        // fall through

      case 'F':
        if (!typeSet) constraintIType = DataDictionary.FOREIGNKEY_CONSTRAINT;
        if (SanityManager.DEBUG) {
          if (!(parentTupleDescriptor instanceof SubKeyConstraintDescriptor)) {
            SanityManager.THROWASSERT(
                "parentTupleDescriptor expected to be instanceof "
                    + "SubKeyConstraintDescriptor, not "
                    + parentTupleDescriptor.getClass().getName());
          }
        }
        conglomDesc =
            td.getConglomerateDescriptor(
                ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
        /* Take care of the rare case of conglomDesc being null.  The
         * reason is that our "td" is out of date.  Another thread
         * which was adding a constraint committed between the moment
         * we got the table descriptor (conglomerate list) and the
         * moment we scanned and got the constraint desc list, so that
         * thread had just added new rows to SYSCONGLOMERATES,
         * SYSCONSTRAINTS, etc.  We wouldn't have wanted to lock the
         * system tables just to prevent other threads from adding new
         * rows.
         */
        if (conglomDesc == null) {
          // We can't be getting td from the cache: if we are here,
          // we must be in the data dictionary's DDL mode (that's why
          // the DDL thread got through).  We are not done yet and the
          // DD ref count is not 0, hence it couldn't have turned
          // into COMPILE_ONLY mode.
          td = dd.getTableDescriptor(tableUUID);
          if (scd != null) scd.setTableDescriptor(td);
          // try again now
          conglomDesc =
              td.getConglomerateDescriptor(
                  ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId());
        }

        if (SanityManager.DEBUG) {
          SanityManager.ASSERT(
              conglomDesc != null, "conglomDesc is expected to be non-null for backing index");
        }
        keyColumns = conglomDesc.getIndexDescriptor().baseColumnPositions();
        referencedConstraintId =
            ((SubKeyConstraintDescriptor) parentTupleDescriptor).getKeyConstraintId();
        break;

      case 'C':
        constraintIType = DataDictionary.CHECK_CONSTRAINT;
        if (SanityManager.DEBUG) {
          if (!(parentTupleDescriptor instanceof SubCheckConstraintDescriptor)) {
            SanityManager.THROWASSERT(
                "parentTupleDescriptor expected to be instanceof "
                    + "SubCheckConstraintDescriptor, not "
                    + parentTupleDescriptor.getClass().getName());
          }
        }
        break;

      default:
        if (SanityManager.DEBUG) {
          SanityManager.THROWASSERT("Fourth column value invalid");
        }
    }

    /* 5th column is SCHEMAID (UUID - char(36)) */
    col = row.getColumn(SYSCONSTRAINTS_SCHEMAID);
    schemaUUIDString = col.getString();
    schemaUUID = getUUIDFactory().recreateUUID(schemaUUIDString);

    schema = dd.getSchemaDescriptor(schemaUUID, null);

    /* 6th column is STATE (char(1)) */
    col = row.getColumn(SYSCONSTRAINTS_STATE);
    constraintStateStr = col.getString();
    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(constraintStateStr.length() == 1, "Sixth column (state) type incorrect");
    }

    switch (constraintStateStr.charAt(0)) {
      case 'E':
        constraintEnabled = true;
        break;
      case 'D':
        constraintEnabled = false;
        break;
      default:
        constraintEnabled = true;
        if (SanityManager.DEBUG) {
          SanityManager.THROWASSERT(
              "Invalidate state value '" + constraintStateStr + "' for constraint");
        }
    }

    /* 7th column is REFERENCECOUNT (int) */
    col = row.getColumn(SYSCONSTRAINTS_REFERENCECOUNT);
    referenceCount = col.getInt();

    /* now build and return the descriptor */

    switch (constraintIType) {
      case DataDictionary.PRIMARYKEY_CONSTRAINT:
        constraintDesc =
            ddg.newPrimaryKeyConstraintDescriptor(
                td,
                constraintName,
                false, // deferrable,
                false, // initiallyDeferred,
                keyColumns, // genReferencedColumns(dd, td), //int referencedColumns[],
                constraintUUID,
                ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(),
                schema,
                constraintEnabled,
                referenceCount);
        break;

      case DataDictionary.UNIQUE_CONSTRAINT:
        constraintDesc =
            ddg.newUniqueConstraintDescriptor(
                td,
                constraintName,
                false, // deferrable,
                false, // initiallyDeferred,
                keyColumns, // genReferencedColumns(dd, td), //int referencedColumns[],
                constraintUUID,
                ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(),
                schema,
                constraintEnabled,
                referenceCount);
        break;

      case DataDictionary.FOREIGNKEY_CONSTRAINT:
        if (SanityManager.DEBUG) {
          SanityManager.ASSERT(
              referenceCount == 0, "REFERENCECOUNT column is nonzero for fk constraint");
        }

        constraintDesc =
            ddg.newForeignKeyConstraintDescriptor(
                td,
                constraintName,
                false, // deferrable,
                false, // initiallyDeferred,
                keyColumns, // genReferencedColumns(dd, td), //int referencedColumns[],
                constraintUUID,
                ((SubKeyConstraintDescriptor) parentTupleDescriptor).getIndexId(),
                schema,
                referencedConstraintId,
                constraintEnabled,
                ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaDeleteRule(),
                ((SubKeyConstraintDescriptor) parentTupleDescriptor).getRaUpdateRule());
        break;

      case DataDictionary.CHECK_CONSTRAINT:
        if (SanityManager.DEBUG) {
          SanityManager.ASSERT(
              referenceCount == 0, "REFERENCECOUNT column is nonzero for check constraint");
        }

        constraintDesc =
            ddg.newCheckConstraintDescriptor(
                td,
                constraintName,
                false, // deferrable,
                false, // initiallyDeferred,
                constraintUUID,
                ((SubCheckConstraintDescriptor) parentTupleDescriptor).getConstraintText(),
                ((SubCheckConstraintDescriptor) parentTupleDescriptor)
                    .getReferencedColumnsDescriptor(),
                schema,
                constraintEnabled);
        break;
    }
    return constraintDesc;
  }
  /**
   * This is the guts of the Execution-time logic for CREATE TRIGGER.
   *
   * @see ConstantAction#executeConstantAction
   * @exception StandardException Thrown on failure
   */
  public void executeConstantAction(Activation activation) throws StandardException {
    SPSDescriptor whenspsd = null;
    SPSDescriptor actionspsd;

    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    DependencyManager dm = dd.getDependencyManager();
    TransactionController tc = lcc.getTransactionExecute();

    /*
     ** Indicate that we are about to modify the data dictionary.
     **
     ** We tell the data dictionary we're done writing at the end of
     ** the transaction.
     */
    dd.startWriting(lcc);

    SchemaDescriptor triggerSd = getSchemaDescriptorForCreate(dd, activation, triggerSchemaName);

    if (spsCompSchemaId == null) {
      SchemaDescriptor def = lcc.getDefaultSchema();
      if (def.getUUID() == null) {
        // Descriptor for default schema is stale,
        // look it up in the dictionary
        def = dd.getSchemaDescriptor(def.getDescriptorName(), tc, false);
      }

      /*
       ** It is possible for spsCompSchemaId to be null.  For instance,
       ** the current schema may not have been physically created yet but
       ** it exists "virtually".  In this case, its UUID will have the
       ** value of null, meaning that it is not persistent.  e.g.:
       **
       ** CONNECT 'db;create=true' user 'ernie';
       ** CREATE TABLE bert.t1 (i INT);
       ** CREATE TRIGGER bert.tr1 AFTER INSERT ON bert.t1
       **    FOR EACH STATEMENT MODE DB2SQL
       **    SELECT * FROM SYS.SYSTABLES;
       **
       ** Note that in the above case, the trigger action statement has a
       ** null compilation schema.  A compilation schema with a null value
       ** indicates that the trigger action statement text does not have
       ** any dependencies on the CURRENT SCHEMA.  This means:
       **
       ** o  It is safe to compile this statement in any schema, since
       **    there is no dependency on the CURRENT SCHEMA, i.e. all
       **    relevant identifiers are qualified with a specific schema.
       **
       ** o  The statement cache mechanism can utilize this piece of
       **    information to enable better statement plan sharing across
       **    connections in different schemas, thus avoiding unnecessary
       **    statement compilation.
       */
      if (def != null) spsCompSchemaId = def.getUUID();
    }

    String tabName;
    if (triggerTable != null) {
      triggerTableId = triggerTable.getUUID();
      tabName = triggerTable.getName();
    } else tabName = "with UUID " + triggerTableId;

    /* We need to get the table descriptor again.  We simply can't trust the
     * one we got at compile time: the lock on the system table was released
     * when compilation was done, and the table might well have been dropped.
     */
    triggerTable = dd.getTableDescriptor(triggerTableId);
    if (triggerTable == null) {
      throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tabName);
    }
    /* Lock the table for DDL.  Otherwise during our execution, the table
     * might be changed, even dropped.  Beetle 4269
     */
    lockTableForDDL(tc, triggerTable.getHeapConglomerateId(), true);
    /* Get triggerTable again for correctness, in case it changed before
     * the lock was acquired.
     */
    triggerTable = dd.getTableDescriptor(triggerTableId);
    if (triggerTable == null) {
      throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION, tabName);
    }

    /*
     ** Send an invalidation on the table from which
     ** the triggering event emanates.  This is
     ** to make sure that DML statements on this table
     ** will be recompiled.  Do this before we create
     ** our trigger SPSes lest we invalidate them just
     ** after creating them.
     */
    dm.invalidateFor(triggerTable, DependencyManager.CREATE_TRIGGER, lcc);

    /*
     ** Let's get our trigger id up front; we'll use it when
     ** we create our SPSes.
     */
    UUID tmpTriggerId = dd.getUUIDFactory().createUUID();

    actionSPSId = (actionSPSId == null) ? dd.getUUIDFactory().createUUID() : actionSPSId;

    DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();

    /*
     ** Create the trigger descriptor first so the trigger action
     ** compilation can pick up the relevant trigger especially in
     ** the case of self triggering.
     */
    TriggerDescriptor triggerd =
        ddg.newTriggerDescriptor(
            triggerSd,
            tmpTriggerId,
            triggerName,
            eventMask,
            isBefore,
            isRow,
            isEnabled,
            triggerTable,
            whenspsd == null ? null : whenspsd.getUUID(),
            actionSPSId,
            creationTimestamp == null
                ? new Timestamp(System.currentTimeMillis())
                : creationTimestamp,
            referencedCols,
            originalActionText,
            referencingOld,
            referencingNew,
            oldReferencingName,
            newReferencingName);

    dd.addDescriptor(triggerd, triggerSd, DataDictionary.SYSTRIGGERS_CATALOG_NUM, false, tc);

    /*
     ** If we have a WHEN clause, create its SPS now.
     */
    if (whenText != null) {
      whenspsd =
          createSPS(
              lcc,
              ddg,
              dd,
              tc,
              tmpTriggerId,
              triggerSd,
              whenSPSId,
              spsCompSchemaId,
              whenText,
              true,
              triggerTable);
    }

    /*
     ** Create the trigger action
     */
    actionspsd =
        createSPS(
            lcc,
            ddg,
            dd,
            tc,
            tmpTriggerId,
            triggerSd,
            actionSPSId,
            spsCompSchemaId,
            actionText,
            false,
            triggerTable);

    /*
     ** Register the trigger's dependencies on the underlying SPSes
     ** and on the trigger table, and the action SPS's dependency on
     ** the trigger table.
     */
    if (whenspsd != null) {
      dm.addDependency(triggerd, whenspsd, lcc.getContextManager());
    }
    dm.addDependency(triggerd, actionspsd, lcc.getContextManager());
    dm.addDependency(triggerd, triggerTable, lcc.getContextManager());
    dm.addDependency(actionspsd, triggerTable, lcc.getContextManager());
    // store the trigger's dependency on various privileges in the dependency system
    storeViewTriggerDependenciesOnPrivileges(activation, triggerd);
  }
  /**
   * @see Authorizer#authorize
   * @exception StandardException Thrown if the operation is not allowed
   */
  public void authorize(Activation activation, int operation) throws StandardException {
    int sqlAllowed = lcc.getStatementContext().getSQLAllowed();
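    // Note (derived from the comparisons below): the RoutineAliasInfo levels
    // are ordered so that a smaller value permits more SQL, with
    // MODIFIES_SQL_DATA the least restrictive; "sqlAllowed > X" therefore
    // means the current context is more restrictive than X allows.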

    switch (operation) {
      case Authorizer.SQL_ARBITARY_OP:
      case Authorizer.SQL_CALL_OP:
        if (sqlAllowed == RoutineAliasInfo.NO_SQL)
          throw externalRoutineException(operation, sqlAllowed);
        break;
      case Authorizer.SQL_SELECT_OP:
        if (sqlAllowed > RoutineAliasInfo.READS_SQL_DATA)
          throw externalRoutineException(operation, sqlAllowed);
        break;

        // SQL write operations
      case Authorizer.SQL_WRITE_OP:
      case Authorizer.PROPERTY_WRITE_OP:
        if (isReadOnlyConnection())
          throw StandardException.newException(SQLState.AUTH_WRITE_WITH_READ_ONLY_CONNECTION);
        if (sqlAllowed > RoutineAliasInfo.MODIFIES_SQL_DATA)
          throw externalRoutineException(operation, sqlAllowed);
        break;

        // SQL DDL operations
      case Authorizer.JAR_WRITE_OP:
      case Authorizer.SQL_DDL_OP:
        if (isReadOnlyConnection())
          throw StandardException.newException(SQLState.AUTH_DDL_WITH_READ_ONLY_CONNECTION);

        if (sqlAllowed > RoutineAliasInfo.MODIFIES_SQL_DATA)
          throw externalRoutineException(operation, sqlAllowed);
        break;

      default:
        if (SanityManager.DEBUG) SanityManager.THROWASSERT("Bad operation code " + operation);
    }
    if (activation != null) {
      List requiredPermissionsList = activation.getPreparedStatement().getRequiredPermissionsList();
      DataDictionary dd = lcc.getDataDictionary();

      // Database Owner can access any object. Ignore
      // requiredPermissionsList for Database Owner
      if (requiredPermissionsList != null
          && !requiredPermissionsList.isEmpty()
          && !authorizationId.equals(dd.getAuthorizationDatabaseOwner())) {
        int ddMode = dd.startReading(lcc);

        /*
         * The system may need to read the permission descriptor(s)
         * from the system table(s) if they are not available in the
         * permission cache.  So start an internal read-only nested
         * transaction for this.
         *
         * The reason to use a nested transaction here is to avoid holding
         * locks on system tables in a user transaction.  e.g.: when
         * attempting to revoke a user, the statement may time out
         * since the to-be-revoked user's transaction may have acquired
         * shared locks on the permission system tables; hence, this
         * may not be desirable.
         *
         * All locks acquired by StatementPermission object's check()
         * method will be released when the system ends the nested
         * transaction.
         *
         * In Derby, the locks from read nested transactions come from
         * the same space as the parent transaction; hence, they do not
         * conflict with parent locks.
         */
        lcc.beginNestedTransaction(true);

        try {
          try {
            // perform the permission checking
            for (Iterator iter = requiredPermissionsList.iterator(); iter.hasNext(); ) {
              ((StatementPermission) iter.next()).check(lcc, authorizationId, false, activation);
            }
          } finally {
            dd.doneReading(ddMode, lcc);
          }
        } finally {
          // make sure we commit; otherwise, we will end up with a
          // mismatched nesting level in the language connection context.
          lcc.commitNestedTransaction();
        }
      }
    }
  }