/**
   * Bind this LockTableNode. This means looking up the table, verifying it exists and getting the
   * heap conglomerate number.
   *
   * @exception StandardException Thrown on error
   */
  public void bindStatement() throws StandardException {
    CompilerContext cc = getCompilerContext();
    ConglomerateDescriptor cd;
    DataDictionary dd = getDataDictionary();
    SchemaDescriptor sd;

    String schemaName = tableName.getSchemaName();
    sd = getSchemaDescriptor(schemaName);

    // Users are not allowed to lock system tables
    if (sd.isSystemSchema()) {
      throw StandardException.newException(
          SQLState.LANG_NO_USER_DDL_IN_SYSTEM_SCHEMA, statementToString(), schemaName);
    }

    lockTableDescriptor = getTableDescriptor(tableName.getTableName(), sd);

    if (lockTableDescriptor == null) {
      // Check if the reference is for a synonym.
      TableName synonymTab = resolveTableToSynonym(tableName);
      if (synonymTab == null)
        throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName);
      tableName = synonymTab;
      sd = getSchemaDescriptor(tableName.getSchemaName());

      lockTableDescriptor = getTableDescriptor(synonymTab.getTableName(), sd);
      if (lockTableDescriptor == null)
        throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName);
    }

    // throw an exception if user is attempting to lock a temporary table
    if (lockTableDescriptor.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
      throw StandardException.newException(
          SQLState.LANG_NOT_ALLOWED_FOR_DECLARED_GLOBAL_TEMP_TABLE);
    }

    conglomerateNumber = lockTableDescriptor.getHeapConglomerateId();

    /* Get the base conglomerate descriptor */
    cd = lockTableDescriptor.getConglomerateDescriptor(conglomerateNumber);

    /* Statement is dependent on the TableDescriptor and ConglomerateDescriptor */
    cc.createDependency(lockTableDescriptor);
    cc.createDependency(cd);

    if (isPrivilegeCollectionRequired()) {
      // need SELECT privilege to perform lock table statement.
      cc.pushCurrentPrivType(Authorizer.SELECT_PRIV);
      cc.addRequiredTablePriv(lockTableDescriptor);
      cc.popCurrentPrivType();
    }
  }
  /**
   * Return the requested values computed from the next row (if any) for which the restriction
   * evaluates to true.
   *
   * <p>Restriction and projection parameters are evaluated for each row.
   *
   * @exception StandardException thrown on failure.
   * @exception StandardException ResultSetNotOpen thrown if not yet open.
   * @return the next row in the result
   */
  public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode()) return null;

    ExecRow sourceRow = null;
    ExecRow retval = null;
    boolean restrict = false;
    DataValueDescriptor restrictBoolean;
    long beginRT = 0;

    beginTime = getCurrentTimeMillis();

    if (!isOpen) {
      throw StandardException.newException(SQLState.LANG_RESULT_SET_NOT_OPEN, "next");
    }

    /*
     * Loop until we get a row from the source that qualifies, or there are
     * no more rows to qualify. For each iteration fetch a row from the
     * source, and evaluate against the restriction if any.
     */
    ExecRow tmpRow = null;

    do {
      sourceRow = source.getNextRowCore();

      if (sourceRow != null) {
        this.rownumber++;
        tmpRow = getAllocatedRow();
        populateFromSourceRow(sourceRow, tmpRow);
        setCurrentRow(tmpRow);

        /* Evaluate any restrictions */
        beginRT = getCurrentTimeMillis();
        restrictBoolean =
            (DataValueDescriptor) ((restriction == null) ? null : restriction.invoke(activation));

        restrictionTime += getElapsedMillis(beginRT);

        // If there is no restriction the row qualifies; a SQL NULL
        // result is treated as false, so the row won't be returned.
        restrict =
            (restrictBoolean == null)
                || ((!restrictBoolean.isNull()) && restrictBoolean.getBoolean());

        if (!restrict) {
          rowsFiltered++;
          clearCurrentRow();
        }

        /* Update the run time statistics */
        rowsSeen++;
        retval = currentRow;
      } else {
        clearCurrentRow();
        retval = null;
      }
    } while ((sourceRow != null) && (!restrict));

    nextTime += getElapsedMillis(beginTime);
    return retval;
  }
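  /*
   * A minimal standalone sketch of the three-valued qualification rule applied in the loop above:
   * no restriction qualifies the row, a SQL NULL result is treated as false, and otherwise the
   * boolean value decides. RestrictionExample is an illustrative assumption, not a Derby type.
   */
  final class RestrictionExample {
    static boolean qualifies(Boolean restrictionResult, boolean hasRestriction) {
      if (!hasRestriction) {
        return true; // no restriction: every row qualifies
      }
      if (restrictionResult == null) {
        return false; // SQL NULL is treated as false, so the row is filtered
      }
      return restrictionResult; // TRUE keeps the row, FALSE filters it
    }
  }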
 /**
  * @see Authorizer#setReadOnlyConnection
  * @exception StandardException Thrown if the operation is not allowed
  */
 public void setReadOnlyConnection(boolean on, boolean authorize) throws StandardException {
   if (authorize && !on) {
     if (connectionMustRemainReadOnly())
       throw StandardException.newException(SQLState.AUTH_CANNOT_SET_READ_WRITE);
   }
   readOnlyConnection = on;
 }
  /**
   * Get a BigDecimal representing the value of a DataValueDescriptor
   *
   * @param value Non-null value to be converted
   * @return BigDecimal value
   * @throws StandardException Invalid conversion or out of range.
   */
  public static BigDecimal getBigDecimal(DataValueDescriptor value) throws StandardException {
    if (SanityManager.DEBUG) {
      if (value.isNull())
        SanityManager.THROWASSERT("NULL value passed to SQLDecimal.getBigDecimal");
    }

    switch (value.typeToBigDecimal()) {
      case Types.DECIMAL:
        return (BigDecimal) value.getObject();
      case Types.CHAR:
        try {
          return new BigDecimal(value.getString().trim());
        } catch (NumberFormatException nfe) {
          throw StandardException.newException(
              SQLState.LANG_FORMAT_EXCEPTION, "java.math.BigDecimal");
        }
      case Types.BIGINT:
        return BigDecimal.valueOf(value.getLong());
      default:
        if (SanityManager.DEBUG)
          SanityManager.THROWASSERT(
              "invalid return from "
                  + value.getClass()
                  + ".typeToBigDecimal() "
                  + value.typeToBigDecimal());
        return null;
    }
  }
  /**
   * read the page from disk into this CachedPage object.
   *
   * <p>A page is read in from disk into the pageData array of this object, and then put in the
   * cache.
   *
   * <p>
   *
   * @param myContainer the container to read the page from.
   * @param newIdentity identity (i.e. page number) of the page to read
   * @exception StandardException Standard exception policy.
   */
  private void readPage(FileContainer myContainer, PageKey newIdentity) throws StandardException {
    int pagesize = myContainer.getPageSize();

    // Reuse the existing page array if it is the same size; the
    // cache does support caching pages of various sizes.
    setPageArray(pagesize);

    for (int io_retry_count = 0; ; ) {
      try {
        myContainer.readPage(newIdentity.getPageNumber(), pageData);
        break;
      } catch (IOException ioe) {
        io_retry_count++;

        // Retrying read I/O's has been found to be successful sometimes
        // in completing the read without having to fail the calling
        // query, and in some cases avoiding complete db shutdown.
        // Some situations are:
        //     spurious interrupts being sent to thread by clients.
        //     unreliable hardware like a network mounted file system.
        //
        // The only option other than retrying is to fail the I/O
        // immediately and throw an error, so the performance cost of
        // retrying is not really a consideration.
        //
        // The retry max of 4 is arbitrary, but has been enough that
        // not many read I/O errors have been reported.
        if (io_retry_count > 4) {
          // page cannot be physically read

          StandardException se =
              StandardException.newException(
                  SQLState.FILE_READ_PAGE_EXCEPTION, ioe, newIdentity, new Integer(pagesize));

          if (dataFactory.getLogFactory().inRFR()) {
            // If we are in rollforward recovery, it is possible that
            // this page does not actually exist on disk yet because the
            // log record we are processing now is creating the page. We
            // will recreate the page during rollforward recovery, so
            // just throw the exception.
            throw se;
          } else {
            if (SanityManager.DEBUG) {
              // by shutting down system in debug mode, maybe
              // we can catch root cause of the interrupt.
              throw dataFactory.markCorrupt(se);
            } else {
              // No need to shut down runtime database on read
              // error in delivered system, throwing exception
              // should be enough.  Thrown exception has nested
              // IO exception which is root cause of error.
              throw se;
            }
          }
        }
      }
    }
  }
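  /*
   * The loop above is a bounded-retry pattern for transient read failures. A minimal standalone
   * sketch of the same idea follows; the BoundedRetryExample class, its RETRY_LIMIT constant, and
   * the use of RandomAccessFile are illustrative assumptions, not Derby code.
   */
  final class BoundedRetryExample {
    private static final int RETRY_LIMIT = 4; // same arbitrary limit as readPage() above

    static byte[] readWithRetry(java.io.RandomAccessFile file, long offset, int length)
        throws java.io.IOException {
      byte[] buffer = new byte[length];
      for (int attempt = 0; ; attempt++) {
        try {
          file.seek(offset);
          file.readFully(buffer);
          return buffer; // success: stop retrying
        } catch (java.io.IOException ioe) {
          if (attempt >= RETRY_LIMIT) {
            throw ioe; // give up and surface the root cause to the caller
          }
          // otherwise fall through and retry the read
        }
      }
    }
  }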
  /**
   * @see Authorizer#refresh
   * @exception StandardException Thrown if the operation is not allowed
   */
  public void refresh() throws StandardException {
    getUserAccessLevel();
    if (!readOnlyConnection) readOnlyConnection = connectionMustRemainReadOnly();

    // Is a connection allowed.
    if (userAccessLevel == NO_ACCESS)
      throw StandardException.newException(SQLState.AUTH_DATABASE_CONNECTION_REFUSED);
  }
 /**
  * @see TypeCompiler#resolveArithmeticOperation
  * @exception StandardException Thrown on error
  */
 public DataTypeDescriptor resolveArithmeticOperation(
     DataTypeDescriptor leftType, DataTypeDescriptor rightType, String operator)
     throws StandardException {
   throw StandardException.newException(
       SQLState.LANG_BINARY_OPERATOR_NOT_SUPPORTED,
       operator,
       leftType.getTypeId().getSQLTypeName(),
       rightType.getTypeId().getSQLTypeName());
 }
 /**
  * Open a b-tree compress scan.
  *
  * <p>B2I does not support a compress scan.
  *
  * <p>
  *
  * @see Conglomerate#defragmentConglomerate
  * @exception StandardException Standard exception policy.
  */
 public ScanManager defragmentConglomerate(
     TransactionManager xact_manager,
     Transaction rawtran,
     boolean hold,
     int open_mode,
     int lock_level,
     LockingPolicy locking_policy,
     int isolation_level)
     throws StandardException {
   throw StandardException.newException(SQLState.BTREE_UNIMPLEMENTED_FEATURE);
 }
  /**
   * Bind this expression. This means binding the sub-expressions, as well as figuring out what the
   * return type is for this expression.
   *
   * @param fromList The FROM list for the query this expression is in, for binding columns.
   * @param subqueryList The subquery list being built as we find SubqueryNodes
   * @param aggregateVector The aggregate vector being built as we find AggregateNodes
   * @return The new top of the expression tree.
   * @exception StandardException Thrown on error
   */
  public ValueNode bindExpression(
      FromList fromList, SubqueryList subqueryList, List aggregateVector) throws StandardException {
    // method invocations are not allowed in ADD TABLE clauses.
    // And neither are field references.
    javaNode.checkReliability(this);

    /* Bind the expression under us */
    javaNode = javaNode.bindExpression(fromList, subqueryList, aggregateVector);

    if (javaNode instanceof StaticMethodCallNode) {
      AggregateNode agg = ((StaticMethodCallNode) javaNode).getResolvedAggregate();

      if (agg != null) {
        return agg.bindExpression(fromList, subqueryList, aggregateVector);
      }
    }

    DataTypeDescriptor dts = javaNode.getDataType();
    if (dts == null) {
      throw StandardException.newException(
          SQLState.LANG_NO_CORRESPONDING_S_Q_L_TYPE, javaNode.getJavaTypeName());
    }

    TypeDescriptor catalogType = dts.getCatalogType();

    if (catalogType.isRowMultiSet() || (catalogType.getTypeName().equals("java.sql.ResultSet"))) {
      throw StandardException.newException(SQLState.LANG_TABLE_FUNCTION_NOT_ALLOWED);
    }

    setType(dts);

    // For functions returning string types we should set the collation to match the
    // java method's schema (DERBY-2972). This is propagated from
    // RoutineAliasInfo to javaNode.
    if (dts.getTypeId().isStringTypeId()) {
      this.setCollationInfo(
          javaNode.getCollationType(), StringDataValue.COLLATION_DERIVATION_IMPLICIT);
    }

    return this;
  }
  public RowLocation newRowLocationTemplate() throws StandardException {
    if (open_conglom.isClosed()) {
      if (open_conglom.getHold()) {
        open_conglom.reopen();
      } else {
        throw (StandardException.newException(
            SQLState.HEAP_IS_CLOSED, open_conglom.getConglomerate().getId()));
      }
    }

    return new HeapRowLocation();
  }
  /** @exception StandardException thrown on failure to convert */
  public int getInt() throws StandardException {
    if (isNull()) return 0;

    try {
      long lv = getLong();

      if ((lv >= Integer.MIN_VALUE) && (lv <= Integer.MAX_VALUE)) return (int) lv;

    } catch (StandardException se) {
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "INTEGER");
  }
  /** @exception StandardException thrown on failure to convert */
  public short getShort() throws StandardException {
    if (isNull()) return (short) 0;

    try {
      long lv = getLong();

      if ((lv >= Short.MIN_VALUE) && (lv <= Short.MAX_VALUE)) return (short) lv;

    } catch (StandardException se) {
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "SMALLINT");
  }
  /**
   * Set the precision/scale of this value to the desired values. Used when CASTing. Ideally we'd recycle
   * normalize(), but the use is different.
   *
   * @param desiredPrecision the desired precision -- IGNORE_PRECISION if it is to be ignored.
   * @param desiredScale the desired scale
   * @param errorOnTrunc throw error on truncation (ignored -- always thrown if we truncate the
   *     non-decimal part of the value)
   * @exception StandardException Thrown on non-zero truncation if errorOnTrunc is true
   */
  public void setWidth(int desiredPrecision, int desiredScale, boolean errorOnTrunc)
      throws StandardException {
    if (isNull()) return;

    if (desiredPrecision != IGNORE_PRECISION
        && ((desiredPrecision - desiredScale) < SQLDecimal.getWholeDigits(getBigDecimal()))) {
      throw StandardException.newException(
          SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE,
          ("DECIMAL/NUMERIC(" + desiredPrecision + "," + desiredScale + ")"));
    }
    value = value.setScale(desiredScale, BigDecimal.ROUND_DOWN);
    rawData = null;
  }
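  /*
   * Worked sketch of the precision/scale check above: a value fits DECIMAL(p, s) only if its
   * whole-digit count is at most p - s, and fractional digits beyond s are dropped (ROUND_DOWN).
   * For example, 123.45 does not fit DECIMAL(4, 2) because 3 > 4 - 2, while 12.999 becomes 12.99.
   * CastDecimalExample and its whole-digit computation are illustrative assumptions, not Derby's
   * SQLDecimal.getWholeDigits.
   */
  final class CastDecimalExample {
    static java.math.BigDecimal castTo(java.math.BigDecimal v, int precision, int scale) {
      int wholeDigits = v.precision() - v.scale(); // digits to the left of the decimal point
      if (wholeDigits > precision - scale) {
        throw new ArithmeticException(
            "value does not fit DECIMAL(" + precision + "," + scale + ")");
      }
      // Fractional digits beyond the target scale are dropped, matching ROUND_DOWN above.
      return v.setScale(scale, java.math.RoundingMode.DOWN);
    }
  }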
  /** @exception StandardException thrown on failure to convert */
  public byte getByte() throws StandardException {
    if (isNull()) return (byte) 0;

    try {
      long lv = getLong();

      if ((lv >= Byte.MIN_VALUE) && (lv <= Byte.MAX_VALUE)) return (byte) lv;

    } catch (StandardException se) {
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "TINYINT");
  }
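  /*
   * getInt(), getShort(), and getByte() above share one widen-check-narrow pattern: widen to long,
   * verify the value fits the target range, then cast. A standalone sketch is below
   * (NarrowingExample is an illustrative assumption, not a Derby type); java.lang.Math.toIntExact
   * offers the same behavior for the int case.
   */
  final class NarrowingExample {
    static int toIntOrFail(long lv) {
      if (lv >= Integer.MIN_VALUE && lv <= Integer.MAX_VALUE) {
        return (int) lv; // safe: the value fits in an int
      }
      throw new ArithmeticException("value outside range for INTEGER: " + lv);
    }

    static short toShortOrFail(long lv) {
      if (lv >= Short.MIN_VALUE && lv <= Short.MAX_VALUE) {
        return (short) lv; // safe: the value fits in a short
      }
      throw new ArithmeticException("value outside range for SMALLINT: " + lv);
    }
  }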
  /**
   * This is the guts of the Execution-time logic for DROP ROLE.
   *
   * @see org.apache.derby.iapi.sql.execute.ConstantAction#executeConstantAction
   */
  public void executeConstantAction(Activation activation) throws StandardException {
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    TransactionController tc = lcc.getTransactionExecute();

    /*
     ** Inform the data dictionary that we are about to write to it.
     ** There are several calls to data dictionary "get" methods here
     ** that might be done in "read" mode in the data dictionary, but
     ** it seemed safer to do this whole operation in "write" mode.
     **
     ** We tell the data dictionary we're done writing at the end of
     ** the transaction.
     */
    dd.startWriting(lcc);

    RoleGrantDescriptor rdDef = dd.getRoleDefinitionDescriptor(roleName);

    if (rdDef == null) {
      throw StandardException.newException(SQLState.ROLE_INVALID_SPECIFICATION, roleName);
    }

    // When a role is dropped, for every role in its grantee closure, we
    // call the REVOKE_ROLE action. It is used to invalidate dependent
    // objects (constraints, triggers and views).  Note that until
    // DERBY-1632 is fixed, we risk dropping objects not really dependent
    // on this role, but on some other role, just because it inherits from
    // this one. See also RevokeRoleConstantAction.
    RoleClosureIterator rci =
        dd.createRoleClosureIterator(activation.getTransactionController(), roleName, false);

    String role;
    while ((role = rci.next()) != null) {
      RoleGrantDescriptor r = dd.getRoleDefinitionDescriptor(role);

      dd.getDependencyManager().invalidateFor(r, DependencyManager.REVOKE_ROLE, lcc);
    }

    rdDef.drop(lcc);

    /*
     * We dropped a role, now drop all dependents:
     * - role grants to this role
     * - grants of this role to other roles or users
     * - privilege grants to this role
     */

    dd.dropRoleGrantsByGrantee(roleName, tc);
    dd.dropRoleGrantsByName(roleName, tc);
    dd.dropAllPermsByGrantee(roleName, tc);
  }
  /**
   * ************************************************************************
   * Public Methods of This class:
   * ************************************************************************
   */
  public int insert(DataValueDescriptor[] row) throws StandardException {
    if (open_conglom.isClosed()) {
      if (open_conglom.getHold()) {
        open_conglom.reopen();
      } else {
        throw (StandardException.newException(
            SQLState.HEAP_IS_CLOSED, open_conglom.getConglomerate().getId()));
      }
    }

    doInsert(row);

    return (0);
  }
 /**
  * If no schema name is specified for a global temporary table, SESSION is the implicit schema.
  * Otherwise, make sure the specified schema name for global temporary table is SESSION.
  *
  * @param objectName The name of the new object being declared (i.e. the temporary table)
  */
 private Object tempTableSchemaNameCheck(Object objectName) throws StandardException {
   TableName tempTableName = (TableName) objectName;
   if (tempTableName != null) {
     if (tempTableName.getSchemaName() == null) {
       // If no schema is specified, SESSION is the implicit schema.
       tempTableName.setSchemaName(
           SchemaDescriptor.STD_DECLARED_GLOBAL_TEMPORARY_TABLES_SCHEMA_NAME);
     } else if (!isSessionSchema(tempTableName.getSchemaName())) {
       throw StandardException.newException(
           SQLState.LANG_DECLARED_GLOBAL_TEMP_TABLE_ONLY_IN_SESSION_SCHEMA);
     }
   }
   return tempTableName;
 }
  /**
   * Set the width of this value to the desired value. Used when CASTing. Ideally we'd recycle normalize(),
   * but the behavior is different (we issue a warning instead of an error, and we aren't interested
   * in nullability).
   *
   * @param desiredWidth the desired length
   * @param desiredScale the desired scale (ignored)
   * @param errorOnTrunc throw error on truncation
   * @exception StandardException Thrown on non-zero truncation if errorOnTrunc is true
   */
  public void setWidth(
      int desiredWidth,
      int desiredScale, // Ignored
      boolean errorOnTrunc)
      throws StandardException {
    /*
     ** If the input is NULL, nothing to do.
     */
    if (getValue() == null) {
      return;
    }

    int sourceWidth = dataValue.length;

    /*
     ** If the input is shorter than the desired type,
     ** then pad with blanks to the right length.
     */
    if (sourceWidth < desiredWidth) {
      byte[] actualData = new byte[desiredWidth];
      System.arraycopy(dataValue, 0, actualData, 0, dataValue.length);
      java.util.Arrays.fill(actualData, dataValue.length, actualData.length, SQLBinary.PAD);
      dataValue = actualData;
    }
    /*
     ** Truncation?
     */
    else if (sourceWidth > desiredWidth) {
      if (errorOnTrunc) {
        // Error if truncating non-pad bytes.
        for (int i = desiredWidth; i < dataValue.length; i++) {

          if (dataValue[i] != SQLBinary.PAD)
            throw StandardException.newException(
                SQLState.LANG_STRING_TRUNCATION,
                getTypeName(),
                StringUtil.formatForPrint(this.toString()),
                String.valueOf(desiredWidth));
        }
      }

      /*
       ** Truncate to the desired width.
       */
      truncate(sourceWidth, desiredWidth, !errorOnTrunc);
    }
  }
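  /*
   * Minimal pad-or-truncate sketch for fixed-width binary values like the CAST above: shorter
   * input is padded on the right, longer input is cut back to the target width. The
   * FixedWidthBytesExample class and its 0x20 pad byte are illustrative assumptions, not Derby's
   * SQLBinary.PAD handling.
   */
  final class FixedWidthBytesExample {
    private static final byte PAD = 0x20;

    static byte[] toWidth(byte[] src, int width) {
      byte[] out = new byte[width];
      int copied = Math.min(src.length, width);
      System.arraycopy(src, 0, out, 0, copied); // copy what fits
      java.util.Arrays.fill(out, copied, width, PAD); // pad the remainder, if any
      return out; // silently truncates when src is longer than width
    }
  }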
  public void insertAndFetchLocation(DataValueDescriptor[] row, RowLocation templateRowLocation)
      throws StandardException {
    if (open_conglom.isClosed()) {
      if (open_conglom.getHold()) {
        open_conglom.reopen();
      } else {
        throw (StandardException.newException(
            SQLState.HEAP_IS_CLOSED, open_conglom.getConglomerate().getId()));
      }
    }

    RecordHandle rh = doInsert(row);
    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(templateRowLocation instanceof HeapRowLocation);
    }
    HeapRowLocation hrl = (HeapRowLocation) templateRowLocation;
    hrl.setFrom(rh);
  }
  /**
   * Prepare to mark the dependent as invalid (due to at least one of its dependencies being
   * invalid).
   *
   * @param action The action causing the invalidation
   * @param p the provider
   * @param lcc the language connection context
   * @exception StandardException thrown if unable to make it invalid
   */
  public void prepareToInvalidate(Provider p, int action, LanguageConnectionContext lcc)
      throws StandardException {
    switch (action) {
        /*
         ** We are only dependent on the underlying table, and our spses and
         ** privileges on various objects.  (we should be dropped before our
         ** table is dropped. Also, we should be dropped before revoke
         ** RESTRICT privilege is issued otherwise revoke RESTRICT will
         ** throw an exception).
         ** Currently, in Derby, an execute routine privilege can be revoked
         ** only if there are no dependents on that privilege. When revoke
         ** execute RESTRICT is executed, all the dependents will receive
         ** REVOKE_PRIVILEGE_RESTRICT and they should throw an exception.
         ** We handle this for TriggerDescriptor by throwing an exception
         ** below. For all the other types of revoke privileges, for
         ** instance, SELECT, UPDATE, DELETE, INSERT, REFERENCES,
         ** TRIGGER, we don't do anything here and later in makeInvalid, we
         ** make the TriggerDescriptor drop itself.
         */
      case DependencyManager.DROP_TABLE:
      case DependencyManager.DROP_SYNONYM:
      case DependencyManager.DROP_SPS:
      case DependencyManager.RENAME:
      case DependencyManager.REVOKE_PRIVILEGE_RESTRICT:
        DependencyManager dm = getDataDictionary().getDependencyManager();
        throw StandardException.newException(
            SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
            dm.getActionString(action),
            p.getObjectName(),
            "TRIGGER",
            name);

        /*
         ** The trigger descriptor depends on the trigger table.
         ** This means that we get called whenever anything happens
         ** to the trigger table. There are so many cases where this
         ** can happen that it doesn't make sense to have an assertion
         ** here to check whether the action was expected (it makes
         ** the code hard to maintain, and creates a big switch statement).
         */
      default:
        break;
    }
  }
  public UpdateFieldOperation(
      RawTransaction t,
      BasePage page,
      int slot,
      int recordId,
      int fieldId,
      Object column,
      LogicalUndo undo)
      throws StandardException {
    super(page, undo, recordId);

    this.doMeSlot = slot;
    this.fieldId = fieldId;

    try {
      writeOptionalDataToBuffer(t, column);
    } catch (IOException ioe) {
      throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
    }
  }
  /**
   * Convert this page to requested type, as defined by input format id.
   *
   * <p>The current cache entry has a different format id than the requested type, so change it.
   * This object was instantiated as the wrong subtype of CachedPage; this routine creates an
   * object of the correct subtype and transfers all pertinent information from this object to the
   * new one.
   *
   * <p>
   *
   * @return The new object created with the input fid and transferred info.
   * @param fid The format id of the new page.
   * @param newIdentity The key of the new page.
   * @exception StandardException Standard exception policy.
   */
  private CachedPage changeInstanceTo(int fid, PageKey newIdentity) throws StandardException {
    CachedPage realPage;

    try {
      realPage = (CachedPage) Monitor.newInstanceFromIdentifier(fid);

    } catch (StandardException se) {
      if (se.getSeverity() > ExceptionSeverity.STATEMENT_SEVERITY) {
        throw se;
      } else {
        throw StandardException.newException(
            SQLState.DATA_UNKNOWN_PAGE_FORMAT_2,
            newIdentity,
            org.apache.derby.iapi.util.StringUtil.hexDump(pageData));
      }
    }

    realPage.setFactory(dataFactory);

    // avoid creating the data buffer if possible, transfer it to the new
    // page if this is the first time the page buffer is used, then
    // createPage will create the page array with the correct page size
    if (this.pageData != null) {
      realPage.alreadyReadPage = true;
      realPage.usePageBuffer(this.pageData);
    }

    // RESOLVE (12/15/06) - the following code is commented out, but
    // not sure why.

    // this page should not be used any more, null out all its content and
    // wait for GC to clean it up

    // destroyPage();// let this subtype have a chance to get rid of stuff
    // this.pageData = null;	// this instance no longer own the data array
    // this.pageCache = null;
    // this.dataFactory = null;
    // this.containerCache = null;

    return realPage;
  }
  private static StandardException externalRoutineException(int operation, int sqlAllowed) {

    String sqlState;
    if (sqlAllowed == RoutineAliasInfo.READS_SQL_DATA)
      sqlState = SQLState.EXTERNAL_ROUTINE_NO_MODIFIES_SQL;
    else if (sqlAllowed == RoutineAliasInfo.CONTAINS_SQL) {
      switch (operation) {
        case Authorizer.SQL_WRITE_OP:
        case Authorizer.PROPERTY_WRITE_OP:
        case Authorizer.JAR_WRITE_OP:
        case Authorizer.SQL_DDL_OP:
          sqlState = SQLState.EXTERNAL_ROUTINE_NO_MODIFIES_SQL;
          break;
        default:
          sqlState = SQLState.EXTERNAL_ROUTINE_NO_READS_SQL;
          break;
      }
    } else sqlState = SQLState.EXTERNAL_ROUTINE_NO_SQL;

    return StandardException.newException(sqlState);
  }
  public Object newInstance(Context context) throws StandardException {

    Throwable t;
    try {
      GeneratedByteCode ni = (GeneratedByteCode) ci.getNewInstance();
      ni.initFromContext(context);
      ni.setGC(this);
      ni.postConstructor();
      return ni;

    } catch (InstantiationException ie) {
      t = ie;
    } catch (IllegalAccessException iae) {
      t = iae;
    } catch (java.lang.reflect.InvocationTargetException ite) {
      t = ite;
    } catch (LinkageError le) {
      t = le;
    }

    throw StandardException.newException(SQLState.GENERATED_CLASS_INSTANCE_ERROR, t, getName());
  }
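  /*
   * The method above collapses several instantiation failure modes into one wrapped error. A
   * standalone sketch of the same pattern with plain java.lang.reflect is shown below;
   * ReflectiveNewInstanceExample and the use of Runnable as the target type are illustrative
   * assumptions, since GeneratedByteCode and Context are Derby-internal types.
   */
  final class ReflectiveNewInstanceExample {
    static Runnable newInstance(Class<? extends Runnable> clazz) {
      Throwable t;
      try {
        return clazz.getDeclaredConstructor().newInstance();
      } catch (ReflectiveOperationException roe) {
        // covers InstantiationException, IllegalAccessException, InvocationTargetException
        t = roe;
      } catch (LinkageError le) {
        t = le;
      }
      throw new IllegalStateException("could not instantiate " + clazz.getName(), t);
    }
  }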
  /**
   * Notify all listeners that a commit is about to occur. If a listener throws an exception then no
   * further listeners will be notified and a StandardException with rollback severity will be
   * thrown.
   *
   * @throws StandardException
   */
  public void preCommitNotify() throws StandardException {
    if (listeners.isEmpty()) return;

    for (Iterator i = listeners.iterator(); i.hasNext(); ) {
      TransactionListener listener = (TransactionListener) i.next();

      try {
        if (listener.preCommit()) i.remove();
      } catch (StandardException se) {
        // This catches any exceptions that have Transaction severity
        // or less (e.g. Statement exception).
        // If we received any lesser
        // error then we abort the transaction anyway.

        if (se.getSeverity() < ExceptionSeverity.TRANSACTION_SEVERITY) {
          throw StandardException.newException(SQLState.XACT_COMMIT_EXCEPTION, se);
        }

        throw se;
      }
    }
  }
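  /*
   * A minimal sketch of the remove-while-iterating pattern used above: listeners that report they
   * are finished are dropped through Iterator.remove during the same pass. PreCommitExample and
   * its Listener interface are illustrative assumptions, not Derby's TransactionListener API.
   */
  final class PreCommitExample {
    interface Listener {
      boolean preCommit(); // return true to be dropped after this notification
    }

    static void notifyListeners(java.util.List<Listener> listeners) {
      for (java.util.Iterator<Listener> i = listeners.iterator(); i.hasNext(); ) {
        if (i.next().preCommit()) {
          i.remove(); // safe removal during iteration
        }
      }
    }
  }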
  /** @exception StandardException thrown on failure to convert */
  public long getLong() throws StandardException {
    BigDecimal localValue = getBigDecimal();
    if (localValue == null) return (long) 0;

    // Valid range for long is
    //   greater than Long.MIN_VALUE - 1
    // *and*
    //   less than Long.MAX_VALUE + 1
    //
    // This ensures that DECIMAL values with an integral value
    // equal to the Long.MIN/MAX_VALUE round correctly to those values.
    // e.g. 9223372036854775807.1  converts to 9223372036854775807
    // this matches DB2 UDB behaviour

    if ((localValue.compareTo(MINLONG_MINUS_ONE) == 1)
        && (localValue.compareTo(MAXLONG_PLUS_ONE) == -1)) {

      return localValue.longValue();
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "BIGINT");
  }
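  /*
   * Standalone illustration of the open-interval range check used by getLong() above: comparing
   * strictly against Long.MIN_VALUE - 1 and Long.MAX_VALUE + 1 lets a value such as
   * 9223372036854775807.1 qualify and truncate to Long.MAX_VALUE. DecimalToLongExample is an
   * illustrative assumption, not a Derby type.
   */
  final class DecimalToLongExample {
    private static final java.math.BigDecimal MINLONG_MINUS_ONE =
        java.math.BigDecimal.valueOf(Long.MIN_VALUE).subtract(java.math.BigDecimal.ONE);
    private static final java.math.BigDecimal MAXLONG_PLUS_ONE =
        java.math.BigDecimal.valueOf(Long.MAX_VALUE).add(java.math.BigDecimal.ONE);

    static long toLong(java.math.BigDecimal v) {
      if (v.compareTo(MINLONG_MINUS_ONE) > 0 && v.compareTo(MAXLONG_PLUS_ONE) < 0) {
        return v.longValue(); // drops any fractional part, matching the DECIMAL behaviour above
      }
      throw new ArithmeticException("value outside range for BIGINT: " + v);
    }
  }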
  /**
   * Does this trigger need to fire on this type of DML?
   *
   * @param stmtType the type of DML
   *     (StatementType.INSERT|StatementType.UPDATE|StatementType.DELETE)
   * @param modifiedCols the columns modified, or null for all
   * @return true/false
   * @exception StandardException on error
   */
  public boolean needsToFire(int stmtType, int[] modifiedCols) throws StandardException {

    if (SanityManager.DEBUG) {
      if (!((stmtType == StatementType.INSERT)
          || (stmtType == StatementType.BULK_INSERT_REPLACE)
          || (stmtType == StatementType.UPDATE)
          || (stmtType == StatementType.DELETE))) {
        SanityManager.THROWASSERT("invalid statement type " + stmtType);
      }
    }

    /*
     ** If we are disabled, we never fire
     */
    if (!isEnabled) {
      return false;
    }

    if (stmtType == StatementType.INSERT) {
      return (eventMask & TRIGGER_EVENT_INSERT) == eventMask;
    }
    if (stmtType == StatementType.DELETE) {
      return (eventMask & TRIGGER_EVENT_DELETE) == eventMask;
    }

    // this is a temporary restriction, but it may not be lifted
    // anytime soon.
    if (stmtType == StatementType.BULK_INSERT_REPLACE) {
      throw StandardException.newException(
          SQLState.LANG_NO_BULK_INSERT_REPLACE_WITH_TRIGGER,
          getTableDescriptor().getQualifiedName(),
          name);
    }

    // if update, only relevant if columns intersect
    return ((eventMask & TRIGGER_EVENT_UPDATE) == eventMask)
        && ConstraintDescriptor.doColumnsIntersect(modifiedCols, referencedCols);
  }
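  /*
   * The test (eventMask & TRIGGER_EVENT_INSERT) == eventMask above is a subset check: it is true
   * only when every bit set in eventMask is an INSERT bit, i.e. the trigger was defined solely
   * for INSERT. A small sketch follows; TriggerMaskExample and its bit values are illustrative
   * assumptions, not Derby's constants.
   */
  final class TriggerMaskExample {
    static final int EVENT_INSERT = 1;
    static final int EVENT_DELETE = 2;
    static final int EVENT_UPDATE = 4;

    // True only when every bit in eventMask is covered by eventBit.
    static boolean firesOnlyOn(int eventMask, int eventBit) {
      return (eventMask & eventBit) == eventMask;
    }

    // firesOnlyOn(EVENT_INSERT, EVENT_INSERT)                 -> true
    // firesOnlyOn(EVENT_INSERT | EVENT_UPDATE, EVENT_INSERT)  -> false
  }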
  /**
   * This method implements the / operator for BigDecimal/BigDecimal
   *
   * @param dividend The numerator
   * @param divisor The denominator
   * @param result The result of a previous call to this method, null if not called yet
   * @param scale The result scale, if < 0, calculate the scale according to the actual values'
   *     sizes
   * @return A SQLDecimal containing the result of the division
   * @exception StandardException Thrown on error
   */
  public NumberDataValue divide(
      NumberDataValue dividend, NumberDataValue divisor, NumberDataValue result, int scale)
      throws StandardException {
    if (result == null) {
      result = new SQLDecimal();
    }

    if (dividend.isNull() || divisor.isNull()) {
      result.setToNull();
      return result;
    }

    BigDecimal divisorBigDecimal = SQLDecimal.getBigDecimal(divisor);

    if (divisorBigDecimal.compareTo(ZERO) == 0) {
      throw StandardException.newException(SQLState.LANG_DIVIDE_BY_ZERO);
    }
    BigDecimal dividendBigDecimal = SQLDecimal.getBigDecimal(dividend);

    /*
     ** Set the result scale to be either the passed in scale, which was
     ** calculated at bind time to be max(ls+rp-rs+1, 4), where ls,rp,rs
     ** are static data types' sizes, which are predictable and stable
     ** (for the whole result set column, eg.); otherwise dynamically
     ** calculates the scale according to actual values.  Beetle 3901
     */
    result.setBigDecimal(
        dividendBigDecimal.divide(
            divisorBigDecimal,
            scale > -1
                ? scale
                : Math.max(
                    (dividendBigDecimal.scale() + SQLDecimal.getWholeDigits(divisorBigDecimal) + 1),
                    NumberDataValue.MIN_DECIMAL_DIVIDE_SCALE),
            BigDecimal.ROUND_DOWN));

    return result;
  }
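  /*
   * Worked sketch of the dynamic result-scale rule above: when no scale is supplied, the result
   * scale is max(dividend.scale() + wholeDigits(divisor) + 1, minimum divide scale). For example,
   * 1.00 / 3 gives scale max(2 + 1 + 1, 4) = 4 and result 0.3333. DecimalDivideExample and its
   * MIN_DIVIDE_SCALE constant of 4 are illustrative assumptions, not Derby's
   * NumberDataValue.MIN_DECIMAL_DIVIDE_SCALE.
   */
  final class DecimalDivideExample {
    private static final int MIN_DIVIDE_SCALE = 4;

    static java.math.BigDecimal divide(
        java.math.BigDecimal dividend, java.math.BigDecimal divisor) {
      if (divisor.signum() == 0) {
        throw new ArithmeticException("division by zero");
      }
      int wholeDigitsOfDivisor = divisor.precision() - divisor.scale();
      int resultScale = Math.max(dividend.scale() + wholeDigitsOfDivisor + 1, MIN_DIVIDE_SCALE);
      return dividend.divide(divisor, resultScale, java.math.RoundingMode.DOWN);
    }
  }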
  /**
   * write the page from this CachedPage object to disk.
   *
   * <p>
   *
   * @param identity identity (i.e. page number) of the page to write
   * @param syncMe does the write of this single page have to be sync'd?
   * @exception StandardException Standard exception policy.
   */
  private void writePage(PageKey identity, boolean syncMe) throws StandardException {

    // make subclass write the page format
    writeFormatId(identity);

    // let subclass have a chance to write any cached data to page data
    // array
    writePage(identity);

    // force WAL - and check to see if database is corrupt or is frozen.
    // last log Instant may be null if the page is being forced
    // to disk on a createPage (which violates the WAL protocol actually).
    // See FileContainer.newPage
    LogInstant flushLogTo = getLastLogInstant();
    dataFactory.flush(flushLogTo);

    if (flushLogTo != null) {
      clearLastLogInstant();
    }

    // find the container and file access object
    FileContainer myContainer = (FileContainer) containerCache.find(identity.getContainerId());

    if (myContainer == null) {
      StandardException nested =
          StandardException.newException(
              SQLState.DATA_CONTAINER_VANISHED, identity.getContainerId());
      throw dataFactory.markCorrupt(
          StandardException.newException(SQLState.FILE_WRITE_PAGE_EXCEPTION, nested, identity));
    }

    try {
      myContainer.writePage(identity.getPageNumber(), pageData, syncMe);

      //
      // Do some in memory unlogged bookkeeping tasks while we have
      // the container.
      //

      if (!isOverflowPage() && isDirty()) {

        // let the container know whether this page is a
        // not-filled, non-overflow page
        myContainer.trackUnfilledPage(identity.getPageNumber(), unfilled());

        // If this is not an overflow page, see if the page's row
        // count has changed since it came into the cache.
        //
        // If the page is not valid, the row count is 0. Otherwise,
        // count the non-deleted records on the page.
        //
        // Cannot call nonDeletedRecordCount because the page is
        // unlatched now, even though nobody is changing it.
        int currentRowCount = internalNonDeletedRecordCount();

        if (currentRowCount != initialRowCount) {
          myContainer.updateEstimatedRowCount(currentRowCount - initialRowCount);

          setContainerRowCount(myContainer.getEstimatedRowCount(0));

          initialRowCount = currentRowCount;
        }
      }

    } catch (IOException ioe) {
      // page cannot be written
      throw StandardException.newException(SQLState.FILE_WRITE_PAGE_EXCEPTION, ioe, identity);
    } finally {
      containerCache.release(myContainer);
      myContainer = null;
    }

    synchronized (this) {
      // change page state to not dirty after the successful write
      isDirty = false;
      preDirty = false;
    }
  }
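  /*
   * The ordering above is the write-ahead-log rule: the log covering a page's last change must
   * reach disk before the page image itself is written. A hedged standalone sketch of that
   * ordering follows; WalOrderingExample, LogSink, and PageSink are illustrative assumptions, not
   * Derby APIs.
   */
  final class WalOrderingExample {
    interface LogSink {
      void flushUpTo(long logSequenceNumber); // force log records up to this LSN to disk
    }

    interface PageSink {
      void writePage(long pageNumber, byte[] pageData); // write the page image to disk
    }

    static void writeDirtyPage(
        LogSink log, PageSink pages, long pageNumber, byte[] pageData, long lastLsnForPage) {
      log.flushUpTo(lastLsnForPage); // WAL: flush the log first ...
      pages.writePage(pageNumber, pageData); // ... then write the page image
    }
  }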
  /**
   * Find the container and then create the page in that container.
   *
   * <p>Since this creates a new page in the container, there is no need to read the page from
   * disk; the page just needs to be initialized in the cache.
   *
   * <p>
   *
   * @return new page, higher levels have already checked the page number is valid for an open.
   * @param key Which page is this?
   * @param createParameter details needed to create page like size, format id, ...
   * @exception StandardException Standard exception policy.
   * @see Cacheable#createIdentity
   */
  public Cacheable createIdentity(Object key, Object createParameter) throws StandardException {

    if (SanityManager.DEBUG) {
      SanityManager.ASSERT(key instanceof PageKey);
    }

    initialize();

    PageKey newIdentity = (PageKey) key;

    PageCreationArgs createArgs = (PageCreationArgs) createParameter;
    int formatId = createArgs.formatId;

    if (formatId == -1) {
      throw StandardException.newException(
          SQLState.DATA_UNKNOWN_PAGE_FORMAT_2,
          newIdentity,
          org.apache.derby.iapi.util.StringUtil.hexDump(pageData));
    }

    // createArgs.formatId contains the format id of the page to create.
    // If it is not the same as this instance's format id, instantiate the
    // real page object.
    if (formatId != getTypeFormatId()) {
      return (changeInstanceTo(formatId, newIdentity).createIdentity(key, createParameter));
    }

    // this is the correct page instance
    initializeHeaders(5);
    createPage(newIdentity, createArgs);

    fillInIdentity(newIdentity);

    initialRowCount = 0;

    /*
     * If we need to grow the container and the page has not been
     * preallocated, write the page before the log is written so that we
     * know if there is an I/O error - like running out of disk space. In
     * that case we don't write out the log record, because if we did, the
     * write might fail after the log had gone to disk and the database
     * might not be recoverable.
     *
     * WRITE_SYNC is used when we create the page without first
     *	preallocating it
     * WRITE_NO_SYNC is used when we are preallocating the page - there
     *	will be a SYNC call after all the pages are preallocated
     * 0 means creating a page that has already been preallocated.
     */
    int syncFlag = createArgs.syncFlag;
    if ((syncFlag & WRITE_SYNC) != 0 || (syncFlag & WRITE_NO_SYNC) != 0)
      writePage(newIdentity, (syncFlag & WRITE_SYNC) != 0);

    if (SanityManager.DEBUG) {
      if (SanityManager.DEBUG_ON(FileContainer.SPACE_TRACE)) {
        String sync =
            ((syncFlag & WRITE_SYNC) != 0)
                ? "Write_Sync"
                : (((syncFlag & WRITE_NO_SYNC) != 0) ? "Write_NO_Sync" : "No_write");

        SanityManager.DEBUG(
            FileContainer.SPACE_TRACE, "creating new page " + newIdentity + " with " + sync);
      }
    }

    return this;
  }