/**
 * @see org.apache.derby.iapi.sql.ResultSet#cleanUp
 *
 * @exception StandardException Thrown on error
 */
public void cleanUp() throws StandardException {
    if (rowHolder != null) {
        rowHolder.close();
    }

    if (rs != null) {
        try {
            rs.close();
        } catch (Throwable t) {
            throw StandardException.unexpectedUserException(t);
        }
        rs = null;
    }

    // Close the ps if it needs to be instantiated on each execution
    if (!vtiRS.isReuseablePs() && ps != null) {
        try {
            ps.close();
            ps = null;
        } catch (Throwable t) {
            throw StandardException.unexpectedUserException(t);
        }
    }

    super.cleanUp();
} // end of cleanUp
/**
 * Bind this LockTableNode. This means looking up the table, verifying it exists and getting
 * the heap conglomerate number.
 *
 * @exception StandardException Thrown on error
 */
public void bindStatement() throws StandardException {
    CompilerContext cc = getCompilerContext();
    ConglomerateDescriptor cd;
    DataDictionary dd = getDataDictionary();
    SchemaDescriptor sd;

    String schemaName = tableName.getSchemaName();
    sd = getSchemaDescriptor(schemaName);

    // Users are not allowed to lock system tables
    if (sd.isSystemSchema()) {
        throw StandardException.newException(
                SQLState.LANG_NO_USER_DDL_IN_SYSTEM_SCHEMA, statementToString(), schemaName);
    }

    lockTableDescriptor = getTableDescriptor(tableName.getTableName(), sd);

    if (lockTableDescriptor == null) {
        // Check if the reference is for a synonym.
        TableName synonymTab = resolveTableToSynonym(tableName);
        if (synonymTab == null)
            throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName);

        tableName = synonymTab;
        sd = getSchemaDescriptor(tableName.getSchemaName());

        lockTableDescriptor = getTableDescriptor(synonymTab.getTableName(), sd);
        if (lockTableDescriptor == null)
            throw StandardException.newException(SQLState.LANG_TABLE_NOT_FOUND, tableName);
    }

    // throw an exception if user is attempting to lock a temporary table
    if (lockTableDescriptor.getTableType() == TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {
        throw StandardException.newException(
                SQLState.LANG_NOT_ALLOWED_FOR_DECLARED_GLOBAL_TEMP_TABLE);
    }

    conglomerateNumber = lockTableDescriptor.getHeapConglomerateId();

    /* Get the base conglomerate descriptor */
    cd = lockTableDescriptor.getConglomerateDescriptor(conglomerateNumber);

    /* Statement is dependent on the TableDescriptor and ConglomerateDescriptor */
    cc.createDependency(lockTableDescriptor);
    cc.createDependency(cd);

    if (isPrivilegeCollectionRequired()) {
        // need SELECT privilege to perform lock table statement.
        cc.pushCurrentPrivType(Authorizer.SELECT_PRIV);
        cc.addRequiredTablePriv(lockTableDescriptor);
        cc.popCurrentPrivType();
    }
}
/**
 * @see Authorizer#setReadOnlyConnection
 *
 * @exception StandardException Thrown if the operation is not allowed
 */
public void setReadOnlyConnection(boolean on, boolean authorize) throws StandardException {
    if (authorize && !on) {
        if (connectionMustRemainReadOnly())
            throw StandardException.newException(SQLState.AUTH_CANNOT_SET_READ_WRITE);
    }
    readOnlyConnection = on;
}
/**
 * Return the requested values computed from the next row (if any) for which the restriction
 * evaluates to true.
 *
 * <p>Restriction and projection parameters are evaluated for each row.
 *
 * @exception StandardException thrown on failure.
 * @exception StandardException ResultSetNotOpen thrown if not yet open.
 * @return the next row in the result
 */
public ExecRow getNextRowCore() throws StandardException {
    if (isXplainOnlyMode())
        return null;

    ExecRow sourceRow = null;
    ExecRow retval = null;
    boolean restrict = false;
    DataValueDescriptor restrictBoolean;
    long beginRT = 0;

    beginTime = getCurrentTimeMillis();

    if (!isOpen) {
        throw StandardException.newException(SQLState.LANG_RESULT_SET_NOT_OPEN, "next");
    }

    /*
     * Loop until we get a row from the source that qualifies, or there are
     * no more rows to qualify. For each iteration fetch a row from the
     * source, and evaluate against the restriction if any.
     */
    ExecRow tmpRow = null;

    do {
        sourceRow = source.getNextRowCore();

        if (sourceRow != null) {
            this.rownumber++;
            tmpRow = getAllocatedRow();
            populateFromSourceRow(sourceRow, tmpRow);
            setCurrentRow(tmpRow);

            /* Evaluate any restrictions */
            restrictBoolean =
                (DataValueDescriptor)
                    ((restriction == null) ? null : restriction.invoke(activation));

            restrictionTime += getElapsedMillis(beginRT);

            // if the result is null, we make it false --
            // so the row won't be returned.
            restrict =
                (restrictBoolean == null)
                    || ((!restrictBoolean.isNull()) && restrictBoolean.getBoolean());

            if (!restrict) {
                rowsFiltered++;
                clearCurrentRow();
            }

            /* Update the run time statistics */
            rowsSeen++;
            retval = currentRow;
        } else {
            clearCurrentRow();
            retval = null;
        }
    } while ((sourceRow != null) && (!restrict));

    nextTime += getElapsedMillis(beginTime);
    return retval;
}
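/*
 * The loop above follows a common pull-model pattern: fetch from the source,
 * evaluate an optional restriction, and keep looping until a qualifying row is
 * found or the source is exhausted. Below is a minimal, Derby-independent sketch
 * of that pattern; the FilteringCursor name and its Iterator/Predicate based
 * shape are illustrative assumptions, not part of the result set class above.
 */
final class FilteringCursor<T> {
    private final java.util.Iterator<T> source;
    private final java.util.function.Predicate<T> restriction; // null means "no restriction"

    FilteringCursor(java.util.Iterator<T> source, java.util.function.Predicate<T> restriction) {
        this.source = source;
        this.restriction = restriction;
    }

    /** Returns the next qualifying row, or null when the source is exhausted. */
    T getNextRow() {
        while (source.hasNext()) {
            T row = source.next();
            // A null restriction behaves like "always true", mirroring the
            // (restriction == null) branch in getNextRowCore() above.
            if (restriction == null || restriction.test(row)) {
                return row;
            }
            // otherwise the row is filtered out and we keep looping
        }
        return null;
    }
}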
/**
 * Get a BigDecimal representing the value of a DataValueDescriptor
 *
 * @param value Non-null value to be converted
 * @return BigDecimal value
 * @throws StandardException Invalid conversion or out of range.
 */
public static BigDecimal getBigDecimal(DataValueDescriptor value) throws StandardException {
    if (SanityManager.DEBUG) {
        if (value.isNull())
            SanityManager.THROWASSERT("NULL value passed to SQLDecimal.getBigDecimal");
    }

    switch (value.typeToBigDecimal()) {
        case Types.DECIMAL:
            return (BigDecimal) value.getObject();
        case Types.CHAR:
            try {
                return new BigDecimal(value.getString().trim());
            } catch (NumberFormatException nfe) {
                throw StandardException.newException(
                        SQLState.LANG_FORMAT_EXCEPTION, "java.math.BigDecimal");
            }
        case Types.BIGINT:
            return BigDecimal.valueOf(value.getLong());
        default:
            if (SanityManager.DEBUG)
                SanityManager.THROWASSERT(
                        "invalid return from "
                            + value.getClass()
                            + ".typeToBigDecimal() "
                            + value.typeToBigDecimal());
            return null;
    }
}
/**
 * Mark the dependent as invalid (due to at least one of its dependencies being invalid).
 *
 * @param action The action causing the invalidation
 * @param lcc the language connection context
 * @exception StandardException thrown on error
 */
public void makeInvalid(int action, LanguageConnectionContext lcc) throws StandardException {
    // No sanity check for valid action. Trigger descriptors depend on
    // the trigger table, so there is a very large number of actions
    // that we would have to check against. This is hard to maintain,
    // so don't bother.

    switch (action) {
        // invalidate this trigger descriptor
        case DependencyManager.USER_RECOMPILE_REQUEST:
            DependencyManager dm = getDataDictionary().getDependencyManager();
            dm.invalidateFor(this, DependencyManager.PREPARED_STATEMENT_RELEASE, lcc);
            break;

        // When REVOKE_PRIVILEGE gets sent (this happens for privilege
        // types SELECT, UPDATE, DELETE, INSERT, REFERENCES, TRIGGER), we
        // make the TriggerDescriptor drop itself.
        // Ditto for revoking a role conferring a privilege.
        case DependencyManager.REVOKE_PRIVILEGE:
        case DependencyManager.REVOKE_ROLE:
            drop(lcc);
            lcc.getLastActivation()
                .addWarning(
                    StandardException.newWarning(
                        SQLState.LANG_TRIGGER_DROPPED, this.getObjectName()));
            break;

        default:
            break;
    }
}
/**
 * Read the page from disk into this CachedPage object.
 *
 * <p>A page is read in from disk into the pageData array of this object, and then put in the
 * cache.
 *
 * <p>
 *
 * @param myContainer the container to read the page from.
 * @param newIdentity identity (i.e. page number) of the page to read
 * @exception StandardException Standard exception policy.
 */
private void readPage(FileContainer myContainer, PageKey newIdentity) throws StandardException {
    int pagesize = myContainer.getPageSize();

    // we will reuse the existing page array if it is the same size; the
    // cache does support caching various sized pages.
    setPageArray(pagesize);

    for (int io_retry_count = 0; ; ) {
        try {
            myContainer.readPage(newIdentity.getPageNumber(), pageData);
            break;
        } catch (IOException ioe) {
            io_retry_count++;

            // Retrying read I/O's has been found to be successful sometimes
            // in completing the read without having to fail the calling
            // query, and in some cases avoiding complete db shutdown.
            // Some situations are:
            //     spurious interrupts being sent to thread by clients.
            //     unreliable hardware like a network mounted file system.
            //
            // The only option other than retrying is to fail the I/O
            // immediately and throwing an error, thus performance cost
            // not really a consideration.
            //
            // The retry max of 4 is arbitrary, but has been enough that
            // not many read I/O errors have been reported.
            if (io_retry_count > 4) {
                // page cannot be physically read

                StandardException se =
                    StandardException.newException(
                        SQLState.FILE_READ_PAGE_EXCEPTION,
                        ioe, newIdentity, new Integer(pagesize));

                if (dataFactory.getLogFactory().inRFR()) {
                    // if in rollforward recovery, it is possible that this
                    // page actually does not exist on the disk yet because
                    // the log record we are processing now is actually
                    // creating the page, we will recreate the page if we
                    // are in rollforward recovery, so just throw the
                    // exception.
                    throw se;
                } else {
                    if (SanityManager.DEBUG) {
                        // by shutting down system in debug mode, maybe
                        // we can catch root cause of the interrupt.
                        throw dataFactory.markCorrupt(se);
                    } else {
                        // No need to shut down runtime database on read
                        // error in delivered system, throwing exception
                        // should be enough. Thrown exception has nested
                        // IO exception which is root cause of error.
                        throw se;
                    }
                }
            }
        }
    }
}
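/*
 * The read path above retries a failed page read a bounded number of times
 * before giving up and escalating the error. A minimal, self-contained sketch
 * of that bounded-retry idiom follows; RetryingReader, BlockSource and
 * MAX_RETRIES are illustrative assumptions, not Derby APIs.
 */
final class RetryingReader {
    private static final int MAX_RETRIES = 4; // arbitrary cap, as in readPage() above

    interface BlockSource {
        void readBlock(long blockNumber, byte[] buffer) throws java.io.IOException;
    }

    static void readWithRetry(BlockSource source, long blockNumber, byte[] buffer)
        throws java.io.IOException {
        for (int attempt = 0; ; ) {
            try {
                source.readBlock(blockNumber, buffer);
                return; // success, corresponds to the break in readPage()
            } catch (java.io.IOException ioe) {
                attempt++;
                if (attempt > MAX_RETRIES) {
                    // out of retries: surface the original failure to the caller
                    throw ioe;
                }
                // otherwise loop and attempt the same read again
            }
        }
    }
}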
/**
 * @see Authorizer#refresh
 *
 * @exception StandardException Thrown if the operation is not allowed
 */
public void refresh() throws StandardException {
    getUserAccessLevel();
    if (!readOnlyConnection)
        readOnlyConnection = connectionMustRemainReadOnly();

    // Is a connection allowed.
    if (userAccessLevel == NO_ACCESS)
        throw StandardException.newException(SQLState.AUTH_DATABASE_CONNECTION_REFUSED);
}
/**
 * Notify all listeners that a rollback is about to occur. If a listener throws an exception
 * then no further listeners will be notified and a StandardException with shutdown database(?)
 * severity will be thrown.
 *
 * @throws StandardException
 */
public void preRollbackNotify() throws StandardException {
    if (listeners.isEmpty())
        return;

    for (Iterator i = listeners.iterator(); i.hasNext(); ) {
        TransactionListener listener = (TransactionListener) i.next();

        try {
            listener.preRollback();
            i.remove();
        } catch (StandardException se) {
            // TODO: Define behaviour on exception during rollback.
            // Placeholder: exceptions below TRANSACTION_SEVERITY are not
            // escalated here; for now every exception is simply re-thrown.
            if (se.getSeverity() < ExceptionSeverity.TRANSACTION_SEVERITY) {
            }
            throw se;
        }
    }
}
/**
 * @see TypeCompiler#resolveArithmeticOperation
 *
 * @exception StandardException Thrown on error
 */
public DataTypeDescriptor resolveArithmeticOperation(
        DataTypeDescriptor leftType, DataTypeDescriptor rightType, String operator)
    throws StandardException {
    throw StandardException.newException(
            SQLState.LANG_BINARY_OPERATOR_NOT_SUPPORTED,
            operator,
            leftType.getTypeId().getSQLTypeName(),
            rightType.getTypeId().getSQLTypeName());
}
/**
 * Open a b-tree compress scan.
 *
 * <p>B2I does not support a compress scan.
 *
 * <p>
 *
 * @see Conglomerate#defragmentConglomerate
 * @exception StandardException Standard exception policy.
 */
public ScanManager defragmentConglomerate(
        TransactionManager xact_manager,
        Transaction rawtran,
        boolean hold,
        int open_mode,
        int lock_level,
        LockingPolicy locking_policy,
        int isolation_level)
    throws StandardException {
    throw StandardException.newException(SQLState.BTREE_UNIMPLEMENTED_FEATURE);
}
/**
 * Bind this expression. This means binding the sub-expressions, as well as figuring out what
 * the return type is for this expression.
 *
 * @param fromList The FROM list for the query this expression is in, for binding columns.
 * @param subqueryList The subquery list being built as we find SubqueryNodes
 * @param aggregateVector The aggregate vector being built as we find AggregateNodes
 * @return The new top of the expression tree.
 * @exception StandardException Thrown on error
 */
public ValueNode bindExpression(
        FromList fromList, SubqueryList subqueryList, List aggregateVector)
    throws StandardException {
    // method invocations are not allowed in ADD TABLE clauses.
    // And neither are field references.
    javaNode.checkReliability(this);

    /* Bind the expression under us */
    javaNode = javaNode.bindExpression(fromList, subqueryList, aggregateVector);

    if (javaNode instanceof StaticMethodCallNode) {
        AggregateNode agg = ((StaticMethodCallNode) javaNode).getResolvedAggregate();

        if (agg != null) {
            return agg.bindExpression(fromList, subqueryList, aggregateVector);
        }
    }

    DataTypeDescriptor dts = javaNode.getDataType();
    if (dts == null) {
        throw StandardException.newException(
                SQLState.LANG_NO_CORRESPONDING_S_Q_L_TYPE, javaNode.getJavaTypeName());
    }

    TypeDescriptor catalogType = dts.getCatalogType();

    if (catalogType.isRowMultiSet()
            || (catalogType.getTypeName().equals("java.sql.ResultSet"))) {
        throw StandardException.newException(SQLState.LANG_TABLE_FUNCTION_NOT_ALLOWED);
    }

    setType(dts);

    // For functions returning string types we should set the collation to match the
    // java method's schema DERBY-2972. This is propagated from
    // RoutineAliasInfo to javaNode.
    if (dts.getTypeId().isStringTypeId()) {
        this.setCollationInfo(
            javaNode.getCollationType(), StringDataValue.COLLATION_DERIVATION_IMPLICIT);
    }

    return this;
}
/**
 * Convert this page to the requested type, as defined by the input format id.
 *
 * <p>The current cache entry has a different format id than the requested type, so change it.
 * This object is instantiated to the wrong subtype of CachedPage; this routine will create an
 * object with the correct subtype, and transfer all pertinent information from this to the new
 * correct object.
 *
 * <p>
 *
 * @return The new object created with the input fid and transferred info.
 * @param fid The format id of the new page.
 * @param newIdentity The key of the new page.
 * @exception StandardException Standard exception policy.
 */
private CachedPage changeInstanceTo(int fid, PageKey newIdentity) throws StandardException {
    CachedPage realPage;

    try {
        realPage = (CachedPage) Monitor.newInstanceFromIdentifier(fid);
    } catch (StandardException se) {
        if (se.getSeverity() > ExceptionSeverity.STATEMENT_SEVERITY) {
            throw se;
        } else {
            throw StandardException.newException(
                    SQLState.DATA_UNKNOWN_PAGE_FORMAT_2,
                    newIdentity,
                    org.apache.derby.iapi.util.StringUtil.hexDump(pageData));
        }
    }

    realPage.setFactory(dataFactory);

    // avoid creating the data buffer if possible, transfer it to the new
    // page if this is the first time the page buffer is used, then
    // createPage will create the page array with the correct page size
    if (this.pageData != null) {
        realPage.alreadyReadPage = true;
        realPage.usePageBuffer(this.pageData);
    }

    // RESOLVE (12/15/06) - the following code is commented out, but
    // not sure why.

    // this page should not be used any more, null out all its content and
    // wait for GC to clean it up

    // destroyPage();// let this subtype have a chance to get rid of stuff
    // this.pageData = null;  // this instance no longer own the data array
    // this.pageCache = null;
    // this.dataFactory = null;
    // this.containerCache = null;

    return realPage;
}
public void finish() throws StandardException {
    if ((ps != null) && !vtiRS.isReuseablePs()) {
        try {
            ps.close();
            ps = null;
        } catch (Throwable t) {
            throw StandardException.unexpectedUserException(t);
        }
    }
    super.finish();
} // end of finish
public RowLocation newRowLocationTemplate() throws StandardException {
    if (open_conglom.isClosed()) {
        if (open_conglom.getHold()) {
            open_conglom.reopen();
        } else {
            throw (StandardException.newException(
                    SQLState.HEAP_IS_CLOSED, open_conglom.getConglomerate().getId()));
        }
    }

    return new HeapRowLocation();
}
/** @exception StandardException thrown on failure to convert */
public short getShort() throws StandardException {
    if (isNull())
        return (short) 0;

    try {
        long lv = getLong();

        if ((lv >= Short.MIN_VALUE) && (lv <= Short.MAX_VALUE))
            return (short) lv;
    } catch (StandardException se) {
        // value does not even fit in a long; fall through and report it
        // as out of range for SMALLINT below.
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "SMALLINT");
}
/** @exception StandardException thrown on failure to convert */
public int getInt() throws StandardException {
    if (isNull())
        return 0;

    try {
        long lv = getLong();

        if ((lv >= Integer.MIN_VALUE) && (lv <= Integer.MAX_VALUE))
            return (int) lv;
    } catch (StandardException se) {
        // value does not even fit in a long; fall through and report it
        // as out of range for INTEGER below.
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "INTEGER");
}
/**
 * Set the precision/scale of the value to the desired values. Used when CASTing. Ideally we'd
 * recycle normalize(), but the use is different.
 *
 * @param desiredPrecision the desired precision -- IGNORE_PRECISION if it is to be ignored.
 * @param desiredScale the desired scale
 * @param errorOnTrunc throw error on truncation (ignored -- always thrown if we truncate the
 *     non-decimal part of the value)
 * @exception StandardException Thrown on non-zero truncation if errorOnTrunc is true
 */
public void setWidth(int desiredPrecision, int desiredScale, boolean errorOnTrunc)
    throws StandardException {
    if (isNull())
        return;

    if (desiredPrecision != IGNORE_PRECISION
            && ((desiredPrecision - desiredScale) < SQLDecimal.getWholeDigits(getBigDecimal()))) {
        throw StandardException.newException(
                SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE,
                ("DECIMAL/NUMERIC(" + desiredPrecision + "," + desiredScale + ")"));
    }

    value = value.setScale(desiredScale, BigDecimal.ROUND_DOWN);
    rawData = null;
}
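/*
 * A Derby-independent sketch of the precision/scale check above: the number of
 * whole digits must fit in (precision - scale), and the scale is then applied
 * with rounding mode DOWN so only fractional digits are truncated. The class
 * and its wholeDigits() helper are hypothetical illustrations, not Derby's
 * SQLDecimal.getWholeDigits().
 */
final class DecimalWidth {
    /** Number of digits to the left of the decimal point (0 for values below one). */
    static int wholeDigits(java.math.BigDecimal v) {
        java.math.BigDecimal abs = v.abs();
        // precision() - scale() counts the integer digits once the value is >= 1
        return abs.compareTo(java.math.BigDecimal.ONE) < 0 ? 0 : abs.precision() - abs.scale();
    }

    static java.math.BigDecimal cast(java.math.BigDecimal v, int precision, int scale) {
        if ((precision - scale) < wholeDigits(v)) {
            throw new ArithmeticException(
                "value out of range for DECIMAL(" + precision + "," + scale + ")");
        }
        // Truncate (never round up) any extra fractional digits.
        return v.setScale(scale, java.math.RoundingMode.DOWN);
    }

    // e.g. cast(new java.math.BigDecimal("12.345"), 4, 2) -> 12.34, while
    //      cast(new java.math.BigDecimal("123.4"), 4, 2) throws, because three
    //      whole digits do not fit in precision 4 minus scale 2.
}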
/** @exception StandardException thrown on failure to convert */
public byte getByte() throws StandardException {
    if (isNull())
        return (byte) 0;

    try {
        long lv = getLong();

        if ((lv >= Byte.MIN_VALUE) && (lv <= Byte.MAX_VALUE))
            return (byte) lv;
    } catch (StandardException se) {
        // value does not even fit in a long; fall through and report it
        // as out of range for TINYINT below.
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "TINYINT");
}
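/*
 * getShort(), getInt() and getByte() above all share one pattern: convert to
 * long first, return the narrowed value only if it fits the target range, and
 * otherwise report an out-of-range error for the target SQL type. A minimal
 * sketch of that pattern, independent of Derby's DataValueDescriptor types;
 * the class name and the ArithmeticException are illustrative choices.
 */
final class NarrowingConversions {
    static short toShort(long lv) {
        if (lv >= Short.MIN_VALUE && lv <= Short.MAX_VALUE) {
            return (short) lv; // fits: safe to narrow
        }
        throw new ArithmeticException("value out of range for SMALLINT: " + lv);
    }

    static int toInt(long lv) {
        if (lv >= Integer.MIN_VALUE && lv <= Integer.MAX_VALUE) {
            return (int) lv;
        }
        throw new ArithmeticException("value out of range for INTEGER: " + lv);
    }

    static byte toByte(long lv) {
        if (lv >= Byte.MIN_VALUE && lv <= Byte.MAX_VALUE) {
            return (byte) lv;
        }
        throw new ArithmeticException("value out of range for TINYINT: " + lv);
    }
}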
/**
 * Notify all listeners that a commit is about to occur. If a listener throws an exception then
 * no further listeners will be notified and a StandardException with rollback severity will be
 * thrown.
 *
 * @throws StandardException
 */
public void preCommitNotify() throws StandardException {
    if (listeners.isEmpty())
        return;

    for (Iterator i = listeners.iterator(); i.hasNext(); ) {
        TransactionListener listener = (TransactionListener) i.next();

        try {
            if (listener.preCommit())
                i.remove();
        } catch (StandardException se) {
            // This catches any exceptions that have Transaction severity
            // or less (e.g. Statement exception). If we received any lesser
            // error then we abort the transaction anyway.
            if (se.getSeverity() < ExceptionSeverity.TRANSACTION_SEVERITY) {
                throw StandardException.newException(SQLState.XACT_COMMIT_EXCEPTION, se);
            }

            throw se;
        }
    }
}
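/*
 * A simplified sketch of the pre-commit notification contract above: each
 * listener is asked in turn, a listener returning true is deregistered, and the
 * first failure stops the notification pass and is wrapped for the caller. The
 * TxListener interface and CommitFailedException are illustrative stand-ins for
 * Derby's TransactionListener and StandardException, not real Derby types.
 */
final class ListenerNotifier {
    interface TxListener {
        /** @return true if the listener no longer needs pre-commit notifications. */
        boolean preCommit() throws Exception;
    }

    static final class CommitFailedException extends Exception {
        CommitFailedException(String msg, Throwable cause) {
            super(msg, cause);
        }
    }

    static void preCommitNotify(java.util.List<TxListener> listeners)
        throws CommitFailedException {
        java.util.Iterator<TxListener> i = listeners.iterator();
        while (i.hasNext()) {
            TxListener listener = i.next();
            try {
                if (listener.preCommit()) {
                    i.remove(); // one-shot listener: drop it after a successful call
                }
            } catch (Exception e) {
                // Stop notifying further listeners; the caller is expected to
                // roll back, mirroring the rollback-severity wrapping above.
                throw new CommitFailedException("pre-commit listener failed", e);
            }
        }
    }
}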
/**
 * This is the guts of the Execution-time logic for DROP ROLE.
 *
 * @see org.apache.derby.iapi.sql.execute.ConstantAction#executeConstantAction
 */
public void executeConstantAction(Activation activation) throws StandardException {
    LanguageConnectionContext lcc = activation.getLanguageConnectionContext();
    DataDictionary dd = lcc.getDataDictionary();
    TransactionController tc = lcc.getTransactionExecute();

    /*
    ** Inform the data dictionary that we are about to write to it.
    ** There are several calls to data dictionary "get" methods here
    ** that might be done in "read" mode in the data dictionary, but
    ** it seemed safer to do this whole operation in "write" mode.
    **
    ** We tell the data dictionary we're done writing at the end of
    ** the transaction.
    */
    dd.startWriting(lcc);

    RoleGrantDescriptor rdDef = dd.getRoleDefinitionDescriptor(roleName);

    if (rdDef == null) {
        throw StandardException.newException(SQLState.ROLE_INVALID_SPECIFICATION, roleName);
    }

    // When a role is dropped, for every role in its grantee closure, we
    // call the REVOKE_ROLE action. It is used to invalidate dependent
    // objects (constraints, triggers and views). Note that until
    // DERBY-1632 is fixed, we risk dropping objects not really dependent
    // on this role, but on some other role just because it inherits from
    // this one. See also RevokeRoleConstantAction.
    RoleClosureIterator rci =
        dd.createRoleClosureIterator(activation.getTransactionController(), roleName, false);

    String role;
    while ((role = rci.next()) != null) {
        RoleGrantDescriptor r = dd.getRoleDefinitionDescriptor(role);
        dd.getDependencyManager().invalidateFor(r, DependencyManager.REVOKE_ROLE, lcc);
    }

    rdDef.drop(lcc);

    /*
     * We dropped a role, now drop all dependents:
     * - role grants to this role
     * - grants of this role to other roles or users
     * - privilege grants to this role
     */
    dd.dropRoleGrantsByGrantee(roleName, tc);
    dd.dropRoleGrantsByName(roleName, tc);
    dd.dropAllPermsByGrantee(roleName, tc);
}
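/*
 * "Grantee closure" above means the set of roles reachable from the dropped
 * role by following role grants transitively. A minimal sketch of computing
 * such a closure with a breadth-first walk over an in-memory grant map; the
 * Map-based representation and method names are illustrative assumptions, not
 * Derby's catalog format or RoleClosureIterator.
 */
final class RoleClosureSketch {
    /** @param grants maps a role to the roles it has been granted to (its direct grantees) */
    static java.util.Set<String> granteeClosure(
            java.util.Map<String, java.util.List<String>> grants, String root) {
        java.util.Set<String> seen = new java.util.LinkedHashSet<String>();
        java.util.ArrayDeque<String> queue = new java.util.ArrayDeque<String>();
        queue.add(root);
        while (!queue.isEmpty()) {
            String role = queue.remove();
            if (!seen.add(role)) {
                continue; // already visited, avoid cycles
            }
            for (String grantee : grants.getOrDefault(role, java.util.Collections.emptyList())) {
                queue.add(grantee);
            }
        }
        return seen; // includes the starting role itself
    }
}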
/**************************************************************************
 * Public Methods of This class:
 **************************************************************************
 */
public int insert(DataValueDescriptor[] row) throws StandardException {
    if (open_conglom.isClosed()) {
        if (open_conglom.getHold()) {
            open_conglom.reopen();
        } else {
            throw (StandardException.newException(
                    SQLState.HEAP_IS_CLOSED, open_conglom.getConglomerate().getId()));
        }
    }

    doInsert(row);

    return (0);
}
/**
 * If no schema name specified for global temporary table, SESSION is the implicit schema.
 * Otherwise, make sure the specified schema name for global temporary table is SESSION.
 *
 * @param objectName The name of the new object being declared (ie temporary table)
 */
private Object tempTableSchemaNameCheck(Object objectName) throws StandardException {
    TableName tempTableName = (TableName) objectName;

    if (tempTableName != null) {
        if (tempTableName.getSchemaName() == null)
            // If no schema specified, SESSION is the implicit schema.
            tempTableName.setSchemaName(
                SchemaDescriptor.STD_DECLARED_GLOBAL_TEMPORARY_TABLES_SCHEMA_NAME);
        else if (!(isSessionSchema(tempTableName.getSchemaName())))
            throw StandardException.newException(
                    SQLState.LANG_DECLARED_GLOBAL_TEMP_TABLE_ONLY_IN_SESSION_SCHEMA);
    }

    return (tempTableName);
}
/**
 * Set the width of the value to the desired width. Used when CASTing. Ideally we'd recycle
 * normalize(), but the behavior is different (we issue a warning instead of an error, and we
 * aren't interested in nullability).
 *
 * @param desiredWidth the desired length
 * @param desiredScale the desired scale (ignored)
 * @param errorOnTrunc throw error on truncation
 * @exception StandardException Thrown on non-zero truncation if errorOnTrunc is true
 */
public void setWidth(
        int desiredWidth,
        int desiredScale, // Ignored
        boolean errorOnTrunc)
    throws StandardException {
    /*
    ** If the input is NULL, nothing to do.
    */
    if (getValue() == null) {
        return;
    }

    int sourceWidth = dataValue.length;

    /*
    ** If the input is shorter than the desired type,
    ** then pad with blanks to the right length.
    */
    if (sourceWidth < desiredWidth) {
        byte[] actualData = new byte[desiredWidth];
        System.arraycopy(dataValue, 0, actualData, 0, dataValue.length);
        java.util.Arrays.fill(actualData, dataValue.length, actualData.length, SQLBinary.PAD);
        dataValue = actualData;
    }
    /*
    ** Truncation?
    */
    else if (sourceWidth > desiredWidth) {
        if (errorOnTrunc) {
            // error if truncating non-pad characters.
            for (int i = desiredWidth; i < dataValue.length; i++) {
                if (dataValue[i] != SQLBinary.PAD)
                    throw StandardException.newException(
                            SQLState.LANG_STRING_TRUNCATION,
                            getTypeName(),
                            StringUtil.formatForPrint(this.toString()),
                            String.valueOf(desiredWidth));
            }
        }

        /*
        ** Truncate to the desired width.
        */
        truncate(sourceWidth, desiredWidth, !errorOnTrunc);
    }
}
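/*
 * A standalone sketch of the pad-or-truncate logic above for fixed-width binary
 * values: shorter inputs are right-padded with a pad byte, and longer inputs
 * may only lose trailing pad bytes when truncation errors are requested. The
 * class, the PAD value and the exception type are illustrative assumptions;
 * the method above uses Derby's SQLBinary.PAD and StandardException.
 */
final class BinaryWidth {
    private static final byte PAD = 0x20; // assumed pad byte for this sketch

    static byte[] setWidth(byte[] data, int desiredWidth, boolean errorOnTrunc) {
        if (data.length < desiredWidth) {
            // Pad on the right up to the desired width.
            byte[] padded = java.util.Arrays.copyOf(data, desiredWidth);
            java.util.Arrays.fill(padded, data.length, desiredWidth, PAD);
            return padded;
        }
        if (data.length > desiredWidth) {
            if (errorOnTrunc) {
                // Only trailing pad bytes may be dropped when errors are requested.
                for (int i = desiredWidth; i < data.length; i++) {
                    if (data[i] != PAD) {
                        throw new IllegalArgumentException(
                            "truncation of non-pad byte at position " + i);
                    }
                }
            }
            return java.util.Arrays.copyOf(data, desiredWidth);
        }
        return data; // already the desired width
    }
}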
public void insertAndFetchLocation(DataValueDescriptor[] row, RowLocation templateRowLocation)
    throws StandardException {
    if (open_conglom.isClosed()) {
        if (open_conglom.getHold()) {
            open_conglom.reopen();
        } else {
            throw (StandardException.newException(
                    SQLState.HEAP_IS_CLOSED, open_conglom.getConglomerate().getId()));
        }
    }

    RecordHandle rh = doInsert(row);

    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(templateRowLocation instanceof HeapRowLocation);
    }

    HeapRowLocation hrl = (HeapRowLocation) templateRowLocation;
    hrl.setFrom(rh);
}
/**
 * Prepare to mark the dependent as invalid (due to at least one of its dependencies being
 * invalid).
 *
 * @param action The action causing the invalidation
 * @param p the provider
 * @param lcc the language connection context
 * @exception StandardException thrown if unable to make it invalid
 */
public void prepareToInvalidate(Provider p, int action, LanguageConnectionContext lcc)
    throws StandardException {
    switch (action) {
        /*
        ** We are only dependent on the underlying table, and our spses and
        ** privileges on various objects. (we should be dropped before our
        ** table is dropped. Also, we should be dropped before revoke
        ** RESTRICT privilege is issued otherwise revoke RESTRICT will
        ** throw an exception).
        ** Currently, in Derby, an execute routine privilege can be revoked
        ** only if there are no dependents on that privilege. When revoke
        ** execute RESTRICT is executed, all the dependents will receive
        ** REVOKE_PRIVILEGE_RESTRICT and they should throw an exception.
        ** We handle this for TriggerDescriptor by throwing an exception
        ** below. For all the other types of revoke privileges, for
        ** instance, SELECT, UPDATE, DELETE, INSERT, REFERENCES,
        ** TRIGGER, we don't do anything here and later in makeInvalid, we
        ** make the TriggerDescriptor drop itself.
        */
        case DependencyManager.DROP_TABLE:
        case DependencyManager.DROP_SYNONYM:
        case DependencyManager.DROP_SPS:
        case DependencyManager.RENAME:
        case DependencyManager.REVOKE_PRIVILEGE_RESTRICT:
            DependencyManager dm = getDataDictionary().getDependencyManager();

            throw StandardException.newException(
                    SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
                    dm.getActionString(action),
                    p.getObjectName(),
                    "TRIGGER",
                    name);

        /*
        ** The trigger descriptor depends on the trigger table.
        ** This means that we get called whenever anything happens
        ** to the trigger table. There are so many cases where this
        ** can happen that it doesn't make sense to have an assertion
        ** here to check whether the action was expected (it makes
        ** the code hard to maintain, and creates a big switch statement).
        */
        default:
            break;
    }
}
public UpdateFieldOperation(
        RawTransaction t,
        BasePage page,
        int slot,
        int recordId,
        int fieldId,
        Object column,
        LogicalUndo undo)
    throws StandardException {
    super(page, undo, recordId);

    this.doMeSlot = slot;
    this.fieldId = fieldId;

    try {
        writeOptionalDataToBuffer(t, column);
    } catch (IOException ioe) {
        throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
    }
}
/**
 * Copy columns from srcrow into destrow, or insert ROW_NUMBER.
 *
 * <p><b>FIXME</b> This is temporary. Window function treatment needs to be generalized to work
 * for other window functions.
 *
 * @exception StandardException thrown on failure to open
 */
public void populateFromSourceRow(ExecRow srcrow, ExecRow destrow) throws StandardException {
    int srcindex = 1;

    try {
        DataValueDescriptor[] columns = destrow.getRowArray();

        for (int index = 0; index < columns.length; index++) {
            if (referencedColumns != null && !referencedColumns.get(index)) {
                columns[index].setValue((long) this.rownumber);
            } else {
                destrow.setColumn(index + 1, srcrow.getColumn(srcindex));
                srcindex++;
            }
        }
    } catch (StandardException se) {
        throw se;
    } catch (Throwable t) {
        throw StandardException.unexpectedUserException(t);
    }
}
private static StandardException externalRoutineException(int operation, int sqlAllowed) {
    String sqlState;

    if (sqlAllowed == RoutineAliasInfo.READS_SQL_DATA)
        sqlState = SQLState.EXTERNAL_ROUTINE_NO_MODIFIES_SQL;
    else if (sqlAllowed == RoutineAliasInfo.CONTAINS_SQL) {
        switch (operation) {
            case Authorizer.SQL_WRITE_OP:
            case Authorizer.PROPERTY_WRITE_OP:
            case Authorizer.JAR_WRITE_OP:
            case Authorizer.SQL_DDL_OP:
                sqlState = SQLState.EXTERNAL_ROUTINE_NO_MODIFIES_SQL;
                break;
            default:
                sqlState = SQLState.EXTERNAL_ROUTINE_NO_READS_SQL;
                break;
        }
    } else
        sqlState = SQLState.EXTERNAL_ROUTINE_NO_SQL;

    return StandardException.newException(sqlState);
}
/** @exception StandardException thrown on failure to convert */
public long getLong() throws StandardException {
    BigDecimal localValue = getBigDecimal();
    if (localValue == null)
        return (long) 0;

    // Valid range for long is
    //     greater than Long.MIN_VALUE - 1
    // *and*
    //     less than Long.MAX_VALUE + 1
    //
    // This ensures that DECIMAL values with an integral value
    // equal to the Long.MIN/MAX_VALUE round correctly to those values.
    // e.g. 9223372036854775807.1 converts to 9223372036854775807
    // this matches DB2 UDB behaviour
    if ((localValue.compareTo(MINLONG_MINUS_ONE) == 1)
            && (localValue.compareTo(MAXLONG_PLUS_ONE) == -1)) {
        return localValue.longValue();
    }

    throw StandardException.newException(SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE, "BIGINT");
}
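/*
 * The range check above uses exclusive bounds (Long.MIN_VALUE - 1 and
 * Long.MAX_VALUE + 1) so that a DECIMAL such as 9223372036854775807.1 still
 * converts to Long.MAX_VALUE once its fraction is discarded. A self-contained
 * sketch of the same check, with the boundary constants spelled out; the class
 * and constant names are illustrative, not Derby's MINLONG_MINUS_ONE /
 * MAXLONG_PLUS_ONE fields.
 */
final class DecimalToLong {
    private static final java.math.BigDecimal MIN_LONG_MINUS_ONE =
        java.math.BigDecimal.valueOf(Long.MIN_VALUE).subtract(java.math.BigDecimal.ONE);
    private static final java.math.BigDecimal MAX_LONG_PLUS_ONE =
        java.math.BigDecimal.valueOf(Long.MAX_VALUE).add(java.math.BigDecimal.ONE);

    static long toLong(java.math.BigDecimal v) {
        // Strictly between (MIN - 1) and (MAX + 1): longValue() drops the
        // fractional part, so 9223372036854775807.1 -> Long.MAX_VALUE.
        if (v.compareTo(MIN_LONG_MINUS_ONE) > 0 && v.compareTo(MAX_LONG_PLUS_ONE) < 0) {
            return v.longValue();
        }
        throw new ArithmeticException("value out of range for BIGINT: " + v);
    }
}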