/**
 * Retrieves the remarks, if any, recorded against the specified column. <p>
 *
 * @param i zero-based column index
 * @return the remarks recorded against the specified column
 */
String getColRemarks(int i) {

    String key;

    if (table.getTableType() != TableBase.INFO_SCHEMA_TABLE) {
        return table.getColumn(i).getName().comment;
    }

    key = getName() + "_" + getColName(i);

    return BundleHandler.getString(hnd_column_remarks, key);
}
/** Called from outside after the complete end of defrag. */
void updateTableIndexRoots() {

    HsqlArrayList allTables = database.schemaManager.getAllTables();

    for (int i = 0, size = allTables.size(); i < size; i++) {
        Table t = (Table) allTables.get(i);

        if (t.getTableType() == TableBase.CACHED_TABLE) {
            int[] rootsArray = rootsList[i];

            t.setIndexRoots(rootsArray);
        }
    }
}
// int : row size (0 if no more rows)
// BinaryServerRowInput : row (column values)
protected boolean readRow(Table t) throws IOException, HsqlException {

    boolean more = readRow(rowIn, 0);

    if (!more) {
        return false;
    }

    Object[] data = rowIn.readData(t.getColumnTypes());

    t.insertFromScript(data);

    return true;
}
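// Illustrative sketch only, not part of the original source. The comment above
// describes a framing where each record starts with an int row size and a size
// of 0 terminates the stream. Assuming a plain java.io.DataInputStream over the
// same bytes, and assuming the size prefix counts only the bytes that follow
// (an assumption, not confirmed by the source), a reader loop could look like:
private static java.util.List<byte[]> readAllRowsSketch(java.io.DataInputStream in)
        throws java.io.IOException {

    java.util.List<byte[]> rows = new java.util.ArrayList<byte[]>();

    while (true) {
        int size = in.readInt();        // row size prefix

        if (size == 0) {
            break;                      // 0 means no more rows
        }

        byte[] columnValues = new byte[size];

        in.readFully(columnValues);     // raw column values for one row
        rows.add(columnValues);
    }

    return rows;
}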
/**
 * Moves the data from an old store to the new one after changes to the table.
 * The colindex argument is the index of the column that was added or removed.
 * The adjust argument is {-1 | 0 | +1}.
 */
public final void moveData(Session session, PersistentStore other, int colindex, int adjust) {

    Object colvalue = null;
    Type   oldtype  = null;
    Type   newtype  = null;

    if (adjust >= 0 && colindex != -1) {
        ColumnSchema column = ((Table) table).getColumn(colindex);

        colvalue = column.getDefaultValue(session);

        if (adjust == 0) {
            oldtype = ((Table) other.getTable()).getColumnTypes()[colindex];
            newtype = ((Table) table).getColumnTypes()[colindex];
        }
    }

    RowIterator it    = other.rowIterator();
    Table       table = (Table) this.table;

    try {
        while (it.hasNext()) {
            Row      row  = it.getNextRow();
            Object[] o    = row.getData();
            Object[] data = table.getEmptyRowData();

            if (adjust == 0 && colindex != -1) {
                colvalue = newtype.convertToType(session, o[colindex], oldtype);
            }

            ArrayUtil.copyAdjustArray(o, data, colvalue, colindex, adjust);
            table.systemSetIdentityColumn(session, data);
            table.enforceTypeLimits(session, data);
            table.enforceRowConstraints(session, data);

            // get object without RowAction
            Row newrow = (Row) getNewCachedObject(null, data);

            indexRow(null, newrow);
        }
    } catch (java.lang.OutOfMemoryError e) {
        throw Error.error(ErrorCode.OUT_OF_MEMORY);
    }
}
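// Illustrative sketch only, not part of the original source. It mimics the
// adjust semantics used by moveData above with a hypothetical helper named
// copyAdjustSketch: adjust == +1 inserts a slot (filled with colvalue) at
// colindex, adjust == -1 drops the value at colindex, and adjust == 0 copies
// the row unchanged (moveData additionally converts the value at colindex).
private static Object[] copyAdjustSketch(Object[] source, Object colvalue,
                                         int colindex, int adjust) {

    Object[] target = new Object[source.length + adjust];

    if (adjust == 0) {
        System.arraycopy(source, 0, target, 0, source.length);
    } else if (adjust > 0) {
        // column added: shift the tail right and fill the new slot
        System.arraycopy(source, 0, target, 0, colindex);
        System.arraycopy(source, colindex, target, colindex + 1,
                         source.length - colindex);

        target[colindex] = colvalue;
    } else {
        // column removed: skip the dropped slot
        System.arraycopy(source, 0, target, 0, colindex);
        System.arraycopy(source, colindex + 1, target, colindex,
                         source.length - colindex - 1);
    }

    return target;
}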
public RowStoreAVLMemory(PersistentStoreCollection manager, Table table) {

    this.manager      = manager;
    this.table        = table;
    this.indexList    = table.getIndexList();
    this.accessorList = new CachedObject[indexList.length];
    rowIdMap          = new IntKeyHashMapConcurrent();

    manager.setStore(table, this);
}
public RowStoreAVLMemory(PersistentStoreCollection manager, Table table) {

    this.database     = table.database;
    this.manager      = manager;
    this.table        = table;
    this.indexList    = table.getIndexList();
    this.accessorList = new CachedObject[indexList.length];

    manager.setStore(table, this);
}
/**
 * Retrieves the standard JDBC type of the table. <p>
 *
 * "TABLE" for user-defined tables, "VIEW" for user-defined views, and so on.
 *
 * @return the standard JDBC type of the table
 */
String getJDBCStandardType() {

    switch (table.getTableType()) {

        case TableBase.VIEW_TABLE :
            return "VIEW";

        case TableBase.TEMP_TABLE :
        case TableBase.TEMP_TEXT_TABLE :
            return "GLOBAL TEMPORARY";

        case TableBase.INFO_SCHEMA_TABLE :
            return "SYSTEM TABLE";

        default :
            if (table.getOwner().isSystem()) {
                return "SYSTEM TABLE";
            }

            return "TABLE";
    }
}
public RowStoreAVLDisk(PersistentStoreCollection manager, DataFileCache cache, Table table) {

    this.manager      = manager;
    this.table        = table;
    this.indexList    = table.getIndexList();
    this.accessorList = new CachedObject[indexList.length];
    this.cache        = cache;

    manager.setStore(table, this);

    txManager = table.database.txManager;
}
/**
 * Retrieves the HSQLDB-specific type of the table. <p>
 *
 * @return the HSQLDB-specific type of the table
 */
String getHsqlType() {

    switch (table.getTableType()) {

        case TableBase.MEMORY_TABLE :
        case TableBase.TEMP_TABLE :
        case TableBase.INFO_SCHEMA_TABLE :
            return "MEMORY";

        case TableBase.CACHED_TABLE :
            return "CACHED";

        case TableBase.TEMP_TEXT_TABLE :
        case TableBase.TEXT_TABLE :
            return "TEXT";

        case TableBase.VIEW_TABLE :
        default :
            return null;
    }
}
/**
 * Retrieves the remarks (if any) recorded against the Table. <p>
 *
 * @return the remarks recorded against the Table
 */
String getRemark() {

    return (table.getTableType() == TableBase.INFO_SCHEMA_TABLE)
           ? BundleHandler.getString(hnd_table_remarks, getName())
           : table.getName().comment;
}
/**
 * Retrieves the simple name of the table. <p>
 *
 * @return the simple name of the table
 */
String getName() {
    return table.getName().name;
}
/**
 * Retrieves the simple name of the specified column. <p>
 *
 * @param i zero-based column index
 * @return the simple name of the specified column
 */
String getColName(int i) {
    return table.getColumn(i).getName().name;
}
/**
 * Retrieves the scope of the best row identifier. <p>
 *
 * This implements the rules described in
 * DatabaseInformationMain.SYSTEM_BESTROWIDENTIFIER. <p>
 *
 * @return the scope of the best row identifier
 */
Integer getBRIScope() {
    return (table.isWritable()) ? ValuePool.getInt(bestRowTemporary)
                                : ValuePool.getInt(bestRowSession);
}
/**
 * Writes all CACHED tables to the new data file, records the new index root
 * positions for each table, and writes the header of the .new file.
 */
void process() throws IOException {

    boolean complete = false;

    Error.printSystemOut("Defrag Transfer begins");

    transactionRowLookup = database.txManager.getTransactionIDList();

    HsqlArrayList allTables = database.schemaManager.getAllTables();

    rootsList = new int[allTables.size()][];

    Storage dest = null;

    try {
        OutputStream fos =
            database.logger.getFileAccess().openOutputStreamElement(dataFileName + ".new");

        fileStreamOut = new BufferedOutputStream(fos, 1 << 12);

        for (int i = 0; i < DataFileCache.INITIAL_FREE_POS; i++) {
            fileStreamOut.write(0);
        }

        fileOffset = DataFileCache.INITIAL_FREE_POS;

        for (int i = 0, tSize = allTables.size(); i < tSize; i++) {
            Table t = (Table) allTables.get(i);

            if (t.getTableType() == TableBase.CACHED_TABLE) {
                int[] rootsArray = writeTableToDataFile(t);

                rootsList[i] = rootsArray;
            } else {
                rootsList[i] = null;
            }

            Error.printSystemOut(t.getName().name + " complete");
        }

        writeTransactionRows();
        fileStreamOut.flush();
        fileStreamOut.close();

        fileStreamOut = null;

        // write out the end of file position
        dest = ScaledRAFile.newScaledRAFile(
            database, dataFileName + ".new", false, ScaledRAFile.DATA_FILE_RAF,
            database.getURLProperties().getProperty(
                HsqlDatabaseProperties.url_storage_class_name),
            database.getURLProperties().getProperty(
                HsqlDatabaseProperties.url_storage_key));

        dest.seek(DataFileCache.LONG_FREE_POS_POS);
        dest.writeLong(fileOffset);

        // set the flags in the data file header; the shadowed flag
        // is only set when incremental backup is enabled
        int flags = 0;

        if (database.logger.propIncrementBackup) {
            flags = BitMap.set(flags, DataFileCache.FLAG_ISSHADOWED);
        }

        flags = BitMap.set(flags, DataFileCache.FLAG_190);
        flags = BitMap.set(flags, DataFileCache.FLAG_ISSAVED);

        dest.seek(DataFileCache.FLAGS_POS);
        dest.writeInt(flags);
        dest.close();

        dest = null;

        for (int i = 0, size = rootsList.length; i < size; i++) {
            int[] roots = rootsList[i];

            if (roots != null) {
                Error.printSystemOut(org.hsqldb.lib.StringUtil.getList(roots, ",", ""));
            }
        }

        complete = true;
    } catch (IOException e) {
        throw Error.error(ErrorCode.FILE_IO_ERROR, dataFileName + ".new");
    } catch (OutOfMemoryError e) {
        throw Error.error(ErrorCode.OUT_OF_MEMORY);
    } finally {
        if (fileStreamOut != null) {
            fileStreamOut.close();
        }

        if (dest != null) {
            dest.close();
        }

        if (!complete) {
            database.logger.getFileAccess().removeElement(dataFileName + ".new");
        }
    }

    // Error.printSystemOut("Transfer complete: ", stopw.elapsedTime());
}
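// Illustrative sketch only, not part of the original source. The header flags
// written by process() are individual bits packed into a single int. Expressed
// with plain bit operations rather than the BitMap helper, the composition
// would look like the following; the bit positions are hypothetical stand-ins
// for FLAG_ISSHADOWED, FLAG_190 and FLAG_ISSAVED.
private static int composeHeaderFlagsSketch(boolean incrementBackup) {

    final int SHADOWED_BIT = 1 << 0;    // hypothetical position
    final int V190_BIT     = 1 << 1;    // hypothetical position
    final int SAVED_BIT    = 1 << 2;    // hypothetical position

    int flags = 0;

    if (incrementBackup) {
        flags |= SHADOWED_BIT;          // only when incremental backup is on
    }

    flags |= V190_BIT;
    flags |= SAVED_BIT;

    return flags;
}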
/**
 * Writes the rows of the given CACHED table to the new data file and returns
 * the table's index roots remapped to their new positions.
 */
int[] writeTableToDataFile(Table table) throws IOException {

    Session            session = database.getSessionManager().getSysSession();
    PersistentStore    store   = session.sessionData.getRowStore(table);
    RowOutputInterface rowOut  = new RowOutputBinary(1024, scale);
    DoubleIntIndex pointerLookup =
        new DoubleIntIndex(table.getPrimaryIndex().sizeEstimate(store), false);
    int[] rootsArray = table.getIndexRootsArray();
    long  pos        = fileOffset;
    int   count      = 0;

    pointerLookup.setKeysSearchTarget();
    Error.printSystemOut("lookup begins: " + stopw.elapsedTime());

    // first pass: record each row's old position against its new position
    RowIterator it = table.rowIterator(session);

    for (; it.hasNext(); count++) {
        CachedObject row = it.getNextRow();

        pointerLookup.addUnsorted(row.getPos(), (int) (pos / scale));

        if (count % 50000 == 0) {
            Error.printSystemOut("pointer pair for row " + count + " "
                                 + row.getPos() + " " + pos);
        }

        pos += row.getStorageSize();
    }

    Error.printSystemOut(table.getName().name + " list done: " + stopw.elapsedTime());

    // second pass: write the rows out using the remapped pointers
    count = 0;
    it    = table.rowIterator(session);

    for (; it.hasNext(); count++) {
        CachedObject row = it.getNextRow();

        rowOut.reset();
        row.write(rowOut, pointerLookup);
        fileStreamOut.write(rowOut.getOutputStream().getBuffer(), 0, rowOut.size());

        fileOffset += row.getStorageSize();

        if (count % 50000 == 0) {
            Error.printSystemOut(count + " rows " + stopw.elapsedTime());
        }
    }

    // remap the index roots to their new positions
    for (int i = 0; i < rootsArray.length; i++) {
        if (rootsArray[i] == -1) {
            continue;
        }

        int lookupIndex = pointerLookup.findFirstEqualKeyIndex(rootsArray[i]);

        if (lookupIndex == -1) {
            throw Error.error(ErrorCode.DATA_FILE_ERROR);
        }

        rootsArray[i] = pointerLookup.getValue(lookupIndex);
    }

    setTransactionRowLookups(pointerLookup);
    Error.printSystemOut(table.getName().name + " : table converted");

    return rootsArray;
}
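// Illustrative sketch only, not part of the original source. The DoubleIntIndex
// built in writeTableToDataFile is essentially a map from each row's old file
// position to its new position in the defragmented file (the new offset divided
// by the storage scale). With a plain java.util.Map the same bookkeeping would
// look like this; oldPositions and storageSizes stand in for Row.getPos() and
// Row.getStorageSize().
private static java.util.Map<Integer, Integer> buildPointerLookupSketch(
        int[] oldPositions, int[] storageSizes, long startOffset, int scale) {

    java.util.Map<Integer, Integer> lookup =
        new java.util.HashMap<Integer, Integer>();
    long pos = startOffset;

    for (int i = 0; i < oldPositions.length; i++) {
        // old position -> new position, expressed in scale units
        lookup.put(oldPositions[i], (int) (pos / scale));

        pos += storageSizes[i];
    }

    return lookup;
}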