/**
 * This method deletes a data file or resets its free position.
 * This is used only for nio files, not OOo files.
 */
public static void deleteOrResetFreePos(Database database,
        String filename) {

    ScaledRAFile raFile = null;

    try {
        database.getFileAccess().removeElement(filename);
    } catch (IOException e) {
        database.logger.appLog.logContext(e);
    }

    if (database.isStoredFileAccess()) {
        return;
    }

    if (!database.getFileAccess().isStreamElement(filename)) {
        return;
    }

    try {
        raFile = new ScaledRAFile(filename, false);

        raFile.seek(LONG_FREE_POS_POS);
        raFile.writeLong(INITIAL_FREE_POS);
    } catch (IOException e) {
        database.logger.appLog.logContext(e);
    } finally {
        if (raFile != null) {
            try {
                raFile.close();
            } catch (IOException e) {
                database.logger.appLog.logContext(e);
            }
        }
    }
}
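/*
 * A minimal standalone sketch of the "reset free position" branch above,
 * using plain java.io.RandomAccessFile in place of ScaledRAFile. The two
 * local constants are illustrative stand-ins for LONG_FREE_POS_POS and
 * INITIAL_FREE_POS; their real values are defined elsewhere in this class.
 */
static void resetFreePosSketch(String filename) throws java.io.IOException {

    final long FREE_POS_OFFSET = 12;    // assumed offset of the free-position slot
    final long INITIAL_POS     = 32;    // assumed start of the data area

    java.io.RandomAccessFile raf = new java.io.RandomAccessFile(filename,
        "rw");

    try {
        raf.seek(FREE_POS_OFFSET);      // move to the stored free-space pointer
        raf.writeLong(INITIAL_POS);     // rewind it, logically emptying the file
    } finally {
        raf.close();
    }
}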
/**
 * Opens a data source file.
 */
void open(boolean readonly) throws HsqlException {

    try {
        rFile = ScaledRAFile.newScaledRAFile(sName, readonly, 1,
                                             ScaledRAFile.DATA_FILE_RAF);
        iFreePos = (int) rFile.length();

        if ((iFreePos == 0) && ignoreFirst) {
            byte[] buf = null;

            try {
                buf = ignoredFirst.getBytes(stringEncoding);
            } catch (UnsupportedEncodingException e) {
                buf = ignoredFirst.getBytes();
            }

            rFile.write(buf, 0, buf.length);

            // byte length of the header as written, not its char count
            iFreePos = buf.length;
        }
    } catch (Exception e) {
        throw Trace.error(Trace.FILE_IO_ERROR,
                          Trace.TextCache_openning_file_error,
                          new Object[] {
            sName, e
        });
    }

    readOnly = readonly;
}
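/*
 * A small sketch of the header-encoding fallback used in open() above:
 * the configured source encoding is tried first, with the platform
 * default as a last resort. The helper name is illustrative, not part
 * of the TextCache API.
 */
static byte[] encodeHeader(String header, String encoding) {

    try {
        return header.getBytes(encoding);    // preferred text-source encoding
    } catch (java.io.UnsupportedEncodingException e) {
        return header.getBytes();            // platform default as fallback
    }
}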
public static int getFlags(String filename) throws HsqlException {

    try {
        ScaledRAFile raFile =
            (ScaledRAFile) ScaledRAFile.newScaledRAFile(filename, true,
                ScaledRAFile.DATA_FILE_RAF, null, null);

        raFile.seek(FLAGS_POS);

        int flags = raFile.readInt();

        raFile.close();

        return flags;
    } catch (IOException e) {
        throw Trace.error(Trace.DATA_FILE_ERROR);
    }
}
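/*
 * Hedged usage sketch for getFlags(), assuming it is called from the
 * class that declares it alongside the FLAG_ROWINFO bit position;
 * BitMap.isSet is the org.hsqldb.lib helper already used by open() below.
 * The helper name is illustrative.
 */
static boolean hasRowInfoSketch(String filename) throws HsqlException {

    int flags = getFlags(filename);

    return BitMap.isSet(flags, FLAG_ROWINFO);    // bit defined in this class
}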
HsqlArrayList defrag(Database db, ScaledRAFile sourcenotused,
                     String filename) throws IOException, HsqlException {

    Trace.printSystemOut("Defrag Transfer begins");

    HsqlArrayList rootsList = new HsqlArrayList();
    HsqlArrayList tTable    = db.getTables();

    // erik  to specify scale
    ScaledRAFile dest = ScaledRAFile.newScaledRAFile(filename + ".new",
        false, 1, ScaledRAFile.DATA_FILE_RAF);

    // erik  dest.seek(Cache.INITIAL_FREE_POS / cacheFileScale);
    dest.seek(Cache.INITIAL_FREE_POS);

    for (int i = 0, tSize = tTable.size(); i < tSize; i++) {
        Table t = (Table) tTable.get(i);

        if (t.tableType == Table.CACHED_TABLE) {
            int[] rootsArray = writeTableToDataFile(t, dest);

            rootsList.add(rootsArray);
        } else {
            rootsList.add(null);
        }

        Trace.printSystemOut(t.getName().name, " complete");
    }

    // erik  no change
    int pos = (int) dest.getFilePointer();

    // erik  dest.seek(Cache.FREE_POS_POS / cacheFileScale);
    dest.seek(Cache.FREE_POS_POS);
    dest.writeInt(pos);
    dest.close();

    for (int i = 0, size = rootsList.size(); i < size; i++) {
        int[] roots = (int[]) rootsList.get(i);

        if (roots != null) {
            Trace.printSystemOut(
                org.hsqldb.lib.StringUtil.getList(roots, ",", ""));
        }
    }

    Trace.printSystemOut("Transfer complete: ", stopw.elapsedTime());

    return rootsList;
}
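/*
 * Sketch of the shadow-file pattern defrag() relies on: everything is
 * written to filename + ".new" and the swap is left to the caller. A
 * minimal swap, assuming plain files and no open handles; the real code
 * goes through the FileAccess layer rather than java.io.File, and this
 * helper is not part of the class.
 */
static boolean swapInDefraggedFile(String filename) {

    java.io.File oldFile = new java.io.File(filename);
    java.io.File newFile = new java.io.File(filename + ".new");

    // delete the fragmented file, then promote the compacted copy
    return oldFile.delete() && newFile.renameTo(oldFile);
}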
/** @todo fredt - use an upward estimate of number of rows based on Index */
int[] writeTableToDataFile(Table table,
                           ScaledRAFile destFile)
                           throws IOException, HsqlException {

    BinaryServerRowOutput rowOut = new BinaryServerRowOutput();

    // rowOut.setSystemId(true);
    DoubleIntTable pointerLookup = new DoubleIntTable(1000000);
    int[]          rootsArray    = table.getIndexRootsArray();
    Index          index         = table.getPrimaryIndex();

    // erik  long pos = destFile.getFilePointer() / cacheFileScale;
    long  pos         = destFile.getFilePointer();
    int[] pointerPair = new int[2];
    int   count       = 0;

    Trace.printSystemOut("lookup begins: " + stopw.elapsedTime());

    for (Node n = index.first(); n != null; count++) {
        CachedRow row = (CachedRow) n.getRow();

        pointerLookup.add(row.iPos, (int) pos);

        // erik  pos += row.storageSize / cacheFileScale;
        pos += row.storageSize;

        if (count % 50000 == 0) {

            // System.gc();
            Trace.printSystemOut("pointer pair for row " + count + " "
                                 + pointerPair[0] + " " + pointerPair[1]);
        }

        n = index.next(n);
    }

    Trace.printSystemOut(table.getName().name + " list done ",
                         stopw.elapsedTime());

    count = 0;

    for (Node n = index.first(); n != null; count++) {
        CachedRow row = (CachedRow) n.getRow();

        // erik  int rowPointer = (int) destFile.getFilePointer() / cacheFileScale;
        int rowPointer = (int) destFile.getFilePointer();

        rowOut.reset();

        // code should be moved to CachedRow.java
        rowOut.writeSize(row.storageSize);

        Node rownode = row.nPrimaryNode;

        while (rownode != null) {
            ((DiskNode) rownode).writeTranslate(rowOut, pointerLookup);

            rownode = rownode.nNext;
        }

        rowOut.writeData(row.getData(), row.getTable());
        rowOut.writePos(rowPointer);

        // end
        destFile.write(rowOut.getOutputStream().getBuffer(), 0,
                       rowOut.size());

        if (count % 50000 == 0) {
            Trace.printSystemOut(count + " rows " + stopw.elapsedTime());
        }

        n = index.next(n);
    }

    for (int i = 0; i < rootsArray.length; i++) {
        int lookupIndex = pointerLookup.find(0, rootsArray[i]);

        if (lookupIndex == -1) {
            throw Trace.error(Trace.DataFileDefrag_writeTableToDataFile);
        }

        rootsArray[i] = pointerLookup.get(lookupIndex, 1);
    }

    Trace.printSystemOut(table.getName().name, " : table converted");

    return rootsArray;
}
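/*
 * Standalone sketch of the two-pass pointer translation above. Pass one
 * walks rows in primary-index order and assigns each old position a new
 * one; the root lookup mirrors the rootsArray loop at the end of
 * writeTableToDataFile(). Plain arrays stand in for DoubleIntTable and
 * all names here are illustrative.
 */
static int[] assignNewPositions(int[] rowSize, int startPos) {

    int[] newPos = new int[rowSize.length];
    int   pos    = startPos;

    for (int i = 0; i < rowSize.length; i++) {
        newPos[i] = pos;            // row i will be rewritten at this offset
        pos       += rowSize[i];    // rows are packed back to back
    }

    return newPos;
}

static int translateRoot(int oldRoot, int[] oldPos, int[] newPos) {

    for (int i = 0; i < oldPos.length; i++) {
        if (oldPos[i] == oldRoot) {
            return newPos[i];       // new location of the index root row
        }
    }

    throw new IllegalStateException("root not found: " + oldRoot);
}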
/**
 * Opens the *.data file for this cache, setting the variables that
 * allow access to the particular database version of the *.data file.
 */
public void open(boolean readonly) throws HsqlException {

    fileFreePosition = 0;

    try {
        boolean preexists = database.isFilesInJar();
        long    freesize  = 0;

        if (!preexists && fa.isStreamElement(fileName)) {
            if (database.isStoredFileAccess()) {
                preexists = true;
            } else {

                // discard "empty" databases
                File f = new File(fileName);

                preexists = f.length() > INITIAL_FREE_POS;
            }
        }

        if (preexists) {
            String version = database.getProperties().getProperty(
                HsqlDatabaseProperties.hsqldb_cache_version);
            boolean v17 =
                HsqlDatabaseProperties.VERSION_STRING_1_7_0.equals(version);

            // for later versions
            boolean v18 =
                HsqlDatabaseProperties.VERSION_STRING_1_8_0.equals(version);

            if (!v17) {
                throw Trace.error(Trace.WRONG_DATABASE_FILE_VERSION);
            }
        }

        boolean isNio = database.getProperties().isPropertyTrue(
            "hsqldb.nio_data_file");
        int fileType = isNio ? ScaledRAFile.DATA_FILE_NIO
                             : ScaledRAFile.DATA_FILE_RAF;

        if (database.isFilesInJar()) {
            fileType = ScaledRAFile.DATA_FILE_JAR;
        }

        // [email protected] - change to file access api
        String cname = database.getURLProperties().getProperty(
            "storage_class_name");
        String skey = database.getURLProperties().getProperty(
            "storage_key");

        dataFile = ScaledRAFile.newScaledRAFile(fileName, readonly,
                fileType, cname, skey);

        if (preexists) {
            dataFile.seek(FLAGS_POS);

            int flags = dataFile.readInt();

            hasRowInfo = BitMap.isSet(flags, FLAG_ROWINFO);

            dataFile.seek(LONG_EMPTY_SIZE);

            freesize = dataFile.readLong();

            dataFile.seek(LONG_FREE_POS_POS);

            fileFreePosition = dataFile.readLong();

            if (fileFreePosition < INITIAL_FREE_POS) {
                fileFreePosition = INITIAL_FREE_POS;
            }
        } else {
            fileFreePosition = INITIAL_FREE_POS;
        }

        resetBuffers();

        fileModified = false;
        freeBlocks = new DataFileBlockManager(FREE_BLOCKS_COUNT,
                                              cacheFileScale, freesize);
    } catch (Exception e) {
        database.logger.appLog.logContext(e);
        close(false);

        throw Trace.error(Trace.FILE_IO_ERROR, Trace.DataFileCache_open,
                          new Object[] {
            e, fileName
        });
    }
}
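/*
 * Hedged sketch of the header reads performed by open() when the file
 * preexists, using plain java.io.RandomAccessFile. LONG_FREE_POS_POS is
 * the same header offset used above; no literal value is assumed, and
 * the helper name is illustrative.
 */
static long readFreePositionSketch(String fileName)
        throws java.io.IOException {

    java.io.RandomAccessFile raf = new java.io.RandomAccessFile(fileName,
        "r");

    try {
        raf.seek(LONG_FREE_POS_POS);    // header slot holding the free position
        return raf.readLong();
    } finally {
        raf.close();
    }
}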
void process() throws IOException {

    boolean complete = false;

    Error.printSystemOut("Defrag Transfer begins");

    transactionRowLookup = database.txManager.getTransactionIDList();

    HsqlArrayList allTables = database.schemaManager.getAllTables();

    rootsList = new int[allTables.size()][];

    Storage dest = null;

    try {
        OutputStream fos =
            database.logger.getFileAccess().openOutputStreamElement(
                dataFileName + ".new");

        fileStreamOut = new BufferedOutputStream(fos, 1 << 12);

        for (int i = 0; i < DataFileCache.INITIAL_FREE_POS; i++) {
            fileStreamOut.write(0);
        }

        fileOffset = DataFileCache.INITIAL_FREE_POS;

        for (int i = 0, tSize = allTables.size(); i < tSize; i++) {
            Table t = (Table) allTables.get(i);

            if (t.getTableType() == TableBase.CACHED_TABLE) {
                int[] rootsArray = writeTableToDataFile(t);

                rootsList[i] = rootsArray;
            } else {
                rootsList[i] = null;
            }

            Error.printSystemOut(t.getName().name + " complete");
        }

        writeTransactionRows();
        fileStreamOut.flush();
        fileStreamOut.close();

        fileStreamOut = null;

        // write out the end of file position
        dest = ScaledRAFile.newScaledRAFile(
            database, dataFileName + ".new", false,
            ScaledRAFile.DATA_FILE_RAF,
            database.getURLProperties().getProperty(
                HsqlDatabaseProperties.url_storage_class_name),
            database.getURLProperties().getProperty(
                HsqlDatabaseProperties.url_storage_key));

        dest.seek(DataFileCache.LONG_FREE_POS_POS);
        dest.writeLong(fileOffset);

        // set shadowed flag
        int flags = 0;

        if (database.logger.propIncrementBackup) {
            flags = BitMap.set(flags, DataFileCache.FLAG_ISSHADOWED);
        }

        flags = BitMap.set(flags, DataFileCache.FLAG_190);
        flags = BitMap.set(flags, DataFileCache.FLAG_ISSAVED);

        dest.seek(DataFileCache.FLAGS_POS);
        dest.writeInt(flags);
        dest.close();

        dest = null;

        for (int i = 0, size = rootsList.length; i < size; i++) {
            int[] roots = rootsList[i];

            if (roots != null) {
                Error.printSystemOut(
                    org.hsqldb.lib.StringUtil.getList(roots, ",", ""));
            }
        }

        complete = true;
    } catch (IOException e) {
        throw Error.error(ErrorCode.FILE_IO_ERROR, dataFileName + ".new");
    } catch (OutOfMemoryError e) {
        throw Error.error(ErrorCode.OUT_OF_MEMORY);
    } finally {
        if (fileStreamOut != null) {
            fileStreamOut.close();
        }

        if (dest != null) {
            dest.close();
        }

        if (!complete) {
            database.logger.getFileAccess().removeElement(dataFileName
                    + ".new");
        }
    }

    // Error.printSystemOut("Transfer complete: ", stopw.elapsedTime());
}
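/*
 * Hedged verification sketch: after process() succeeds, the ".new" file
 * header should carry the flags set above. BitMap.isSet is the read-side
 * counterpart of the BitMap.set calls in process(); reading the flags via
 * getFlags(), shown earlier in this section, is an assumption about how
 * the pieces fit together, and the helper name is illustrative.
 */
static boolean isSavedSketch(String dataFileName) throws HsqlException {

    int flags = DataFileCache.getFlags(dataFileName + ".new");

    return BitMap.isSet(flags, DataFileCache.FLAG_ISSAVED);
}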