/**
 * Used when a row is deleted as a result of some DML or DDL command. Adds the
 * file space for the row to the list of free positions. If there exist more
 * than MAX_FREE_COUNT free positions, they are probably all too small, so we
 * start a new list.
 *
 * <p>todo: This is wrong when deleting lots of records
 *
 * <p>Then remove the row from the cache data structures.
 */
public void remove(int i, PersistentStore store) throws HsqlException {
    CachedObject r = release(i);
    int size = r == null ? getStorageSize(i) : r.getStorageSize();

    freeBlocks.add(i, size);
}
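/*
 * Illustrative sketch only (not part of HSQLDB): a capped free-space list in
 * the spirit of the freeBlocks structure used above. Freed (position, size)
 * pairs are remembered for reuse; once the list grows past a cap, its entries
 * are assumed to be fragments too small to reuse, so the list is discarded and
 * started fresh. The class, the MAX_FREE_COUNT value and the method names here
 * are hypothetical.
 */
import java.util.ArrayList;
import java.util.List;

class FreeBlockListSketch {

    private static final int MAX_FREE_COUNT = 512;    // hypothetical cap

    private static final class Block {
        final int pos;
        final int size;

        Block(int pos, int size) {
            this.pos  = pos;
            this.size = size;
        }
    }

    private List<Block> blocks = new ArrayList<>();

    /** Records the space of a deleted row for later reuse. */
    void add(int pos, int size) {
        if (blocks.size() >= MAX_FREE_COUNT) {
            blocks = new ArrayList<>();    // entries are likely too small; start over
        }

        blocks.add(new Block(pos, size));
    }

    /** Returns a position large enough for rowSize, or -1 if none fits. */
    int get(int rowSize) {
        for (int i = 0; i < blocks.size(); i++) {
            if (blocks.get(i).size >= rowSize) {
                return blocks.remove(i).pos;
            }
        }

        return -1;
    }
}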
/**
 * Reduces the number of rows held in this Cache object.
 *
 * <p>Cleanup is done by checking the accessCount of the Rows and removing
 * the rows with the lowest access count.
 *
 * <p>Index operations require that up to 5 recently accessed rows remain
 * in the cache.
 */
private synchronized void cleanUp() throws IOException {
    int removeCount  = cacheMap.size() / 2;
    int accessTarget = cacheMap.getAccessCountCeiling(removeCount,
                                                      removeCount / 8);
    ObjectCacheHashMap.ObjectCacheIterator it = cacheMap.iterator();
    int savecount = 0;

    for (; it.hasNext(); ) {
        CachedObject r = (CachedObject) it.next();

        if (it.getAccessCount() <= accessTarget && !r.isKeepInMemory()) {
            if (r.hasChanged()) {
                rowTable[savecount++] = r;
            }

            it.remove();

            cacheBytesLength -= r.getStorageSize();
        }
    }

    cacheMap.setAccessCountFloor(accessTarget);
    saveRows(savecount);
}
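/*
 * Illustrative sketch only (not part of HSQLDB): the access-count eviction
 * idea used by cleanUp() above, without ObjectCacheHashMap. Each entry carries
 * an access count; to shed roughly half the entries, a ceiling is chosen so
 * that entries at or below it are evicted. All names here are hypothetical.
 */
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class AccessCountEvictionSketch {

    static final class Entry {
        final int key;
        int       accessCount;

        Entry(int key, int accessCount) {
            this.key         = key;
            this.accessCount = accessCount;
        }
    }

    /** Evicts the entries whose access count is at or below the median. */
    static void evictColdHalf(List<Entry> entries) {
        int[] counts = entries.stream().mapToInt(e -> e.accessCount).toArray();

        Arrays.sort(counts);

        // ceiling chosen so that roughly half of the entries fall at or below it
        int ceiling = counts[counts.length / 2];

        entries.removeIf(e -> e.accessCount <= ceiling);
    }

    public static void main(String[] args) {
        List<Entry> cache = new ArrayList<>();

        for (int i = 0; i < 10; i++) {
            cache.add(new Entry(i, i));    // entry i accessed i times
        }

        evictColdHalf(cache);

        // the entries with the lowest access counts have been removed
        System.out.println("remaining: " + cache.size());
    }
}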
public synchronized int getStorageSize(int i) throws IOException {
    CachedObject value = cache.get(i);

    if (value != null) {
        return value.getStorageSize();
    }

    return readSize(i);
}
/** Removes an object from memory cache. Does not release the file storage. */
public synchronized CachedObject release(int i) {
    CachedObject r = (CachedObject) cacheMap.remove(i);

    if (r == null) {
        return null;
    }

    cacheBytesLength -= r.getStorageSize();

    return r;
}
/** Adds a row to the cache. */
public synchronized void put(int key, CachedObject row) throws IOException {
    int storageSize = row.getStorageSize();

    if (cacheMap.size() >= capacity
            || storageSize + cacheBytesLength > bytesCapacity) {
        cleanUp();
    }

    cacheMap.put(key, row);

    cacheBytesLength += storageSize;
}
public synchronized int getStorageSize(int i) throws HsqlException {
    try {
        CachedObject value = cache.get(i);

        if (value != null) {
            return value.getStorageSize();
        }

        return readSize(i);
    } catch (IOException e) {
        database.logger.appLog.logContext(e);

        throw Trace.error(Trace.DATA_FILE_ERROR, Trace.DataFileCache_makeRow,
                          new Object[]{ e, fileName });
    }
}
/**
 * Allocates file space for the row.
 *
 * <p>A Row is added by walking the list of CacheFree objects to see if
 * there is available space to store it, reusing space if it exists.
 * Otherwise the file is grown to accommodate it.
 */
private int setFilePos(CachedObject r) throws IOException {
    int rowSize = r.getStorageSize();
    int i = freeBlocks == null ? -1 : freeBlocks.get(rowSize);

    if (i == -1) {
        i = (int) (fileFreePosition / cacheFileScale);

        long newFreePosition = fileFreePosition + rowSize;

        if (newFreePosition > maxDataFileSize) {
            throw new IOException(Trace.getMessage(Trace.DATA_FILE_IS_FULL));
        }

        fileFreePosition = newFreePosition;
    }

    r.setPos(i);

    return i;
}
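/*
 * Illustrative sketch only (not part of HSQLDB): the reuse-or-grow decision
 * made by setFilePos above, building on the FreeBlockListSketch shown earlier.
 * Positions are stored divided by a file scale so that a 32-bit int can
 * address a larger file. The class name, field names and the constant values
 * here are hypothetical.
 */
class FileSpaceAllocatorSketch {

    private final int  fileScale        = 8;          // bytes per position unit
    private final long maxDataFileSize  = 1L << 31;   // hypothetical limit
    private long       fileFreePosition = 0;          // next unused byte offset
    private final FreeBlockListSketch freeBlocks = new FreeBlockListSketch();

    /** Returns the scaled position at which a row of rowSize bytes can be written. */
    int allocate(int rowSize) throws java.io.IOException {
        // first try to reuse space left behind by a deleted row
        int pos = freeBlocks.get(rowSize);

        if (pos == -1) {
            // no free block fits: append at the current end of the file
            pos = (int) (fileFreePosition / fileScale);

            long newFreePosition = fileFreePosition + rowSize;

            if (newFreePosition > maxDataFileSize) {
                throw new java.io.IOException("data file is full");
            }

            fileFreePosition = newFreePosition;
        }

        return pos;
    }
}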
int[] writeTableToDataFile(Table table) throws IOException {
    Session session = database.getSessionManager().getSysSession();
    PersistentStore store = session.sessionData.getRowStore(table);
    RowOutputInterface rowOut = new RowOutputBinary(1024, scale);
    DoubleIntIndex pointerLookup =
        new DoubleIntIndex(table.getPrimaryIndex().sizeEstimate(store), false);
    int[] rootsArray = table.getIndexRootsArray();
    long pos = fileOffset;
    int count = 0;

    pointerLookup.setKeysSearchTarget();
    Error.printSystemOut("lookup begins: " + stopw.elapsedTime());

    // first pass: map each row's old position to its position in the new file
    RowIterator it = table.rowIterator(session);

    for (; it.hasNext(); count++) {
        CachedObject row = it.getNextRow();

        pointerLookup.addUnsorted(row.getPos(), (int) (pos / scale));

        if (count % 50000 == 0) {
            Error.printSystemOut("pointer pair for row " + count + " "
                                 + row.getPos() + " " + pos);
        }

        pos += row.getStorageSize();
    }

    Error.printSystemOut(table.getName().name + " list done: "
                         + stopw.elapsedTime());

    // second pass: write each row to the new data file, translating row
    // references through the pointer lookup
    count = 0;
    it    = table.rowIterator(session);

    for (; it.hasNext(); count++) {
        CachedObject row = it.getNextRow();

        rowOut.reset();
        row.write(rowOut, pointerLookup);
        fileStreamOut.write(rowOut.getOutputStream().getBuffer(), 0,
                            rowOut.size());

        fileOffset += row.getStorageSize();

        if (count % 50000 == 0) {
            Error.printSystemOut(count + " rows " + stopw.elapsedTime());
        }
    }

    // translate the index root positions to their new locations
    for (int i = 0; i < rootsArray.length; i++) {
        if (rootsArray[i] == -1) {
            continue;
        }

        int lookupIndex = pointerLookup.findFirstEqualKeyIndex(rootsArray[i]);

        if (lookupIndex == -1) {
            throw Error.error(ErrorCode.DATA_FILE_ERROR);
        }

        rootsArray[i] = pointerLookup.getValue(lookupIndex);
    }

    setTransactionRowLookups(pointerLookup);
    Error.printSystemOut(table.getName().name + " : table converted");

    return rootsArray;
}
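/*
 * Illustrative sketch only (not part of HSQLDB): the pointer-translation idea
 * behind writeTableToDataFile above, using a plain HashMap instead of
 * DoubleIntIndex. A first pass records, for every row, the mapping from its
 * old file position to the position it will occupy in the compacted file; a
 * second pass rewrites references such as index roots through that mapping.
 * The class name and the sample data are hypothetical.
 */
import java.util.HashMap;
import java.util.Map;

class DefragPointerLookupSketch {

    public static void main(String[] args) {
        // each entry is { old position, storage size }, in iteration order
        // (a stand-in for the table's row iterator)
        int[][] rows = { { 40, 16 }, { 8, 32 }, { 72, 16 } };

        Map<Integer, Integer> oldToNew = new HashMap<>();
        int newPos = 0;

        // pass 1: assign contiguous new positions in iteration order
        for (int[] row : rows) {
            oldToNew.put(row[0], newPos);
            newPos += row[1];
        }

        // pass 2: translate a reference, e.g. an index root, to its new position
        int oldRoot = 72;
        Integer newRoot = oldToNew.get(oldRoot);

        if (newRoot == null) {
            throw new IllegalStateException("dangling pointer: " + oldRoot);
        }

        System.out.println("root " + oldRoot + " -> " + newRoot);
    }
}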