/** * exclusive latch on page is being released. * * <p>The only work done in CachedPage is to update the row count on the container if it is too * out of sync. */ protected void releaseExclusive() { // look at dirty bit without latching, the updating of the row // count is just an optimization so does not need the latch. // // if this page actually has > 1/8 rows of the entire container, then // consider updating the row count if it is different. // // No need to special case allocation pages because it has recordCount // of zero, thus the if clause will never be true for an allocation // page. if (isDirty && !isOverflowPage() && (containerRowCount / 8) < recordCount()) { int currentRowCount = internalNonDeletedRecordCount(); int delta = currentRowCount - initialRowCount; int posDelta = delta > 0 ? delta : (-delta); if ((containerRowCount / 8) < posDelta) { // This pages delta row count represents a significant change // with respect to current container row count so update // container row count FileContainer myContainer = null; try { myContainer = (FileContainer) containerCache.find(identity.getContainerId()); if (myContainer != null) { myContainer.updateEstimatedRowCount(delta); setContainerRowCount(myContainer.getEstimatedRowCount(0)); initialRowCount = currentRowCount; // since I have the container, might as well update the // unfilled information myContainer.trackUnfilledPage(identity.getPageNumber(), unfilled()); } } catch (StandardException se) { // do nothing, not sure what could fail but this update // is just an optimization so no need to throw error. } finally { if (myContainer != null) containerCache.release(myContainer); } } } super.releaseExclusive(); }
/** * Find the container and then read the page from that container. * * <p>This is the way new pages enter the page cache. * * <p> * * @return always true, higher levels have already checked the page number is valid for an open. * @exception StandardException Standard Derby policy. * @see Cacheable#setIdentity */ public Cacheable setIdentity(Object key) throws StandardException { if (SanityManager.DEBUG) { SanityManager.ASSERT(key instanceof PageKey); } initialize(); PageKey newIdentity = (PageKey) key; FileContainer myContainer = (FileContainer) containerCache.find(newIdentity.getContainerId()); setContainerRowCount(myContainer.getEstimatedRowCount(0)); try { if (!alreadyReadPage) { // Fill in the pageData array by reading bytes from disk. readPage(myContainer, newIdentity); } else { // pageData array already filled alreadyReadPage = false; } // if the formatID on disk is not the same as this page instance's // format id, instantiate the real page object int fmtId = getTypeFormatId(); int onPageFormatId = FormatIdUtil.readFormatIdInteger(pageData); if (fmtId != onPageFormatId) { return changeInstanceTo(onPageFormatId, newIdentity).setIdentity(key); } // this is the correct page instance initFromData(myContainer, newIdentity); } finally { containerCache.release(myContainer); myContainer = null; } fillInIdentity(newIdentity); initialRowCount = 0; return this; }
/**
 * Write the page from this CachedPage object to disk.
 *
 * <p>Enforces the write-ahead-log protocol: the log is flushed up to this
 * page's last log instant BEFORE the page image is written to the container.
 * The dirty flags are cleared only after the write succeeds, so statement
 * order in this method is load-bearing.
 *
 * @param identity identity (ie. page number) of the page to write
 * @param syncMe does the write of this single page have to be sync'd?
 * @exception StandardException Standard exception policy.
 */
private void writePage(PageKey identity, boolean syncMe) throws StandardException {
  // make subclass write the page format
  writeFormatId(identity);

  // let subclass have a chance to write any cached data to page data
  // array
  writePage(identity);

  // force WAL - and check to see if database is corrupt or is frozen.
  // last log Instant may be null if the page is being forced
  // to disk on a createPage (which violates the WAL protocol actually).
  // See FileContainer.newPage
  LogInstant flushLogTo = getLastLogInstant();
  dataFactory.flush(flushLogTo);

  if (flushLogTo != null) {
    clearLastLogInstant();
  }

  // find the container and file access object
  FileContainer myContainer = (FileContainer) containerCache.find(identity.getContainerId());

  // container gone (dropped/vanished): mark the database corrupt, since a
  // dirty page with no backing container cannot be persisted.
  if (myContainer == null) {
    StandardException nested =
        StandardException.newException(
            SQLState.DATA_CONTAINER_VANISHED, identity.getContainerId());
    throw dataFactory.markCorrupt(
        StandardException.newException(SQLState.FILE_WRITE_PAGE_EXCEPTION, nested, identity));
  }

  try {
    myContainer.writePage(identity.getPageNumber(), pageData, syncMe);

    //
    // Do some in memory unlogged bookkeeping tasks while we have
    // the container.
    //

    if (!isOverflowPage() && isDirty()) {

      // let the container know whether this page is a not
      // filled, non-overflow page
      myContainer.trackUnfilledPage(identity.getPageNumber(), unfilled());

      // if this is not an overflow page, see if the page's row
      // count has changed since it came into the cache.
      //
      // If the page is invalid, row count is 0. Otherwise,
      // count non-deleted records on page.
      //
      // Cannot call nonDeletedRecordCount because the page is
      // unlatched now even though nobody is changing it
      int currentRowCount = internalNonDeletedRecordCount();

      if (currentRowCount != initialRowCount) {
        myContainer.updateEstimatedRowCount(currentRowCount - initialRowCount);

        setContainerRowCount(myContainer.getEstimatedRowCount(0));

        initialRowCount = currentRowCount;
      }
    }

  } catch (IOException ioe) {
    // page cannot be written
    throw StandardException.newException(SQLState.FILE_WRITE_PAGE_EXCEPTION, ioe, identity);
  } finally {
    containerCache.release(myContainer);
    myContainer = null;
  }

  synchronized (this) {
    // change page state to not dirty after the successful write
    isDirty = false;
    preDirty = false;
  }
}