@Override
public Value[] next() {
    if (resultCursor == null) {
        Index idx;
        if (distinct || sort != null) {
            idx = index;
        } else {
            idx = table.getScanIndex(session);
        }
        if (session.getDatabase().getMvStore() != null) {
            // sometimes the transaction is already committed,
            // in which case we can't use the session
            if (idx.getRowCount(session) == 0 && rowCount > 0) {
                // this means querying is not transactional
                resultCursor = idx.find((Session) null, null, null);
            } else {
                // the transaction is still open
                resultCursor = idx.find(session, null, null);
            }
        } else {
            resultCursor = idx.find(session, null, null);
        }
    }
    if (!resultCursor.next()) {
        return null;
    }
    Row row = resultCursor.get();
    return row.getValueList();
}
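// A minimal, self-contained sketch of the lazy-initialization pattern above:
// defer creating the underlying iterator until the first call, then delegate
// to it. LazyIterator and its factory are illustrative, not H2 classes.
import java.util.Iterator;
import java.util.function.Supplier;

final class LazyIterator<T> implements Iterator<T> {
    private final Supplier<Iterator<T>> factory; // e.g. pick an index and open a cursor
    private Iterator<T> delegate;

    LazyIterator(Supplier<Iterator<T>> factory) {
        this.factory = factory;
    }

    @Override
    public boolean hasNext() {
        if (delegate == null) {
            delegate = factory.get(); // opened on first use
        }
        return delegate.hasNext();
    }

    @Override
    public T next() {
        hasNext(); // ensure the delegate exists
        return delegate.next();
    }
}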
/**
 * Get the row at the given index.
 *
 * @param at the index
 * @return the row
 */
Row getRowAt(int at) {
    Row r = rows[at];
    if (r == null) {
        if (firstOverflowPageId == 0) {
            r = readRow(data, offsets[at], columnCount);
        } else {
            if (rowRef != null) {
                r = rowRef.get();
                if (r != null) {
                    return r;
                }
            }
            PageStore store = index.getPageStore();
            Data buff = store.createData();
            int pageSize = store.getPageSize();
            int offset = offsets[at];
            buff.write(data.getBytes(), offset, pageSize - offset);
            int next = firstOverflowPageId;
            do {
                PageDataOverflow page = index.getPageOverflow(next);
                next = page.readInto(buff);
            } while (next != 0);
            overflowRowSize = pageSize + buff.length();
            r = readRow(buff, 0, columnCount);
        }
        r.setKey(keys[at]);
        if (firstOverflowPageId != 0) {
            rowRef = new SoftReference<Row>(r);
        } else {
            rows[at] = r;
            memoryChange(true, r);
        }
    }
    return r;
}
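// A minimal sketch (hypothetical names, not H2 API) of the caching pattern
// above: rows that spilled to overflow pages are expensive to rebuild, so they
// are held only through a SoftReference and may be reclaimed under memory
// pressure, while small rows are cached strongly.
import java.lang.ref.SoftReference;
import java.util.function.Supplier;

final class OverflowRowCache<V> {
    private SoftReference<V> softRef; // large value: reclaimable
    private V strongRef;              // small value: pinned

    V get(Supplier<V> loader, boolean large) {
        V v = strongRef != null ? strongRef : (softRef != null ? softRef.get() : null);
        if (v == null) {
            v = loader.get(); // re-read, e.g. from overflow pages on disk
            if (large) {
                softRef = new SoftReference<>(v);
            } else {
                strongRef = v;
            }
        }
        return v;
    }
}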
private void writeData() {
    if (written) {
        return;
    }
    if (!optimizeUpdate) {
        readAllRows();
    }
    writeHead();
    if (firstOverflowPageId != 0) {
        data.writeInt(firstOverflowPageId);
        data.checkCapacity(overflowRowSize);
    }
    for (int i = 0; i < entryCount; i++) {
        data.writeVarLong(keys[i]);
        data.writeShortInt(offsets[i]);
    }
    if (!writtenData || !optimizeUpdate) {
        for (int i = 0; i < entryCount; i++) {
            data.setPos(offsets[i]);
            Row r = getRowAt(i);
            for (int j = 0; j < columnCount; j++) {
                data.writeValue(r.getValue(j));
            }
        }
        writtenData = true;
    }
    written = true;
}
private int insertRows() {
    session.getUser().checkRight(table, Right.INSERT);
    setCurrentRowNumber(0);
    table.fire(session, Trigger.INSERT, true);
    rowNumber = 0;
    int listSize = list.size();
    if (listSize > 0) {
        int columnLen = columns.length;
        for (int x = 0; x < listSize; x++) {
            Row newRow = table.getTemplateRow();
            // newRow has one slot per column of the table,
            // so its length is >= columns.length
            Expression[] expr = list.get(x);
            setCurrentRowNumber(x + 1);
            for (int i = 0; i < columnLen; i++) {
                Column c = columns[i];
                int index = c.getColumnId(); // zero-based
                Expression e = expr[i];
                if (e != null) {
                    // e can be null (DEFAULT)
                    e = e.optimize(session);
                    try {
                        Value v = c.convert(e.getValue(session));
                        newRow.setValue(index, v);
                    } catch (DbException ex) {
                        throw setRow(ex, x, getSQL(expr));
                    }
                }
            }
            rowNumber++;
            table.validateConvertUpdateSequence(session, newRow);
            // an INSTEAD OF trigger returns true
            boolean done = table.fireBeforeRow(session, null, newRow);
            if (!done) {
                // the lock is only released when the transaction commits or
                // rolls back; see org.h2.engine.Session.unlockAll()
                table.lock(session, true, false);
                table.addRow(session, newRow);
                // a page log entry was already written in
                // org.h2.index.PageDataIndex.addTry(Session, Row), via
                // org.h2.store.PageStore.logAddOrRemoveRow(Session, int, Row, boolean);
                // here an undo log entry is written as well.
                // The undo log is cleared in org.h2.engine.Session.commit(boolean).
                session.log(table, UndoLogRecord.INSERT, newRow);
                table.fireAfterRow(session, null, newRow, false);
            }
        }
    } else {
        table.lock(session, true, false);
        // insertFromSelect avoids iterating over the result twice: the query
        // already iterates internally, while the branch below has to iterate
        // over the result a second time
        if (insertFromSelect) {
            // calls back into addRow (below) for every row
            query.query(0, this);
        } else {
            ResultInterface rows = query.query(0);
            while (rows.next()) {
                Value[] r = rows.currentRow();
                addRow(r);
            }
            rows.close();
        }
    }
    table.fire(session, Trigger.INSERT, false);
    return rowNumber;
}
@Override
public Row getRow(Session session, long key) {
    TransactionMap<Value, Value> map = getMap(session);
    Value v = map.get(ValueLong.get(key));
    ValueArray array = (ValueArray) v;
    Row row = new Row(array.getList(), 0);
    row.setKey(key);
    return row;
}
/**
 * Commit the current transaction. If the statement was not a data
 * definition statement, and if there are temporary tables that should be
 * dropped or truncated at commit, this is done as well.
 *
 * @param ddl if the statement was a data definition statement
 */
public void commit(boolean ddl) {
    checkCommitRollback();
    currentTransactionName = null;
    transactionStart = 0;
    if (transaction != null) {
        // increment the data mod count, so that other sessions
        // see the changes
        // TODO should not rely on locking
        if (locks.size() > 0) {
            for (int i = 0, size = locks.size(); i < size; i++) {
                Table t = locks.get(i);
                if (t instanceof MVTable) {
                    ((MVTable) t).commit();
                }
            }
        }
        transaction.commit();
        transaction = null;
    }
    if (containsUncommitted()) {
        // need to commit even if rollback is not possible
        // (create/drop table and so on)
        database.commit(this);
    }
    removeTemporaryLobs(true);
    if (undoLog.size() > 0) {
        // commit the rows when using MVCC
        if (database.isMultiVersion()) {
            ArrayList<Row> rows = New.arrayList();
            synchronized (database) {
                while (undoLog.size() > 0) {
                    UndoLogRecord entry = undoLog.getLast();
                    entry.commit();
                    rows.add(entry.getRow());
                    undoLog.removeLast(false);
                }
                for (int i = 0, size = rows.size(); i < size; i++) {
                    Row r = rows.get(i);
                    r.commit();
                }
            }
        }
        undoLog.clear();
    }
    if (!ddl) {
        // do not clean the temp tables if the last command was a
        // create/drop
        cleanTempTables(false);
        if (autoCommitAtTransactionEnd) {
            autoCommit = true;
            autoCommitAtTransactionEnd = false;
        }
    }
    endTransaction();
}
/**
 * Read a row from an input stream.
 *
 * @param in the input stream
 * @param data a temporary buffer
 * @return the row
 */
public static Row readRow(DataReader in, Data data) throws IOException {
    long key = in.readVarLong();
    int len = in.readVarInt();
    data.reset();
    data.checkCapacity(len);
    in.readFully(data.getBytes(), len);
    int columnCount = data.readVarInt();
    Value[] values = new Value[columnCount];
    for (int i = 0; i < columnCount; i++) {
        values[i] = data.readValue();
    }
    Row row = new Row(values, Row.MEMORY_CALCULATE);
    row.setKey(key);
    return row;
}
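// A minimal sketch of the variable-length integer encoding that readVarLong /
// readVarInt above rely on (LEB128-style: 7 data bits per byte, high bit set
// while more bytes follow). Names here are illustrative, not H2's Data class.
import java.io.ByteArrayOutputStream;

final class VarInts {
    static void writeVarLong(ByteArrayOutputStream out, long v) {
        while ((v & ~0x7fL) != 0) {
            out.write((int) ((v & 0x7f) | 0x80)); // more bytes follow
            v >>>= 7;
        }
        out.write((int) v); // last byte: high bit clear
    }

    static long readVarLong(byte[] buf, int[] pos) {
        long v = 0;
        int shift = 0;
        byte b;
        do {
            b = buf[pos[0]++];
            v |= (long) (b & 0x7f) << shift;
            shift += 7;
        } while ((b & 0x80) != 0);
        return v;
    }
}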
/**
 * Add an undo log entry to this session.
 *
 * @param table the table
 * @param operation the operation type (see {@link UndoLogRecord})
 * @param row the row
 */
public void log(Table table, short operation, Row row) {
    if (table.isMVStore()) {
        return;
    }
    if (undoLogEnabled) {
        UndoLogRecord log = new UndoLogRecord(table, operation, row);
        // called _after_ the row was inserted successfully into the table,
        // otherwise rollback will try to rollback a not-inserted row
        if (SysProperties.CHECK) {
            int lockMode = database.getLockMode();
            if (lockMode != Constants.LOCK_MODE_OFF && !database.isMultiVersion()) {
                String tableType = log.getTable().getTableType();
                if (locks.indexOf(log.getTable()) < 0
                        && !Table.TABLE_LINK.equals(tableType)
                        && !Table.EXTERNAL_TABLE_ENGINE.equals(tableType)) {
                    DbException.throwInternalError();
                }
            }
        }
        undoLog.add(log);
    } else {
        if (database.isMultiVersion()) {
            // see also UndoLogRecord.commit
            ArrayList<Index> indexes = table.getIndexes();
            for (int i = 0, size = indexes.size(); i < size; i++) {
                Index index = indexes.get(i);
                index.commit(operation, row);
            }
            row.commit();
        }
    }
}
@Override
public void add(Session session, Row row) {
    if (closed) {
        throw DbException.throwInternalError();
    }
    TreeNode i = new TreeNode(row);
    TreeNode n = root, x = n;
    boolean isLeft = true;
    while (true) {
        if (n == null) {
            if (x == null) {
                root = i;
                rowCount++;
                return;
            }
            set(x, isLeft, i);
            break;
        }
        Row r = n.row;
        int compare = compareRows(row, r);
        if (compare == 0) {
            if (indexType.isUnique()) {
                if (!containsNullAndAllowMultipleNull(row)) {
                    throw getDuplicateKeyException(row.toString());
                }
            }
            compare = compareKeys(row, r);
        }
        isLeft = compare < 0;
        x = n;
        n = child(x, isLeft);
    }
    balance(x, isLeft);
    rowCount++;
}
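// A minimal sketch of the ordering trick above: when two rows compare equal on
// the indexed columns, a non-unique tree index falls back to the internal row
// key so every entry still has a distinct position in the tree. IndexedRow is
// a hypothetical stand-in for H2's Row; columns are simplified to longs.
import java.util.Comparator;

final class IndexedRow {
    final long key;        // internal row key, unique per row
    final long[] columns;  // values of the indexed columns

    IndexedRow(long key, long[] columns) {
        this.key = key;
        this.columns = columns;
    }
}

final class RowOrder implements Comparator<IndexedRow> {
    @Override
    public int compare(IndexedRow a, IndexedRow b) {
        for (int i = 0; i < a.columns.length; i++) {
            int c = Long.compare(a.columns[i], b.columns[i]);
            if (c != 0) {
                return c;
            }
        }
        // equal on all indexed columns: fall back to the row key so
        // duplicates still have a stable, total order
        return Long.compare(a.key, b.key);
    }
}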
private int getRowLength(Row row) {
    int size = 0;
    for (int i = 0; i < columnCount; i++) {
        size += data.getValueLen(row.getValue(i));
    }
    return size;
}
/**
 * Add a row to the list.
 *
 * @param r the row to add
 */
public void add(Row r) {
    list.add(r);
    memory += r.getMemory() + Constants.MEMORY_POINTER;
    if (maxMemory > 0 && memory > maxMemory) {
        writeAllRows();
    }
    size++;
}
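// A minimal sketch of the spill-to-disk pattern above: track an estimate of
// the in-memory size of buffered elements and flush them once a configurable
// budget is exceeded. SpillingBuffer and its flush target are illustrative,
// not H2 classes.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class SpillingBuffer<T> {
    private final List<T> buffered = new ArrayList<>();
    private final long maxMemory;          // budget in bytes; <= 0 disables spilling
    private final Consumer<List<T>> spill; // e.g. serialize to a temp file
    private long memory;                   // estimated bytes currently buffered

    SpillingBuffer(long maxMemory, Consumer<List<T>> spill) {
        this.maxMemory = maxMemory;
        this.spill = spill;
    }

    void add(T element, long estimatedBytes) {
        buffered.add(element);
        memory += estimatedBytes;
        if (maxMemory > 0 && memory > maxMemory) {
            spill.accept(buffered); // write everything out, keep buffering after
            buffered.clear();
            memory = 0;
        }
    }
}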
/**
 * A record is added to a table, or removed from a table.
 *
 * @param session the session
 * @param tableId the table id
 * @param row the row to add
 * @param add true if the row is added, false if it is removed
 */
void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) {
    if (trace.isDebugEnabled()) {
        trace.debug("log " + (add ? "+" : "-") + " s: " + session.getId() +
                " table: " + tableId + " row: " + row);
    }
    session.addLogPos(logSectionId, logPos);
    logPos++;
    Data data = dataBuffer;
    data.reset();
    int columns = row.getColumnCount();
    data.writeVarInt(columns);
    data.checkCapacity(row.getByteCount(data));
    if (session.isRedoLogBinaryEnabled()) {
        for (int i = 0; i < columns; i++) {
            data.writeValue(row.getValue(i));
        }
    } else {
        for (int i = 0; i < columns; i++) {
            Value v = row.getValue(i);
            if (v.getType() == Value.BYTES) {
                data.writeValue(ValueNull.INSTANCE);
            } else {
                data.writeValue(v);
            }
        }
    }
    Data buffer = getBuffer();
    buffer.writeByte((byte) (add ? ADD : REMOVE));
    buffer.writeVarInt(session.getId());
    buffer.writeVarInt(tableId);
    buffer.writeVarLong(row.getKey());
    if (add) {
        buffer.writeVarInt(data.length());
        buffer.checkCapacity(data.length());
        buffer.write(data.getBytes(), 0, data.length());
    }
    write(buffer);
}
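// A minimal sketch of the log record layout written above:
// [op byte][varint sessionId][varint tableId][varlong rowKey][varint payloadLen][payload],
// where the payload (serialized column values) is present only for ADD.
// This uses a plain ByteArrayOutputStream instead of H2's Data buffer, and the
// varint scheme is the LEB128-style one sketched earlier; names are illustrative.
import java.io.ByteArrayOutputStream;

final class LogRecordWriter {
    static final byte ADD = 1, REMOVE = 2;

    static byte[] encode(boolean add, int sessionId, int tableId,
            long rowKey, byte[] payload) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(add ? ADD : REMOVE);
        writeVarLong(out, sessionId);
        writeVarLong(out, tableId);
        writeVarLong(out, rowKey);
        if (add) {
            writeVarLong(out, payload.length);
            out.write(payload, 0, payload.length); // serialized column values
        }
        return out.toByteArray();
    }

    private static void writeVarLong(ByteArrayOutputStream out, long v) {
        while ((v & ~0x7fL) != 0) {
            out.write((int) ((v & 0x7f) | 0x80));
            v >>>= 7;
        }
        out.write((int) v);
    }
}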
/**
 * Partially roll back the current transaction.
 *
 * @param savepoint the savepoint to which should be rolled back
 * @param trimToSize if the list should be trimmed
 */
public void rollbackTo(Savepoint savepoint, boolean trimToSize) {
    int index = savepoint == null ? 0 : savepoint.logIndex;
    while (undoLog.size() > index) {
        UndoLogRecord entry = undoLog.getLast();
        entry.undo(this);
        undoLog.removeLast(trimToSize);
    }
    if (transaction != null) {
        long savepointId = savepoint == null ? 0 : savepoint.transactionSavepoint;
        HashMap<String, MVTable> tableMap = database.getMvStore().getTables();
        Iterator<Change> it = transaction.getChanges(savepointId);
        while (it.hasNext()) {
            Change c = it.next();
            MVTable t = tableMap.get(c.mapName);
            if (t != null) {
                long key = ((ValueLong) c.key).getLong();
                ValueArray value = (ValueArray) c.value;
                short op;
                Row row;
                if (value == null) {
                    op = UndoLogRecord.INSERT;
                    row = t.getRow(this, key);
                } else {
                    op = UndoLogRecord.DELETE;
                    row = new Row(value.getList(), Row.MEMORY_CALCULATE);
                }
                row.setKey(key);
                UndoLogRecord log = new UndoLogRecord(t, op, row);
                log.undo(this);
            }
        }
    }
    if (savepoints != null) {
        String[] names = new String[savepoints.size()];
        savepoints.keySet().toArray(names);
        for (String name : names) {
            Savepoint sp = savepoints.get(name);
            int savepointIndex = sp.logIndex;
            if (savepointIndex > index) {
                savepoints.remove(name);
            }
        }
    }
}
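// A minimal sketch of savepoint rollback as a stack of undo actions: a
// savepoint is just the undo-log length at the time it was taken, and rolling
// back pops and runs entries until the log shrinks to that length.
// Hypothetical types, not H2's UndoLog.
import java.util.ArrayDeque;
import java.util.Deque;

final class UndoStack {
    private final Deque<Runnable> undoLog = new ArrayDeque<>();

    int savepoint() {
        return undoLog.size(); // remember the current log length
    }

    void record(Runnable undoAction) {
        undoLog.push(undoAction);
    }

    void rollbackTo(int savepoint) {
        while (undoLog.size() > savepoint) {
            undoLog.pop().run(); // undo in reverse order of execution
        }
    }
}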
@Override
public void add(Session session, Row row) {
    if (mainIndexColumn == -1) {
        if (row.getKey() == 0) {
            row.setKey(++lastKey);
        }
    } else {
        long c = row.getValue(mainIndexColumn).getLong();
        row.setKey(c);
    }
    if (mvTable.getContainsLargeObject()) {
        for (int i = 0, len = row.getColumnCount(); i < len; i++) {
            Value v = row.getValue(i);
            Value v2 = v.link(database, getId());
            if (v2.isLinked()) {
                session.unlinkAtCommitStop(v2);
            }
            if (v != v2) {
                row.setValue(i, v2);
            }
        }
    }
    TransactionMap<Value, Value> map = getMap(session);
    Value key = ValueLong.get(row.getKey());
    Value old = map.getLatest(key);
    if (old != null) {
        String sql = "PRIMARY KEY ON " + table.getSQL();
        if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) {
            sql += "(" + indexColumns[mainIndexColumn].getSQL() + ")";
        }
        DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, sql);
        e.setSource(this);
        throw e;
    }
    try {
        map.put(key, ValueArray.get(row.getValueList()));
    } catch (IllegalStateException e) {
        throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName());
    }
    lastKey = Math.max(lastKey, row.getKey());
}
public void add(Session session, Row row) throws SQLException {
    Value key = getKey(row);
    IntArray positions = rows.get(key);
    if (positions == null) {
        positions = new IntArray(1);
        rows.put(key, positions);
    }
    positions.add((int) row.getKey());
    rowCount++;
}
@Override
public Row get() {
    if (row == null) {
        if (current != null) {
            ValueArray array = (ValueArray) current.getValue();
            row = session.createRow(array.getList(), 0);
            row.setKey(current.getKey().getLong());
        }
    }
    return row;
}
@Override
public void remove(Session session, Row row) {
    if (mvTable.getContainsLargeObject()) {
        for (int i = 0, len = row.getColumnCount(); i < len; i++) {
            Value v = row.getValue(i);
            if (v.isLinked()) {
                session.unlinkAtCommit(v);
            }
        }
    }
    TransactionMap<Value, Value> map = getMap(session);
    try {
        Value old = map.remove(ValueLong.get(row.getKey()));
        if (old == null) {
            throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1,
                    getSQL() + ": " + row.getKey());
        }
    } catch (IllegalStateException e) {
        throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName());
    }
}
@Override
public void addRow(Value[] values) {
    Row newRow = table.getTemplateRow();
    setCurrentRowNumber(++rowNumber);
    for (int j = 0, len = columns.length; j < len; j++) {
        Column c = columns[j];
        int index = c.getColumnId();
        try {
            Value v = c.convert(values[j]);
            newRow.setValue(index, v);
        } catch (DbException ex) {
            throw setRow(ex, rowNumber, getSQL(values));
        }
    }
    table.validateConvertUpdateSequence(session, newRow);
    boolean done = table.fireBeforeRow(session, null, newRow);
    if (!done) {
        table.addRow(session, newRow);
        session.log(table, UndoLogRecord.INSERT, newRow);
        table.fireAfterRow(session, null, newRow, false);
    }
}
private Row readRow(Data buff) {
    if (buff.readByte() == 0) {
        return null;
    }
    int mem = buff.readInt();
    int columnCount = buff.readInt();
    long key = buff.readLong();
    int version = buff.readInt();
    if (readUncached) {
        key = 0;
    }
    boolean deleted = buff.readInt() == 1;
    int sessionId = buff.readInt();
    Value[] values = new Value[columnCount];
    for (int i = 0; i < columnCount; i++) {
        Value v;
        if (buff.readByte() == 0) {
            v = null;
        } else {
            v = buff.readValue();
            if (v.isLinked()) {
                // the table id is 0 if it was linked when writing
                // a temporary entry
                if (v.getTableId() == 0) {
                    session.unlinkAtCommit(v);
                }
            }
        }
        values[i] = v;
    }
    Row row = new Row(values, mem);
    row.setKey(key);
    row.setVersion(version);
    row.setDeleted(deleted);
    row.setSessionId(sessionId);
    return row;
}
private Cursor find(Row row) {
    if (index == null) {
        // for the case "in(select ...)", the query might
        // use an optimization and not create the index
        // up front
        createIndex();
    }
    Cursor cursor = index.find(session, row, row);
    while (cursor.next()) {
        SearchRow found = cursor.getSearchRow();
        boolean ok = true;
        Database db = session.getDatabase();
        for (int i = 0; i < row.getColumnCount(); i++) {
            if (!db.areEqual(row.getValue(i), found.getValue(i))) {
                ok = false;
                break;
            }
        }
        if (ok) {
            return cursor;
        }
    }
    return null;
}
public void remove(Session session, Row row) throws SQLException {
    if (rowCount == 1) {
        // last row in table
        reset();
    } else {
        Value key = getKey(row);
        IntArray positions = rows.get(key);
        if (positions.size() == 1) {
            // last row with such key
            rows.remove(key);
        } else {
            positions.removeValue((int) row.getKey());
        }
        rowCount--;
    }
}
private void writeRow(Data buff, Row r) {
    buff.checkCapacity(1 + Data.LENGTH_INT * 8);
    buff.writeByte((byte) 1);
    buff.writeInt(r.getMemory());
    int columnCount = r.getColumnCount();
    buff.writeInt(columnCount);
    buff.writeLong(r.getKey());
    buff.writeInt(r.getVersion());
    buff.writeInt(r.isDeleted() ? 1 : 0);
    buff.writeInt(r.getSessionId());
    for (int i = 0; i < columnCount; i++) {
        Value v = r.getValue(i);
        buff.checkCapacity(1);
        if (v == null) {
            buff.writeByte((byte) 0);
        } else {
            buff.writeByte((byte) 1);
            if (v.getType() == Value.CLOB || v.getType() == Value.BLOB) {
                // need to keep a reference to temporary lobs,
                // otherwise the temp file is deleted
                if (v.getSmall() == null && v.getTableId() == 0) {
                    if (lobs == null) {
                        lobs = New.arrayList();
                    }
                    // need to create a copy, otherwise,
                    // if stored multiple times, it may be renamed
                    // and then not found
                    v = v.copyToTemp();
                    lobs.add(v);
                }
            }
            buff.checkCapacity(buff.getValueLen(v));
            buff.writeValue(v);
        }
    }
}
/**
 * @param cctx Cache context.
 * @param qry Query.
 * @param keepPortable Keep portable.
 * @return Cursor.
 */
public Iterator<List<?>> query(GridCacheContext<?, ?> cctx, GridCacheTwoStepQuery qry,
        boolean keepPortable) {
    for (int attempt = 0;; attempt++) {
        if (attempt != 0) {
            try {
                Thread.sleep(attempt * 10); // Wait for exchange.
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();

                throw new CacheException("Query was interrupted.", e);
            }
        }

        long qryReqId = reqIdGen.incrementAndGet();

        QueryRun r = new QueryRun();

        r.pageSize = qry.pageSize() <= 0 ? GridCacheTwoStepQuery.DFLT_PAGE_SIZE : qry.pageSize();

        r.idxs = new ArrayList<>(qry.mapQueries().size());

        String space = cctx.name();

        r.conn = (JdbcConnection) h2.connectionForSpace(space);

        AffinityTopologyVersion topVer = h2.readyTopologyVersion();

        List<String> extraSpaces = extraSpaces(space, qry.spaces());

        Collection<ClusterNode> nodes;

        // Explicit partition mapping for unstable topology.
        Map<ClusterNode, IntArray> partsMap = null;

        if (isPreloadingActive(cctx, extraSpaces)) {
            if (cctx.isReplicated())
                nodes = replicatedUnstableDataNodes(cctx, extraSpaces);
            else {
                partsMap = partitionedUnstableDataNodes(cctx, extraSpaces);

                nodes = partsMap == null ? null : partsMap.keySet();
            }
        }
        else
            nodes = stableDataNodes(topVer, cctx, extraSpaces);

        if (nodes == null)
            continue; // Retry.

        assert !nodes.isEmpty();

        if (cctx.isReplicated() || qry.explain()) {
            assert qry.explain() || !nodes.contains(ctx.discovery().localNode()) :
                "We must be on a client node.";

            // Select random data node to run query on a replicated data or
            // get EXPLAIN PLAN from a single node.
            nodes = Collections.singleton(F.rand(nodes));
        }

        int tblIdx = 0;

        final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();

        for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
            GridMergeIndex idx;

            if (!skipMergeTbl) {
                GridMergeTable tbl;

                try {
                    tbl = createMergeTable(r.conn, mapQry, qry.explain());
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }

                idx = tbl.getScanIndex(null);

                fakeTable(r.conn, tblIdx++).setInnerTable(tbl);
            }
            else
                idx = GridMergeIndexUnsorted.createDummy();

            for (ClusterNode node : nodes)
                idx.addSource(node.id());

            r.idxs.add(idx);
        }

        r.latch = new CountDownLatch(r.idxs.size() * nodes.size());

        runs.put(qryReqId, r);

        try {
            if (ctx.clientDisconnected()) {
                throw new CacheException("Query was cancelled, client node disconnected.",
                    new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(),
                        "Client node disconnected."));
            }

            Collection<GridCacheSqlQuery> mapQrys = qry.mapQueries();

            if (qry.explain()) {
                mapQrys = new ArrayList<>(qry.mapQueries().size());

                for (GridCacheSqlQuery mapQry : qry.mapQueries())
                    mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query(),
                        mapQry.parameters()));
            }

            if (nodes.size() != 1 || !F.first(nodes).isLocal()) { // Marshall params for remotes.
                Marshaller m = ctx.config().getMarshaller();

                for (GridCacheSqlQuery mapQry : mapQrys)
                    mapQry.marshallParams(m);
            }

            boolean retry = false;

            if (send(nodes,
                new GridQueryRequest(qryReqId, r.pageSize, space, mapQrys, topVer, extraSpaces, null),
                partsMap)) {
                awaitAllReplies(r, nodes);

                Object state = r.state.get();

                if (state != null) {
                    if (state instanceof CacheException) {
                        CacheException err = (CacheException) state;

                        if (err.getCause() instanceof IgniteClientDisconnectedException)
                            throw err;

                        throw new CacheException("Failed to run map query remotely.", err);
                    }

                    if (state instanceof AffinityTopologyVersion) {
                        retry = true;

                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion((AffinityTopologyVersion) state);
                    }
                }
            }
            else // Send failed.
                retry = true;

            Iterator<List<?>> resIter = null;

            if (!retry) {
                if (qry.explain())
                    return explainPlan(r.conn, space, qry);

                if (skipMergeTbl) {
                    List<List<?>> res = new ArrayList<>();

                    assert r.idxs.size() == 1 : r.idxs;

                    GridMergeIndex idx = r.idxs.get(0);

                    Cursor cur = idx.findInStream(null, null);

                    while (cur.next()) {
                        Row row = cur.get();

                        int cols = row.getColumnCount();

                        List<Object> resRow = new ArrayList<>(cols);

                        for (int c = 0; c < cols; c++)
                            resRow.add(row.getValue(c).getObject());

                        res.add(resRow);
                    }

                    resIter = res.iterator();
                }
                else {
                    GridCacheSqlQuery rdc = qry.reduceQuery();

                    // Statement caching is prohibited here because we can't guarantee
                    // correct merge index reuse.
                    ResultSet res = h2.executeSqlQueryWithTimer(space, r.conn, rdc.query(),
                        F.asList(rdc.parameters()), false);

                    resIter = new Iter(res);
                }
            }

            for (GridMergeIndex idx : r.idxs) {
                if (!idx.fetchedAll()) // We have to explicitly cancel queries on remote nodes.
                    send(nodes, new GridQueryCancelRequest(qryReqId), null);
            }

            if (retry) {
                if (Thread.currentThread().isInterrupted())
                    throw new IgniteInterruptedCheckedException("Query was interrupted.");

                continue;
            }

            return new GridQueryCacheObjectsIterator(resIter, cctx, keepPortable);
        }
        catch (IgniteCheckedException | RuntimeException e) {
            U.closeQuiet(r.conn);

            if (e instanceof CacheException)
                throw (CacheException) e;

            Throwable cause = e;

            if (e instanceof IgniteCheckedException) {
                Throwable disconnectedErr =
                    ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);

                if (disconnectedErr != null)
                    cause = disconnectedErr;
            }

            throw new CacheException("Failed to run reduce query locally.", cause);
        }
        finally {
            if (!runs.remove(qryReqId, r))
                U.warn(log, "Query run was already removed: " + qryReqId);

            if (!skipMergeTbl) {
                for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
                    fakeTable(null, i).setInnerTable(null); // Drop all merge tables.
            }
        }
    }
}
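// A minimal sketch of the retry loop that drives the method above: on an
// unstable topology the whole map/reduce round is retried with a short,
// linearly growing sleep, and interruption aborts the query. The Attempt
// interface and RetryException are illustrative, not Ignite API.
final class RetryLoop {
    interface Attempt<T> {
        T run() throws RetryException; // failures that warrant a retry throw
    }

    static final class RetryException extends Exception {
    }

    static <T> T runWithRetry(Attempt<T> attempt) {
        for (int i = 0;; i++) {
            if (i != 0) {
                try {
                    Thread.sleep(i * 10L); // wait for topology/exchange to settle
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException("Query was interrupted.", e);
                }
            }
            try {
                return attempt.run(); // success: return the result
            } catch (RetryException retry) {
                // fall through and try again with a longer pause
            }
        }
    }
}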
int addRowTry(Row row) {
    index.getPageStore().logUndo(this, data);
    int rowLength = getRowLength(row);
    int pageSize = index.getPageStore().getPageSize();
    int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
    int keyOffsetPairLen = 2 + Data.getVarLongLen(row.getKey());
    if (entryCount > 0 && last - rowLength < start + keyOffsetPairLen) {
        int x = findInsertionPoint(row.getKey());
        if (entryCount > 1) {
            if (entryCount < 5) {
                // required, otherwise the index doesn't work correctly
                return entryCount / 2;
            }
            if (index.isSortedInsertMode()) {
                return x < 2 ? 1 : x > entryCount - 1 ? entryCount - 1 : x;
            }
            // split near the insertion point to better fill pages
            // split in half would be:
            // return entryCount / 2;
            int third = entryCount / 3;
            return x < third ? third : x >= 2 * third ? 2 * third : x;
        }
        return x;
    }
    index.getPageStore().logUndo(this, data);
    int x;
    if (entryCount == 0) {
        x = 0;
    } else {
        if (!optimizeUpdate) {
            readAllRows();
        }
        x = findInsertionPoint(row.getKey());
    }
    written = false;
    changeCount = index.getPageStore().getChangeCount();
    last = x == 0 ? pageSize : offsets[x - 1];
    int offset = last - rowLength;
    start += keyOffsetPairLen;
    offsets = insert(offsets, entryCount, x, offset);
    add(offsets, x + 1, entryCount + 1, -rowLength);
    keys = insert(keys, entryCount, x, row.getKey());
    rows = insert(rows, entryCount, x, row);
    entryCount++;
    index.getPageStore().update(this);
    if (optimizeUpdate) {
        if (writtenData && offset >= start) {
            byte[] d = data.getBytes();
            int dataStart = offsets[entryCount - 1] + rowLength;
            int dataEnd = offsets[x];
            System.arraycopy(d, dataStart, d, dataStart - rowLength,
                    dataEnd - dataStart + rowLength);
            data.setPos(dataEnd);
            for (int j = 0; j < columnCount; j++) {
                data.writeValue(row.getValue(j));
            }
        }
    }
    if (offset < start) {
        writtenData = false;
        if (entryCount > 1) {
            DbException.throwInternalError();
        }
        // need to write the overflow page id
        start += 4;
        int remaining = rowLength - (pageSize - start);
        // fix offset
        offset = start;
        offsets[x] = offset;
        int previous = getPos();
        int dataOffset = pageSize;
        int page = index.getPageStore().allocatePage();
        firstOverflowPageId = page;
        this.overflowRowSize = pageSize + rowLength;
        writeData();
        // free up the space used by the row
        Row r = rows[0];
        rowRef = new SoftReference<Row>(r);
        rows[0] = null;
        Data all = index.getPageStore().createData();
        all.checkCapacity(data.length());
        all.write(data.getBytes(), 0, data.length());
        data.truncate(index.getPageStore().getPageSize());
        do {
            int type, size, next;
            if (remaining <= pageSize - PageDataOverflow.START_LAST) {
                type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST;
                size = remaining;
                next = 0;
            } else {
                type = Page.TYPE_DATA_OVERFLOW;
                size = pageSize - PageDataOverflow.START_MORE;
                next = index.getPageStore().allocatePage();
            }
            PageDataOverflow overflow = PageDataOverflow.create(index.getPageStore(),
                    page, type, previous, next, all, dataOffset, size);
            index.getPageStore().update(overflow);
            dataOffset += size;
            remaining -= size;
            previous = page;
            page = next;
        } while (remaining > 0);
    }
    if (rowRef == null) {
        memoryChange(true, row);
    } else {
        memoryChange(true, null);
    }
    return -1;
}
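// A minimal sketch of the split-point heuristic above: when a page is full,
// split near the insertion point (clamped to the middle third) rather than
// exactly in half, so sequential and skewed insert patterns fill pages better.
// SplitPoint is illustrative, not an H2 class.
final class SplitPoint {
    static int choose(int entryCount, int insertionPoint, boolean sortedInsertMode) {
        if (entryCount < 5) {
            return entryCount / 2; // tiny pages: plain halving is required
        }
        if (sortedInsertMode) {
            // keep almost everything on the left page for append-style loads
            return Math.max(1, Math.min(insertionPoint, entryCount - 1));
        }
        int third = entryCount / 3;
        if (insertionPoint < third) {
            return third;
        }
        return insertionPoint >= 2 * third ? 2 * third : insertionPoint;
    }
}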
@Override
public boolean next() {
    synchronized (sync) {
        if (SysProperties.CHECK && end) {
            DbException.throwInternalError();
        }
        while (true) {
            if (needNewDelta) {
                loadNext(false);
                needNewDelta = false;
            }
            if (needNewBase) {
                loadNext(true);
                needNewBase = false;
            }
            if (deltaRow == null) {
                if (baseRow == null) {
                    end = true;
                    return false;
                }
                onBase = true;
                needNewBase = true;
                return true;
            }
            int sessionId = deltaRow.getSessionId();
            boolean isThisSession = sessionId == session.getId();
            boolean isDeleted = deltaRow.isDeleted();
            if (isThisSession && isDeleted) {
                needNewDelta = true;
                continue;
            }
            if (baseRow == null) {
                if (isDeleted) {
                    if (isThisSession) {
                        end = true;
                        return false;
                    }
                    // the row was deleted by another session: return it
                    onBase = false;
                    needNewDelta = true;
                    return true;
                }
                DbException.throwInternalError();
            }
            int compare = index.compareRows(deltaRow, baseRow);
            if (compare == 0) {
                // can't use compareKeys because the
                // version would be compared as well
                long k1 = deltaRow.getKey();
                long k2 = baseRow.getKey();
                compare = MathUtils.compareLong(k1, k2);
            }
            if (compare == 0) {
                if (isDeleted) {
                    if (isThisSession) {
                        DbException.throwInternalError();
                    }
                    // another session updated the row
                } else {
                    if (isThisSession) {
                        onBase = false;
                        needNewBase = true;
                        needNewDelta = true;
                        return true;
                    }
                    // another session inserted the row: ignore
                    needNewBase = true;
                    needNewDelta = true;
                    continue;
                }
            }
            if (compare > 0) {
                onBase = true;
                needNewBase = true;
                return true;
            }
            onBase = false;
            needNewDelta = true;
            return true;
        }
    }
}
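// A minimal sketch of the two-cursor merge the method above performs: advance
// a "base" iterator and a "delta" iterator in sorted order, letting the delta
// row win on a tie. The session-visibility and deleted-row rules are omitted;
// MergingCursor is a hypothetical type, not H2's cursor API.
import java.util.Iterator;

final class MergingCursor<T extends Comparable<T>> {
    private final Iterator<T> base, delta;
    private T baseRow, deltaRow;

    MergingCursor(Iterator<T> base, Iterator<T> delta) {
        this.base = base;
        this.delta = delta;
        baseRow = base.hasNext() ? base.next() : null;
        deltaRow = delta.hasNext() ? delta.next() : null;
    }

    /** Returns the next row in merged order, or null when both inputs are done. */
    T next() {
        if (deltaRow == null && baseRow == null) {
            return null;
        }
        // pick delta when base is exhausted, or when delta sorts first or ties
        boolean useDelta = baseRow == null
                || (deltaRow != null && deltaRow.compareTo(baseRow) <= 0);
        T result;
        if (useDelta) {
            if (baseRow != null && deltaRow.compareTo(baseRow) == 0) {
                baseRow = base.hasNext() ? base.next() : null; // skip shadowed base row
            }
            result = deltaRow;
            deltaRow = delta.hasNext() ? delta.next() : null;
        } else {
            result = baseRow;
            baseRow = base.hasNext() ? base.next() : null;
        }
        return result;
    }
}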
private void memoryChange(boolean add, Row r) {
    int diff = r == null ? 0 : 4 + 8 + Constants.MEMORY_POINTER + r.getMemory();
    memoryData += add ? diff : -diff;
    index.memoryChange((Constants.MEMORY_PAGE_DATA + memoryData +
            index.getPageStore().getPageSize()) >> 2);
}