@Override public void abort(Transaction txn) { debug("abort"); if (nodeTableJournal == null) throw new TDBTransactionException( txn.getLabel() + ": Not in a transaction for a commit to happen"); // Ensure the cache does not flush. nodeTableJournal = null; // then make sure the journal file is empty. if (journalObjFile != null) { journalObjFile.truncate(journalObjFileStartOffset); journalObjFile.sync(); } finish(); }
@Override public void begin(Transaction txn) { // debug("%s begin", txn.getLabel()) ; if (this.txn.getTxnId() != txn.getTxnId()) throw new TDBException( String.format("Different transactions: %s %s", this.txn.getLabel(), txn.getLabel())); if (passthrough) throw new TDBException("Already active"); passthrough = false; allocOffset = base.allocOffset().getId(); // base node table empty e.g. first use. journalObjFileStartOffset = journalObjFile.length(); // Because the data is written in prepare, the journal of object data is // always empty at the start of a transaction. if (journalObjFileStartOffset != 0) warn( log, "%s journalStartOffset not zero: %d/0x%02X", txn.getLabel(), journalObjFileStartOffset, journalObjFileStartOffset); allocOffset += journalObjFileStartOffset; this.nodeTableJournal = new NodeTableNative(nodeIndex, journalObjFile); this.nodeTableJournal = NodeTableCache.create(nodeTableJournal, CacheSize, CacheSize, 100); // This class knows about non-mappable inline values. mapToJournal(NodeId)/mapFromJournal. this.nodeTableJournal = NodeTableInline.create(nodeTableJournal); }
@Override public void close() { if (nodeIndex != null) nodeIndex.close(); nodeIndex = null; // Closing the journal flushes it; i.e. disk IO. if (journalObjFile != null) journalObjFile.close(); journalObjFile = null; }
@Override public void commitPrepare(Transaction txn) { debug("commitPrepare"); // The node table is append-only so it can be written during prepare. // The index isn't written (via the transaction journal) until enact. if (nodeTableJournal == null) throw new TDBTransactionException( txn.getLabel() + ": Not in a transaction for a commit to happen"); writeNodeJournal(); if (journalObjFile != null && journalObjFile.length() != 0) { long x = journalObjFile.length(); throw new TDBTransactionException( txn.getLabel() + ": journalObjFile not cleared (" + x + ")"); } }
private void writeNodeJournal() { long expected = base.allocOffset().getId(); long len = journalObjFile.length(); if (expected != allocOffset) warn(log, "Inconsistency: base.allocOffset() = %d : allocOffset = %d", expected, allocOffset); long newbase = -1; append(); // Calls all() which does a buffer flish. // Reset (in case we use this again) nodeIndex.clear(); journalObjFile.truncate(journalObjFileStartOffset); // Side effect is a buffer flush. // journalObjFile.sync() ; journalObjFile.close(); // Side effect is a buffer flush. journalObjFile = null; base.sync(); allocOffset = -99; // base.allocOffset().getId() ; // Will be invalid as we may write through to the base // table later. passthrough = true; }
// Wrap a base node table with transactional behaviour backed by the given
// node index and object file (the journal).
public NodeTableTrans(Transaction txn, String label, NodeTable sub, Index nodeIndex, ObjectFile objFile) {
    this.txn = txn;
    this.label = label;
    this.base = sub;
    this.nodeIndex = nodeIndex;
    this.journalObjFile = objFile;
    // Discard bytes left over from an old run: a crash while writing means
    // the old transaction did not commit, so anything in the file is junk.
    // This is coupled to the fact the prepare phase does the actual data writing.
    journalObjFile.truncate(0);
}
// Debugging only private void dump() { System.err.println(">>>>>>>>>>"); System.err.println("label = " + label); System.err.println("txn = " + txn); System.err.println("offset = " + allocOffset); System.err.println("journalStartOffset = " + journalObjFileStartOffset); System.err.println("journal = " + journalObjFile.getLabel()); if (true) return; System.err.println("nodeTableJournal >>>"); Iterator<Pair<NodeId, Node>> iter = nodeTableJournal.all(); for (; iter.hasNext(); ) { Pair<NodeId, Node> x = iter.next(); NodeId nodeId = x.getLeft(); Node node = x.getRight(); NodeId mapped = mapFromJournal(nodeId); // debug("append: %s -> %s", x, mapFromJournal(nodeId)) ; // This does the write. NodeId nodeId2 = base.getAllocateNodeId(node); System.err.println(x + " mapped=" + mapped + " getAlloc=" + nodeId2); } System.err.println("journal >>>"); Iterator<Pair<Long, ByteBuffer>> iter1 = this.journalObjFile.all(); for (; iter1.hasNext(); ) { Pair<Long, ByteBuffer> p = iter1.next(); System.err.println(p.getLeft() + " : " + p.getRight()); ByteBufferLib.print(System.err, p.getRight()); } System.err.println("nodeIndex >>>"); Iterator<Record> iter2 = this.nodeIndex.iterator(); for (; iter2.hasNext(); ) { Record r = iter2.next(); System.err.println(r); } System.err.println("<<<<<<<<<<"); }