/**
 * Add an object to the end of the Journal.
 *
 * @param obj the object to add
 * @throws com.nfsdb.exceptions.JournalException if there is an error
 */
public void append(T obj) throws JournalException {

    if (obj == null) {
        throw new JournalException("Cannot append NULL to %s", this);
    }

    if (!txActive) {
        beginTx();
    }

    if (checkOrder) {
        long timestamp = getTimestamp(obj);

        if (timestamp > appendTimestampHi) {
            switchAppendPartition(timestamp);
        }

        if (timestamp < appendTimestampLo) {
            throw new JournalException(
                    "Cannot insert records out of order. maxHardTimestamp=%d (%s), timestamp=%d (%s): %s",
                    appendTimestampLo, Dates.toString(appendTimestampLo),
                    timestamp, Dates.toString(timestamp), this);
        }

        appendPartition.append(obj);
        appendTimestampLo = timestamp;
    } else {
        getAppendPartition().append(obj);
    }
}
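// Usage sketch (illustrative, not part of this class): appending records in
// timestamp order. The JournalFactory wiring and the Quote model class are
// assumptions borrowed from typical nfsdb usage; only append()/commit() are
// defined by this writer.
//
//     try (JournalWriter<Quote> writer = factory.writer(Quote.class)) {
//         Quote q = new Quote();
//         q.setTimestamp(System.currentTimeMillis());
//         writer.append(q);   // throws JournalException if q is null or out of order
//         writer.commit();
//     }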
/**
 * Routes records from an ordered iterator to their destinations: records older
 * than the {@code hard} cutoff are discarded, records older than the {@code soft}
 * cutoff are appended to the journal proper, and the remainder is written to the
 * given partition. The {@code doDiscard}/{@code doJournal} flags are instance
 * fields primed by {@code mergeAppend()}, so their state carries across
 * consecutive calls to this method.
 */
private void splitAppend(Iterator<T> it, long hard, long soft, Partition<T> partition) throws JournalException {
    while (it.hasNext()) {
        T obj = it.next();
        if (doDiscard && getTimestamp(obj) < hard) {
            // discard
            continue;
        } else if (doDiscard) {
            doDiscard = false;
        }

        if (doJournal && getTimestamp(obj) < soft) {
            append(obj);
            continue;
        } else if (doJournal) {
            doJournal = false;
        }

        partition.append(obj);
    }
}
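// Worked example of the hard/soft cutoffs (hypothetical values): with
// hard = 10:00:00.000 and soft = 10:05:00.000, a record stamped 09:59 is
// dropped while doDiscard is set, a record stamped 10:03 is routed back
// through append() while doJournal is set, and a record stamped 10:07 is
// written to the supplied partition. Because the input is ordered by
// timestamp, each flag is cleared permanently the first time a record
// crosses its boundary, so later records short-circuit the comparison.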
/**
 * Merges an ordered batch of records into the journal through the lag partition,
 * re-ordering against existing lag records where the two timestamp ranges overlap.
 *
 * @param data batch of records, ordered by timestamp
 * @throws JournalException if the journal is not configured with a lag partition
 *                          or the merge fails
 */
public void mergeAppend(PeekingIterator<T> data) throws JournalException {

    if (lagMillis == 0) {
        throw new JournalException("This journal is not configured to have lag partition");
    }

    beginTx();

    if (data == null || data.isEmpty()) {
        return;
    }

    long dataMaxTimestamp = getTimestamp(data.peekLast());
    long hard = getAppendTimestampLo();

    if (dataMaxTimestamp < hard) {
        return;
    }

    final Partition<T> lagPartition = openOrCreateLagPartition();
    this.doDiscard = true;
    this.doJournal = true;

    long dataMinTimestamp = getTimestamp(data.peekFirst());
    long lagMaxTimestamp = getMaxTimestamp();
    long lagMinTimestamp = lagPartition.size() == 0L ? 0 : getTimestamp(lagPartition.read(0));
    long soft = Math.max(dataMaxTimestamp, lagMaxTimestamp) - lagMillis;

    if (dataMinTimestamp > lagMaxTimestamp) {
        // this could be as simple as just appending data to lag;
        // the only complication is that after adding records the lag could swell
        // beyond the allocated "lagSwellTimestamp", so we should check whether
        // that is going to happen and optimise copying of data

        long lagSizeMillis;
        if (hard > 0L) {
            lagSizeMillis = dataMaxTimestamp - hard;
        } else if (lagMinTimestamp > 0L) {
            lagSizeMillis = dataMaxTimestamp - lagMinTimestamp;
        } else {
            lagSizeMillis = 0L;
        }

        if (lagSizeMillis > lagSwellMillis) {
            // data would be too big and would stretch outside of the swell timestamp;
            // this is when the lag partition should be split, but it is still a
            // straight split without re-ordering
            Partition<T> tempPartition = createTempPartition().open();
            splitAppend(lagPartition.bufferedIterator(), hard, soft, tempPartition);
            splitAppend(data, hard, soft, tempPartition);
            replaceIrregularPartition(tempPartition);
        } else {
            // simplest case, just append to lag
            lagPartition.append(data);
        }
    } else {
        Partition<T> tempPartition = createTempPartition().open();

        if (dataMinTimestamp > lagMinTimestamp && dataMaxTimestamp < lagMaxTimestamp) {
            //
            // overlap scenario 1: data is fully inside of lag
            //

            // calc boundaries of lag that intersect with data
            long lagMid1 = lagPartition.indexOf(dataMinTimestamp, BSearchType.OLDER_OR_SAME);
            long lagMid2 = lagPartition.indexOf(dataMaxTimestamp, BSearchType.NEWER_OR_SAME);

            // copy part of lag above data
            splitAppend(lagPartition.bufferedIterator(0, lagMid1), hard, soft, tempPartition);

            // merge lag with data and copy result to temp partition
            splitAppendMerge(data, lagPartition.bufferedIterator(lagMid1 + 1, lagMid2 - 1), hard, soft, tempPartition);

            // copy part of lag below data
            splitAppend(lagPartition.bufferedIterator(lagMid2, lagPartition.size() - 1), hard, soft, tempPartition);
        } else if (dataMaxTimestamp < lagMinTimestamp) {
            //
            // overlap scenario 2: data sits directly above lag
            //
            splitAppend(data, hard, soft, tempPartition);
            splitAppend(lagPartition.bufferedIterator(), hard, soft, tempPartition);
        } else if (dataMinTimestamp <= lagMinTimestamp && dataMaxTimestamp < lagMaxTimestamp) {
            //
            // overlap scenario 3: bottom part of data overlaps top part of lag
            //

            // calc overlap line
            long split = lagPartition.indexOf(dataMaxTimestamp, BSearchType.NEWER_OR_SAME);

            // merge lag with data and copy result to temp partition
            splitAppendMerge(data, lagPartition.bufferedIterator(0, split - 1), hard, soft, tempPartition);

            // copy part of lag below data
            splitAppend(lagPartition.bufferedIterator(split, lagPartition.size() - 1), hard, soft, tempPartition);
        } else if (dataMinTimestamp > lagMinTimestamp && dataMaxTimestamp >= lagMaxTimestamp) {
            //
            // overlap scenario 4: top part of data overlaps with bottom part of lag
            //
            long split = lagPartition.indexOf(dataMinTimestamp, BSearchType.OLDER_OR_SAME);

            // copy part of lag above overlap
            splitAppend(lagPartition.bufferedIterator(0, split), hard, soft, tempPartition);

            // merge lag with data and copy result to temp partition
            splitAppendMerge(data, lagPartition.bufferedIterator(split + 1, lagPartition.size() - 1), hard, soft, tempPartition);
        } else if (dataMinTimestamp <= lagMinTimestamp && dataMaxTimestamp >= lagMaxTimestamp) {
            //
            // overlap scenario 5: lag is fully inside of data
            //

            // merge lag with data and copy result to temp partition
            splitAppendMerge(data, lagPartition.bufferedIterator(), hard, soft, tempPartition);
        } else {
            throw new JournalRuntimeException(
                    "Unsupported overlap type: lag min/max [%s/%s] data min/max: [%s/%s]",
                    Dates.toString(lagMinTimestamp), Dates.toString(lagMaxTimestamp),
                    Dates.toString(dataMinTimestamp), Dates.toString(dataMaxTimestamp));
        }

        replaceIrregularPartition(tempPartition);
    }
}
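// Usage sketch (illustrative): merging a near-ordered batch through the lag
// partition. The source of the batch and how it is wrapped in a PeekingIterator
// are assumptions; mergeAppend() itself only requires the batch to be ordered
// by timestamp.
//
//     PeekingIterator<Quote> batch = ...; // ordered batch from an upstream source
//     writer.mergeAppend(batch);          // scenarios 1-5 above pick the merge strategy
//     writer.commit();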