Code example #1
File: JournalWriter.java  Project: ame89/nfsdb
  public void mergeAppend(PeekingIterator<T> data) throws JournalException {

    if (lagMillis == 0) {
      throw new JournalException("This journal is not configured to have lag partition");
    }

    beginTx();

    if (data == null || data.isEmpty()) {
      return;
    }

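    // newest timestamp in the incoming batch and the journal's hard append
    // boundary, i.e. the lowest timestamp that is still appendable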
    long dataMaxTimestamp = getTimestamp(data.peekLast());
    long hard = getAppendTimestampLo();

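    // the entire batch falls below the hard boundary, so there is nothing to merge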
    if (dataMaxTimestamp < hard) {
      return;
    }

    final Partition<T> lagPartition = openOrCreateLagPartition();
    this.doDiscard = true;
    this.doJournal = true;

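    // boundaries of the incoming batch and of the current lag partition; the soft
    // boundary trails the newest known timestamp by the configured lag window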
    long dataMinTimestamp = getTimestamp(data.peekFirst());
    long lagMaxTimestamp = getMaxTimestamp();
    long lagMinTimestamp = lagPartition.size() == 0L ? 0 : getTimestamp(lagPartition.read(0));
    long soft = Math.max(dataMaxTimestamp, lagMaxTimestamp) - lagMillis;

    if (dataMinTimestamp > lagMaxTimestamp) {
      // this could be as simple as appending data to the lag; the only complication
      // is that after adding records the lag could swell beyond the allocated
      // "lagSwellTimestamp", so check for that and optimise how the data is copied

      long lagSizeMillis;
      if (hard > 0L) {
        lagSizeMillis = dataMaxTimestamp - hard;
      } else if (lagMinTimestamp > 0L) {
        lagSizeMillis = dataMaxTimestamp - lagMinTimestamp;
      } else {
        lagSizeMillis = 0L;
      }

      if (lagSizeMillis > lagSwellMillis) {
        // data would be too big and would stretch beyond the swell timestamp;
        // the lag partition has to be split, but it is still a straight split
        // without re-ordering

        Partition<T> tempPartition = createTempPartition().open();
        splitAppend(lagPartition.bufferedIterator(), hard, soft, tempPartition);
        splitAppend(data, hard, soft, tempPartition);
        replaceIrregularPartition(tempPartition);
      } else {
        // simplest case, just append to lag
        lagPartition.append(data);
      }
    } else {

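      // data overlaps the lag partition: every branch below rebuilds the lag into
      // a fresh temp partition, which replaces the old one at the end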
      Partition<T> tempPartition = createTempPartition().open();
      if (dataMinTimestamp > lagMinTimestamp && dataMaxTimestamp < lagMaxTimestamp) {
        //
        // overlap scenario 1: data is fully inside of lag
        //

        // calc boundaries of lag that intersects with data
        long lagMid1 = lagPartition.indexOf(dataMinTimestamp, BSearchType.OLDER_OR_SAME);
        long lagMid2 = lagPartition.indexOf(dataMaxTimestamp, BSearchType.NEWER_OR_SAME);

        // copy part of lag above data
        splitAppend(lagPartition.bufferedIterator(0, lagMid1), hard, soft, tempPartition);

        // merge lag with data and copy result to temp partition
        splitAppendMerge(
            data,
            lagPartition.bufferedIterator(lagMid1 + 1, lagMid2 - 1),
            hard,
            soft,
            tempPartition);

        // copy part of lag below data
        splitAppend(
            lagPartition.bufferedIterator(lagMid2, lagPartition.size() - 1),
            hard,
            soft,
            tempPartition);

      } else if (dataMaxTimestamp < lagMinTimestamp) {
        //
        // overlap scenario 2: data sits directly above lag
        //
        splitAppend(data, hard, soft, tempPartition);
        splitAppend(lagPartition.bufferedIterator(), hard, soft, tempPartition);
      } else if (dataMinTimestamp <= lagMinTimestamp && dataMaxTimestamp < lagMaxTimestamp) {
        //
        // overlap scenario 3: bottom part of data overlaps top part of lag
        //

        // calc overlap line
        long split = lagPartition.indexOf(dataMaxTimestamp, BSearchType.NEWER_OR_SAME);

        // merge lag with data and copy result to temp partition
        splitAppendMerge(
            data, lagPartition.bufferedIterator(0, split - 1), hard, soft, tempPartition);

        // copy part of lag below data
        splitAppend(
            lagPartition.bufferedIterator(split, lagPartition.size() - 1),
            hard,
            soft,
            tempPartition);
      } else if (dataMinTimestamp > lagMinTimestamp && dataMaxTimestamp >= lagMaxTimestamp) {
        //
        // overlap scenario 4: top part of data overlaps with bottom part of lag
        //
        long split = lagPartition.indexOf(dataMinTimestamp, BSearchType.OLDER_OR_SAME);

        // copy part of lag above overlap
        splitAppend(lagPartition.bufferedIterator(0, split), hard, soft, tempPartition);

        // merge lag with data and copy result to temp partition
        splitAppendMerge(
            data,
            lagPartition.bufferedIterator(split + 1, lagPartition.size() - 1),
            hard,
            soft,
            tempPartition);
      } else if (dataMinTimestamp <= lagMinTimestamp && dataMaxTimestamp >= lagMaxTimestamp) {
        //
        // overlap scenario 5: lag is fully inside of data
        //

        // merge lag with data and copy result to temp partition
        splitAppendMerge(data, lagPartition.bufferedIterator(), hard, soft, tempPartition);
      } else {
        throw new JournalRuntimeException(
            "Unsupported overlap type: lag min/max [%s/%s] data min/max: [%s/%s]",
            Dates.toString(lagMinTimestamp),
            Dates.toString(lagMaxTimestamp),
            Dates.toString(dataMinTimestamp),
            Dates.toString(dataMaxTimestamp));
      }

      replaceIrregularPartition(tempPartition);
    }
  }
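
To make the branch structure easier to follow, the sketch below reproduces the same timestamp comparisons mergeAppend uses to pick a merge path. It is not part of nfsdb; the class name, enum, and classify method are made up for illustration, and it only classifies the overlap, while the actual copying is done by splitAppend / splitAppendMerge above.

// Standalone illustration only: mirrors the overlap classification in mergeAppend.
public final class LagOverlapSketch {

  enum Kind {
    APPEND,               // data entirely newer than lag: plain append
    DATA_INSIDE_LAG,      // scenario 1
    DATA_ABOVE_LAG,       // scenario 2
    DATA_OVERLAPS_TOP,    // scenario 3
    DATA_OVERLAPS_BOTTOM, // scenario 4
    LAG_INSIDE_DATA,      // scenario 5
    UNSUPPORTED
  }

  static Kind classify(long dataMin, long dataMax, long lagMin, long lagMax) {
    if (dataMin > lagMax) {
      return Kind.APPEND;
    }
    if (dataMin > lagMin && dataMax < lagMax) {
      return Kind.DATA_INSIDE_LAG;
    }
    if (dataMax < lagMin) {
      return Kind.DATA_ABOVE_LAG;
    }
    if (dataMin <= lagMin && dataMax < lagMax) {
      return Kind.DATA_OVERLAPS_TOP;
    }
    if (dataMin > lagMin && dataMax >= lagMax) {
      return Kind.DATA_OVERLAPS_BOTTOM;
    }
    if (dataMin <= lagMin && dataMax >= lagMax) {
      return Kind.LAG_INSIDE_DATA;
    }
    // defensive fallback, mirroring the JournalRuntimeException branch above
    return Kind.UNSUPPORTED;
  }

  public static void main(String[] args) {
    System.out.println(classify(150, 250, 100, 300)); // DATA_INSIDE_LAG
    System.out.println(classify(400, 500, 100, 300)); // APPEND
    System.out.println(classify(50, 350, 100, 300));  // LAG_INSIDE_DATA
  }
}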