/**
   * Changes all keys to "prefix keys" in the given IN. Called after reading an IN from disk via
   * IN.postFetchInit.
   *
   * <p>The conversion of IN keys is invoked from the IN class when an IN is fetched, rather than
   * invoked from the DupConvert class directly, for performance and simplicity. If it were invoked
   * from the DupConvert class, we would have to iterate over all INs in a separate initial pass.
   * That would be both more time-consuming and more complex to implement properly so that
   * eviction remains possible. Instead, conversion occurs when an old-format IN is loaded.
   *
   * <p>Enter/leave with 'in' unlatched.
   */
  public static void convertInKeys(final DatabaseImpl dbImpl, final IN in) {

    /* Nothing to convert for a non-duplicates DB. */
    if (!dbImpl.getSortedDuplicates()) {
      return;
    }

    /* DIN/DBIN do not need conversion either. */
    if (in instanceof DIN || in instanceof DBIN) {
      return;
    }

    in.latch();
    try {
      for (int i = 0; i < in.getNEntries(); i += 1) {
        byte[] oldKey = in.getKey(i);
        byte[] newKey = DupKeyData.makePrefixKey(oldKey, 0, oldKey.length);
        in.updateEntry(i, in.getTarget(i), in.getLsn(i), newKey);
      }

      byte[] oldKey = in.getIdentifierKey();
      byte[] newKey = DupKeyData.makePrefixKey(oldKey, 0, oldKey.length);
      in.setIdentifierKey(newKey);

      assert in.verifyMemorySize();
    } finally {
      in.releaseLatch();
    }
  }
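
  /*
   * A minimal sketch (not JE code) of the convert-on-fetch pattern described
   * above: each node's slot keys are rewritten once, when the node is loaded,
   * instead of in an eager pass over the whole tree. makePrefixKey here is a
   * hypothetical stand-in; the real key layout comes from
   * DupKeyData.makePrefixKey.
   */
  class ConvertOnFetchSketch {

    /** Hypothetical stand-in for DupKeyData.makePrefixKey (real layout differs). */
    static byte[] makePrefixKey(byte[] key, int off, int len) {
      byte[] out = new byte[len + 1]; // hypothetical: old key plus a marker byte
      System.arraycopy(key, off, out, 0, len);
      return out;
    }

    /** Called from a postFetchInit-style hook, once per loaded node. */
    static void convertKeys(byte[][] slotKeys) {
      for (int i = 0; i < slotKeys.length; i++) {
        slotKeys[i] = makePrefixKey(slotKeys[i], 0, slotKeys[i].length);
      }
    }
  }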
 private boolean noDupNodesPresent() {
   for (IN in : envImpl.getInMemoryINs()) {
     if (in instanceof DIN || in instanceof DBIN) {
       System.out.println(in.toString());
       return false;
     }
   }
   return true;
 }
  /**
   * Converts the given DIN and its descendants.
   *
   * <p>Enter/leave with the 'bin' field latched, although 'bin' will change to the last inserted slot.
   */
  private void convertDin(final DIN din, final byte[] binKey) {
    din.latch();
    try {
      for (int i = 0; i < din.getNEntries(); i += 1) {

        final IN child = din.fetchIN(i, CacheMode.DEFAULT);

        assert (!child.isBINDelta(false));

        if (child instanceof DBIN) {
          final DBIN dbin = (DBIN) child;
          dbin.latch();
          try {
            for (int j = 0; j < dbin.getNEntries(); j += 1) {
              if (!isLNDeleted(dbin, j)) {
                convertDbinSlot(dbin, j, binKey);
              }
            }
            assert dbin.verifyMemorySize();

            /* Count DBIN obsolete. */
            if (dbin.getLastLoggedLsn() != DbLsn.NULL_LSN) {
              localTracker.countObsoleteNodeInexact(
                  dbin.getLastLoggedLsn(), dbin.getLogType(), 0, dbin.getDatabase());
            }
          } finally {
            dbin.releaseLatch();
          }
        } else {
          convertDin((DIN) child, binKey);
        }

        /* Evict DIN child. */
        din.detachNode(i, false /*updateLsn*/, -1 /*lsn*/);
      }

      assert din.verifyMemorySize();

      /* Count DIN and DupCountLN obsolete. */
      if (din.getLastLoggedLsn() != DbLsn.NULL_LSN) {
        localTracker.countObsoleteNodeInexact(
            din.getLastLoggedLsn(), din.getLogType(), 0, din.getDatabase());
      }
      final ChildReference dupCountRef = din.getDupCountLNRef();
      if (dupCountRef != null && dupCountRef.getLsn() != DbLsn.NULL_LSN) {
        localTracker.countObsoleteNodeInexact(
            dupCountRef.getLsn(), LogEntryType.LOG_DUPCOUNTLN, 0, din.getDatabase());
      }
    } finally {
      din.releaseLatch();
    }
  }
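
  /*
   * The traversal shape of convertDin, restated as a toy (not JE code):
   * recurse depth-first, convert leaf (DBIN) slots, and detach each child
   * after converting it so the old-format subtree can be evicted.
   */
  class ToyDupWalk {

    static void walk(java.util.List children) {
      for (int i = 0; i < children.size(); i++) {
        Object child = children.get(i);
        if (child instanceof java.util.List) {
          walk((java.util.List) child); /* internal node (DIN): recurse */
        } else {
          /* leaf slot: convert it (stand-in for convertDbinSlot) */
        }
        children.set(i, null); /* stand-in for din.detachNode(i, ...) */
      }
    }
  }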
Example #4
  /* Called once at environment startup by MemoryBudget */
  public static long computeOverhead(DbConfigManager configManager) throws DatabaseException {

    /*
     * Overhead consists of all the fields in this class plus the
     * entry arrays in the IN class.
     */
    return MemoryBudget.BIN_FIXED_OVERHEAD + IN.computeArraysOverhead(configManager);
  }
    /** Flushes the root IN if it is dirty; always returns null (the in-memory root is not replaced here). */
    public IN doWork(ChildReference root) throws DatabaseException {

      if (root == null) {
        return null;
      }
      IN rootIN = (IN) root.fetchTarget(db, null);
      rootIN.latch();
      try {
        if (rootIN.getNodeId() == target.getNodeId()) {

          /*
           * stillRoot handles the race condition where the root
           * splits after the target's latch is released.
           */
          stillRoot = true;
          if (rootIN.getDirty()) {
            DbLsn newLsn = rootIN.log(logManager);
            root.setLsn(newLsn);
            flushed = true;
          }
        }
      } finally {
        rootIN.releaseLatch();
      }
      return null;
    }
Example #6
  /** Public for testing. */
  public long calcTreeCacheUsage() throws DatabaseException {

    long totalSize = 0;
    INList inList = envImpl.getInMemoryINs();

    inList.latchMajor();
    try {
      Iterator iter = inList.iterator();
      while (iter.hasNext()) {
        IN in = (IN) iter.next();
        long size = in.getInMemorySize();
        totalSize += size;
      }
    } finally {
      inList.releaseMajorLatch();
    }
    return totalSize;
  }
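
  /*
   * With generics and an enhanced for loop (Java 5+), the same walk reads as
   * the equivalent idiom below (assuming INList implements Iterable<IN>, as
   * its use in noDupNodesPresent above suggests).
   */
  public long calcTreeCacheUsage5() throws DatabaseException {
    long totalSize = 0;
    INList inList = envImpl.getInMemoryINs();
    inList.latchMajor();
    try {
      for (IN in : inList) {
        totalSize += in.getInMemorySize();
      }
    } finally {
      inList.releaseMajorLatch();
    }
    return totalSize;
  }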
 public void putIN(IN in) {
   Integer level = new Integer(in.getLevel());
   Set inSet = (Set) get(level);
   if (inSet == null) {
     inSet = new HashSet();
     put(level, inSet);
   }
   inSet.add(in);
 }
Example #8
  /**
   * Mark this entry as deleted, using the delete flag. Only BINs may do this. Don't null the target
   * field.
   *
   * <p>This is used so that an LN can still be locked by the compressor even if the entry is
   * knownDeleted. See BIN.compress.
   *
   * @param index indicates target entry
   */
  public void setKnownDeletedLeaveTarget(int index) {

    /*
     * The migrate flag is cleared since migration is never needed for
     * known deleted entries.
     */
    setMigrate(index, false);
    super.setKnownDeleted(index);
    setDirty(true);
  }
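
  /*
   * Contrast with setKnownDeleted (Example #11 below), which additionally
   * nulls the target and releases its memory budget.
   */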
  /**
   * Flush the nodes in order, from the lowest level to highest level. As a flush dirties its
   * parent, add it to the dirty map, thereby cascading the writes up the tree. If flushAll wasn't
   * specified, we need only cascade up to the highest level that existed before the checkpointing
   * started.
   *
   * <p>Note that all but the top-level INs and the BINDeltas are logged provisionally. That's
   * because recovery need not process the lower INs; the higher INs will end up pointing at them.
   */
  private void flushDirtyNodes(boolean flushAll, boolean allowDeltas, boolean flushExtraLevel)
      throws DatabaseException {

    LogManager logManager = envImpl.getLogManager();

    SortedMap dirtyMap = selectDirtyINs(flushAll, flushExtraLevel);

    while (dirtyMap.size() > 0) {

      /* Work on one level's worth of nodes in ascending level order. */
      Integer currentLevel = (Integer) dirtyMap.firstKey();
      boolean logProvisionally = (currentLevel.intValue() != highestFlushLevel);

      Set nodeSet = (Set) dirtyMap.get(currentLevel);
      Iterator iter = nodeSet.iterator();

      /* Flush all those nodes */
      while (iter.hasNext()) {
        IN target = (IN) iter.next();
        target.latch();
        boolean triedToFlush = false;

        /*
         * Only flush the ones that are still dirty -- some
         * may have been written out by the evictor. Also
         * check if the db is still valid -- since INs of
         * deleted databases are left on the in-memory tree
         * until the evictor lazily clears them out, there may
         * be dead INs around.
         */
        if (target.getDirty() && (!target.getDatabase().getIsDeleted())) {
          flushIN(target, logManager, dirtyMap, logProvisionally, allowDeltas);
          triedToFlush = true;
        } else {
          target.releaseLatch();
        }

        Tracer.trace(
            Level.FINE,
            envImpl,
            "Checkpointer: node="
                + target.getNodeId()
                + " level="
                + Integer.toHexString(target.getLevel())
                + " flushed="
                + triedToFlush);
      }

      /* We're done with this level. */
      dirtyMap.remove(currentLevel);

      /* We can stop at this point. */
      if (currentLevel.intValue() == highestFlushLevel) {
        break;
      }
    }
  }
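
  /*
   * Self-contained sketch of the cascading loop above, with strings standing
   * in for INs and parentOf for the parent lookup (toy names, not JE code).
   * Flushing a node dirties its parent, which is re-queued one level up; the
   * loop stops once highestFlushLevel has been processed.
   */
  static void flushByLevelSketch(
      java.util.SortedMap<Integer, java.util.Set<String>> dirty,
      int highestFlushLevel,
      java.util.function.UnaryOperator<String> parentOf) {

    while (!dirty.isEmpty()) {
      Integer level = dirty.firstKey();
      for (String node : dirty.remove(level)) {
        System.out.println("flush " + node + " at level " + level);
        String parent = parentOf.apply(node);
        if (parent != null) {
          /* The flush dirtied the parent: cascade the write upward. */
          dirty.computeIfAbsent(level + 1, k -> new java.util.HashSet<>()).add(parent);
        }
      }
      if (level == highestFlushLevel) {
        break;
      }
    }
  }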
  /**
   * Add a node to the dirty map. The dirty map is keyed by level (Integers) and holds sets of IN
   * references.
   */
  private void addToDirtyMap(Map dirtyMap, IN in) {
    Integer inLevel = new Integer(in.getLevel());
    Set inSet = (Set) dirtyMap.get(inLevel);

    /* If this level doesn't exist in the map yet, make a new entry. */
    if (inSet == null) {
      inSet = new HashSet();
      dirtyMap.put(inLevel, inSet);
    }

    /* Add to the set. */
    inSet.add(in);
  }
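
  /*
   * On Java 8+ the same get-or-create step collapses into one call via
   * Map.computeIfAbsent; an equivalent idiom, not the original code.
   */
  private void addToDirtyMapJava8(java.util.Map<Integer, java.util.Set<IN>> dirtyMap, IN in) {
    dirtyMap.computeIfAbsent(in.getLevel(), k -> new java.util.HashSet<>()).add(in);
  }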
Example #11
  /**
   * Mark this entry as deleted, using the delete flag. Only BINs may do this.
   *
   * @param index indicates target entry
   */
  @Override
  public void setKnownDeleted(int index) {

    /*
     * The target is cleared to save memory, since a known deleted entry
     * will never be fetched.  The migrate flag is also cleared, since
     * migration is never needed for known deleted entries.
     */
    super.setKnownDeleted(index);

    /*
     * We know it's an LN because we never call setKnownDeleted for
     * an IN.
     */
    LN oldLN = (LN) getTarget(index);
    updateMemorySize(oldLN, null /* newNode */);
    if (oldLN != null) {
      oldLN.releaseMemoryBudget();
    }
    setMigrate(index, false);
    super.setTarget(index, null);
    setDirty(true);
  }
Example #12
  @Override
  public void beforeLog(LogManager logManager, INLogItem item, INLogContext context)
      throws DatabaseException {

    EnvironmentImpl envImpl = getDatabase().getDbEnvironment();

    /* Allow the cleaner to migrate LNs before logging. */
    envImpl.getCleaner().lazyMigrateLNs(this, context.proactiveMigration, context.backgroundIO);

    /* Check for dirty LNs in deferred-write databases. */
    if (getDatabase().isDeferredWriteMode()) {
      logDirtyLNs(logManager);
    }

    /*
     * We can log a delta rather than a full version of this BIN if
     * - this has been called from the checkpointer with allowDeltas=true
     * - there is a full version on disk
     * - we meet the percentage heuristics defined by environment params.
     * - this delta is not prohibited because of cleaning or compression
     * - this is not a deferred write db
     * All other logging should be of the full version.
     */
    boolean doDeltaLog = false;
    BINDelta deltaInfo = null;
    if (context.allowDeltas
        && getLastFullVersion() != DbLsn.NULL_LSN
        && !prohibitNextDelta
        && !getDatabase().isDeferredWriteMode()) {
      deltaInfo = new BINDelta(this);
      doDeltaLog = doDeltaLog(deltaInfo);
    }

    if (doDeltaLog) {
      item.provisional = Provisional.NO;
      item.oldLsn = DbLsn.NULL_LSN;
      item.entry = new SingleItemEntry(getBINDeltaType(), deltaInfo);
      item.isDelta = true;
    } else {
      /* Log a full version of the IN. */
      super.beforeLog(logManager, item, context);
    }
  }
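
  /*
   * Hypothetical sketch of the "percentage heuristics" mentioned above: log a
   * delta only if few enough slots changed and not too many consecutive
   * deltas have been logged. Parameter names are illustrative; they are not
   * JE's actual environment params.
   */
  private static boolean doDeltaLogSketch(
      int dirtySlots, int totalSlots, int deltasSinceFull,
      int maxDeltaPercent, int maxDeltasSinceFull) {

    if (deltasSinceFull >= maxDeltasSinceFull) {
      return false; /* force a periodic full BIN so recovery stays cheap */
    }
    return dirtySlots * 100 <= totalSlots * maxDeltaPercent;
  }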
Example #13
  @Override
  public void afterLog(LogManager logManager, INLogItem item, INLogContext context)
      throws DatabaseException {

    if (item.isDelta) {

      /*
       * Don't change the dirtiness of the node -- leave it dirty. Deltas
       * are never provisional; they must be processed at recovery time.
       */
      lastDeltaVersion = item.newLsn;
      item.newLsn = DbLsn.NULL_LSN;
      numDeltasSinceLastFull++;
    } else {
      super.afterLog(logManager, item, context);
      lastDeltaVersion = DbLsn.NULL_LSN;
      numDeltasSinceLastFull = 0;
    }
    prohibitNextDelta = false;
  }
Example #14
  MemoryBudget(EnvironmentImpl envImpl, DbConfigManager configManager) throws DatabaseException {

    this.envImpl = envImpl;

    /* Request notification of mutable property changes. */
    envImpl.addConfigObserver(this);

    /* Perform first-time budget initialization. */
    reset(configManager);

    /*
     * Calculate IN and BIN overheads, which are a function of
     * capacity. These values are stored in this class so that they can be
     * calculated once per environment. The logic to do the calculations is
     * left in the respective node classes so it can be done properly in
     * the domain of those objects.
     */
    inOverhead = IN.computeOverhead(configManager);
    binOverhead = BIN.computeOverhead(configManager);
    dinOverhead = DIN.computeOverhead(configManager);
    dbinOverhead = DBIN.computeOverhead(configManager);
  }
Example #15
  /**
   * Get the key (dupe or identifier) in the child that is used to locate it in 'this' node. For BINs,
   * the child node has to be a DIN so we use the Dup Key to cross the main-tree/dupe-tree boundary.
   */
  @Override
  public byte[] getChildKey(IN child) throws DatabaseException {

    return child.getDupKey();
  }
Example #16
  /**
   * Adjust any cursors that are referring to this BIN. This method is called during a split
   * operation. "this" is the BIN being split. newSibling is the new BIN into which the entries from
   * "this" between newSiblingLow and newSiblingHigh have been copied.
   *
   * @param newSibling the new sibling into which "this" has been split.
   * @param newSiblingLow the low entry of "this" that was moved into newSibling.
   * @param newSiblingHigh the high entry of "this" that was moved into newSibling.
   */
  @Override
  void adjustCursors(IN newSibling, int newSiblingLow, int newSiblingHigh) {
    assert newSibling.isLatchOwnerForWrite();
    assert this.isLatchOwnerForWrite();
    int adjustmentDelta = (newSiblingHigh - newSiblingLow);
    Iterator<CursorImpl> iter = cursorSet.iterator();
    while (iter.hasNext()) {
      CursorImpl cursor = iter.next();
      if (getCursorBINToBeRemoved(cursor) == this) {

        /*
         * This BIN will be removed from the cursor by CursorImpl
         * following advance to next BIN; ignore it.
         */
        continue;
      }
      int cIdx = getCursorIndex(cursor);
      BIN cBin = getCursorBIN(cursor);
      assert cBin == this : "nodeId=" + getNodeId() + " cursor=" + cursor.dumpToString(true);
      assert newSibling instanceof BIN;

      /*
       * There are four cases to consider for cursor adjustments,
       * depending on (1) how the existing node gets split, and (2) where
       * the cursor points to currently.  In cases 1 and 2, the id key of
       * the node being split is to the right of the splitindex so the
       * new sibling gets the node entries to the left of that index.
       * This is indicated by "new sibling" to the left of the vertical
       * split line below.  The right side of the node contains entries
       * that will remain in the existing node (although they've been
       * shifted to the left).  The caret (^) indicates where the
       * cursor currently points.
       *
       * case 1:
       *
       *   We need to set the cursor's "bin" reference to point at the
       *   new sibling, but we don't need to adjust its index since that
       *   continues to be correct post-split.
       *
       *   +=======================================+
       *   |  new sibling        |  existing node  |
       *   +=======================================+
       *         cursor ^
       *
       * case 2:
       *
       *   We only need to adjust the cursor's index since it continues
       *   to point to the current BIN post-split.
       *
       *   +=======================================+
       *   |  new sibling        |  existing node  |
       *   +=======================================+
       *                              cursor ^
       *
       * case 3:
       *
       *   Do nothing.  The cursor continues to point at the correct BIN
       *   and index.
       *
       *   +=======================================+
       *   |  existing Node        |  new sibling  |
       *   +=======================================+
       *         cursor ^
       *
       * case 4:
       *
       *   Adjust the "bin" pointer to point at the new sibling BIN and
       *   also adjust the index.
       *
       *   +=======================================+
       *   |  existing Node        |  new sibling  |
       *   +=======================================+
       *                                 cursor ^
       */
      BIN ns = (BIN) newSibling;
      if (newSiblingLow == 0) {
        if (cIdx < newSiblingHigh) {
          /* case 1 */
          setCursorBIN(cursor, ns);
          iter.remove();
          ns.addCursor(cursor);
        } else {
          /* case 2 */
          setCursorIndex(cursor, cIdx - adjustmentDelta);
        }
      } else {
        if (cIdx >= newSiblingLow) {
          /* case 4 */
          setCursorIndex(cursor, cIdx - newSiblingLow);
          setCursorBIN(cursor, ns);
          iter.remove();
          ns.addCursor(cursor);
        }
      }
    }
  }
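
  /*
   * A worked check of the index arithmetic above, with toy numbers (not JE
   * code). A 10-slot BIN splits; the sibling takes either the left or the
   * right half.
   */
  class SplitAdjustExample {
    public static void main(String[] args) {
      /* Sibling took the left half [0, 4): newSiblingLow=0, newSiblingHigh=4. */
      int adjustmentDelta = 4 - 0;
      System.out.println("case 1: cIdx 2 -> moves to sibling, index stays 2");
      System.out.println("case 2: cIdx 7 -> stays here, index " + (7 - adjustmentDelta));
      /* Sibling took the right half [6, 10): newSiblingLow=6. */
      int newSiblingLow = 6;
      System.out.println("case 3: cIdx 2 -> untouched");
      System.out.println("case 4: cIdx 8 -> moves to sibling, index " + (8 - newSiblingLow));
    }
  }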
  /** Flush the target IN. */
  private void flushIN(
      IN target, LogManager logManager, Map dirtyMap, boolean logProvisionally, boolean allowDeltas)
      throws DatabaseException {

    DatabaseImpl db = target.getDatabase();
    Tree tree = db.getTree();
    boolean targetWasRoot = false;

    if (target.isDbRoot()) {
      /* We're trying to flush the root. */
      target.releaseLatch();
      RootFlusher flusher = new RootFlusher(db, logManager, target);
      tree.withRootLatched(flusher);
      boolean flushed = flusher.getFlushed();

      /*
       * We have to check if the root split between target.releaseLatch
       * and the execution of the root flusher. If it did split, this
       * target has to get handled like a regular node.
       */
      targetWasRoot = flusher.stillRoot();

      /*
       * Update the tree's owner, whether it's the env root or the
       * dbmapping tree.
       */
      if (flushed) {
        DbTree dbTree = db.getDbEnvironment().getDbMapTree();
        dbTree.modifyDbRoot(db);
        nFullINFlushThisRun++;
        nFullINFlush++;
      }
      if (!targetWasRoot) {
        /*
         * re-latch for another attempt, now that this is no longer
         * the root.
         */
        target.latch();
      }
    }

    if (!targetWasRoot) {
      SearchResult result = tree.getParentINForChildIN(target, true);

      /*
       * Found a parent, do the flush. If no parent found, the
       * compressor deleted this item before we got to processing it.
       */
      if (result.exactParentFound) {
        try {
          ChildReference entry = result.parent.getEntry(result.index);
          IN renewedTarget = (IN) entry.fetchTarget(db, result.parent);
          renewedTarget.latch();
          DbLsn newLsn = null;
          try {

            /* Still dirty? */
            if (renewedTarget.getDirty()) {
              if (allowDeltas) {
                newLsn = renewedTarget.logAllowDeltas(logManager, logProvisionally);
                if (newLsn == null) {
                  nDeltaINFlushThisRun++;
                  nDeltaINFlush++;
                }
              } else {
                newLsn = renewedTarget.log(logManager, logProvisionally);
              }
            }
          } finally {
            renewedTarget.releaseLatch();
          }

          /* Update parent if logging occurred */
          if (newLsn != null) {
            nFullINFlushThisRun++;
            nFullINFlush++;
            if (renewedTarget instanceof BIN) {
              nFullBINFlush++;
            }
            result.parent.updateEntry(result.index, newLsn);
            addToDirtyMap(dirtyMap, result.parent);
          }
        } finally {
          result.parent.releaseLatch();
        }
      }
    }
  }
Example #18
 /**
  * Clear the known deleted flag. Only BINs may do this.
  *
  * @param index indicates target entry
  */
 @Override
 public void clearKnownDeleted(int index) {
   super.clearKnownDeleted(index);
   setDirty(true);
 }
Example #19
  /*
   * If this search can go further, set result.parent to the child. If it
   * can't, and this IN is a possible new parent to this child, set
   * result.parent to this IN. If the search can't go further and this IN
   * can't be a parent to this child, set result.parent to null.
   */
  @Override
  protected void descendOnParentSearch(
      SearchResult result,
      boolean targetContainsDuplicates,
      boolean targetIsRoot,
      long targetNodeId,
      Node child,
      boolean requireExactMatch)
      throws DatabaseException {

    if (child.canBeAncestor(targetContainsDuplicates)) {
      if (targetContainsDuplicates && targetIsRoot) {

        /*
         * Don't go further -- the target is a root of a dup tree, so
         * this BIN will have to be the parent.
         */
        long childNid = child.getNodeId();
        ((IN) child).releaseLatch();

        result.keepSearching = false; // stop searching

        if (childNid == targetNodeId) { // set if exact find
          result.exactParentFound = true;
        } else {
          result.exactParentFound = false;
        }

        /*
         * Return a reference to this node unless we need an exact
         * match and this isn't exact.
         */
        if (requireExactMatch && !result.exactParentFound) {
          result.parent = null;
          releaseLatch();
        } else {
          result.parent = this;
        }

      } else {
        /*
         * Go further down into the dup tree.
         */
        releaseLatch();
        result.parent = (IN) child;
      }
    } else {

      /*
       * Our search ends; we didn't find it. If we need an exact match,
       * give up; if we only need a potential match, keep this node
       * latched and return it.
       */
      result.exactParentFound = false;
      result.keepSearching = false;
      if (!requireExactMatch && targetContainsDuplicates) {
        result.parent = this;
      } else {
        releaseLatch();
        result.parent = null;
      }
    }
  }
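
  /*
   * The outcomes above, restated as a side-effect-free summary (toy code,
   * not JE; the returned strings mirror the SearchResult fields that the
   * real method sets).
   */
  static String describeParentSearchOutcome(
      boolean canBeAncestor, boolean targetContainsDuplicates,
      boolean targetIsRoot, boolean exactMatch, boolean requireExactMatch) {

    if (canBeAncestor) {
      if (targetContainsDuplicates && targetIsRoot) {
        if (exactMatch) return "stop: parent = this (exact)";
        if (requireExactMatch) return "stop: parent = null";
        return "stop: parent = this (potential)";
      }
      return "keep searching: descend into child";
    }
    if (!requireExactMatch && targetContainsDuplicates) {
      return "stop: parent = this (potential)";
    }
    return "stop: parent = null";
  }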
  /*
   * Scan the INList for all dirty INs and arrange them in a level-sorted
   * map for level-ordered flushing.
   */
  private SortedMap selectDirtyINs(boolean flushAll, boolean flushExtraLevel)
      throws DatabaseException {

    SortedMap newDirtyMap = new TreeMap();

    INList inMemINs = envImpl.getInMemoryINs();
    inMemINs.latchMajor();

    /*
     * Opportunistically recalculate the environment-wide memory
     * count.  Incurs no extra cost because we're walking the IN
     * list anyway.  Not the best in terms of encapsulation, as
     * preferably all memory calculations would be done in
     * MemoryBudget, but done this way to avoid any extra
     * latching.
     */
    long totalSize = 0;
    MemoryBudget mb = envImpl.getMemoryBudget();

    try {
      Iterator iter = inMemINs.iterator();
      while (iter.hasNext()) {
        IN in = (IN) iter.next();
        in.latch();
        totalSize = mb.accumulateNewUsage(in, totalSize);
        boolean isDirty = in.getDirty();
        in.releaseLatch();
        if (isDirty) {
          Integer level = new Integer(in.getLevel());
          Set dirtySet;
          if (newDirtyMap.containsKey(level)) {
            dirtySet = (Set) newDirtyMap.get(level);
          } else {
            dirtySet = new HashSet();
            newDirtyMap.put(level, dirtySet);
          }
          dirtySet.add(in);
        }
      }

      // Later release: refresh env count.
      // mb.refreshCacheMemoryUsage(totalSize);

      /*
       * If we're flushing all for cleaning, we must flush to
       * the point that there are no nodes with LSNs in the
       * cleaned files.  We could figure this out by perusing
       * every node to see what children it has, but that's so
       * expensive that instead we'll flush to the root.
       */
      if (newDirtyMap.size() > 0) {
        if (flushAll) {
          highestFlushLevel = envImpl.getDbMapTree().getHighestLevel();
        } else {
          highestFlushLevel = ((Integer) newDirtyMap.lastKey()).intValue();
          if (flushExtraLevel) {
            highestFlushLevel += 1;
          }
        }
      }

    } finally {
      inMemINs.releaseMajorLatchIfHeld();
    }

    return newDirtyMap;
  }
Example #21
 public long accumulateNewUsage(IN in, long newSize) {
   return in.getInMemorySize() + newSize;
 }
Example #22
 /* Presumably a feature-composition hook (AHEAD/FeatureHouse style), where
  * original() invokes the next refinement of this method; it is not plain Java. */
 protected void hook625(Node child) throws DatabaseException {
   ((IN) child).releaseLatch();
   original(child);
 }
Example #23
  public void testEntryData() throws Throwable {

    try {
      ByteBuffer buffer = ByteBuffer.allocate(1000);
      database = new DatabaseImpl("foo", new DatabaseId(1), env, new DatabaseConfig());

      /*
       * For each loggable object, can we write the entry data out?
       */

      /*
       * Tracer records.
       */
      Tracer dMsg = new Tracer("Hello there");
      writeAndRead(buffer, LogEntryType.LOG_TRACE, dMsg, new Tracer());

      /*
       * LNs
       */
      String data = "abcdef";
      LN ln = new LN(data.getBytes());
      LN lnFromLog = new LN();
      writeAndRead(buffer, LogEntryType.LOG_LN, ln, lnFromLog);
      lnFromLog.verify(null);
      assertTrue(LogEntryType.LOG_LN.marshallOutsideLatch());

      FileSummaryLN fsLN = new FileSummaryLN(new FileSummary());
      FileSummaryLN fsLNFromLog = new FileSummaryLN();
      writeAndRead(buffer, LogEntryType.LOG_FILESUMMARYLN, fsLN, fsLNFromLog);
      assertFalse(LogEntryType.LOG_FILESUMMARYLN.marshallOutsideLatch());

      /*
       * INs
       */
      IN in = new IN(database, new byte[] {1, 0, 1, 0}, 7, 5);
      in.latch();
      in.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      in.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      in.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(35, 400)));

      /* Write it. */
      IN inFromLog = new IN();
      inFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
      inFromLog.releaseLatch();
      in.releaseLatch();

      /*
       * IN - long form
       */
      in = new IN(database, new byte[] {1, 0, 1, 0}, 7, 5);
      in.latch();
      in.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      in.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      in.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(1235, 400)));
      in.insertEntry(
          new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(0xFFFFFFF0L, 400)));

      /* Write it. */
      inFromLog = new IN();
      inFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
      inFromLog.releaseLatch();
      in.releaseLatch();

      /*
       * BINs
       */
      BIN bin = new BIN(database, new byte[] {3, 2, 1}, 8, 5);
      bin.latch();
      bin.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(212, 200)));
      bin.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(229, 300)));
      bin.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(235, 400)));
      BIN binFromLog = new BIN();
      binFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_BIN, bin, binFromLog);
      binFromLog.verify(null);
      binFromLog.releaseLatch();
      bin.releaseLatch();

      /*
       * DINs
       */
      DIN din =
          new DIN(
              database,
              new byte[] {1, 0, 0, 1},
              7,
              new byte[] {0, 1, 1, 0},
              new ChildReference(null, new byte[] {1, 0, 0, 1}, DbLsn.makeLsn(10, 100)),
              5);
      din.latch();
      din.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      din.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      din.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(35, 400)));

      /* Write it. */
      DIN dinFromLog = new DIN();
      dinFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_DIN, din, dinFromLog);
      din.releaseLatch();
      dinFromLog.releaseLatch();

      /*
       * DBINs
       */
      DBIN dbin = new DBIN(database, new byte[] {3, 2, 1}, 8, new byte[] {1, 2, 3}, 5);
      dbin.latch();
      dbin.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(212, 200)));
      dbin.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(229, 300)));
      dbin.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(235, 400)));
      DBIN dbinFromLog = new DBIN();
      dbinFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_DBIN, dbin, dbinFromLog);
      dbinFromLog.verify(null);
      dbin.releaseLatch();
      dbinFromLog.releaseLatch();

      /*
       * Root
       */
      DbTree dbTree = new DbTree(env);
      DbTree dbTreeFromLog = new DbTree();
      writeAndRead(buffer, LogEntryType.LOG_ROOT, dbTree, dbTreeFromLog);

      /*
       * MapLN
       */
      MapLN mapLn = new MapLN(database);
      MapLN mapLnFromLog = new MapLN();
      writeAndRead(buffer, LogEntryType.LOG_MAPLN, mapLn, mapLnFromLog);

      /*
       * UserTxn
       */

      /*
       * Disabled for now because these txns don't compare equal: one has a
       * name of "main" and the other has a name of null because it was read
       * from the log.
       *
       * Txn txn = new Txn(env, new TransactionConfig());
       * Txn txnFromLog = new Txn();
       * writeAndRead(buffer, LogEntryType.TXN_COMMIT, txn, txnFromLog);
       * txn.commit();
       */

      /*
       * TxnCommit
       */
      TxnCommit commit = new TxnCommit(111, DbLsn.makeLsn(10, 10));
      TxnCommit commitFromLog = new TxnCommit();
      writeAndRead(buffer, LogEntryType.LOG_TXN_COMMIT, commit, commitFromLog);

      /*
       * TxnAbort
       */
      TxnAbort abort = new TxnAbort(111, DbLsn.makeLsn(11, 11));
      TxnAbort abortFromLog = new TxnAbort();
      writeAndRead(buffer, LogEntryType.LOG_TXN_ABORT, abort, abortFromLog);

      /*
       * TxnPrepare
       */
      byte[] gid = new byte[64];
      byte[] bqual = new byte[64];
      TxnPrepare prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, gid, bqual));
      TxnPrepare prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, null, bqual));
      prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, gid, null));
      prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      /*
       * IN delete info
       */
      INDeleteInfo info = new INDeleteInfo(77, new byte[1], new DatabaseId(100));
      INDeleteInfo infoFromLog = new INDeleteInfo();
      writeAndRead(buffer, LogEntryType.LOG_IN_DELETE_INFO, info, infoFromLog);

      /*
       * Checkpoint start
       */
      CheckpointStart start = new CheckpointStart(177, "test");
      CheckpointStart startFromLog = new CheckpointStart();
      writeAndRead(buffer, LogEntryType.LOG_CKPT_START, start, startFromLog);

      /*
       * Checkpoint end
       */
      CheckpointEnd end =
          new CheckpointEnd(
              "test",
              DbLsn.makeLsn(20, 55),
              env.getRootLsn(),
              env.getTxnManager().getFirstActiveLsn(),
              Node.getLastId(),
              env.getDbMapTree().getLastDbId(),
              env.getTxnManager().getLastTxnId(),
              177);
      CheckpointEnd endFromLog = new CheckpointEnd();
      writeAndRead(buffer, LogEntryType.LOG_CKPT_END, end, endFromLog);
    } catch (Throwable t) {
      t.printStackTrace();
      throw t;
    }
  }