    /**
     * Flush the root IN if it is still the checkpoint target and is
     * dirty.  The IN return value is unused; this always returns null.
     */
    public IN doWork(ChildReference root) throws DatabaseException {

      if (root == null) {
        return null;
      }
      IN rootIN = (IN) root.fetchTarget(db, null);
      rootIN.latch();
      try {
        if (rootIN.getNodeId() == target.getNodeId()) {

          /*
           * stillRoot handles the race condition where the root
           * splits after the target's latch is released.
           */
          stillRoot = true;
          if (rootIN.getDirty()) {
            DbLsn newLsn = rootIN.log(logManager);
            root.setLsn(newLsn);
            flushed = true;
          }
        }
      } finally {
        rootIN.releaseLatch();
      }
      return null;
    }
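
    /*
     * Usage sketch (see flushIN below): the flusher runs doWork with
     * the root latched via Tree.withRootLatched, closing the window
     * between releasing the target's latch and re-checking whether
     * the target is still the root:
     *
     *   RootFlusher flusher = new RootFlusher(db, logManager, target);
     *   tree.withRootLatched(flusher);
     *   boolean flushed = flusher.getFlushed();
     *   boolean targetWasRoot = flusher.stillRoot();
     */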
  /**
   * Flush the nodes in order, from the lowest level to the highest. As a flush dirties a node's
   * parent, add the parent to the dirty map, thereby cascading the writes up the tree. If flushAll
   * wasn't specified, we need only cascade up to the highest level that existed before the
   * checkpointing started. (A sketch of the per-level dirty map bucketing follows this method.)
   *
   * <p>Note that all but the top-level INs and the BINDeltas are logged provisionally. That's
   * because recovery does not need to process the lower INs individually; the higher INs will end
   * up pointing at them.
   */
  private void flushDirtyNodes(boolean flushAll, boolean allowDeltas, boolean flushExtraLevel)
      throws DatabaseException {

    LogManager logManager = envImpl.getLogManager();
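
    /*
     * dirtyMap maps each IN level (an Integer) to the Set of dirty INs
     * at that level, with the lowest level first.
     */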

    SortedMap dirtyMap = selectDirtyINs(flushAll, flushExtraLevel);

    while (dirtyMap.size() > 0) {

      /* Work on one level's worth of nodes in ascending level order. */
      Integer currentLevel = (Integer) dirtyMap.firstKey();
      boolean logProvisionally = (currentLevel.intValue() != highestFlushLevel);

      Set nodeSet = (Set) dirtyMap.get(currentLevel);
      Iterator iter = nodeSet.iterator();

      /* Flush all the nodes at this level. */
      while (iter.hasNext()) {
        IN target = (IN) iter.next();
        target.latch();
        boolean triedToFlush = false;

        /*
         * Only flush the ones that are still dirty -- some
         * may have been written out by the evictor. Also
         * check if the db is still valid -- since INs of
         * deleted databases are left on the in-memory tree
         * until the evictor lazily clears them out, there may
         * be dead INs around.
         */
        if (target.getDirty() && (!target.getDatabase().getIsDeleted())) {
          flushIN(target, logManager, dirtyMap, logProvisionally, allowDeltas);
          triedToFlush = true;
        } else {
          target.releaseLatch();
        }

        Tracer.trace(
            Level.FINE,
            envImpl,
            "Checkpointer: node="
                + target.getNodeId()
                + " level="
                + Integer.toHexString(target.getLevel())
                + " flushed="
                + triedToFlush);
      }

      /* We're done with this level. */
      dirtyMap.remove(currentLevel);

      /* We can stop once the highest flush level has been handled. */
      if (currentLevel.intValue() == highestFlushLevel) {
        break;
      }
    }
  }
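
  /*
   * Dirty map sketch: the map groups dirty INs by level (see
   * selectDirtyINs below).  A minimal sketch of the addToDirtyMap
   * helper called from flushIN, assuming it mirrors the per-level
   * bucketing done in selectDirtyINs (the real method lives elsewhere
   * in this class and may differ):
   *
   *   private void addToDirtyMap(Map dirtyMap, IN in) {
   *     Integer level = new Integer(in.getLevel());
   *     Set dirtySet = (Set) dirtyMap.get(level);
   *     if (dirtySet == null) {
   *       dirtySet = new HashSet();
   *       dirtyMap.put(level, dirtySet);
   *     }
   *     dirtySet.add(in);
   *   }
   */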
  /** Flush the target IN. */
  private void flushIN(
      IN target, LogManager logManager, Map dirtyMap, boolean logProvisionally, boolean allowDeltas)
      throws DatabaseException {

    DatabaseImpl db = target.getDatabase();
    Tree tree = db.getTree();
    boolean targetWasRoot = false;

    if (target.isDbRoot()) {
      /* We're trying to flush the root. */
      target.releaseLatch();
      RootFlusher flusher = new RootFlusher(db, logManager, target);
      tree.withRootLatched(flusher);
      boolean flushed = flusher.getFlushed();

      /*
       * We have to check whether the root split between target.releaseLatch
       * and the execution of the root flusher. If it did split, this
       * target has to be handled like a regular node.
       */
      targetWasRoot = flusher.stillRoot();

      /*
       * Update the tree's owner, whether it's the env root or the
       * dbmapping tree.
       */
      if (flushed) {
        DbTree dbTree = db.getDbEnvironment().getDbMapTree();
        dbTree.modifyDbRoot(db);
        nFullINFlushThisRun++;
        nFullINFlush++;
      }
      if (!targetWasRoot) {
        /*
         * Re-latch so the target can be flushed below as a regular
         * (non-root) node, now that it is no longer the root.
         */
        target.latch();
      }
    }

    if (!targetWasRoot) {
      SearchResult result = tree.getParentINForChildIN(target, true);

      /*
       * If a parent was found, do the flush. If no parent was found, the
       * compressor deleted this node before we got around to processing it.
       */
      if (result.exactParentFound) {
        try {
          ChildReference entry = result.parent.getEntry(result.index);
          IN renewedTarget = (IN) entry.fetchTarget(db, result.parent);
          renewedTarget.latch();
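          /*
           * newLsn remains null when no full version of the IN is
           * logged (e.g. it was no longer dirty, or only a BIN delta
           * was logged), in which case the parent entry is not updated.
           */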
          DbLsn newLsn = null;
          try {

            /* Still dirty? */
            if (renewedTarget.getDirty()) {
              if (allowDeltas) {
                newLsn = renewedTarget.logAllowDeltas(logManager, logProvisionally);
                if (newLsn == null) {
                  nDeltaINFlushThisRun++;
                  nDeltaINFlush++;
                }
              } else {
                newLsn = renewedTarget.log(logManager, logProvisionally);
              }
            }
          } finally {
            renewedTarget.releaseLatch();
          }

          /* Update parent if logging occurred */
          if (newLsn != null) {
            nFullINFlushThisRun++;
            nFullINFlush++;
            if (renewedTarget instanceof BIN) {
              nFullBINFlush++;
            }
            result.parent.updateEntry(result.index, newLsn);
            addToDirtyMap(dirtyMap, result.parent);
          }
        } finally {
          result.parent.releaseLatch();
        }
      }
    }
  }
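
  /*
   * To recap flushIN: a root IN is flushed under the root latch via
   * RootFlusher; otherwise (or if the root split while unlatched) the
   * parent is located with tree.getParentINForChildIN, the child is
   * re-fetched, re-latched and logged, and the parent's entry LSN is
   * updated and the parent is added to the dirty map for the next
   * level's pass.
   */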
  /*
   * Scan the INList for all dirty INs. Arrange them in a level-sorted
   * map for level-ordered flushing.
   */
  private SortedMap selectDirtyINs(boolean flushAll, boolean flushExtraLevel)
      throws DatabaseException {

    SortedMap newDirtyMap = new TreeMap();

    INList inMemINs = envImpl.getInMemoryINs();
    inMemINs.latchMajor();

    /*
     * Opportunistically recalculate the environment-wide memory
     * count.  Incurs no extra cost because we're walking the IN
     * list anyway.  Not the best in terms of encapsulation, as
     * preferably all memory calculations would be done in
     * MemoryBudget, but it's done this way to avoid any extra
     * latching.
     */
    long totalSize = 0;
    MemoryBudget mb = envImpl.getMemoryBudget();

    try {
      Iterator iter = inMemINs.iterator();
      while (iter.hasNext()) {
        IN in = (IN) iter.next();
        in.latch();
        totalSize = mb.accumulateNewUsage(in, totalSize);
        boolean isDirty = in.getDirty();
        in.releaseLatch();
        if (isDirty) {
          Integer level = new Integer(in.getLevel());
          Set dirtySet;
          if (newDirtyMap.containsKey(level)) {
            dirtySet = (Set) newDirtyMap.get(level);
          } else {
            dirtySet = new HashSet();
            newDirtyMap.put(level, dirtySet);
          }
          dirtySet.add(in);
        }
      }

      // Later release: refresh env count.
      // mb.refreshCacheMemoryUsage(totalSize);

      /*
       * If we're flushing all for cleaning, we must flush to
       * the point that there are no nodes with LSNs in the
       * cleaned files.  We could figure this out by perusing
       * every node to see what children it has, but that's so
       * expensive that instead we'll flush to the root.
       */
      if (newDirtyMap.size() > 0) {
        if (flushAll) {
          highestFlushLevel = envImpl.getDbMapTree().getHighestLevel();
        } else {
          highestFlushLevel = ((Integer) newDirtyMap.lastKey()).intValue();
          if (flushExtraLevel) {
            highestFlushLevel += 1;
          }
        }
      }

    } finally {
      inMemINs.releaseMajorLatchIfHeld();
    }

    return newDirtyMap;
  }
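
  /*
   * Shape of the returned map (hypothetical contents): each key is an
   * IN level and each value is the set of dirty INs at that level,
   * e.g. {1=[binA, binB], 3=[rootIN]}.  flushDirtyNodes walks the keys
   * in ascending order and stops at highestFlushLevel.
   */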