  /**
   * Sync all in-memory map entries to the backing disk store: copy each
   * still-live value from memMap into diskMap, page out soft-cleared entries,
   * then flush the deferred-write database.
   */
  public synchronized void sync() {
    String dbName = null;
    // Sync memory and disk.
    useStatsSyncUsed.incrementAndGet();
    long startTime = 0;
    if (logger.isLoggable(Level.FINE)) {
      dbName = getDatabaseName();
      startTime = System.currentTimeMillis();
      logger.fine(
          dbName + " start sizes: disk " + this.diskMap.size() + ", mem " + this.memMap.size());
    }

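    // Copy each still-live (not yet soft-cleared) value from memMap into the backing diskMap.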
    for (String key : this.memMap.keySet()) {
      SoftEntry<V> entry = memMap.get(key);
      if (entry != null) {
        // Get & hold a strong reference so the value isn't cleared before the put.
        V value = entry.get();
        if (value != null) {
          expungeStatsDiskPut.incrementAndGet();
          this.diskMap.put(key, value); // unchecked cast
        }
      }
    }
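    // Flush any entries whose soft values were already cleared and ref-enqueued.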
    pageOutStaleEntries();

    // force sync of deferred-writes
    try {
      this.db.sync();
    } catch (DatabaseException e) {
      throw new RuntimeException(e);
    }

    if (logger.isLoggable(Level.FINE)) {
      logger.fine(
          dbName
              + " sync took "
              + (System.currentTimeMillis() - startTime)
              + "ms. "
              + "Finish sizes: disk "
              + this.diskMap.size()
              + ", mem "
              + this.memMap.size());
    }
  }
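
  /*
   * Illustrative use of getOrUse() below (hypothetical names, not part of this
   * class): the Supplier is only invoked when the key is found in neither the
   * memory map nor the disk map.
   *
   *   MyValue v = cache.getOrUse("example-key", new Supplier<MyValue>() {
   *     public MyValue get() {
   *       return new MyValue();
   *     }
   *   });
   */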

  /**
   * Get the value for the given key, checking the in-memory map first, then the
   * backing disk map; if absent from both and a Supplier was given, create,
   * remember, and return a new value from that Supplier.
   *
   * @see org.archive.util.ObjectIdentityCache#get(java.lang.String, org.archive.util.Supplier)
   */
  public V getOrUse(final String key, Supplier<V> supplierOrNull) {
    countOfGets.incrementAndGet();

    if (countOfGets.get() % 10000 == 0) {
      logCacheSummary();
    }

    // check mem cache
    SoftEntry<V> entry = memMap.get(key);
    if (entry != null) {
      V val = entry.get();
      if (val != null) {
        // the fast, lock-free path: present in memory and still valid
        cacheHit.incrementAndGet();
        return val;
      }
    }

    // all other, harder cases are handled inside this synchronized block
    synchronized (this) {
      // recheck mem cache -- if another thread beat us into sync
      // block and already filled the key
      entry = memMap.get(key);
      if (entry != null) {
        V val = entry.get();
        if (val != null) {
          cacheHit.incrementAndGet();
          return val;
        }
      }
      // persist to disk all ref-enqueued stale (soft-ref-cleared) entries now
      pageOutStaleEntries();
      // and page out this exact entry if it was cleared but not yet ref-enqueued
      if (memMap.get(key) != null) {
        pageOutStaleEntry(entry);
        if (memMap.get(key) != null) {
          logger.log(Level.SEVERE, "nulled key " + key + " not paged-out", new Exception());
        }
      }

      // check disk
      V valDisk = (V) diskMap.get(key);
      if (valDisk == null) {
        // never yet created, consider creating
        if (supplierOrNull == null) {
          return null;
        }
        // create using provided Supplier
        valDisk = supplierOrNull.get();
        supplierUsed.incrementAndGet();
        // putting initial value directly into diskMap
        // (rather than just the memMap until page-out)
        // ensures diskMap.keySet() provides complete view
        V prevVal = diskMap.putIfAbsent(key, valDisk);
        count.incrementAndGet();
        if (prevVal != null) {
          // ERROR: diskMap modification since previous
          // diskMap.get() should be impossible
          logger.log(Level.SEVERE, "diskMap modified outside synchronized block?");
        }
      } else {
        diskHit.incrementAndGet();
      }

      // keep new val in memMap
      SoftEntry<V> newEntry = new SoftEntry<V>(key, valDisk, refQueue);
      SoftEntry<V> prevVal = memMap.putIfAbsent(key, newEntry);
      if (prevVal != null) {
        // ERROR: memMap modification since previous
        // memMap.get() should be impossible
        logger.log(Level.SEVERE, "memMap modified outside synchronized block?", new Exception());
      }
      return valDisk;
    }
  }