Example #1
  /** Logs the LN at the given index if it is dirty or if force is true. */
  private void logDirtyLN(int index, LN ln, boolean force) throws DatabaseException {

    if (ln.isDirty() || force) {
      DatabaseImpl dbImpl = getDatabase();

      /* Only deferred write databases should have dirty LNs. */
      assert dbImpl.isDeferredWriteMode();

      /* Log the LN with the main tree key. */
      byte[] key = containsDuplicates() ? getDupKey() : getKey(index);

      /*
       * No need to lock; this is non-transactional. This should never be
       * part of the replication stream, because this is a deferred-write db.
       */
      long lsn =
          ln.log(
              dbImpl.getDbEnvironment(),
              dbImpl,
              key,
              getLsn(index), // obsoleteLsn
              null, // locker
              true, // backgroundIO
              ReplicationContext.NO_REPLICATE);
      updateEntry(index, lsn);
    }
  }
Example #2
  /**
   * Evict a single LN if allowed. The amount of memory freed is returned and must be subtracted
   * from the memory budget by the caller.
   */
  private long evictInternal(int index, Cleaner cleaner) throws DatabaseException {

    Node n = getTarget(index);

    if (n instanceof LN) {
      LN ln = (LN) n;

      /*
       * Don't evict MapLNs for open databases (LN.isEvictable) [#13415].
       * And don't strip LNs that the cleaner will be migrating
       * (Cleaner.isEvictable).
       */
      if (ln.isEvictable() && cleaner.isEvictable(this, index)) {

        boolean force = getDatabase().isDeferredWriteMode() && getLsn(index) == DbLsn.NULL_LSN;
        /* Log target if necessary. */
        logDirtyLN(index, ln, force);

        /* Clear target. */
        setTarget(index, null);
        ln.releaseMemoryBudget();

        return n.getMemorySizeIncludedByParent();
      }
    }
    return 0;
  }
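
The javadoc above leaves the memory-budget update to the caller. Below is a minimal caller sketch, not part of the original source: it assumes the IN exposes getNEntries() as used elsewhere in these examples, and that MemoryBudget accepts a signed delta via an updateTreeMemoryUsage-style method (the actual accounting call may be named differently).

  /* Hypothetical caller sketch (not from the original source). */
  private long stripResidentLNs(Cleaner cleaner, MemoryBudget budget) throws DatabaseException {
    long freedBytes = 0;
    for (int i = 0; i < getNEntries(); i++) {
      /* evictInternal returns the bytes freed for this slot, or 0 if it was skipped. */
      freedBytes += evictInternal(i, cleaner);
    }
    if (freedBytes > 0) {
      /* Assumed API: the javadoc only says the caller must subtract the freed
       * amount from the budget; the exact method may differ. */
      budget.updateTreeMemoryUsage(0 - freedBytes);
    }
    return freedBytes;
  }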
Example #3
  /** @see LN#writeToLog */
  public void writeToLog(ByteBuffer logBuffer) {

    /*
     * Add the tracked (live) summary to the base summary before writing it
     * to the log, and reset the tracked summary.  Do this even when
     * deleting the LN, so that the tracked summary is cleared.
     */
    if (trackedSummary != null) {

      baseSummary.add(trackedSummary);

      if (!isDeleted()) {
        getOffsets();
      }

      /* Reset the totals to zero and clear the tracked offsets. */
      trackedSummary.reset();
    }

    super.writeToLog(logBuffer);

    if (!isDeleted()) {
      baseSummary.writeToLog(logBuffer);
      obsoleteOffsets.writeToLog(logBuffer);
    }
  }
Example #4
  /**
   * Initialize a node that has been faulted in from the log. If this FSLN contains version 1
   * offsets that can be incorrect when RMW was used, and if je.cleaner.rmwFix is enabled, discard
   * the offsets. [#13158]
   */
  public void postFetchInit(DatabaseImpl db, long sourceLsn) throws DatabaseException {

    super.postFetchInit(db, sourceLsn);

    memBudget = db.getDbEnvironment().getMemoryBudget();

    if (entryVersion == 1 && db.getDbEnvironment().getUtilizationProfile().isRMWFixEnabled()) {
      obsoleteOffsets = new PackedOffsets();
    }
  }
Example #5
  /** @see LN#readFromLog */
  public void readFromLog(ByteBuffer itemBuffer, byte entryVersion) throws LogException {

    this.entryVersion = entryVersion;

    super.readFromLog(itemBuffer, entryVersion);

    if (!isDeleted()) {
      baseSummary.readFromLog(itemBuffer, entryVersion);
      if (entryVersion > 0) {
        obsoleteOffsets.readFromLog(itemBuffer, entryVersion);
      }
    }
  }
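
The write/read pair above implies an on-disk layout for this LN subclass of: base LN data, then baseSummary, then obsoleteOffsets (the latter only for entry versions above 0). The following is a minimal sketch of the matching size computation, assuming each component exposes a getLogSize-style method as the writeToLog/readFromLog convention suggests; the real method names may differ.

  /* Hypothetical sketch (not from the original source). */
  public int getLogSize() {
    int size = super.getLogSize();
    if (!isDeleted()) {
      /* Mirrors writeToLog: the summary first, then the obsolete offsets. */
      size += baseSummary.getLogSize();
      size += obsoleteOffsets.getLogSize();
    }
    return size;
  }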
Example #6
  /**
   * Note that the IN may or may not be latched when this method is called. Returning the wrong
   * answer is OK in that case (it will be called again later when latched), but an exception should
   * not occur.
   */
  @Override
  int getChildEvictionType() {

    Cleaner cleaner = getDatabase().getDbEnvironment().getCleaner();

    for (int i = 0; i < getNEntries(); i++) {
      Node node = getTarget(i);
      if (node != null) {
        if (node instanceof LN) {
          LN ln = (LN) node;

          /*
           * If the LN is not evictable, we may neither strip the LN
           * nor evict the node.  isEvictableInexact is used here as
           * a fast check, to avoid the overhead of acquiring a
           * handle lock while selecting an IN for eviction.  See
           * evictInternal, which will call LN.isEvictable to acquire
           * a handle lock and guarantee that another thread cannot
           * open the MapLN.  [#13415]
           */
          if (!ln.isEvictableInexact()) {
            return MAY_NOT_EVICT;
          }

          /*
           * If the cleaner allows eviction, then this LN may be
           * stripped.
           */
          if (cleaner.isEvictable(this, i)) {
            return MAY_EVICT_LNS;
          }
        } else {
          return MAY_NOT_EVICT;
        }
      }
    }
    return MAY_EVICT_NODE;
  }
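
A short sketch of how an evictor might act on the returned value, assuming the MAY_EVICT_LNS / MAY_EVICT_NODE / MAY_NOT_EVICT constants are defined on IN; the helper below is illustrative only and not part of the original source.

  /* Hypothetical usage sketch (not from the original source). */
  private boolean isEvictionCandidate(IN target) {
    int evictionType = target.getChildEvictionType();
    /* MAY_EVICT_LNS: resident LNs may be stripped (see evictInternal above).
     * MAY_EVICT_NODE: the node itself may be evicted.
     * MAY_NOT_EVICT: skip this IN for now; it may be re-examined later. */
    return evictionType == IN.MAY_EVICT_LNS || evictionType == IN.MAY_EVICT_NODE;
  }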
Example #7
  /**
   * Mark this entry as deleted, using the delete flag. Only BINs may do this.
   *
   * @param index indicates target entry
   */
  @Override
  public void setKnownDeleted(int index) {

    /*
     * The target is cleared to save memory, since a known deleted entry
     * will never be fetched.  The migrate flag is also cleared since
     * migration is never needed for known deleted entries either.
     */
    super.setKnownDeleted(index);

    /*
     * We know it's an LN because we never call setKnownDeleted for
     * an IN.
     */
    LN oldLN = (LN) getTarget(index);
    updateMemorySize(oldLN, null /* newNode */);
    if (oldLN != null) {
      oldLN.releaseMemoryBudget();
    }
    setMigrate(index, false);
    super.setTarget(index, null);
    setDirty(true);
  }
Example #8
  /**
   * Compress this BIN by removing any entries that are deleted. Deleted entries are those whose
   * LNs are marked deleted or whose knownDeleted flag is set. The caller is responsible for
   * latching and unlatching this node.
   *
   * @param binRef is used to determine the set of keys to be checked for deletedness, or is null to
   *     check all keys.
   * @param canFetch if false, don't fetch any non-resident children. We don't want some callers of
   *     compress, such as the evictor, to fault in other nodes.
   * @return true if we had to requeue the entry because we were unable to get locks, false if all
   *     entries were processed and therefore any remaining deleted keys in the BINReference must
   *     now be in some other BIN because of a split.
   */
  @Override
  public boolean compress(
      BINReference binRef, boolean canFetch, LocalUtilizationTracker localTracker)
      throws DatabaseException {

    boolean ret = false;
    boolean setNewIdKey = false;
    boolean anyLocksDenied = false;
    DatabaseImpl db = getDatabase();
    EnvironmentImpl envImpl = db.getDbEnvironment();
    BasicLocker lockingTxn = BasicLocker.createBasicLocker(envImpl);

    try {
      for (int i = 0; i < getNEntries(); i++) {

        /*
         * We have to be able to lock the LN before we can compress the
         * entry.  If we can't, then skip over it.
         *
         * We must lock the LN even if isKnownDeleted is true, because
         * locks protect the aborts. (Aborts may execute multiple
         * operations, where each operation latches and unlatches. It's
         * the LN lock that protects the integrity of the whole
         * multi-step process.)
         *
         * For example, during abort, there may be cases where we have
         * deleted and then added an LN during the same txn.  This
         * means that to undo/abort it, we first delete the LN (leaving
         * knownDeleted set), and then add it back into the tree.  We
         * want to make sure the entry is in the BIN when we do the
         * insert back in.
         */
        boolean deleteEntry = false;
        Node n = null;

        if (binRef == null
            || isEntryPendingDeleted(i)
            || isEntryKnownDeleted(i)
            || binRef.hasDeletedKey(new Key(getKey(i)))) {

          if (canFetch) {
            if (db.isDeferredWriteMode() && getLsn(i) == DbLsn.NULL_LSN) {
              /* Null LSNs are ok in DW. [#15588] */
              n = getTarget(i);
            } else {
              n = fetchTarget(i);
            }
          } else {
            n = getTarget(i);
            if (n == null) {
              /* Punt, we don't know the state of this child. */
              continue;
            }
          }

          if (n == null) {
            /* Cleaner deleted the log file.  Compress this LN. */
            deleteEntry = true;
          } else if (isEntryKnownDeleted(i)) {
            LockResult lockRet = lockingTxn.nonBlockingLock(n.getNodeId(), LockType.READ, db);
            if (lockRet.getLockGrant() == LockGrantType.DENIED) {
              anyLocksDenied = true;
              continue;
            }

            deleteEntry = true;
          } else {
            if (!n.containsDuplicates()) {
              LN ln = (LN) n;
              LockResult lockRet = lockingTxn.nonBlockingLock(ln.getNodeId(), LockType.READ, db);
              if (lockRet.getLockGrant() == LockGrantType.DENIED) {
                anyLocksDenied = true;
                continue;
              }

              if (ln.isDeleted()) {
                deleteEntry = true;
              }
            }
          }

          /* Remove key from BINReference in case we requeue it. */
          if (binRef != null) {
            binRef.removeDeletedKey(new Key(getKey(i)));
          }
        }

        /* At this point, we know we can delete. */
        if (deleteEntry) {
          boolean entryIsIdentifierKey =
              Key.compareKeys(getKey(i), getIdentifierKey(), getKeyComparator()) == 0;
          if (entryIsIdentifierKey) {

            /*
             * We're about to remove the entry with the idKey, so
             * the node will need a new idKey.
             */
            setNewIdKey = true;
          }

          /*
           * When deleting a deferred-write LN entry, we count the
           * last logged LSN as obsolete.
           */
          if (localTracker != null && db.isDeferredWriteMode() && n instanceof LN) {
            LN ln = (LN) n;
            long lsn = getLsn(i);
            if (ln.isDirty() && lsn != DbLsn.NULL_LSN) {
              localTracker.countObsoleteNode(lsn, ln.getLogType(), ln.getLastLoggedSize(), db);
            }
          }

          boolean deleteSuccess = deleteEntry(i, true);
          assert deleteSuccess;

          /*
           * Since we're deleting the current entry, bump the current
           * index back down one.
           */
          i--;
        }
      }
    } finally {
      if (lockingTxn != null) {
        lockingTxn.operationEnd();
      }
    }

    if (anyLocksDenied && binRef != null) {
      db.getDbEnvironment().addToCompressorQueue(binRef, false);
      ret = true;
    }

    if (getNEntries() != 0 && setNewIdKey) {
      setIdentifierKey(getKey(0));
    }

    /* This BIN is empty and expendable. */
    if (getNEntries() == 0) {
      setGeneration(0);
    }

    return ret;
  }
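
Since the javadoc makes the caller responsible for latching, a single compressor pass might look roughly like the sketch below. It is illustrative only; latch() and releaseLatch() appear in the test in Example #12, and compress() itself requeues the BINReference when locks are denied.

  /* Hypothetical caller sketch (not from the original source). */
  private void compressOnePass(
      BIN bin, BINReference binRef, LocalUtilizationTracker localTracker)
      throws DatabaseException {
    bin.latch();
    try {
      /* canFetch=false: don't fault in non-resident children during this pass. */
      boolean requeued = bin.compress(binRef, false /*canFetch*/, localTracker);
      /* If requeued is true, compress() has already put binRef back on the
       * compressor queue; nothing more to do here. */
    } finally {
      bin.releaseLatch();
    }
  }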
Example #9
 /** @see LN#readFromLog */
 public void readFromLog(ByteBuffer itemBuffer, byte entryTypeVersion) throws LogException {
   super.readFromLog(itemBuffer, entryTypeVersion);
   databaseImpl.readFromLog(itemBuffer, entryTypeVersion);
   deleted = LogUtils.readBoolean(itemBuffer);
 }
Example #10
 /** @see LN#writeToLog */
 public void writeToLog(ByteBuffer logBuffer) {
   super.writeToLog(logBuffer);
   databaseImpl.writeToLog(logBuffer);
   LogUtils.writeBoolean(logBuffer, deleted);
 }
Example #11
 /** Clear out the obsoleteOffsets to save memory when the LN is deleted. */
 @Override
 void makeDeleted() {
   super.makeDeleted();
   obsoleteOffsets = new PackedOffsets();
 }
Example #12
  public void testEntryData() throws Throwable {

    try {
      ByteBuffer buffer = ByteBuffer.allocate(1000);
      database = new DatabaseImpl("foo", new DatabaseId(1), env, new DatabaseConfig());

      /*
       * For each loggable object, can we write the entry data out?
       */

      /*
       * Tracer records.
       */
      Tracer dMsg = new Tracer("Hello there");
      writeAndRead(buffer, LogEntryType.LOG_TRACE, dMsg, new Tracer());

      /*
       * LNs
       */
      String data = "abcdef";
      LN ln = new LN(data.getBytes());
      LN lnFromLog = new LN();
      writeAndRead(buffer, LogEntryType.LOG_LN, ln, lnFromLog);
      lnFromLog.verify(null);
      assertTrue(LogEntryType.LOG_LN.marshallOutsideLatch());

      FileSummaryLN fsLN = new FileSummaryLN(new FileSummary());
      FileSummaryLN fsLNFromLog = new FileSummaryLN();
      writeAndRead(buffer, LogEntryType.LOG_FILESUMMARYLN, fsLN, fsLNFromLog);
      assertFalse(LogEntryType.LOG_FILESUMMARYLN.marshallOutsideLatch());

      /*
       * INs
       */
      IN in = new IN(database, new byte[] {1, 0, 1, 0}, 7, 5);
      in.latch();
      in.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      in.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      in.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(35, 400)));

      /* Write it. */
      IN inFromLog = new IN();
      inFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
      inFromLog.releaseLatch();
      in.releaseLatch();

      /*
       * IN - long form
       */
      in = new IN(database, new byte[] {1, 0, 1, 0}, 7, 5);
      in.latch();
      in.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      in.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      in.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(1235, 400)));
      in.insertEntry(
          new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(0xFFFFFFF0L, 400)));

      /* Write it. */
      inFromLog = new IN();
      inFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
      inFromLog.releaseLatch();
      in.releaseLatch();

      /*
       * BINs
       */
      BIN bin = new BIN(database, new byte[] {3, 2, 1}, 8, 5);
      bin.latch();
      bin.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(212, 200)));
      bin.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(229, 300)));
      bin.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(235, 400)));
      BIN binFromLog = new BIN();
      binFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_BIN, bin, binFromLog);
      binFromLog.verify(null);
      binFromLog.releaseLatch();
      bin.releaseLatch();

      /*
       * DINs
       */
      DIN din =
          new DIN(
              database,
              new byte[] {1, 0, 0, 1},
              7,
              new byte[] {0, 1, 1, 0},
              new ChildReference(null, new byte[] {1, 0, 0, 1}, DbLsn.makeLsn(10, 100)),
              5);
      din.latch();
      din.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      din.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      din.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(35, 400)));

      /* Write it. */
      DIN dinFromLog = new DIN();
      dinFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_DIN, din, dinFromLog);
      din.releaseLatch();
      dinFromLog.releaseLatch();

      /*
       * DBINs
       */
      DBIN dbin = new DBIN(database, new byte[] {3, 2, 1}, 8, new byte[] {1, 2, 3}, 5);
      dbin.latch();
      dbin.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(212, 200)));
      dbin.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(229, 300)));
      dbin.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(235, 400)));
      DBIN dbinFromLog = new DBIN();
      dbinFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_DBIN, dbin, dbinFromLog);
      dbinFromLog.verify(null);
      dbin.releaseLatch();
      dbinFromLog.releaseLatch();

      /*
       * Root
       */
      DbTree dbTree = new DbTree(env);
      DbTree dbTreeFromLog = new DbTree();
      writeAndRead(buffer, LogEntryType.LOG_ROOT, dbTree, dbTreeFromLog);

      /*
       * MapLN
       */
      MapLN mapLn = new MapLN(database);
      MapLN mapLnFromLog = new MapLN();
      writeAndRead(buffer, LogEntryType.LOG_MAPLN, mapLn, mapLnFromLog);

      /*
       * UserTxn
       */

      /*
       * Disabled for now because these txns don't compare equal,
       * because one has a name of "main" and the other has a name of
       * null because it was read from the log.
       *
       * Txn txn = new Txn(env, new TransactionConfig());
       * Txn txnFromLog = new Txn();
       * writeAndRead(buffer, LogEntryType.TXN_COMMIT, txn, txnFromLog);
       * txn.commit();
       */

      /*
       * TxnCommit
       */
      TxnCommit commit = new TxnCommit(111, DbLsn.makeLsn(10, 10));
      TxnCommit commitFromLog = new TxnCommit();
      writeAndRead(buffer, LogEntryType.LOG_TXN_COMMIT, commit, commitFromLog);

      /*
       * TxnAbort
       */
      TxnAbort abort = new TxnAbort(111, DbLsn.makeLsn(11, 11));
      TxnAbort abortFromLog = new TxnAbort();
      writeAndRead(buffer, LogEntryType.LOG_TXN_ABORT, abort, abortFromLog);

      /*
       * TxnPrepare
       */
      byte[] gid = new byte[64];
      byte[] bqual = new byte[64];
      TxnPrepare prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, gid, bqual));
      TxnPrepare prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, null, bqual));
      prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, gid, null));
      prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      /*
       * IN delete info
       */
      INDeleteInfo info = new INDeleteInfo(77, new byte[1], new DatabaseId(100));
      INDeleteInfo infoFromLog = new INDeleteInfo();
      writeAndRead(buffer, LogEntryType.LOG_IN_DELETE_INFO, info, infoFromLog);

      /*
       * Checkpoint start
       */
      CheckpointStart start = new CheckpointStart(177, "test");
      CheckpointStart startFromLog = new CheckpointStart();
      writeAndRead(buffer, LogEntryType.LOG_CKPT_START, start, startFromLog);

      /*
       * Checkpoint end
       */
      CheckpointEnd end =
          new CheckpointEnd(
              "test",
              DbLsn.makeLsn(20, 55),
              env.getRootLsn(),
              env.getTxnManager().getFirstActiveLsn(),
              Node.getLastId(),
              env.getDbMapTree().getLastDbId(),
              env.getTxnManager().getLastTxnId(),
              177);
      CheckpointEnd endFromLog = new CheckpointEnd();
      writeAndRead(buffer, LogEntryType.LOG_CKPT_END, end, endFromLog);
    } catch (Throwable t) {
      t.printStackTrace();
      throw t;
    }
  }
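
The test relies on a writeAndRead helper that is not shown. Below is a rough sketch of what such a helper might do, assuming the logged objects share a Loggable-style interface with the writeToLog(ByteBuffer) and readFromLog(ByteBuffer, byte) methods seen in the earlier examples; the interface name, the version constant, and the equality check are assumptions, not the original implementation.

  /* Hypothetical sketch of the helper (not the original implementation). */
  private void writeAndRead(ByteBuffer buffer, LogEntryType entryType, Loggable orig, Loggable fromLog)
      throws LogException {
    final byte assumedVersion = 2; // assumption: current on-disk entry version

    /* entryType would drive header handling in the real helper; it is unused here. */
    buffer.clear();
    orig.writeToLog(buffer);                     // serialize the original object
    buffer.flip();
    fromLog.readFromLog(buffer, assumedVersion); // materialize a second copy

    /* The real helper presumably compares the two objects (e.g. via their
     * dumped string forms); here we only check the buffer was fully consumed. */
    assertEquals(0, buffer.remaining());
  }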