/**
   * Changes all keys to "prefix keys" in the given IN. Called after reading an IN from disk via
   * IN.postFetchInit.
   *
   * <p>The conversion of IN keys is invoked from the IN class when an IN is fetched, rather than
   * invoked from the DupConvert class directly, for performance and simplicity. If it were invoked
   * from the DupConvert class, we would have to iterate over all INs in a separate initial pass.
   * This would be both more time consuming and more complex to implement properly so that
   * eviction remains possible. Instead, conversion occurs when an old-format IN is loaded.
   *
   * <p>Enter/leave with 'in' unlatched.
   */
  public static void convertInKeys(final DatabaseImpl dbImpl, final IN in) {

    /* Nothing to convert for a non-duplicates DB. */
    if (!dbImpl.getSortedDuplicates()) {
      return;
    }

    /* DIN/DBIN do not need conversion either. */
    if (in instanceof DIN || in instanceof DBIN) {
      return;
    }

    in.latch();
    try {
      for (int i = 0; i < in.getNEntries(); i += 1) {
        byte[] oldKey = in.getKey(i);
        byte[] newKey = DupKeyData.makePrefixKey(oldKey, 0, oldKey.length);
        in.updateEntry(i, in.getTarget(i), in.getLsn(i), newKey);
      }

      byte[] oldKey = in.getIdentifierKey();
      byte[] newKey = DupKeyData.makePrefixKey(oldKey, 0, oldKey.length);
      in.setIdentifierKey(newKey);

      assert in.verifyMemorySize();
    } finally {
      in.releaseLatch();
    }
  }
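
  /*
   * A minimal standalone sketch (not in the original class) of the same
   * per-key conversion applied to a plain list of keys. Only the
   * DupKeyData.makePrefixKey(byte[], int, int) call is taken from the
   * code above; the helper itself and the java.util.List usage are
   * illustrative assumptions.
   */
  static java.util.List<byte[]> convertKeysSketch(java.util.List<byte[]> oldKeys) {
    java.util.List<byte[]> newKeys = new java.util.ArrayList<byte[]>();
    for (byte[] oldKey : oldKeys) {
      /* Same call as the slot loop in convertInKeys above. */
      newKeys.add(DupKeyData.makePrefixKey(oldKey, 0, oldKey.length));
    }
    return newKeys;
  }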
    /**
     * Flushes the root IN if it is dirty.
     *
     * @return the replacement root IN, or null; this flusher never replaces the in-memory root.
     */
    public IN doWork(ChildReference root) throws DatabaseException {

      if (root == null) {
        return null;
      }
      IN rootIN = (IN) root.fetchTarget(db, null);
      rootIN.latch();
      try {
        if (rootIN.getNodeId() == target.getNodeId()) {

          /*
           * stillRoot handles the race condition where the root
           * splits after the target's latch is released.
           */
          stillRoot = true;
          if (rootIN.getDirty()) {
            DbLsn newLsn = rootIN.log(logManager);
            root.setLsn(newLsn);
            flushed = true;
          }
        }
      } finally {
        rootIN.releaseLatch();
      }
      return null;
    }
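
    /*
     * Note: doWork is not called directly; flushIN (below) hands this
     * callback to Tree.withRootLatched(flusher), which is assumed to
     * latch the root, run the callback, and release the root latch
     * afterward. That behavior is inferred from the call sites in this
     * listing, not confirmed from the Tree source.
     */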
  /**
   * Flush the nodes in order, from the lowest level to highest level. As a flush dirties its
   * parent, add it to the dirty map, thereby cascading the writes up the tree. If flushAll wasn't
   * specified, we need only cascade up to the highest level that existed before the checkpointing
   * started.
   *
   * <p>Note that all but the top-level INs and the BINDeltas are logged provisionally. That's
   * because recovery doesn't need to process the lower INs: the higher INs will end up pointing
   * at them.
   */
  private void flushDirtyNodes(boolean flushAll, boolean allowDeltas, boolean flushExtraLevel)
      throws DatabaseException {

    LogManager logManager = envImpl.getLogManager();

    SortedMap dirtyMap = selectDirtyINs(flushAll, flushExtraLevel);

    while (dirtyMap.size() > 0) {

      /* Work on one level's worth of nodes in ascending level order. */
      Integer currentLevel = (Integer) dirtyMap.firstKey();
      boolean logProvisionally = (currentLevel.intValue() != highestFlushLevel);

      Set nodeSet = (Set) dirtyMap.get(currentLevel);
      Iterator iter = nodeSet.iterator();

      /* Flush all those nodes */
      while (iter.hasNext()) {
        IN target = (IN) iter.next();
        target.latch();
        boolean triedToFlush = false;

        /*
         * Only flush the ones that are still dirty -- some
         * may have been written out by the evictor. Also
         * check if the db is still valid -- since INs of
         * deleted databases are left on the in-memory tree
         * until the evictor lazily clears them out, there may
         * be dead INs around.
         */
        if (target.getDirty() && (!target.getDatabase().getIsDeleted())) {
          flushIN(target, logManager, dirtyMap, logProvisionally, allowDeltas);
          triedToFlush = true;
        } else {
          target.releaseLatch();
        }

        Tracer.trace(
            Level.FINE,
            envImpl,
            "Checkpointer: node="
                + target.getNodeId()
                + " level="
                + Integer.toHexString(target.getLevel())
                + " flushed="
                + triedToFlush);
      }

      /* We're done with this level. */
      dirtyMap.remove(currentLevel);

      /* We can stop once the highest flush level has been processed. */
      if (currentLevel.intValue() == highestFlushLevel) {
        break;
      }
    }
  }
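
  /*
   * A self-contained sketch of the cascade described above, with plain
   * integers standing in for INs and a node/2 parent rule as an explicit
   * assumption (assumes java.util imports). Flushing a level adds parents
   * to the next level up, and the loop stops at the highest flush level.
   */
  static void cascadeSketch(SortedMap<Integer, Set<Integer>> dirtyMap, int highestFlushLevel) {
    while (!dirtyMap.isEmpty()) {
      Integer level = dirtyMap.firstKey();
      for (Integer node : dirtyMap.get(level)) {
        if (level.intValue() < highestFlushLevel) {
          /* "Flushing" node dirties its (hypothetical) parent one level up. */
          dirtyMap.computeIfAbsent(level + 1, k -> new HashSet<Integer>()).add(node / 2);
        }
      }
      dirtyMap.remove(level);
      if (level.intValue() == highestFlushLevel) {
        break; /* Nothing above this level needs to be written. */
      }
    }
  }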
/* Example #4. File: IN.java, Project: mtiede/FeatureIDE */
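 /*
  * In FeatureIDE's feature-oriented composition, original() invokes the
  * method body that these hooks refine; both hooks below release the
  * latch before delegating to the original behavior.
  */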
 protected void hook622() throws DatabaseException {
   releaseLatch();
   original();
 }
/* Example #5. File: IN.java, Project: mtiede/FeatureIDE */
 protected void hook625(Node child) throws DatabaseException {
   ((IN) child).releaseLatch();
   original(child);
 }
/* Example #6 */
  /*
   * If this search can go further, set result.parent to the child. If it
   * can't, and this IN is a possible new parent to the child, set
   * result.parent to this IN. If the search can't go further and this IN
   * can't be a parent to this child, set result.parent to null.
   */
  @Override
  protected void descendOnParentSearch(
      SearchResult result,
      boolean targetContainsDuplicates,
      boolean targetIsRoot,
      long targetNodeId,
      Node child,
      boolean requireExactMatch)
      throws DatabaseException {

    if (child.canBeAncestor(targetContainsDuplicates)) {
      if (targetContainsDuplicates && targetIsRoot) {

        /*
         * Don't go further -- the target is a root of a dup tree, so
         * this BIN will have to be the parent.
         */
        long childNid = child.getNodeId();
        ((IN) child).releaseLatch();

        result.keepSearching = false; // stop searching
        result.exactParentFound = (childNid == targetNodeId); // set if exact find

        /*
         * Return a reference to this node unless we need an exact
         * match and this isn't exact.
         */
        if (requireExactMatch && !result.exactParentFound) {
          result.parent = null;
          releaseLatch();
        } else {
          result.parent = this;
        }

      } else {
        /*
         * Go further down into the dup tree.
         */
        releaseLatch();
        result.parent = (IN) child;
      }
    } else {

      /*
       * Our search ends here; we didn't find it. If we need an exact
       * match, give up; if we only need a potential match, keep this
       * node latched and return it.
       */
      result.exactParentFound = false;
      result.keepSearching = false;
      if (!requireExactMatch && targetContainsDuplicates) {
        result.parent = this;
      } else {
        releaseLatch();
        result.parent = null;
      }
    }
  }
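
  /*
   * A hedged driver sketch, not the original Tree search code: it shows
   * how a caller consumes the SearchResult contract above, re-dispatching
   * on result.parent until a node either claims parenthood or gives up
   * and clears keepSearching. The loop structure is an assumption.
   */
  IN driveParentSearchSketch(
      IN start,
      SearchResult result,
      boolean targetContainsDuplicates,
      boolean targetIsRoot,
      long targetNodeId,
      Node child,
      boolean requireExactMatch)
      throws DatabaseException {

    IN current = start; /* Assumed latched by the caller. */
    do {
      current.descendOnParentSearch(
          result, targetContainsDuplicates, targetIsRoot, targetNodeId, child, requireExactMatch);
      current = result.parent; /* Latched by the callee, or null. */
    } while (result.keepSearching && current != null);
    return result.parent; /* Exact or potential parent, or null; see result.exactParentFound. */
  }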
  /** Flush the target IN. */
  private void flushIN(
      IN target, LogManager logManager, Map dirtyMap, boolean logProvisionally, boolean allowDeltas)
      throws DatabaseException {

    DatabaseImpl db = target.getDatabase();
    Tree tree = db.getTree();
    boolean targetWasRoot = false;

    if (target.isDbRoot()) {
      /* We're trying to flush the root. */
      target.releaseLatch();
      RootFlusher flusher = new RootFlusher(db, logManager, target);
      tree.withRootLatched(flusher);
      boolean flushed = flusher.getFlushed();

      /*
       * We have to check whether the root split between target.releaseLatch
       * and the execution of the root flusher. If it did split, this
       * target has to be handled like a regular node.
       */
      targetWasRoot = flusher.stillRoot();

      /*
       * Update the tree's owner, whether it's the env root or the
       * dbmapping tree.
       */
      if (flushed) {
        DbTree dbTree = db.getDbEnvironment().getDbMapTree();
        dbTree.modifyDbRoot(db);
        nFullINFlushThisRun++;
        nFullINFlush++;
      }
      if (!targetWasRoot) {
        /*
         * re-latch for another attempt, now that this is no longer
         * the root.
         */
        target.latch();
      }
    }

    if (!targetWasRoot) {
      SearchResult result = tree.getParentINForChildIN(target, true);

      /*
       * Found a parent, do the flush. If no parent found, the
       * compressor deleted this item before we got to processing it.
       */
      if (result.exactParentFound) {
        try {
          ChildReference entry = result.parent.getEntry(result.index);
          IN renewedTarget = (IN) entry.fetchTarget(db, result.parent);
          renewedTarget.latch();
          DbLsn newLsn = null;
          try {

            /* Still dirty? */
            if (renewedTarget.getDirty()) {
              if (allowDeltas) {
                newLsn = renewedTarget.logAllowDeltas(logManager, logProvisionally);
                if (newLsn == null) {
                  nDeltaINFlushThisRun++;
                  nDeltaINFlush++;
                }
              } else {
                newLsn = renewedTarget.log(logManager, logProvisionally);
              }
            }
          } finally {
            renewedTarget.releaseLatch();
          }

          /* Update parent if logging occurred */
          if (newLsn != null) {
            nFullINFlushThisRun++;
            nFullINFlush++;
            if (renewedTarget instanceof BIN) {
              nFullBINFlush++;
            }
            result.parent.updateEntry(result.index, newLsn);
            addToDirtyMap(dirtyMap, result.parent);
          }
        } finally {
          result.parent.releaseLatch();
        }
      }
    }
  }
  /*
   * Scan the INList for all dirty INs and arrange them in a level-sorted
   * map for level-ordered flushing.
   */
  private SortedMap selectDirtyINs(boolean flushAll, boolean flushExtraLevel)
      throws DatabaseException {

    SortedMap newDirtyMap = new TreeMap();

    INList inMemINs = envImpl.getInMemoryINs();
    inMemINs.latchMajor();

    /*
     * Opportunistically recalculate the environment-wide memory
     * count.  This incurs no extra cost because we're walking the IN
     * list anyway.  Not the best in terms of encapsulation, as
     * preferably all memory calculations would be done in
     * MemoryBudget, but done this way to avoid any extra
     * latching.
     */
    long totalSize = 0;
    MemoryBudget mb = envImpl.getMemoryBudget();

    try {
      Iterator iter = inMemINs.iterator();
      while (iter.hasNext()) {
        IN in = (IN) iter.next();
        in.latch();
        totalSize = mb.accumulateNewUsage(in, totalSize);
        boolean isDirty = in.getDirty();
        in.releaseLatch();
        if (isDirty) {
          Integer level = new Integer(in.getLevel());
          Set dirtySet;
          if (newDirtyMap.containsKey(level)) {
            dirtySet = (Set) newDirtyMap.get(level);
          } else {
            dirtySet = new HashSet();
            newDirtyMap.put(level, dirtySet);
          }
          dirtySet.add(in);
        }
      }

      // Later release: refresh env count.
      // mb.refreshCacheMemoryUsage(totalSize);

      /*
       * If we're flushing all for cleaning, we must flush to
       * the point that there are no nodes with LSNs in the
       * cleaned files.  We could figure this out by perusing
       * every node to see what children it has, but that's so
       * expensive that instead we'll flush to the root.
       */
      if (newDirtyMap.size() > 0) {
        if (flushAll) {
          highestFlushLevel = envImpl.getDbMapTree().getHighestLevel();
        } else {
          highestFlushLevel = ((Integer) newDirtyMap.lastKey()).intValue();
          if (flushExtraLevel) {
            highestFlushLevel += 1;
          }
        }
      }

    } finally {
      inMemINs.releaseMajorLatchIfHeld();
    }

    return newDirtyMap;
  }
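
  /*
   * The containsKey/get/put sequence above predates generics. A minimal
   * modern equivalent of just the grouping step (a sketch, assuming
   * java.util imports and the in.getLevel() accessor used above):
   */
  static SortedMap<Integer, Set<IN>> groupByLevelSketch(Iterable<IN> dirtyINs) {
    SortedMap<Integer, Set<IN>> byLevel = new TreeMap<Integer, Set<IN>>();
    for (IN in : dirtyINs) {
      byLevel.computeIfAbsent(in.getLevel(), k -> new HashSet<IN>()).add(in);
    }
    return byLevel;
  }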
/* Example #9 */
  public void testEntryData() throws Throwable {

    try {
      ByteBuffer buffer = ByteBuffer.allocate(1000);
      database = new DatabaseImpl("foo", new DatabaseId(1), env, new DatabaseConfig());

      /*
       * For each loggable object, can we write the entry data out?
       */

      /*
       * Tracer records.
       */
      Tracer dMsg = new Tracer("Hello there");
      writeAndRead(buffer, LogEntryType.LOG_TRACE, dMsg, new Tracer());

      /*
       * LNs
       */
      String data = "abcdef";
      LN ln = new LN(data.getBytes());
      LN lnFromLog = new LN();
      writeAndRead(buffer, LogEntryType.LOG_LN, ln, lnFromLog);
      lnFromLog.verify(null);
      assertTrue(LogEntryType.LOG_LN.marshallOutsideLatch());

      FileSummaryLN fsLN = new FileSummaryLN(new FileSummary());
      FileSummaryLN fsLNFromLog = new FileSummaryLN();
      writeAndRead(buffer, LogEntryType.LOG_FILESUMMARYLN, fsLN, fsLNFromLog);
      assertFalse(LogEntryType.LOG_FILESUMMARYLN.marshallOutsideLatch());

      /*
       * INs
       */
      IN in = new IN(database, new byte[] {1, 0, 1, 0}, 7, 5);
      in.latch();
      in.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      in.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      in.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(35, 400)));

      /* Write it. */
      IN inFromLog = new IN();
      inFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
      inFromLog.releaseLatch();
      in.releaseLatch();

      /*
       * IN - long form
       */
      in = new IN(database, new byte[] {1, 0, 1, 0}, 7, 5);
      in.latch();
      in.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      in.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      in.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(1235, 400)));
      in.insertEntry(
          new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(0xFFFFFFF0L, 400)));

      /* Write it. */
      inFromLog = new IN();
      inFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_IN, in, inFromLog);
      inFromLog.releaseLatch();
      in.releaseLatch();

      /*
       * BINs
       */
      BIN bin = new BIN(database, new byte[] {3, 2, 1}, 8, 5);
      bin.latch();
      bin.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(212, 200)));
      bin.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(229, 300)));
      bin.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(235, 400)));
      BIN binFromLog = new BIN();
      binFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_BIN, bin, binFromLog);
      binFromLog.verify(null);
      binFromLog.releaseLatch();
      bin.releaseLatch();

      /*
       * DINs
       */
      DIN din =
          new DIN(
              database,
              new byte[] {1, 0, 0, 1},
              7,
              new byte[] {0, 1, 1, 0},
              new ChildReference(null, new byte[] {1, 0, 0, 1}, DbLsn.makeLsn(10, 100)),
              5);
      din.latch();
      din.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(12, 200)));
      din.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(29, 300)));
      din.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(35, 400)));

      /* Write it. */
      DIN dinFromLog = new DIN();
      dinFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_DIN, din, dinFromLog);
      din.releaseLatch();
      dinFromLog.releaseLatch();

      /*
       * DBINs
       */
      DBIN dbin = new DBIN(database, new byte[] {3, 2, 1}, 8, new byte[] {1, 2, 3}, 5);
      dbin.latch();
      dbin.insertEntry(new ChildReference(null, new byte[] {1, 0, 1, 0}, DbLsn.makeLsn(212, 200)));
      dbin.insertEntry(new ChildReference(null, new byte[] {1, 1, 1, 0}, DbLsn.makeLsn(229, 300)));
      dbin.insertEntry(new ChildReference(null, new byte[] {0, 0, 1, 0}, DbLsn.makeLsn(235, 400)));
      DBIN dbinFromLog = new DBIN();
      dbinFromLog.latch();
      writeAndRead(buffer, LogEntryType.LOG_DBIN, dbin, dbinFromLog);
      dbinFromLog.verify(null);
      dbin.releaseLatch();
      dbinFromLog.releaseLatch();

      /*
       * Root
       */
      DbTree dbTree = new DbTree(env);
      DbTree dbTreeFromLog = new DbTree();
      writeAndRead(buffer, LogEntryType.LOG_ROOT, dbTree, dbTreeFromLog);

      /*
       * MapLN
       */
      MapLN mapLn = new MapLN(database);
      MapLN mapLnFromLog = new MapLN();
      writeAndRead(buffer, LogEntryType.LOG_MAPLN, mapLn, mapLnFromLog);

      /*
       * UserTxn
       */

      /*
       * Disabled for now because these txns don't compare equal: one has
       * a name of "main" and the other has a name of null because it was
       * read from the log.
       *
       * Txn txn = new Txn(env, new TransactionConfig());
       * Txn txnFromLog = new Txn();
       * writeAndRead(buffer, LogEntryType.TXN_COMMIT, txn, txnFromLog);
       * txn.commit();
       */

      /*
       * TxnCommit
       */
      TxnCommit commit = new TxnCommit(111, DbLsn.makeLsn(10, 10));
      TxnCommit commitFromLog = new TxnCommit();
      writeAndRead(buffer, LogEntryType.LOG_TXN_COMMIT, commit, commitFromLog);

      /*
       * TxnAbort
       */
      TxnAbort abort = new TxnAbort(111, DbLsn.makeLsn(11, 11));
      TxnAbort abortFromLog = new TxnAbort();
      writeAndRead(buffer, LogEntryType.LOG_TXN_ABORT, abort, abortFromLog);

      /*
       * TxnPrepare
       */
      byte[] gid = new byte[64];
      byte[] bqual = new byte[64];
      TxnPrepare prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, gid, bqual));
      TxnPrepare prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, null, bqual));
      prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      prepare = new TxnPrepare(111, new LogUtils.XidImpl(1, gid, null));
      prepareFromLog = new TxnPrepare();
      writeAndRead(buffer, LogEntryType.LOG_TXN_PREPARE, prepare, prepareFromLog);

      /*
       * IN delete info
       */
      INDeleteInfo info = new INDeleteInfo(77, new byte[1], new DatabaseId(100));
      INDeleteInfo infoFromLog = new INDeleteInfo();
      writeAndRead(buffer, LogEntryType.LOG_IN_DELETE_INFO, info, infoFromLog);

      /*
       * Checkpoint start
       */
      CheckpointStart start = new CheckpointStart(177, "test");
      CheckpointStart startFromLog = new CheckpointStart();
      writeAndRead(buffer, LogEntryType.LOG_CKPT_START, start, startFromLog);

      /*
       * Checkpoint end
       */
      CheckpointEnd end =
          new CheckpointEnd(
              "test",
              DbLsn.makeLsn(20, 55),
              env.getRootLsn(),
              env.getTxnManager().getFirstActiveLsn(),
              Node.getLastId(),
              env.getDbMapTree().getLastDbId(),
              env.getTxnManager().getLastTxnId(),
              177);
      CheckpointEnd endFromLog = new CheckpointEnd();
      writeAndRead(buffer, LogEntryType.LOG_CKPT_END, end, endFromLog);
    } catch (Throwable t) {
      t.printStackTrace();
      throw t;
    }
  }
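
  /*
   * writeAndRead itself is not part of this listing. Below is a
   * self-contained sketch of the round-trip pattern it implements; the
   * Marshallable interface here is hypothetical and stands in for
   * whatever write/read methods the real loggable objects expose.
   */
  interface Marshallable {
    void writeTo(java.nio.ByteBuffer buffer);

    void readFrom(java.nio.ByteBuffer buffer);
  }

  private void writeAndReadSketch(
      java.nio.ByteBuffer buffer, Marshallable orig, Marshallable fromLog) {
    buffer.clear(); /* Reuse the shared buffer for each entry type. */
    orig.writeTo(buffer); /* Serialize the freshly built object. */
    buffer.flip(); /* Switch the buffer from writing to reading. */
    fromLog.readFrom(buffer); /* Repopulate the empty "FromLog" instance. */
    /* The real helper would then assert that orig and fromLog compare equal. */
  }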