Example #1
  /**
   * readNextEntry will stop at a bad entry.
   *
   * @return true if an element has been read.
   */
  public boolean readNextEntry() throws DatabaseException, IOException {

    boolean foundEntry = false;

    try {

      /*
       * At this point,
       *  currentEntryOffset is the entry we just read.
       *  nextEntryOffset is the entry we're about to read.
       *  currentEntryPrevOffset is 2 entries ago.
       * Note that readNextEntry() moves all the offset pointers up.
       */

      foundEntry = super.readNextEntry();

      /*
       * Note that initStartingPosition() makes sure that the file header
       * entry is valid.  So by the time we get to this method, we know
       * we're at a file with a valid file header entry.
       */
      lastValidOffset = currentEntryOffset;
      nextUnprovenOffset = nextEntryOffset;
    } catch (DbChecksumException e) {
      Tracer.trace(
          Level.INFO,
          envImpl,
          "Found checksum exception while searching "
              + " for end of log. Last valid entry is at "
              + DbLsn.toString(DbLsn.makeLsn(readBufferFileNum, lastValidOffset))
              + " Bad entry is at "
              + DbLsn.makeLsn(readBufferFileNum, nextUnprovenOffset));
    }
    return foundEntry;
  }
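
A reader like this is typically driven by a simple scan loop: read until readNextEntry() returns false (end of log or the first bad entry), then truncate the log just past the last entry that passed its checksum. A minimal sketch of that loop follows; the accessor and truncation names are hypothetical, not taken from the example:

  /* Sketch only: getNextUnprovenOffset() and truncateLog() are hypothetical
   * names used for illustration. */
  while (reader.readNextEntry()) {
    /* Offsets advance on each successfully checksummed entry. */
  }
  long truncateOffset = reader.getNextUnprovenOffset(); // hypothetical accessor
  fileManager.truncateLog(readBufferFileNum, truncateOffset); // hypothetical call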
Example #2
  /**
   * Flush the nodes in order, from the lowest level to highest level. As a flush dirties its
   * parent, add it to the dirty map, thereby cascading the writes up the tree. If flushAll wasn't
   * specified, we need only cascade up to the highest level that existed before the checkpointing
   * started.
   *
   * <p>Note that all but the top level INs and the BINDeltas are logged provisionally. That's
   * because the lower INs need not be processed individually at recovery; the higher INs will end
   * up pointing at them.
   */
  private void flushDirtyNodes(boolean flushAll, boolean allowDeltas, boolean flushExtraLevel)
      throws DatabaseException {

    LogManager logManager = envImpl.getLogManager();

    SortedMap dirtyMap = selectDirtyINs(flushAll, flushExtraLevel);

    while (dirtyMap.size() > 0) {

      /* Work on one level's worth of nodes in ascending level order. */
      Integer currentLevel = (Integer) dirtyMap.firstKey();
      boolean logProvisionally = (currentLevel.intValue() != highestFlushLevel);

      Set nodeSet = (Set) dirtyMap.get(currentLevel);
      Iterator iter = nodeSet.iterator();

      /* Flush the nodes at this level. */
      while (iter.hasNext()) {
        IN target = (IN) iter.next();
        target.latch();
        boolean triedToFlush = false;

        /*
         * Only flush the ones that are still dirty -- some
         * may have been written out by the evictor. Also
         * check if the db is still valid -- since INs of
         * deleted databases are left on the in-memory tree
         * until the evictor lazily clears them out, there may
         * be dead INs around.
         */
        if (target.getDirty() && (!target.getDatabase().getIsDeleted())) {
          flushIN(target, logManager, dirtyMap, logProvisionally, allowDeltas);
          triedToFlush = true;
        } else {
          target.releaseLatch();
        }

        Tracer.trace(
            Level.FINE,
            envImpl,
            "Checkpointer: node="
                + target.getNodeId()
                + " level="
                + Integer.toHexString(target.getLevel())
                + " flushed="
                + triedToFlush);
      }

      /* We're done with this level. */
      dirtyMap.remove(currentLevel);

      /* We can stop once the highest required level has been flushed. */
      if (currentLevel.intValue() == highestFlushLevel) {
        break;
      }
    }
  }
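
The cascade described in the javadoc can be shown independently of the JE types. A minimal self-contained sketch, using hypothetical Node objects in place of real INs (parents sit exactly one level above their children, so the set being iterated is never modified):

  import java.util.*;

  /*
   * Sketch of the level-by-level cascade: "flushing" a node dirties its
   * parent, which is queued at the parent's (strictly higher) level, so
   * writes bubble up until the highest required level is reached.
   */
  final class CascadeSketch {

    static final class Node {
      final int level;   // leaves are level 1; a parent is one level higher
      final Node parent; // null at the root
      boolean dirty = true;
      Node(int level, Node parent) { this.level = level; this.parent = parent; }
    }

    static void flushDirtyNodes(SortedMap<Integer, Set<Node>> dirtyMap, int highestFlushLevel) {
      while (!dirtyMap.isEmpty()) {
        Integer currentLevel = dirtyMap.firstKey();
        for (Node target : dirtyMap.get(currentLevel)) {
          if (target.dirty) {
            target.dirty = false;          // "log" the node
            if (target.parent != null) {   // the flush dirties the parent...
              target.parent.dirty = true;  // ...so queue it at its own level
              dirtyMap.computeIfAbsent(target.parent.level, k -> new HashSet<>())
                  .add(target.parent);
            }
          }
        }
        dirtyMap.remove(currentLevel);
        if (currentLevel == highestFlushLevel) { // stop at the required cap
          break;
        }
      }
    }
  }

Seeding dirtyMap with the dirty BINs and calling flushDirtyNodes(dirtyMap, rootLevel) reproduces the bottom-up write order of the real method.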
Example #3
 private void trace(EnvironmentImpl envImpl, String invokingSource, boolean success) {
   StringBuffer sb = new StringBuffer();
   sb.append("Checkpoint ").append(checkpointId);
   sb.append(": source=").append(invokingSource);
   sb.append(" success=").append(success);
   sb.append(" nFullINFlushThisRun=").append(nFullINFlushThisRun);
   sb.append(" nDeltaINFlushThisRun=").append(nDeltaINFlushThisRun);
   Tracer.trace(Level.INFO, envImpl, sb.toString());
 }
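
For reference, a call such as trace(envImpl, "daemon", true) for checkpoint 42 emits a single INFO line of the following form (the counter values here are purely illustrative):

  Checkpoint 42: source=daemon success=true nFullINFlushThisRun=12 nDeltaINFlushThisRun=3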
Example #4
File: Database.java Project: ppires/CIDE
  /**
   * @deprecated It has not been possible to implement this method with correct transactional
   *     semantics without incurring a performance penalty on all Database operations. Truncate
   *     functionality has been moved to Environment.truncateDatabase(), which requires that all
   *     Database handles on the database are closed before the truncate operation can execute.
   */
  public int truncate(Transaction txn, boolean countRecords) throws DatabaseException {

    checkEnv();
    checkRequiredDbState(OPEN, "Can't call Database.truncate");
    checkWritable("truncate");
    Tracer.trace(
        Level.FINEST,
        envHandle.getEnvironmentImpl(),
        "Database.truncate" + ": txnId=" + ((txn == null) ? "null" : Long.toString(txn.getId())));

    Locker locker = null;
    boolean triggerLock = false;
    boolean operationOk = false;

    try {
      locker =
          LockerFactory.getWritableLocker(
              envHandle, txn, isTransactional(), true /*retainLocks*/, null);

      /*
       * Pass true to always get a read lock on the triggers, so we are
       * sure that no secondaries are added during truncation.
       */
      acquireTriggerListReadLock();
      triggerLock = true;

      /* Truncate primary. */
      int count = truncateInternal(locker, countRecords);

      /* Truncate secondaries. */
      for (int i = 0; i < triggerList.size(); i += 1) {
        Object obj = triggerList.get(i);
        if (obj instanceof SecondaryTrigger) {
          SecondaryDatabase secDb = ((SecondaryTrigger) obj).getDb();
          secDb.truncateInternal(locker, false);
        }
      }

      operationOk = true;
      return count;
    } finally {
      if (locker != null) {
        locker.operationEnd(operationOk);
      }
      if (triggerLock) {
        releaseTriggerListReadLock();
      }
    }
  }
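
The try/finally shape above -- an operationOk flag set only after the last statement that can throw, then consumed in the finally block -- is a reusable idiom. A minimal self-contained sketch, with a hypothetical Locker-like interface standing in for the real one:

  import java.util.concurrent.Callable;

  /* Sketch of the success-flag idiom: the end-of-operation callback learns
   * whether the body completed, without a catch block per exception type. */
  final class OperationEndSketch {

    interface Locker { void operationEnd(boolean success); }

    static <T> T run(Locker locker, Callable<T> body) throws Exception {
      boolean operationOk = false;
      try {
        T result = body.call(); // may throw; operationOk stays false
        operationOk = true;     // reached only if the body succeeded
        return result;
      } finally {
        locker.operationEnd(operationOk); // cleanup sees the success flag
      }
    }
  }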
Example #5
  /** Helper for doCheckpoint. Same args, but this one is called with the evictor locked out. */
  private synchronized void doCheckpointInternal(
      CheckpointConfig config,
      boolean allowDeltas,
      boolean flushAll,
      boolean deleteAllCleanedFiles,
      String invokingSource)
      throws DatabaseException {

    if (!isRunnable(config)) {
      return;
    }

    /*
     * If there are cleaned files to be deleted, flush an extra level to
     * write out the parents of cleaned nodes.  This ensures that no
     * node will contain the LSN of a cleaned file.
     */
    boolean flushExtraLevel = false;
    Set cleanedFiles = null;
    Cleaner cleaner = envImpl.getCleaner();
    if (cleaner != null) {
      cleanedFiles = cleaner.getCleanedFiles(deleteAllCleanedFiles);
      if (cleanedFiles != null) {
        flushExtraLevel = true;
      }
    }

    lastCheckpointMillis = System.currentTimeMillis();
    resetPerRunCounters();
    LogManager logManager = envImpl.getLogManager();

    /* Get the next checkpoint id. */
    checkpointId++;
    nCheckpoints++;
    boolean success = false;
    boolean traced = false;
    try {
      /* Log the checkpoint start. */
      CheckpointStart startEntry = new CheckpointStart(checkpointId, invokingSource);
      DbLsn checkpointStart = logManager.log(startEntry);

      /*
       * Remember the first active LSN -- before this position in the
       * log, there are no active transactions.
       */
      DbLsn firstActiveLsn = envImpl.getTxnManager().getFirstActiveLsn();

      if (firstActiveLsn != null && checkpointStart.compareTo(firstActiveLsn) < 0) {
        firstActiveLsn = checkpointStart;
      }

      /* Flush IN nodes. */
      flushDirtyNodes(flushAll, allowDeltas, flushExtraLevel);

      /*
       * Flush utilization info AFTER flushing IN nodes to reduce the
       * inaccuracies caused by the sequence FileSummaryLN-LN-BIN.
       */
      flushUtilizationInfo();

      /* Log the checkpoint end. */
      if (firstActiveLsn == null) {
        firstActiveLsn = checkpointStart;
      }
      CheckpointEnd endEntry =
          new CheckpointEnd(
              invokingSource,
              checkpointStart,
              envImpl.getRootLsn(),
              firstActiveLsn,
              Node.getLastId(),
              envImpl.getDbMapTree().getLastDbId(),
              envImpl.getTxnManager().getLastTxnId(),
              checkpointId);

      /*
       * Log the checkpoint end and update the state kept about the last
       * checkpoint location. Send a trace message *before* the
       * checkpoint end log entry. This is done so that the normal
       * trace message doesn't affect the time-based isRunnable()
       * calculation, which only issues a checkpoint if a log record
       * has been written since the last checkpoint.
       */
      trace(envImpl, invokingSource, true);
      traced = true;

      /*
       * Always flush to ensure that cleaned files are not referenced,
       * and to ensure that this checkpoint is not wasted if we crash.
       */
      lastCheckpointEnd = logManager.logForceFlush(endEntry);
      lastFirstActiveLsn = firstActiveLsn;
      lastCheckpointStart = checkpointStart;

      success = true;

      if (cleaner != null && cleanedFiles != null) {
        cleaner.deleteCleanedFiles(cleanedFiles);
      }
    } catch (DatabaseException e) {
      Tracer.trace(envImpl, "Checkpointer", "doCheckpoint", "checkpointId=" + checkpointId, e);
      throw e;
    } finally {
      if (!traced) {
        trace(envImpl, invokingSource, success);
      }
    }
  }
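
Condensed, the happy path above performs these steps in order. The trace call comes before the end record precisely so that the time-based isRunnable() check never mistakes the trace record itself for fresh application activity. Method names follow the example; the rest is schematic:

  logManager.log(new CheckpointStart(checkpointId, invokingSource)); // 1. mark the start
  flushDirtyNodes(flushAll, allowDeltas, flushExtraLevel);           // 2. flush INs bottom-up
  flushUtilizationInfo();                                            // 3. utilization info after the INs
  trace(envImpl, invokingSource, true);                              // 4. trace BEFORE the end record
  lastCheckpointEnd = logManager.logForceFlush(endEntry);            // 5. force-flush the end record
  cleaner.deleteCleanedFiles(cleanedFiles);                          // 6. only now delete cleaned files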
Example #6
  /**
   * Determine whether a checkpoint should be run.
   *
   * <ol>
   *   <li>If the force parameter is specified, always checkpoint.
   *   <li>If the config object specifies time or log size, use that.
   *   <li>If the environment is configured to use log size based checkpointing, check the log.
   *   <li>Lastly, use time based checking.
   * </ol>
   */
  private boolean isRunnable(CheckpointConfig config) throws DatabaseException {

    /* Figure out if we're using log size or time to determine the interval. */
    long useBytesInterval = 0;
    long useTimeInterval = 0;
    DbLsn nextLsn = null;
    try {
      if (config.getForce()) {
        return true;
      } else if (config.getKBytes() != 0) {
        useBytesInterval = config.getKBytes() << 10;
      } else if (config.getMinutes() != 0) {
        // convert to millis
        useTimeInterval = config.getMinutes() * 60 * 1000;
      } else if (logSizeBytesInterval != 0) {
        useBytesInterval = logSizeBytesInterval;
      } else {
        useTimeInterval = timeInterval;
      }

      /*
       * If our checkpoint interval is defined by log size, check
       * on how much log has grown since the last checkpoint.
       */
      if (useBytesInterval != 0) {
        nextLsn = envImpl.getFileManager().getNextLsn();
        if (nextLsn.getNoCleaningDistance(lastCheckpointEnd, logFileMax) >= useBytesInterval) {
          return true;
        } else {
          return false;
        }
      } else if (useTimeInterval != 0) {

        /*
         * Our checkpoint is determined by time.  If enough
         * time has passed and some log data has been written,
         * do a checkpoint.
         */
        DbLsn lastUsedLsn = envImpl.getFileManager().getLastUsedLsn();
        if (((System.currentTimeMillis() - lastCheckpointMillis) >= useTimeInterval)
            && (lastUsedLsn.compareTo(lastCheckpointEnd) != 0)) {
          return true;
        } else {
          return false;
        }
      } else {
        return false;
      }
    } finally {
      StringBuffer sb = new StringBuffer();
      sb.append("size interval=").append(useBytesInterval);
      if (nextLsn != null) {
        sb.append(" nextLsn=").append(nextLsn.getNoFormatString());
      }
      if (lastCheckpointEnd != null) {
        sb.append(" lastCkpt=");
        sb.append(lastCheckpointEnd.getNoFormatString());
      }
      sb.append(" time interval=").append(useTimeInterval);
      sb.append(" force=").append(config.getForce());

      Tracer.trace(Level.FINEST, envImpl, sb.toString());
    }
  }
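
The interval conversions at the top of the method are terse; a worked example with illustrative values:

  /* KBytes to bytes is a shift: 512 << 10 == 512 * 1024 == 524288 bytes.
   * Minutes to milliseconds: 5 * 60 * 1000 == 300000 ms.  (The original's
   * int arithmetic would overflow only above roughly 35,791 minutes,
   * i.e. Integer.MAX_VALUE / 60000.) */
  long useBytesInterval = 512 << 10;     // 524288
  long useTimeInterval = 5 * 60 * 1000;  // 300000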