/**
 * Grows the environment's log by writing trace messages until at least
 * {@code numToAdd} new log files exist beyond the current last file.
 *
 * @param env the open environment whose log is grown
 * @param numToAdd minimum number of log files to add beyond the starting file
 */
private void fillLogWithTraceMsgs(Environment env, int numToAdd) {
    EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
    FileManager fileManager = envImpl.getFileManager();
    /* Use primitive long: getLastFileNum() returns long; boxing was needless. */
    long beforeTracing = getLastFileNum(env);
    logger.info("BeforeTracing end file = 0x" + Long.toHexString(beforeTracing));
    do {
        /* Write a batch of 101 trace records per pass before re-checking. */
        for (int i = 0; i <= 100; i++) {
            Trace.trace(envImpl, STUFF + i);
        }
    } while (fileManager.getLastFileNum() <= (beforeTracing + numToAdd));
    long afterTracing = fileManager.getLastFileNum();
    logger.info("AfterTracing end file = 0x" + Long.toHexString(afterTracing));
    /* Check that we've grown the log by a good bit - more than 40 files. */
    assertTrue((afterTracing - beforeTracing) > 40);
}
/*
 * Clear as many resources as possible, even in the face of an environment
 * that has received a fatal error, in order to support reopening the
 * environment in the same JVM.
 *
 * Cleanup is deliberately best-effort: each step is attempted even if an
 * earlier one fails, and failures are swallowed (the environment is already
 * being torn down after a fatal error).
 */
public synchronized void closeAfterRunRecovery() throws DatabaseException {
    try {
        shutdownDaemons();
    } catch (InterruptedException IE) {
        /*
         * Klockwork - ok. Restore the interrupt status so callers can still
         * observe the interruption; continue with best-effort cleanup.
         */
        Thread.currentThread().interrupt();
    }

    try {
        fileManager.clear();
    } catch (Exception e) {
        /* Klockwork - ok */
    }

    try {
        fileManager.close();
    } catch (Exception e) {
        /* Klockwork - ok */
    }

    /* Unregister so the environment can be reopened in this JVM. */
    DbEnvPool.getInstance().remove(envHome);
}
private void doClose(boolean doCheckpoint) throws DatabaseException { StringBuffer errors = new StringBuffer(); try { // refined trace Tracer.trace(Level.FINE, this, "Close of // environment " + envHome + " started"); try { envState.checkState(DbEnvState.VALID_FOR_CLOSE, DbEnvState.CLOSED); } catch (DatabaseException DBE) { throw DBE; } /* * Begin shutdown of the deamons before checkpointing. Cleaning * during the checkpoint is wasted and slows down the checkpoint. */ requestShutdownDaemons(); /* Checkpoint to bound recovery time. */ if (doCheckpoint && !isReadOnly && (envState != DbEnvState.INVALID) && logManager.getLastLsnAtRecovery() != fileManager.getLastUsedLsn()) { /* * Force a checkpoint. Don't allow deltas (minimize recovery * time) because they cause inefficiencies for two reasons: (1) * recovering BINDeltas causes extra random I/O in order to * reconstitute BINS, which can greatly increase recovery time, * and (2) logging deltas during close causes redundant logging * by the full checkpoint after recovery. */ CheckpointConfig ckptConfig = new CheckpointConfig(); ckptConfig.setForce(true); ckptConfig.setMinimizeRecoveryTime(true); try { invokeCheckpoint( ckptConfig, false, // flushAll "close"); } catch (DatabaseException IE) { errors.append("\nException performing checkpoint: "); errors.append(IE.toString()).append("\n"); } } try { shutdownDaemons(); } catch (InterruptedException IE) { errors.append("\nException shutting down daemon threads: "); errors.append(IE.toString()).append("\n"); } /* Flush log. 
*/ // refined trace Tracer.trace(Level.FINE, this, "Env " + envHome + " // daemons shutdown"); try { logManager.flush(); } catch (DatabaseException DBE) { errors.append("\nException flushing log manager: "); errors.append(DBE.toString()).append("\n"); } try { fileManager.clear(); } catch (IOException IOE) { errors.append("\nException clearing file manager: "); errors.append(IOE.toString()).append("\n"); } catch (DatabaseException DBE) { errors.append("\nException clearing file manager: "); errors.append(DBE.toString()).append("\n"); } try { fileManager.close(); } catch (IOException IOE) { errors.append("\nException clearing file manager: "); errors.append(IOE.toString()).append("\n"); } catch (DatabaseException DBE) { errors.append("\nException clearing file manager: "); errors.append(DBE.toString()).append("\n"); } try { inMemoryINs.clear(); } catch (DatabaseException DBE) { errors.append("\nException closing file manager: "); errors.append(DBE.toString()).append("\n"); } hook_afterDoClose(errors); } finally { envState = DbEnvState.CLOSED; } if (errors.length() > 0 && savedInvalidatingException == null) { /* Don't whine again if we've already whined. */ throw new RunRecoveryException(this, errors.toString()); } }
/**
 * Create a database environment to represent the data in envHome. Properties from the
 * je.properties file in that directory are used to initialize the system wide property bag.
 * Properties passed to this method are used to influence the open itself.
 *
 * @param envHome absolute path of the database environment home directory
 * @param envConfig configuration used to influence this open
 * @throws DatabaseException if creation is disallowed and no environment exists, or on any
 *     other failure during initialization or recovery
 */
public EnvironmentImpl(File envHome, EnvironmentConfig envConfig) throws DatabaseException {
    try {
        this.envHome = envHome;
        envState = DbEnvState.INIT;

        /* Set up configuration parameters. */
        configManager = new DbConfigManager(envConfig);
        configObservers = new ArrayList();
        addConfigObserver(this);

        /*
         * Decide on memory budgets based on environment config params and
         * memory available to this process.
         */
        memoryBudget = new LogBufferBudget(this, configManager);

        /*
         * Set up debug logging. Depending on configuration, add handlers,
         * set logging level.
         */
        // envLogger = initLogger(envHome);

        /*
         * Essential services. These must exist before recovery.
         */
        hook_readProperties(configManager);
        forcedYield = configManager.getBoolean(EnvironmentParams.ENV_FORCED_YIELD);
        isNoLocking = !(configManager.getBoolean(EnvironmentParams.ENV_INIT_LOCKING));
        isReadOnly = configManager.getBoolean(EnvironmentParams.ENV_RDONLY);

        fileManager = new FileManager(this, envHome, isReadOnly);
        if (!envConfig.getAllowCreate() && !fileManager.filesExist()) {
            /* Fixed typo in message: "Enviroment" -> "Environment". */
            throw new DatabaseException(
                "Environment creation isn't allowed, "
                    + " but there is no pre-existing "
                    + " environment in "
                    + envHome);
        }

        logManager = new SyncedLogManager(this, isReadOnly);

        inMemoryINs = new INList(this);
        txnManager = new TxnManager(this);

        /*
         * Make sure that either log-size-based or time-based checkpointing is
         * enabled.
         */
        checkpointer = new Checkpointer(this);
        cleaner = new Cleaner(this, "Cleaner");

        /*
         * Daemons are always made here, but only started after recovery. We
         * want them to exist so we can call them programmatically even if
         * the daemon thread is not started.
         */
        createDaemons();

        /*
         * Recovery will recreate the dbMapTree from the log if it exists.
         */
        dbMapTree = new DbTree(this);

        referenceCount = 0;

        /*
         * Do not do recovery and start daemons if this environment is for a
         * utility.
         */
        if (configManager.getBoolean(EnvironmentParams.ENV_RECOVERY)) {

            /*
             * Run recovery. Note that debug logging to the database log is
             * disabled until recovery is finished.
             */
            try {
                RecoveryManager recoveryManager = new RecoveryManager(this);
                lastRecoveryInfo = recoveryManager.recover(isReadOnly);
            } finally {
                try {
                    /* Flush to get all exception tracing out to the log. */
                    logManager.flush();
                    fileManager.clear();
                } catch (IOException e) {
                    /*
                     * NOTE(review): only the message is propagated here; the
                     * original IOException cause is dropped. Verify whether
                     * this DatabaseException supports a cause constructor.
                     */
                    throw new DatabaseException(e.getMessage());
                }
            }
        } else {
            /* Utility environments run read-only, without recovery. */
            isReadOnly = true;
            noComparators = true;
        }

        /* Start daemons after recovery. */
        runOrPauseDaemons(configManager);

        /*
         * Cache a few critical values. We keep our timeout in millis
         * instead of microseconds because Object.wait takes millis.
         */
        lockTimeout =
            PropUtil.microsToMillis(configManager.getLong(EnvironmentParams.LOCK_TIMEOUT));
        txnTimeout =
            PropUtil.microsToMillis(configManager.getLong(EnvironmentParams.TXN_TIMEOUT));

        /* Mark as open. */
        open();
    } catch (DatabaseException e) {
        /* Release any environment locks if there was a problem. */
        if (fileManager != null) {
            try {
                fileManager.close();
            } catch (IOException IOE) {
                /*
                 * Klockwork - ok Eat it, we want to throw the original
                 * exception.
                 */
            }
        }
        throw e;
    }
}
/**
 * Returns the number of the last file in the given environment's log.
 *
 * @param env the open environment to inspect
 * @return the last log file number
 */
private long getLastFileNum(Environment env) {
    /* Reach the file manager through the internal environment handle. */
    FileManager mgr = DbInternal.getEnvironmentImpl(env).getFileManager();
    return mgr.getLastFileNum();
}
/** * Send files in response to request messages. The request sequence looks like the following: * * <p>[FileReq | StatReq]+ Done * * <p>The response sequence to a FileReq looks like: * * <p>FileStart <file byte stream> FileEnd * * <p>and that for a StatReq, is simply a StatResp */ private void sendRequestedFiles(Protocol protocol) throws IOException, ProtocolException, DatabaseException { try { while (true) { FileReq fileReq = protocol.read(namedChannel.getChannel(), FileReq.class); final String fileName = fileReq.getFileName(); /* * Calculate the full path for a specified log file name, * especially when this Feeder is configured to run with sub * directories. */ FileManager fMgr = feederManager.getEnvImpl().getFileManager(); File file = new File(fMgr.getFullFileName(fileName)); if (!file.exists()) { throw EnvironmentFailureException.unexpectedState("Log file not found: " + fileName); } /* Freeze the length and last modified date. */ final long length = file.length(); final long lastModified = file.lastModified(); byte digest[] = null; FileInfoResp resp = null; Protocol.FileInfoResp cachedResp = feederManager.statResponses.get(fileName); byte cachedDigest[] = ((cachedResp != null) && (cachedResp.getFileLength() == length) && (cachedResp.getLastModifiedTime() == lastModified)) ? 
cachedResp.getDigestSHA1() : null; if (fileReq instanceof FileInfoReq) { if (cachedDigest != null) { digest = cachedDigest; } else if (((FileInfoReq) fileReq).getNeedSHA1()) { digest = getSHA1Digest(file, length).digest(); } else { // Digest not requested digest = new byte[0]; } resp = protocol.new FileInfoResp(fileName, length, lastModified, digest); } else { protocol.write(protocol.new FileStart(fileName, length, lastModified), namedChannel); digest = sendFileContents(file, length); if ((cachedDigest != null) && !Arrays.equals(cachedDigest, digest)) { throw EnvironmentFailureException.unexpectedState( "Inconsistent cached and computed digests"); } resp = protocol.new FileEnd(fileName, length, lastModified, digest); } /* Cache for subsequent requests, if it was computed. */ if (digest.length > 0) { feederManager.statResponses.put(fileName, resp); } protocol.write(resp, namedChannel); } } catch (ProtocolException pe) { if (pe.getUnexpectedMessage() instanceof Protocol.Done) { return; } throw pe; } }