static { envConfig.setConfigParam(EnvironmentParams.ENV_CHECK_LEAKS.getName(), "false"); envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); envConfig.setTxnNoSync(Boolean.getBoolean(TestUtils.NO_SYNC)); envConfig.setLockTimeout(1); // to speed up intentional deadlocks envConfig.setAllowCreate(true); }
protected void setUp() throws DatabaseException, IOException { /* Remove files to start with a clean slate. */ TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); EnvironmentConfig envConfig = TestUtils.initEnvConfig(); DbInternal.disableParameterValidation(envConfig); envConfig.setConfigParam( EnvironmentParams.LOG_FILE_MAX.getName(), Integer.toString(FILE_SIZE)); /* Yank the cache size way down. */ envConfig.setConfigParam(EnvironmentParams.LOG_FILE_CACHE_SIZE.getName(), "3"); envConfig.setAllowCreate(true); envImpl = new EnvironmentImpl(envHome, envConfig); /* Make a standalone file manager for this test. */ envImpl.close(); envImpl.open(); /* Just sets state to OPEN. */ fileManager = new FileManager(envImpl, envHome, false); /* * Remove any files after the environment is created again! We want to * remove the files made by recovery, so we can test the file manager * in controlled cases. */ TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); }
/** SR #11123 Make sure that BINDeltas are applied only to non-deleted nodes. */ public void testBINDelta() throws Throwable { EnvironmentConfig envConfig = TestUtils.initEnvConfig(); turnOffEnvDaemons(envConfig); envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); envConfig.setConfigParam(EnvironmentParams.BIN_DELTA_PERCENT.getName(), "75"); envConfig.setAllowCreate(true); DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); EnvironmentConfig restartConfig = TestUtils.initEnvConfig(); turnOffEnvDaemons(restartConfig); restartConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); testOneCase( DB_NAME, envConfig, dbConfig, new TestGenerator() { void generateData(Database db) throws DatabaseException { addData(db); } }, restartConfig, new DatabaseConfig()); }
/** * Returns a new read-only handle to the JE root container for this backend. The caller is * responsible for closing the root container after use. * * @return The read-only RootContainer object for this backend. * @throws ConfigException If an unrecoverable problem arises during initialization. * @throws InitializationException If a problem occurs during initialization that is not related * to the server configuration. */ public RootContainer getReadOnlyRootContainer() throws ConfigException, InitializationException { EnvironmentConfig envConfig = ConfigurableEnvironment.parseConfigEntry(cfg); envConfig.setReadOnly(true); envConfig.setAllowCreate(false); envConfig.setTransactional(false); envConfig.setConfigParam("je.env.isLocking", "true"); envConfig.setConfigParam("je.env.runCheckpointer", "true"); return initializeRootContainer(envConfig); }
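/*
 * Illustrative sketch, not part of the original backend: one way a caller inside the
 * same class might consume getReadOnlyRootContainer() above, making sure the container
 * is closed afterwards as the javadoc requires. The method name and placeholder body
 * are assumptions; the close/catch pattern mirrors the surrounding snippets.
 */
private void withReadOnlyRootContainer() throws ConfigException, InitializationException {
  RootContainer container = getReadOnlyRootContainer();
  try {
    // Perform read-only operations against the container here.
  } finally {
    try {
      container.close();
    } catch (DatabaseException e) {
      // Mirror the surrounding snippets: log close failures and continue.
      if (debugEnabled()) {
        TRACER.debugCaught(DebugLogLevel.ERROR, e);
      }
    }
  }
}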
private EnvironmentConfig makeEnvConfig() { EnvironmentConfig envConfig = new EnvironmentConfig(); envConfig.setAllowCreate(true); envConfig.setTransactional(true); DbInternal.disableParameterValidation(envConfig); envConfig.setConfigParam(EnvironmentParams.LOG_FILE_MAX.getName(), "10000"); /* Control cleaning explicitly. */ envConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); return envConfig; }
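/*
 * Hedged usage sketch for makeEnvConfig() above: because ENV_RUN_CLEANER is "false",
 * cleaning has to be driven explicitly. The method name, the envHome field, and the
 * point at which cleaning is triggered are assumptions for illustration; cleanLog(),
 * CheckpointConfig.setForce() and checkpoint() are standard JE calls.
 */
private void cleanExplicitly() throws DatabaseException {
  Environment env = new Environment(envHome, makeEnvConfig());
  try {
    // ... perform writes/deletes that make log entries obsolete ...

    // Run the cleaner by hand until it finds nothing more to clean.
    while (env.cleanLog() > 0) {
    }

    // A forced checkpoint allows the cleaned files to be deleted.
    CheckpointConfig force = new CheckpointConfig();
    force.setForce(true);
    env.checkpoint(force);
  } finally {
    env.close();
  }
}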
private void open(final boolean runCleaner) { final EnvironmentConfig envConfig = TestUtils.initEnvConfig(); envConfig.setAllowCreate(true); envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, Integer.toString(FILE_SIZE)); envConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, "50"); envConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, "0"); envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, runCleaner ? "true" : "false"); envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); env = new Environment(envHome, envConfig); final DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); db = env.openDatabase(null, DB_NAME, dbConfig); }
/** * Verify the integrity of the backend instance. * * @param verifyConfig The verify configuration. * @param statEntry Optional entry to save stats into. * @return The error count. * @throws ConfigException If an unrecoverable problem arises during initialization. * @throws InitializationException If a problem occurs during initialization that is not related * to the server configuration. * @throws DirectoryException If a Directory Server error occurs. */ public long verifyBackend(VerifyConfig verifyConfig, Entry statEntry) throws InitializationException, ConfigException, DirectoryException { // If the backend already has the root container open, we must use the same // underlying root container boolean openRootContainer = rootContainer == null; long errorCount = 0; try { if (openRootContainer) { EnvironmentConfig envConfig = ConfigurableEnvironment.parseConfigEntry(cfg); envConfig.setReadOnly(true); envConfig.setAllowCreate(false); envConfig.setTransactional(false); envConfig.setConfigParam("je.env.isLocking", "true"); envConfig.setConfigParam("je.env.runCheckpointer", "true"); rootContainer = initializeRootContainer(envConfig); } VerifyJob verifyJob = new VerifyJob(verifyConfig); errorCount = verifyJob.verifyBackend(rootContainer, statEntry); } catch (DatabaseException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } throw createDirectoryException(e); } catch (JebException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), e.getMessageObject()); } finally { // If a root container was opened in this method as read only, close it // to leave the backend in the same state. if (openRootContainer && rootContainer != null) { try { rootContainer.close(); rootContainer = null; } catch (DatabaseException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } } } } return errorCount; }
private EnvironmentConfig setupEnvConfig() { EnvironmentConfig envConfig = TestUtils.initEnvConfig(); turnOffEnvDaemons(envConfig); envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "4"); envConfig.setAllowCreate(true); return envConfig; }
public ByteStoreBDB(File dir, String dbname, boolean ro) { this.dir = Files.initDirectory(dir); this.readonly = ro; settings = new SettingsJE(); EnvironmentConfig bdb_eco = new EnvironmentConfig(); bdb_eco.setReadOnly(ro); bdb_eco.setAllowCreate(!ro); bdb_eco.setTransactional(false); // bdb_eco.setDurability(Durability.COMMIT_NO_SYNC); if (ro) { bdb_eco.setConfigParam( EnvironmentConfig.ENV_RUN_CLEANER, "false"); // Disable log cleaner thread bdb_eco.setCacheMode(CacheMode.EVICT_LN); } JEUtil.mergeSystemProperties(bdb_eco); SettingsJE.updateEnvironmentConfig(settings, bdb_eco); bdb_env = new Environment(dir, bdb_eco); bdb_cfg = new DatabaseConfig(); bdb_cfg.setReadOnly(ro); bdb_cfg.setAllowCreate(true); bdb_cfg.setDeferredWrite(true); SettingsJE.updateDatabaseConfig(settings, bdb_cfg); bdb = bdb_env.openDatabase(null, dbname, bdb_cfg); if (log.isDebugEnabled()) { log.debug(SettingsJE.dumpDebug(bdb)); } }
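/*
 * Sketch only: ByteStoreBDB above opens the database with deferred writes, so a close
 * path has to flush buffered changes first. The close() method itself is an assumption;
 * Database.sync(), Database.close() and Environment.close() are standard JE calls.
 */
public void close() {
  try {
    if (!readonly) {
      // Deferred-write databases must be synced before closing,
      // otherwise buffered updates may be lost.
      bdb.sync();
    }
  } finally {
    bdb.close();
    bdb_env.close();
  }
}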
private void createRepEnvInfo(String sleepTime) throws Throwable { /* * Set a large buffer size and disable the checkpointing, so the * data in the buffer can only be flushed by the LogFlushTask. */ EnvironmentConfig envConfig = RepTestUtils.createEnvConfig(Durability.COMMIT_NO_SYNC); envConfig.setConfigParam(EnvironmentParams.MAX_MEMORY.getName(), "20000000"); envConfig.setConfigParam(EnvironmentParams.LOG_MEM_SIZE.getName(), "120000000"); envConfig.setConfigParam(EnvironmentParams.NUM_LOG_BUFFERS.getName(), "4"); envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); /* Configure the log flush task. */ ReplicationConfig repConfig = new ReplicationConfig(); repConfig.setConfigParam(ReplicationConfig.LOG_FLUSH_TASK_INTERVAL, sleepTime); repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3, envConfig, repConfig); }
public void setUp() throws IOException, DatabaseException { TestUtils.removeFiles("Setup", envHome, FileManager.JE_SUFFIX); EnvironmentConfig envConfig = TestUtils.initEnvConfig(); envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); envConfig.setAllowCreate(true); env = new EnvironmentImpl(envHome, envConfig); }
private static void setJEProperties( BackendCfg cfg, EnvironmentConfig envConfig, ByteString backendId) { for (Map.Entry<String, String> mapEntry : attrMap.entrySet()) { String jeProperty = mapEntry.getKey(); String attrName = mapEntry.getValue(); String value = getPropertyValue(cfg, attrName, backendId); envConfig.setConfigParam(jeProperty, value); } }
@Test public void testCleanAfterMinUtilizationChange() { open(true /*runCleaner*/); writeFiles(4 /*nActive*/, 3 /*nObsolete*/); expectNothingToClean(); final EnvironmentConfig envConfig = env.getConfig(); envConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, "90"); env.setMutableConfig(envConfig); expectBackgroundCleaning(); close(); }
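/*
 * Illustrative companion to the test above (assumed helper, not from the original
 * suite): the same mutable-parameter pattern, lowering the cleaner threshold again
 * and driving cleaning manually. The parameter value is an arbitrary example.
 */
private void relaxCleanerThreshold(Environment env) {
  EnvironmentConfig mutableConfig = env.getConfig();
  mutableConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, "50");
  env.setMutableConfig(mutableConfig);

  // When the background cleaner is disabled, cleaning can still be invoked explicitly.
  while (env.cleanLog() > 0) {
  }
}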
private static void setDBLoggingLevel( EnvironmentConfig envConfig, String loggingLevel, DN dn, boolean loggingFileHandlerOn) throws ConfigException { Logger parent = Logger.getLogger("com.sleepycat.je"); try { parent.setLevel(Level.parse(loggingLevel)); } catch (Exception e) { throw new ConfigException(ERR_JEB_INVALID_LOGGING_LEVEL.get(loggingLevel, dn)); } final Level level = loggingFileHandlerOn ? Level.ALL : Level.OFF; envConfig.setConfigParam(FILE_LOGGING_LEVEL, level.getName()); }
static { long stime = System.currentTimeMillis(); File f = new File(FILE_PATH); if (!f.exists()) { f.mkdirs(); } /** ===================== Environment configuration ===================== */ envConfig = new EnvironmentConfig(); envConfig.setAllowCreate(true); envConfig.setTransactional(false); /** 40 MB cache; maximum log file size is 20 MB (the default is 10 MB). */ envConfig.setCacheSize(1024 * 1024 * 40); envConfig.setConfigParam("je.log.fileMax", String.valueOf(1024 * 1024 * 20)); /** Adjust the write log buffer sizes, 6 MB in total. */ envConfig.setConfigParam("je.log.bufferSize", String.valueOf(1024 * 1024 * 2)); envConfig.setConfigParam("je.log.totalBufferBytes", String.valueOf(1024 * 1024 * 6)); env = new Environment(new File(FILE_PATH), envConfig); env.cleanLog(); /** Database configuration. */ dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); // dbConfig.setDeferredWrite(true); database = env.openDatabase(null, SOURCE_DB, dbConfig); Runtime.getRuntime() .addShutdownHook( new Thread() { public void run() { close(); logger.warn("close db and env"); } }); logger.warn("init bdb success " + (System.currentTimeMillis() - stime)); }
/** * Create a JE environment configuration with default values. * * @return A JE environment config containing default values. */ public static EnvironmentConfig defaultConfig() { EnvironmentConfig envConfig = new EnvironmentConfig(); envConfig.setTransactional(true); envConfig.setAllowCreate(true); // "je.env.sharedLatches" is "true" by default since JE #12136 (3.3.62?) // This parameter was set to false while diagnosing a Berkeley DB JE bug. // Normally cleansed log files are deleted, but if this is set false // they are instead renamed from .jdb to .del. envConfig.setConfigParam(CLEANER_EXPUNGE, "true"); // Under heavy write load the check point can fall behind causing // uncontrolled DB growth over time. This parameter makes the out of // the box configuration more robust at the cost of a slight // reduction in maximum write throughput. Experiments have shown // that response time predictability is not impacted negatively. envConfig.setConfigParam(CHECKPOINTER_HIGH_PRIORITY, "true"); // If the JVM is reasonably large then we can safely default to // bigger read buffers. This will result in more scalable checkpointer // and cleaner performance. if (Runtime.getRuntime().maxMemory() > 256 * 1024 * 1024) { envConfig.setConfigParam(CLEANER_LOOK_AHEAD_CACHE_SIZE, String.valueOf(2 * 1024 * 1024)); envConfig.setConfigParam(LOG_ITERATOR_READ_SIZE, String.valueOf(2 * 1024 * 1024)); envConfig.setConfigParam(LOG_FAULT_READ_SIZE, String.valueOf(4 * 1024)); } // Disable lock timeouts, meaning that no lock wait // timelimit is enforced and a deadlocked operation // will block indefinitely. envConfig.setLockTimeout(0, TimeUnit.MICROSECONDS); return envConfig; }
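/*
 * Hedged example of how defaultConfig() above might be consumed; the method name,
 * the backendDirectory parameter and the LOG_FILE_MAX value are placeholders, not
 * part of the original module.
 */
public static Environment openWithDefaults(File backendDirectory) {
  EnvironmentConfig envConfig = defaultConfig();
  // Callers can still override individual parameters before opening the environment.
  envConfig.setConfigParam(EnvironmentConfig.LOG_FILE_MAX, "100000000");
  return new Environment(backendDirectory, envConfig);
}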
/** * Parse, validate and set native JE environment properties for a given environment config. * * @param envConfig The JE environment config for which to set the properties. * @param jeProperties The JE environment properties to parse, validate and set. * @param configAttrMap Component supported JE properties to their configuration attributes map. * @return An environment config instance with given properties set. * @throws ConfigException If there is an error while parsing, validating and setting any of the * properties provided. */ public static EnvironmentConfig setJEProperties( EnvironmentConfig envConfig, SortedSet<String> jeProperties, HashMap<String, String> configAttrMap) throws ConfigException { if (jeProperties.isEmpty()) { // return default config. return envConfig; } // Set to catch duplicate properties. HashSet<String> uniqueJEProperties = new HashSet<>(); // Iterate through the config values associated with a JE property. for (String jeEntry : jeProperties) { StringTokenizer st = new StringTokenizer(jeEntry, "="); if (st.countTokens() != 2) { throw new ConfigException(ERR_CONFIG_JE_PROPERTY_INVALID_FORM.get(jeEntry)); } String jePropertyName = st.nextToken(); String jePropertyValue = st.nextToken(); // Check if it is a duplicate. if (uniqueJEProperties.contains(jePropertyName)) { throw new ConfigException(ERR_CONFIG_JE_DUPLICATE_PROPERTY.get(jePropertyName)); } // Set JE property. try { envConfig.setConfigParam(jePropertyName, jePropertyValue); // If this property shadows an existing config attribute. if (configAttrMap.containsKey(jePropertyName)) { LocalizableMessage message = ERR_CONFIG_JE_PROPERTY_SHADOWS_CONFIG.get( jePropertyName, attrMap.get(jePropertyName)); throw new ConfigException(message); } // Add this property to unique set. uniqueJEProperties.add(jePropertyName); } catch (IllegalArgumentException e) { logger.traceException(e); LocalizableMessage message = ERR_CONFIG_JE_PROPERTY_INVALID.get(jeEntry, e.getMessage()); throw new ConfigException(message, e.getCause()); } } return envConfig; }
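/*
 * Usage sketch for setJEProperties() above (assumed call site, not taken from the
 * original module): properties arrive as "name=value" strings, duplicates are rejected,
 * and any property that shadows a dedicated configuration attribute raises a
 * ConfigException. The property names shown are standard JE parameters used as examples.
 */
private static EnvironmentConfig buildConfigFromProperties() throws ConfigException {
  SortedSet<String> jeProperties = new TreeSet<>();
  jeProperties.add("je.cleaner.threads=2");
  jeProperties.add("je.log.faultReadSize=4096");

  // JE property names mapped to the config attributes that already cover them;
  // left empty here so nothing is treated as shadowed.
  HashMap<String, String> configAttrMap = new HashMap<>();

  return setJEProperties(new EnvironmentConfig(), jeProperties, configAttrMap);
}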
/** Set up the environment and db. */ private void initDbs(int nDumps, Hashtable[] dataMaps) throws DatabaseException { EnvironmentConfig envConfig = TestUtils.initEnvConfig(); envConfig.setConfigParam(EnvironmentParams.NODE_MAX.getName(), "6"); envConfig.setAllowCreate(true); env = new Environment(envHome, envConfig); /* Make a db and open it. */ for (int i = 0; i < nDumps; i += 1) { DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); dbConfig.setSortedDuplicates(true); Database myDb = env.openDatabase(null, dbName + i, dbConfig); Cursor cursor = myDb.openCursor(null, null); doLargePut(dataMaps[i], cursor, N_KEYS); cursor.close(); myDb.close(); } }
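/*
 * Sketch of reading back the duplicate records written by initDbs() above. The env and
 * dbName fields are reused from that snippet; the method itself is an assumption for
 * illustration. getNext() walks each key and its duplicates in order because the
 * database was created with sorted duplicates.
 */
private int countRecords(int dumpIndex) throws DatabaseException {
  DatabaseConfig dbConfig = new DatabaseConfig();
  dbConfig.setSortedDuplicates(true);
  Database myDb = env.openDatabase(null, dbName + dumpIndex, dbConfig);
  Cursor cursor = myDb.openCursor(null, null);
  int count = 0;
  try {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    while (cursor.getNext(key, data, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
      count += 1;
    }
  } finally {
    cursor.close();
    myDb.close();
  }
  return count;
}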
/* * See SR11455 for details. * * This test is checking that the maxTxnId gets recovered properly during * recovery. The SR has to do with the INFileReader not including * DupCountLN_TX and DelDupLN_TX's in its txnIdTrackingMap. When these * were not included, it was possible for a transaction to consist solely * of DupCountLN_TX/DelDupLN_TX pairs. The "deleteData" transaction below * does just this. If no checkpoint occurred following such a transaction, * then the correct current txnid would not be written to the log and * determining this value during recovery would be left up to the * INFileReader. However, without reading the DupCountLN_TX/DelDupLN_TX * records, it would not recover the correct value. * * We take the poor man's way out of creating this situation by just * manually asserting the txn id is correct post-recovery. The txnid of 12 * was determined by looking through logs before and after the fix. */ public void testSR11455() throws Throwable { createEnvAndDbs(1 << 20, true, 1); int numRecs = 1; int nDups = 3; try { /* Set up a repository of expected data. */ Map<TestData, Set<TestData>> expectedData = new HashMap<TestData, Set<TestData>>(); /* Insert all the data. */ Transaction txn = env.beginTransaction(null, null); insertData(txn, 0, numRecs - 1, expectedData, nDups, true, 1); txn.commit(); txn = env.beginTransaction(null, null); /* Delete all the even records. */ deleteData(txn, expectedData, false, false, 1); txn.abort(); closeEnv(); /* Open it again, which will run recovery. */ EnvironmentConfig recoveryConfig = TestUtils.initEnvConfig(); recoveryConfig.setTransactional(true); recoveryConfig.setConfigParam(EnvironmentParams.ENV_RUN_CLEANER.getName(), "false"); recoveryConfig.setConfigParam(EnvironmentParams.ENV_RUN_EVICTOR.getName(), "false"); env = new Environment(envHome, recoveryConfig); txn = env.beginTransaction(null, null); assertEquals(6, txn.getId()); txn.commit(); env.close(); } catch (Throwable t) { t.printStackTrace(); throw t; } }
@Test public void testEnvRecovery() { Logger logger = LoggerUtils.getLoggerFixedPrefix(this.getClass(), "test"); try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream p = new PrintStream(baos); EnvironmentConfig envConfig = TestUtils.initEnvConfig(); envConfig.setAllowCreate(true); envConfig.setConfigParam("je.env.startupThreshold", "0"); env = new Environment(envHome, envConfig); env.printStartupInfo(p); DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setAllowCreate(true); Database db = env.openDatabase(null, "foo", dbConfig); DatabaseEntry key = new DatabaseEntry(new byte[1000]); DatabaseEntry data = new DatabaseEntry(new byte[1000]); for (int i = 0; i < 10; i += 1) { db.put(null, key, data); } db.close(); env.close(); env = new Environment(envHome, envConfig); env.printStartupInfo(p); logger.fine(baos.toString()); env.close(); env = null; } catch (Exception e) { fail( "This test succeeds as long as the printing of the report " + "does not cause a problem. Any exception is a failure. "); } }
public BdbStorageConfiguration(VoldemortConfig config) { this.voldemortConfig = config; environmentConfig = new EnvironmentConfig(); environmentConfig.setTransactional(true); environmentConfig.setCacheSize(config.getBdbCacheSize()); if (config.isBdbWriteTransactionsEnabled() && config.isBdbFlushTransactionsEnabled()) { environmentConfig.setTxnNoSync(false); environmentConfig.setTxnWriteNoSync(false); } else if (config.isBdbWriteTransactionsEnabled() && !config.isBdbFlushTransactionsEnabled()) { environmentConfig.setTxnNoSync(false); environmentConfig.setTxnWriteNoSync(true); } else { environmentConfig.setTxnNoSync(true); } environmentConfig.setAllowCreate(true); environmentConfig.setConfigParam( EnvironmentConfig.LOG_FILE_MAX, Long.toString(config.getBdbMaxLogFileSize())); environmentConfig.setConfigParam( EnvironmentConfig.CHECKPOINTER_BYTES_INTERVAL, Long.toString(config.getBdbCheckpointBytes())); environmentConfig.setConfigParam( EnvironmentConfig.CHECKPOINTER_WAKEUP_INTERVAL, Long.toString(config.getBdbCheckpointMs() * Time.US_PER_MS)); environmentConfig.setConfigParam( EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, Integer.toString(config.getBdbCleanerMinFileUtilization())); environmentConfig.setConfigParam( EnvironmentConfig.CLEANER_MIN_UTILIZATION, Integer.toString(config.getBdbCleanerMinUtilization())); environmentConfig.setConfigParam( EnvironmentConfig.CLEANER_THREADS, Integer.toString(config.getBdbCleanerThreads())); environmentConfig.setConfigParam( EnvironmentConfig.CLEANER_LOOK_AHEAD_CACHE_SIZE, Integer.toString(config.getBdbCleanerLookAheadCacheSize())); environmentConfig.setConfigParam( EnvironmentConfig.LOCK_N_LOCK_TABLES, Integer.toString(config.getBdbLockNLockTables())); environmentConfig.setConfigParam( EnvironmentConfig.ENV_FAIR_LATCHES, Boolean.toString(config.getBdbFairLatches())); environmentConfig.setConfigParam( EnvironmentConfig.CHECKPOINTER_HIGH_PRIORITY, Boolean.toString(config.getBdbCheckpointerHighPriority())); environmentConfig.setConfigParam( EnvironmentConfig.CLEANER_MAX_BATCH_FILES, Integer.toString(config.getBdbCleanerMaxBatchFiles())); environmentConfig.setLockTimeout(config.getBdbLockTimeoutMs(), TimeUnit.MILLISECONDS); databaseConfig = new DatabaseConfig(); databaseConfig.setAllowCreate(true); databaseConfig.setSortedDuplicates(config.isBdbSortedDuplicatesEnabled()); databaseConfig.setNodeMaxEntries(config.getBdbBtreeFanout()); databaseConfig.setTransactional(true); bdbMasterDir = config.getBdbDataDirectory(); useOneEnvPerStore = config.isBdbOneEnvPerStore(); if (useOneEnvPerStore) environmentConfig.setSharedCache(true); }
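/*
 * Illustrative sketch (an assumed helper, not the actual Voldemort method): with
 * useOneEnvPerStore enabled, each store could get its own Environment under
 * bdbMasterDir, built from the shared environmentConfig prepared in the constructor
 * above. The method name and directory-creation step are assumptions.
 */
private Environment openStoreEnvironment(String storeName) {
  File storeDir = new File(bdbMasterDir, storeName);
  if (!storeDir.exists()) {
    storeDir.mkdirs();
  }
  return new Environment(storeDir, environmentConfig);
}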
/** * Rebuild index(es) in the backend instance. Note that the server will not explicitly initialize * this backend before calling this method. * * @param rebuildConfig The rebuild configuration. * @throws ConfigException If an unrecoverable problem arises during initialization. * @throws InitializationException If a problem occurs during initialization that is not related * to the server configuration. * @throws DirectoryException If a Directory Server error occurs. */ public void rebuildBackend(RebuildConfig rebuildConfig) throws InitializationException, ConfigException, DirectoryException { // If the backend already has the root container open, we must use the same // underlying root container boolean openRootContainer = rootContainer == null; /* * If the rootContainer is open, the backend is initialized by something * else. We can't do any rebuild of system indexes while others are using * this backend. */ if (!openRootContainer && rebuildConfig.includesSystemIndex()) { Message message = ERR_JEB_REBUILD_BACKEND_ONLINE.get(); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { EnvironmentConfig envConfig; if (openRootContainer) { envConfig = new EnvironmentConfig(); envConfig.setAllowCreate(true); envConfig.setTransactional(false); envConfig.setDurability(Durability.COMMIT_NO_SYNC); envConfig.setLockTimeout(0, TimeUnit.SECONDS); envConfig.setTxnTimeout(0, TimeUnit.SECONDS); envConfig.setConfigParam( EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, String.valueOf(cfg.getDBCleanerMinUtilization())); envConfig.setConfigParam( EnvironmentConfig.LOG_FILE_MAX, String.valueOf(cfg.getDBLogFileMax())); Importer importer = new Importer(rebuildConfig, cfg, envConfig); rootContainer = initializeRootContainer(envConfig); importer.rebuildIndexes(rootContainer); } else { envConfig = ConfigurableEnvironment.parseConfigEntry(cfg); Importer importer = new Importer(rebuildConfig, cfg, envConfig); importer.rebuildIndexes(rootContainer); } } catch (ExecutionException execEx) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, execEx); } Message message = ERR_EXECUTION_ERROR.get(execEx.getMessage()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } catch (InterruptedException intEx) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, intEx); } Message message = ERR_INTERRUPTED_ERROR.get(intEx.getMessage()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } catch (ConfigException ce) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, ce); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), ce.getMessageObject()); } catch (JebException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), e.getMessageObject()); } catch (InitializationException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } throw new InitializationException(e.getMessageObject()); } finally { // If a root container was opened in this method as read only, close it // to leave the backend in the same state. if (openRootContainer && rootContainer != null) { try { rootContainer.close(); rootContainer = null; } catch (DatabaseException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } } } } }
/** {@inheritDoc} */ @Override() public LDIFImportResult importLDIF(LDIFImportConfig importConfig) throws DirectoryException { RuntimeInformation.logInfo(); // If the backend already has the root container open, we must use the same // underlying root container boolean openRootContainer = rootContainer == null; // If the rootContainer is open, the backend is initialized by something // else. // We can't do import while the backend is online. if (!openRootContainer) { Message message = ERR_JEB_IMPORT_BACKEND_ONLINE.get(); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { EnvironmentConfig envConfig = new EnvironmentConfig(); envConfig.setAllowCreate(true); envConfig.setTransactional(false); envConfig.setDurability(Durability.COMMIT_NO_SYNC); envConfig.setLockTimeout(0, TimeUnit.SECONDS); envConfig.setTxnTimeout(0, TimeUnit.SECONDS); envConfig.setConfigParam( EnvironmentConfig.CLEANER_MIN_FILE_UTILIZATION, String.valueOf(cfg.getDBCleanerMinUtilization())); envConfig.setConfigParam( EnvironmentConfig.LOG_FILE_MAX, String.valueOf(cfg.getDBLogFileMax())); if (!importConfig.appendToExistingData()) { if (importConfig.clearBackend() || cfg.getBaseDN().size() <= 1) { // We have the writer lock on the environment, now delete the // environment and re-open it. Only do this when we are // importing to all the base DNs in the backend or if the backend only // have one base DN. File parentDirectory = getFileForPath(cfg.getDBDirectory()); File backendDirectory = new File(parentDirectory, cfg.getBackendId()); // If the backend does not exist the import will create it. if (backendDirectory.exists()) { EnvManager.removeFiles(backendDirectory.getPath()); } } } Importer importer = new Importer(importConfig, cfg, envConfig); rootContainer = initializeRootContainer(envConfig); return importer.processImport(rootContainer); } catch (ExecutionException execEx) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, execEx); } if (execEx.getCause() instanceof DirectoryException) { throw ((DirectoryException) execEx.getCause()); } else { Message message = ERR_EXECUTION_ERROR.get(execEx.getMessage()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } catch (InterruptedException intEx) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, intEx); } Message message = ERR_INTERRUPTED_ERROR.get(intEx.getMessage()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } catch (JebException je) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, je); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), je.getMessageObject()); } catch (InitializationException ie) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, ie); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), ie.getMessageObject()); } catch (ConfigException ce) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, ce); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), ce.getMessageObject()); } finally { // leave the backend in the same state. try { if (rootContainer != null) { long startTime = System.currentTimeMillis(); rootContainer.close(); long finishTime = System.currentTimeMillis(); long closeTime = (finishTime - startTime) / 1000; Message msg = NOTE_JEB_IMPORT_LDIF_ROOTCONTAINER_CLOSE.get(closeTime); logError(msg); rootContainer = null; } // Sync the environment to disk. 
if (debugEnabled()) { Message message = NOTE_JEB_IMPORT_CLOSING_DATABASE.get(); TRACER.debugInfo(message.toString()); } } catch (DatabaseException de) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, de); } } } }
/** {@inheritDoc} */ @Override() public void exportLDIF(LDIFExportConfig exportConfig) throws DirectoryException { // If the backend already has the root container open, we must use the same // underlying root container boolean openRootContainer = rootContainer == null; try { if (openRootContainer) { EnvironmentConfig envConfig = ConfigurableEnvironment.parseConfigEntry(cfg); envConfig.setReadOnly(true); envConfig.setAllowCreate(false); envConfig.setTransactional(false); envConfig.setConfigParam("je.env.isLocking", "true"); envConfig.setConfigParam("je.env.runCheckpointer", "true"); rootContainer = initializeRootContainer(envConfig); } ExportJob exportJob = new ExportJob(exportConfig); exportJob.exportLDIF(rootContainer); } catch (IOException ioe) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, ioe); } Message message = ERR_JEB_EXPORT_IO_ERROR.get(ioe.getMessage()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } catch (JebException je) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, je); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), je.getMessageObject()); } catch (DatabaseException de) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, de); } throw createDirectoryException(de); } catch (LDIFException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), e.getMessageObject()); } catch (InitializationException ie) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, ie); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), ie.getMessageObject()); } catch (ConfigException ce) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, ce); } throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), ce.getMessageObject()); } finally { // If a root container was opened in this method as read only, close it // to leave the backend in the same state. if (openRootContainer && rootContainer != null) { try { rootContainer.close(); rootContainer = null; } catch (DatabaseException e) { if (debugEnabled()) { TRACER.debugCaught(DebugLogLevel.ERROR, e); } } } } }