private static void closeEnv() {
    System.out.println("Closing env and databases");
    if (myDb != null) {
        try {
            myDb.close();
        } catch (DatabaseException e) {
            System.err.println("closeEnv: myDb: " + e.toString());
            e.printStackTrace();
        }
    }
    if (myClassDb != null) {
        try {
            myClassDb.close();
        } catch (DatabaseException e) {
            System.err.println("closeEnv: myClassDb: " + e.toString());
            e.printStackTrace();
        }
    }
    if (myEnv != null) {
        try {
            myEnv.close();
        } catch (DatabaseException e) {
            System.err.println("closeEnv: " + e.toString());
            e.printStackTrace();
        }
    }
}
public void testTruncate() throws DatabaseException {
    SecondaryDatabase secDb = initDb();
    Database priDb = secDb.getPrimaryDatabase();

    Transaction txn = txnBegin();
    for (int i = 0; i < NUM_RECS; i += 1) {
        priDb.put(txn, entry(i), entry(i));
    }
    verifyRecords(txn, priDb, NUM_RECS, false);
    verifyRecords(txn, secDb, NUM_RECS, true);
    txnCommit(txn);
    secDb.close();
    priDb.close();

    txn = txnBegin();
    assertEquals(NUM_RECS, env.truncateDatabase(txn, "testDB", true));
    assertEquals(NUM_RECS, env.truncateDatabase(txn, "testSecDB", true));
    txnCommit(txn);

    secDb = initDb();
    priDb = secDb.getPrimaryDatabase();
    txn = txnBegin();
    verifyRecords(txn, priDb, 0, false);
    verifyRecords(txn, secDb, 0, true);
    txnCommit(txn);
    secDb.close();
    priDb.close();
}
/*
 * Check that the notReplicate attribute is properly immutable and
 * persistent.
 */
private void validate(DatabaseConfig config, boolean replicated)
    throws DatabaseException {

    /* Create the database -- is its config what we expect? */
    db = env.openDatabase(null, TEST_DB, config);
    DatabaseConfig inUseConfig = db.getConfig();
    assertEquals(replicated, DbInternal.getReplicated(inUseConfig));

    /* Close, re-open. */
    db.close();
    db = null;
    db = env.openDatabase(null, TEST_DB, inUseConfig);
    assertEquals(replicated, DbInternal.getReplicated(db.getConfig()));

    /*
     * Close, re-open w/inappropriate value for the replicated bit. This is
     * only checked for replicated environments.
     */
    db.close();
    db = null;
    if (DbInternal.getEnvironmentImpl(env).isReplicated()) {
        DbInternal.setReplicated(inUseConfig, !replicated);
        try {
            db = env.openDatabase(null, TEST_DB, inUseConfig);
            fail("Should have caught config mismatch");
        } catch (IllegalArgumentException expected) {
        }
    }
}
public void testReadOnly() throws DatabaseException {
    SecondaryDatabase secDb = initDb();
    Database priDb = secDb.getPrimaryDatabase();
    OperationStatus status;
    Transaction txn = txnBegin();
    for (int i = 0; i < NUM_RECS; i += 1) {
        status = priDb.put(txn, entry(i), entry(i));
        assertSame(OperationStatus.SUCCESS, status);
    }

    /*
     * Secondaries can be opened without a key creator if the primary is
     * read only. openSecondary will specify a null key creator if the
     * readOnly param is true.
     */
    Database readOnlyPriDb = openDatabase(false, "testDB", true);
    SecondaryDatabase readOnlySecDb =
        openSecondary(readOnlyPriDb, true, "testSecDB", false, true);
    assertNull(readOnlySecDb.getSecondaryConfig().getKeyCreator());
    verifyRecords(txn, readOnlySecDb, NUM_RECS, true);

    txnCommit(txn);
    readOnlySecDb.close();
    readOnlyPriDb.close();
    secDb.close();
    priDb.close();
}
public void merge() throws Exception {
    LOG.info("start merge");
    Database crawldbDatabase = env.openDatabase(null, "crawldb", BerkeleyDBUtils.defaultDBConfig);

    /* Merge the fetch database into the crawldb. */
    LOG.info("merge fetch database");
    Database fetchDatabase = env.openDatabase(null, "fetch", BerkeleyDBUtils.defaultDBConfig);
    Cursor fetchCursor = fetchDatabase.openCursor(null, null);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    while (fetchCursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        crawldbDatabase.put(null, key, value);
    }
    fetchCursor.close();
    fetchDatabase.close();

    /* Merge the link database into the crawldb. */
    LOG.info("merge link database");
    Database linkDatabase = env.openDatabase(null, "link", BerkeleyDBUtils.defaultDBConfig);
    Cursor linkCursor = linkDatabase.openCursor(null, null);
    while (linkCursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        /* Only add links that are not already present in the crawldb. */
        if (!(crawldbDatabase.get(null, key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS)) {
            crawldbDatabase.put(null, key, value);
        }
    }
    linkCursor.close();
    linkDatabase.close();

    LOG.info("end merge");
    crawldbDatabase.sync();
    crawldbDatabase.close();

    env.removeDatabase(null, "fetch");
    LOG.debug("remove fetch database");
    env.removeDatabase(null, "link");
    LOG.debug("remove link database");
}
public void testOpenAndClose() throws DatabaseException {
    Database priDb = openDatabase(false, "testDB", false);

    /* Open two secondaries as regular databases and as secondaries. */
    Database secDbDetached = openDatabase(true, "testSecDB", false);
    SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB", false, false);
    Database secDb2Detached = openDatabase(true, "testSecDB2", false);
    SecondaryDatabase secDb2 = openSecondary(priDb, true, "testSecDB2", false, false);
    assertEquals(
        priDb.getSecondaryDatabases(),
        Arrays.asList(new SecondaryDatabase[] {secDb, secDb2}));

    Transaction txn = txnBegin();

    /* Check that primary writes to both secondaries. */
    checkSecondaryUpdate(txn, priDb, 1, secDbDetached, true, secDb2Detached, true);

    /* New txn before closing database. */
    txnCommit(txn);
    txn = txnBegin();

    /* Close 2nd secondary. */
    secDb2.close();
    assertEquals(priDb.getSecondaryDatabases(),
                 Arrays.asList(new SecondaryDatabase[] {secDb}));

    /* Check that primary writes to 1st secondary only. */
    checkSecondaryUpdate(txn, priDb, 2, secDbDetached, true, secDb2Detached, false);

    /* New txn before closing database. */
    txnCommit(txn);
    txn = txnBegin();

    /* Close 1st secondary. */
    secDb.close();
    assertEquals(0, priDb.getSecondaryDatabases().size());

    /* Check that primary writes to no secondaries. */
    checkSecondaryUpdate(txn, priDb, 3, secDbDetached, false, secDb2Detached, false);

    /* Open the two secondaries again. */
    secDb = openSecondary(priDb, true, "testSecDB", false, false);
    secDb2 = openSecondary(priDb, true, "testSecDB2", false, false);
    assertEquals(
        priDb.getSecondaryDatabases(),
        Arrays.asList(new SecondaryDatabase[] {secDb, secDb2}));

    /* Check that primary writes to both secondaries. */
    checkSecondaryUpdate(txn, priDb, 4, secDbDetached, true, secDb2Detached, true);

    /* Close the primary first to disassociate secondaries. */
    txnCommit(txn);
    priDb.close();
    assertNull(secDb.getPrimaryDatabase());
    assertNull(secDb2.getPrimaryDatabase());

    secDb2.close();
    secDb.close();
    secDb2Detached.close();
    secDbDetached.close();
}
@Override
public synchronized void close() {
    try {
        db.close();
        classCatalogDb.close();
        env.close();
    } catch (DatabaseException dbe) {
        logger.error("Error closing db environment!", dbe);
    }
}
@Override
public void shutdown() {
    // ------ closing down everything
    RelationBDB.close();
    NodeBDB.close();
    // per the documentation, an explicit sync is needed before close when no transactions are used
    environment.sync();
    environment.close();
}
private void doReplicaHasGapNetworkRestore(boolean multiGaps) throws Throwable {

    Durability noAck =
        new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC, ReplicaAckPolicy.NONE);
    db = null;
    try {
        Environment master = setupLogWithCleanedGaps(multiGaps);
        int masterIdx = findMasterIndex(master);

        /*
         * Write a record, so that we are sure that there will be a
         * network restore, because we have to cross a checkpoint.
         */
        generateData(master, 1, noAck, false);
        CheckpointConfig cc = new CheckpointConfig();
        master.checkpoint(cc);
        EnvironmentStats stats = master.getStats(clearConfig);
        assertEquals(0, stats.getCleanerBacklog());
        if (multiGaps) {
            logger.info("Multigap: deletion backlog = " + stats.getFileDeletionBacklog());
        } else {
            assertEquals(0, stats.getFileDeletionBacklog());
        }

        db.close();
        db = null;
        repEnvInfo[masterIdx].closeEnv();

        /* Start up the two replicas */
        openReplicas(masterIdx);

        /* Start the node that had been the master */
        try {
            repEnvInfo[masterIdx].openEnv();
            fail("Should be a network restore");
        } catch (InsufficientLogException ile) {
            repEnvInfo[masterIdx].closeEnv();
            NetworkRestore restore = new NetworkRestore();
            NetworkRestoreConfig config = new NetworkRestoreConfig();
            config.setRetainLogFiles(true);
            restore.execute(ile, config);
            repEnvInfo[masterIdx].openEnv();
        }

        /* Check its last VLSN and size. */

    } catch (Throwable t) {
        t.printStackTrace();
        throw t;
    } finally {
        if (db != null) {
            db.close();
        }
        RepTestUtils.shutdownRepEnvs(repEnvInfo);
    }
}
public void close() {
    try {
        urlsDB.close();
    } catch (DatabaseException e) {
        e.printStackTrace();
    }
}
private void verifyDb(Hashtable dataMap, int dumpIndex) throws DatabaseException {
    DatabaseConfig config = new DatabaseConfig();
    config.setReadOnly(true);
    DbInternal.setUseExistingConfig(config, true);
    Database myDb = env.openDatabase(null, dbName + dumpIndex, config);
    Cursor cursor = myDb.openCursor(null, null);
    StringDbt foundKey = new StringDbt();
    StringDbt foundData = new StringDbt();
    OperationStatus status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
    while (status == OperationStatus.SUCCESS) {
        String foundKeyString = foundKey.getString();
        String foundDataString = foundData.getString();
        if (dataMap.get(foundKeyString) != null) {
            assertTrue(((String) dataMap.get(foundKeyString)).equals(foundDataString));
            dataMap.remove(foundKeyString);
        } else {
            fail("didn't find key in either map (" + foundKeyString + ")");
        }
        status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
    }
    assertTrue(dataMap.size() == 0);
    cursor.close();
    myDb.close();
}
/* Insert 100 records beginning with beginKey. */
private void doWork(Environment master, String dbName, int beginKey) throws Exception {
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setTransactional(true);

    /* Insert/update the records of the database. */
    Database db = master.openDatabase(null, dbName, dbConfig);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    for (int i = 0; i < 100; i++) {
        IntegerBinding.intToEntry(beginKey + i, key);
        StringBinding.stringToEntry("herococo", data);
        db.put(null, key, data);
    }
    db.close();

    /*
     * Do a sync at the end of the stage to make sure master and
     * replica have the same data set.
     */
    VLSN commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
    RepTestUtils.checkNodeEquality(commitVLSN, false, repEnvInfo);
}
/**
 * Close the database.
 *
 * @param cleanLog if true, run the BerkeleyDB log cleaner until no more log files can be
 *     cleaned, then force a checkpoint so the cleaned files can be deleted.
 */
@Override
public void close(boolean cleanLog) {
    log.info("Closing db & env for: " + dir.getAbsolutePath());
    if (openIterators.size() > 0) {
        log.warn("closing " + openIterators.size() + " iterators on close");
        for (Object e : openIterators.toArray(new Object[openIterators.size()])) {
            ((ClosableIterator<Map.Entry<byte[], byte[]>>) e).close();
        }
    }
    log.info("pages:gets=" + gets + " puts=" + puts + " in=" + bytesIn + " out=" + bytesOut);
    bdb.close();
    if (cleanLog) {
        bdb_env.getConfig().setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
        int totalLogFiles = 0;
        int logFiles;
        do {
            logFiles = bdb_env.cleanLog();
            totalLogFiles += logFiles;
        } while (logFiles > 0);
        log.info("Total of " + totalLogFiles + " log files cleaned.");
        if (totalLogFiles > 0) {
            CheckpointConfig force = new CheckpointConfig();
            force.setForce(true);
            bdb_env.checkpoint(force);
        }
    }
    bdb_env.close();
}
/**
 * Creates and loads a BDB table with a secondary index on the column defined by
 * secondaryIndex.
 */
public BDBCreator(String tableName, ArrayList<Integer> keyPositions, int secondaryIndex) {
    this.tableName = tableName.toLowerCase();
    this.keyPositions = keyPositions;
    this.secIndexPos = secondaryIndex;
    this.tMap = Main.indexTypeMaps.get(this.tableName);

    setEnvironment();

    dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    myDB = myDbEnvironment.openDatabase(null, this.tableName, dbConfig);

    SecondaryConfig secConfig = new SecondaryConfig();
    secConfig.setAllowCreate(true);
    secConfig.setSortedDuplicates(true);
    createSecDB(secConfig);

    setConfig(dbConfig, secConfig);
    loadData();

    if (secDB != null) {
        secDB.close();
    }
    if (myDB != null) {
        myDB.close();
    }
    closeEnvironment();
}
public void run(File envHomeDirectory) throws DatabaseException, IOException {

    /* Create the environment object. */
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    Environment env = new Environment(envHomeDirectory, envConfig);

    /* Create the database object. */
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    Database db = env.openDatabase(null, DB_NAME, dbConfig);

    /* Create the sequence object. */
    SequenceConfig config = new SequenceConfig();
    config.setAllowCreate(true);
    DatabaseEntry key = new DatabaseEntry(KEY_NAME.getBytes("UTF-8"));
    Sequence seq = db.openSequence(null, key, config);

    /* Allocate a few sequence numbers. */
    for (int i = 0; i < 10; i++) {
        long seqnum = seq.get(null, 1);
        System.out.println("Got sequence number: " + seqnum);
    }

    /* Close all. */
    seq.close();
    db.close();
    env.close();
}
/**
 * Copies entries from an existing environment db to a new one. If historyMap is not provided,
 * only logs the entries that would have been copied.
 *
 * @param sourceDir existing environment database directory
 * @param historyMap new environment db (or null for a dry run)
 * @return number of records
 * @throws DatabaseException
 */
private static int copyPersistEnv(File sourceDir, StoredSortedMap<String, Map> historyMap)
        throws DatabaseException {
    int count = 0;

    // open the source env history DB, copying entries to target env
    EnhancedEnvironment sourceEnv = setupCopyEnvironment(sourceDir, true);
    StoredClassCatalog sourceClassCatalog = sourceEnv.getClassCatalog();
    DatabaseConfig historyDbConfig = HISTORY_DB_CONFIG.toDatabaseConfig();
    historyDbConfig.setReadOnly(true);
    Database sourceHistoryDB = sourceEnv.openDatabase(null, URI_HISTORY_DBNAME, historyDbConfig);
    StoredSortedMap<String, Map> sourceHistoryMap =
        new StoredSortedMap<String, Map>(
            sourceHistoryDB,
            new StringBinding(),
            new SerialBinding<Map>(sourceClassCatalog, Map.class),
            true);

    Iterator<Entry<String, Map>> iter = sourceHistoryMap.entrySet().iterator();
    while (iter.hasNext()) {
        Entry<String, Map> item = iter.next();
        if (logger.isLoggable(Level.FINE)) {
            logger.fine(item.getKey() + " " + new JSONObject(item.getValue()));
        }
        if (historyMap != null) {
            historyMap.put(item.getKey(), item.getValue());
        }
        count++;
    }
    StoredIterator.close(iter);
    sourceHistoryDB.close();
    sourceEnv.close();
    return count;
}
/** Test exclusive creation. */
@Test
public void testExclusive() throws Throwable {
    try {
        EnvironmentConfig envConfig = TestUtils.initEnvConfig();

        /*
         * Make sure that the database keeps its own copy of the
         * configuration object.
         */
        envConfig.setAllowCreate(true);
        env = create(envHome, envConfig);
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setExclusiveCreate(true);

        /* Should succeed and create the database. */
        Database dbA = env.openDatabase(null, "foo", dbConfig);
        dbA.close();

        /* Should not succeed, because the database exists. */
        try {
            env.openDatabase(null, "foo", dbConfig);
            fail("Database already exists");
        } catch (DatabaseException e) {
        }
        close(env);
    } catch (Throwable t) {
        t.printStackTrace();
        throw t;
    }
}
/**
 * Close any databases opened during this operation when it fails. This method should be called
 * if a non-transactional operation fails, since we cannot rely on the transaction abort to
 * cleanup any databases that were opened.
 */
void closeDatabases() {
    for (Database db : databases.keySet()) {
        try {
            db.close();
        } catch (Exception ignored) {
        }
    }
}
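/*
 * A hedged usage sketch, not part of the original source: how a helper like
 * closeDatabases() above is typically invoked. The runNonTransactionalImport()
 * call and the wrapper method name are hypothetical placeholders for whatever
 * non-transactional operation fills the "databases" map.
 */
void runNonTransactionalImportSafely() {
    try {
        runNonTransactionalImport(); // hypothetical operation that opens databases
    } catch (RuntimeException e) {
        /* No transaction abort will run, so release the open handles explicitly. */
        closeDatabases();
        throw e;
    }
}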
public void lock() throws Exception {
    lockDatabase = env.openDatabase(null, "lock", BerkeleyDBUtils.defaultDBConfig);
    DatabaseEntry key = new DatabaseEntry("lock".getBytes("utf-8"));
    DatabaseEntry value = new DatabaseEntry("locked".getBytes("utf-8"));
    lockDatabase.put(null, key, value);
    lockDatabase.sync();
    lockDatabase.close();
}
@Override
protected void finalTasks() {
    try {
        historyDb.sync();
        historyDb.close();
    } catch (DatabaseException e) {
        throw new RuntimeException(e);
    }
}
private void close() {
    if (db != null) {
        db.close();
        db = null;
    }
    if (env != null) {
        env.close();
        env = null;
    }
}
@Test
public void testCleanAfterDbTruncate() {
    open(true /*runCleaner*/);
    writeFiles(5 /*nActive*/, 0 /*nObsolete*/);
    expectNothingToClean();
    db.close();
    db = null;
    env.truncateDatabase(null, DB_NAME, false);
    expectBackgroundCleaning();
    close();
}
private DatabaseException closeDb(Database db, DatabaseException firstException) {
    if (db != null) {
        try {
            db.close();
        } catch (DatabaseException e) {
            if (firstException == null) {
                firstException = e;
            }
        }
    }
    return firstException;
}
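/*
 * A minimal sketch, assumed context rather than original source, of how the
 * closeDb() helper above is meant to be chained: every handle gets a close
 * attempt, and only the first DatabaseException is rethrown afterwards. The
 * priDb/secDb parameters are placeholders for whatever handles the caller
 * opened.
 */
void closeAll(Database priDb, Database secDb) throws DatabaseException {
    DatabaseException firstException = null;
    firstException = closeDb(secDb, firstException);
    firstException = closeDb(priDb, firstException);
    if (firstException != null) {
        throw firstException;
    }
}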
public void testPopulate() throws DatabaseException {
    Database priDb = openDatabase(false, "testDB", false);
    Transaction txn = txnBegin();

    /* Test population of newly created secondary database. */
    for (int i = 0; i < NUM_RECS; i += 1) {
        assertSame(OperationStatus.SUCCESS, priDb.put(txn, entry(i), entry(i)));
    }
    txnCommit(txn);

    SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB", true, false);
    txn = txnBegin();
    verifyRecords(txn, secDb, NUM_RECS, true);
    txnCommit(txn);

    /*
     * Clear secondary and perform populate again, to test the case where
     * an existing database is opened, and therefore a write txn will only
     * be created in order to populate it.
     */
    Database secDbDetached = openDatabase(true, "testSecDB", false);
    secDb.close();
    txn = txnBegin();
    for (int i = 0; i < NUM_RECS; i += 1) {
        assertSame(OperationStatus.SUCCESS, secDbDetached.delete(txn, entry(i + KEY_OFFSET)));
    }
    verifyRecords(txn, secDbDetached, 0, true);
    txnCommit(txn);

    secDb = openSecondary(priDb, true, "testSecDB", true, false);
    txn = txnBegin();
    verifyRecords(txn, secDb, NUM_RECS, true);
    verifyRecords(txn, secDbDetached, NUM_RECS, true);
    txnCommit(txn);

    secDbDetached.close();
    secDb.close();
    priDb.close();
}
/* Do the real reading work. */
private void doRead(Environment env, DatabaseConfig dbConfig, ArrayList<TestObject> list)
        throws Exception {
    Database db = env.openDatabase(null, dbName, dbConfig);
    Cursor cursor = db.openCursor(null, null);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    while (OperationStatus.SUCCESS == cursor.getNext(key, data, null)) {
        list.add(new TestObject(IntegerBinding.entryToInt(key), StringBinding.entryToString(data)));
    }
    cursor.close();
    db.close();
}
public static void main(String[] args) {
    Environment env = null;
    Database db = null;
    EnvironmentConfig envconfig = new EnvironmentConfig();
    envconfig.setAllowCreate(true);
    try {
        env = new Environment(new File("D://bdb"), envconfig);
        DatabaseConfig dbconfig = new DatabaseConfig();
        dbconfig.setAllowCreate(true);
        db = env.openDatabase(null, "dbac.db", dbconfig);

        String key = "mykey";
        DatabaseEntry thekey = new DatabaseEntry();
        thekey.setData(key.getBytes("utf-8"));
        Long value = new Long(123456);
        DatabaseEntry thevalue = new DatabaseEntry();
        EntryBinding myBinging = TupleBinding.getPrimitiveBinding(Long.class);
        myBinging.objectToEntry(value, thevalue);
        // LongBinding myLongBinging = (LongBinding) TupleBinding.getPrimitiveBinding(Long.class);
        // myLongBinging.objectToEntry(value, thevalue);
        db.put(null, thekey, thevalue);

        DatabaseEntry valueEntry = new DatabaseEntry();
        OperationStatus status = db.get(null, thekey, valueEntry, LockMode.DEFAULT);
        if (status == OperationStatus.SUCCESS) {
            // Long number = myLongBinging.entryToObject(valueEntry);
            Long number = (Long) myBinging.entryToObject(valueEntry);
            System.out.println(env.getDatabaseNames());
            System.out.println(number);
        }
    } catch (EnvironmentLockedException e) {
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (db != null) {
            try {
                db.close();
            } catch (DatabaseException e) {
                e.printStackTrace();
            }
        }
        if (env != null) {
            try {
                env.cleanLog();
                env.close();
            } catch (DatabaseException e) {
                e.printStackTrace();
            }
        }
    }
}
public void testDupInitialized() throws DatabaseException {

    /* Open db. */
    initEnv(false);
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    Database myDb = exampleEnv.openDatabase(null, "fooDb", dbConfig);

    /* Open uninitialized cursor. */
    Cursor c1 = myDb.openCursor(null, null);
    try {
        c1.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null);
        fail();
    } catch (DatabaseException expected) {
    }

    /* Dup uninitialized cursor with samePosition=false. */
    Cursor c2 = c1.dup(false);
    try {
        c2.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null);
        fail();
    } catch (DatabaseException expected) {
    }

    /* Dup uninitialized cursor with samePosition=true. */
    Cursor c3 = c1.dup(true);
    try {
        c3.getCurrent(new DatabaseEntry(), new DatabaseEntry(), null);
        fail();
    } catch (DatabaseException expected) {
    }

    /* Ensure dup'ed cursors are usable. */
    assertEquals(
        OperationStatus.SUCCESS,
        c1.put(new DatabaseEntry(new byte[0]), new DatabaseEntry(new byte[0])));
    assertEquals(
        OperationStatus.SUCCESS, c2.getFirst(new DatabaseEntry(), new DatabaseEntry(), null));
    assertEquals(
        OperationStatus.NOTFOUND, c2.getNext(new DatabaseEntry(), new DatabaseEntry(), null));
    assertEquals(
        OperationStatus.SUCCESS, c3.getFirst(new DatabaseEntry(), new DatabaseEntry(), null));
    assertEquals(
        OperationStatus.NOTFOUND, c3.getNext(new DatabaseEntry(), new DatabaseEntry(), null));

    /* Close db. */
    c3.close();
    c2.close();
    c1.close();
    myDb.close();
}
public static void envconfig() {
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(false);
    envConfig.setAllowCreate(true);
    File envDir = new File("d://");
    try {
        // Create the environment.
        Environment exampleEnv = new Environment(envDir, envConfig);
        String databaseName = "ToDoTaskList.db";
        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setTransactional(false);
        // Open the database used to store class information.
        // The class-catalog database must not allow duplicate keys.
        dbConfig.setSortedDuplicates(false);
        Database myClassDb = exampleEnv.openDatabase(null, databaseName, dbConfig);
        // Initialize the class catalog.
        StoredClassCatalog catalog = new StoredClassCatalog(myClassDb);
        TupleBinding<String> keyBinding = TupleBinding.getPrimitiveBinding(String.class);
        // Store the value using object serialization.
        SerialBinding<String> valueBinding = new SerialBinding<String>(catalog, String.class);
        Database store = exampleEnv.openDatabase(null, databaseName, dbConfig);
        // Create the stored data map.
        StoredSortedMap<String, String> map =
            new StoredSortedMap<String, String>(store, keyBinding, valueBinding, true);
        // Release the environment.
        // exampleEnv.syncReplication();
        store.close();
        myClassDb.close();
        exampleEnv.close();
        exampleEnv = null;
    } catch (Exception e) {
        e.printStackTrace();
    }
}
public boolean isLocked() throws Exception {
    boolean isLocked = false;
    lockDatabase = env.openDatabase(null, "lock", BerkeleyDBUtils.defaultDBConfig);
    DatabaseEntry key = new DatabaseEntry("lock".getBytes("utf-8"));
    DatabaseEntry value = new DatabaseEntry();
    if (lockDatabase.get(null, key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        String lockInfo = new String(value.getData(), "utf-8");
        if (lockInfo.equals("locked")) {
            isLocked = true;
        }
    }
    lockDatabase.close();
    return isLocked;
}
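/*
 * A hedged usage sketch combining the lock()/isLocked() methods above. It
 * assumes both methods live on the same object; the wrapper method name and
 * the log message are illustrative only, not part of the original source.
 */
void runIfUnlocked() throws Exception {
    if (isLocked()) {
        LOG.info("environment is already locked by another process, skipping");
        return;
    }
    lock();
    // ... work that requires exclusive access to the environment goes here ...
}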
/*
 * This method will:
 * 1. apply the modified DatabaseConfig to the database.
 * 2. close the database and do a sync to make sure the new configuration
 *    is written to the log.
 * 3. open the database with a useExisting config and return the current
 *    DatabaseConfig.
 */
private DatabaseConfig setAndGetDbConfig(Environment env, DatabaseConfig dbConfig, String dbName)
        throws Exception {
    Database db = env.openDatabase(null, dbName, dbConfig);
    db.close();
    env.sync();

    /*
     * Open with the useExisting config to see what attributes have been
     * persisted.
     */
    DatabaseConfig newConfig = new DatabaseConfig();
    newConfig.setReadOnly(true);
    newConfig.setTransactional(true);
    newConfig.setUseExistingConfig(true);

    db = env.openDatabase(null, dbName, newConfig);
    newConfig = db.getConfig();
    db.close();

    return newConfig;
}
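/*
 * A minimal sketch of how the helper above might be exercised, assuming a
 * test context with an open transactional Environment "env". Key prefixing
 * and the database name "fooPrefix" are chosen here only as examples of a
 * mutable attribute and a scratch database; they are not from the original
 * source.
 */
void checkKeyPrefixingPersisted(Environment env) throws Exception {
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setTransactional(true);
    dbConfig.setKeyPrefixing(true);
    DatabaseConfig persisted = setAndGetDbConfig(env, dbConfig, "fooPrefix");
    /* The attribute should survive the close/sync/re-open cycle. */
    assertTrue(persisted.getKeyPrefixing());
}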