public AbstractFrontier(String homeDirectory) throws DatabaseException, FileNotFoundException {
    // Open the environment, creating the home directory if it does not exist
    File file = new File(homeDirectory);
    if (!file.exists()) {
        file.mkdirs();
    }
    System.out.println("Opening environment in: " + homeDirectory);
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setTransactional(true);
    envConfig.setAllowCreate(true);
    env = new Environment(new File(homeDirectory), envConfig);

    // Configure and open the class catalog database
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setTransactional(true);
    dbConfig.setAllowCreate(true);
    dbConfig.setSortedDuplicates(false);
    catalogdatabase = env.openDatabase(null, CLASS_CATALOG, dbConfig);
    javaCatalog = new StoredClassCatalog(catalogdatabase);

    // Configure and open the URL database
    DatabaseConfig dbConfig0 = new DatabaseConfig();
    dbConfig0.setTransactional(true);
    dbConfig0.setAllowCreate(true);
    database = env.openDatabase(null, "URL", dbConfig0);
}
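/*
 * A minimal companion sketch (not from the original source): releasing the
 * resources opened by the constructor above, in reverse order. It assumes the
 * database, javaCatalog, and env fields shown above; note that
 * StoredClassCatalog.close() also closes the underlying catalog database.
 */
public void close() throws DatabaseException {
    database.close();    // close the URL database first
    javaCatalog.close(); // closes catalogdatabase as well
    env.close();         // finally close the environment itself
}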
public void merge() throws Exception {
    LOG.info("start merge");
    Database crawldbDatabase = env.openDatabase(null, "crawldb", BerkeleyDBUtils.defaultDBConfig);

    /* Merge the fetch database into crawldb. */
    LOG.info("merge fetch database");
    Database fetchDatabase = env.openDatabase(null, "fetch", BerkeleyDBUtils.defaultDBConfig);
    Cursor fetchCursor = fetchDatabase.openCursor(null, null);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    while (fetchCursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        crawldbDatabase.put(null, key, value);
    }
    fetchCursor.close();
    fetchDatabase.close();

    /* Merge the link database into crawldb; entries already fetched take precedence. */
    LOG.info("merge link database");
    Database linkDatabase = env.openDatabase(null, "link", BerkeleyDBUtils.defaultDBConfig);
    Cursor linkCursor = linkDatabase.openCursor(null, null);
    while (linkCursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        if (crawldbDatabase.get(null, key, value, LockMode.DEFAULT) != OperationStatus.SUCCESS) {
            crawldbDatabase.put(null, key, value);
        }
    }
    linkCursor.close();
    linkDatabase.close();

    LOG.info("end merge");
    crawldbDatabase.sync();
    crawldbDatabase.close();

    env.removeDatabase(null, "fetch");
    LOG.debug("remove fetch database");
    env.removeDatabase(null, "link");
    LOG.debug("remove link database");
}
private Environment getEnvironment(String storeName) throws DatabaseException {
    synchronized (lock) {
        if (useOneEnvPerStore) {
            // If we have already created this environment, return a reference
            if (environments.containsKey(storeName))
                return environments.get(storeName);

            // Otherwise create a new environment
            File bdbDir = new File(bdbMasterDir, storeName);
            createBdbDirIfNecessary(bdbDir);

            Environment environment = new Environment(bdbDir, environmentConfig);
            logger.info("Creating environment for " + storeName + ": ");
            logEnvironmentConfig(environment.getConfig());
            environments.put(storeName, environment);
            return environment;
        } else {
            if (!environments.isEmpty())
                return environments.get(SHARED_ENV_KEY);

            File bdbDir = new File(bdbMasterDir);
            createBdbDirIfNecessary(bdbDir);

            Environment environment = new Environment(bdbDir, environmentConfig);
            logger.info("Creating shared BDB environment: ");
            logEnvironmentConfig(environment.getConfig());
            environments.put(SHARED_ENV_KEY, environment);
            return environment;
        }
    }
}
public BDB_GIDLookupImpl(String aim) {
    EnvironmentConfig environmentConfig = new EnvironmentConfig();
    // Create the environment if it does not exist; this still fails if the folder itself is missing
    environmentConfig.setAllowCreate(true);
    // No transactions yet -> might be needed later.
    // Not sure whether this must be set on both the environment and the database.
    environmentConfig.setTransactional(false);
    File file = new File(aim);
    if (!file.exists()) {
        file.mkdirs();
    }
    environment = new Environment(file, environmentConfig);

    DatabaseConfig databaseConfig = new DatabaseConfig();
    // Same settings as the environment: allow creation, no transactions yet
    databaseConfig.setAllowCreate(true);
    databaseConfig.setTransactional(false);

    // Create two "tables": one for relation-GID mappings, one for node-GID mappings
    RelationBDB = environment.openDatabase(null, "Relation", databaseConfig);
    NodeBDB = environment.openDatabase(null, "Node", databaseConfig);
}
/**
 * Close the database.
 *
 * @param cleanLog if true then wait for the BerkeleyDB clean thread to finish.
 */
@Override
public void close(boolean cleanLog) {
    log.info("Closing db & env for: " + dir.getAbsolutePath());
    if (openIterators.size() > 0) {
        log.warn("closing " + openIterators.size() + " iterators on close");
        for (Object e : openIterators.toArray(new Object[openIterators.size()])) {
            ((ClosableIterator<Map.Entry<byte[], byte[]>>) e).close();
        }
    }
    log.info("pages:gets=" + gets + " puts=" + puts + " in=" + bytesIn + " out=" + bytesOut);
    bdb.close();
    if (cleanLog) {
        // Stop the background cleaner, then clean log files explicitly until none remain
        bdb_env.getConfig().setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
        int totalLogFiles = 0;
        int logFiles;
        do {
            logFiles = bdb_env.cleanLog();
            totalLogFiles += logFiles;
        } while (logFiles > 0);
        log.info("Total of " + totalLogFiles + " log files cleaned.");
        if (totalLogFiles > 0) {
            // A forced checkpoint is needed before cleaned files can be deleted
            CheckpointConfig force = new CheckpointConfig();
            force.setForce(true);
            bdb_env.checkpoint(force);
        }
    }
    bdb_env.close();
}
/*
 * Check that the notReplicate attribute is properly immutable and
 * persistent.
 */
private void validate(DatabaseConfig config, boolean replicated) throws DatabaseException {

    /* Create the database -- is its config what we expect? */
    db = env.openDatabase(null, TEST_DB, config);
    DatabaseConfig inUseConfig = db.getConfig();
    assertEquals(replicated, DbInternal.getReplicated(inUseConfig));

    /* Close, re-open. */
    db.close();
    db = null;
    db = env.openDatabase(null, TEST_DB, inUseConfig);
    assertEquals(replicated, DbInternal.getReplicated(db.getConfig()));

    /*
     * Close, re-open w/inappropriate value for the replicated bit. This is
     * only checked for replicated environments.
     */
    db.close();
    db = null;
    if (DbInternal.getEnvironmentImpl(env).isReplicated()) {
        DbInternal.setReplicated(inUseConfig, !replicated);
        try {
            db = env.openDatabase(null, TEST_DB, inUseConfig);
            fail("Should have caught config mismatch");
        } catch (IllegalArgumentException expected) {
        }
    }
}
public void run(File envHomeDirectory) throws DatabaseException, IOException {

    /* Create the environment object. */
    EnvironmentConfig envConfig = new EnvironmentConfig();
    envConfig.setAllowCreate(true);
    Environment env = new Environment(envHomeDirectory, envConfig);

    /* Create the database object. */
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    Database db = env.openDatabase(null, DB_NAME, dbConfig);

    /* Create the sequence object. */
    SequenceConfig config = new SequenceConfig();
    config.setAllowCreate(true);
    DatabaseEntry key = new DatabaseEntry(KEY_NAME.getBytes("UTF-8"));
    Sequence seq = db.openSequence(null, key, config);

    /* Allocate a few sequence numbers. */
    for (int i = 0; i < 10; i++) {
        long seqnum = seq.get(null, 1);
        System.out.println("Got sequence number: " + seqnum);
    }

    /* Close all. */
    seq.close();
    db.close();
    env.close();
}
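/*
 * A hypothetical variation on the example above (not from the original
 * source), reusing its db and key: SequenceConfig.setCacheSize lets a
 * sequence handle reserve a block of values so that most seq.get() calls
 * are served from memory instead of a database write. The value 50 is an
 * arbitrary illustration.
 */
SequenceConfig cachedConfig = new SequenceConfig();
cachedConfig.setAllowCreate(true);
cachedConfig.setCacheSize(50); // hand out up to 50 values before persisting again
Sequence cachedSeq = db.openSequence(null, key, cachedConfig);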
/* Close the Environments after finishing reading operations. */
private void closeEnvironments() {
    if (master != null) {
        master.close();
    }
    if (replica != null) {
        replica.close();
    }
}
/** @see org.geogit.storage.ObjectDatabase#create() */
@Override
public void create() {
    txn = CurrentTransaction.getInstance(env);
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setTransactional(env.getConfig().getTransactional());
    this.objectDb = env.openDatabase(null, "BlobStore", dbConfig);
}
private void expectNothingToClean() {
    env.cleanLog();
    final EnvironmentStats stats = env.getStats(null);
    final String msg = String.format(
        "%d probes, %d non-probes",
        stats.getNCleanerProbeRuns(), stats.getNCleanerRuns());
    assertEquals(msg, 0, stats.getNCleanerRuns() - stats.getNCleanerProbeRuns());
}
@Override
public void shutdown() {
    // Close everything down. Per the JE documentation, an explicit sync is
    // needed when the databases are non-transactional.
    RelationBDB.close();
    NodeBDB.close();
    environment.sync();
    environment.close();
}
public EnvironmentStats getStats(String storeName, boolean fast) {
    StatsConfig config = new StatsConfig();
    config.setFast(fast);
    try {
        Environment env = getEnvironment(storeName);
        return env.getStats(config);
    } catch (DatabaseException e) {
        throw new VoldemortException(e);
    }
}
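/*
 * A hypothetical caller sketch (not from the original source): fast stats
 * skip the fields that are expensive to gather, so they suit frequent
 * polling. The store name "test-store" is illustrative.
 */
EnvironmentStats stats = getStats("test-store", true /* fast */);
System.out.println("cache misses: " + stats.getNCacheMiss());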
private void doReplicaHasGapNetworkRestore(boolean multiGaps) throws Throwable {
    Durability noAck = new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC, ReplicaAckPolicy.NONE);
    db = null;
    try {
        Environment master = setupLogWithCleanedGaps(multiGaps);
        int masterIdx = findMasterIndex(master);

        /*
         * Write a record, so that we are sure that there will be a
         * network restore, because we have to cross a checkpoint.
         */
        generateData(master, 1, noAck, false);
        CheckpointConfig cc = new CheckpointConfig();
        master.checkpoint(cc);
        EnvironmentStats stats = master.getStats(clearConfig);
        assertEquals(0, stats.getCleanerBacklog());
        if (multiGaps) {
            logger.info("Multigap: deletion backlog = " + stats.getFileDeletionBacklog());
        } else {
            assertEquals(0, stats.getFileDeletionBacklog());
        }

        db.close();
        db = null;
        repEnvInfo[masterIdx].closeEnv();

        /* Start up the two replicas. */
        openReplicas(masterIdx);

        /* Start the node that had been the master. */
        try {
            repEnvInfo[masterIdx].openEnv();
            fail("Should be a network restore");
        } catch (InsufficientLogException ile) {
            repEnvInfo[masterIdx].closeEnv();
            NetworkRestore restore = new NetworkRestore();
            NetworkRestoreConfig config = new NetworkRestoreConfig();
            config.setRetainLogFiles(true);
            restore.execute(ile, config);
            repEnvInfo[masterIdx].openEnv();
        }

        /* Check its last VLSN and size. */

    } catch (Throwable t) {
        t.printStackTrace();
        throw t;
    } finally {
        if (db != null) {
            db.close();
        }
        RepTestUtils.shutdownRepEnvs(repEnvInfo);
    }
}
/**
 * Make a thread allocate a vlsn, but then fail before it's tracked by the
 * vlsn index. This happened in [#20919] when
 *  1. rep environment close was called
 *  2. the repNode was nulled out
 *  3. a concurrent writing thread got a NPE within its call to
 *     LogManager.log because the repNode was null. This thread exited
 *     after it had bumped the vlsn, but before it had entered the vlsn in
 *     the vlsnIndex
 *  4. rep environment close tried to do a checkpoint, but the checkpoint
 *     hung.
 * The fix works by having (3) invalidate the environment, and by having
 * (4) check for an invalidated environment.
 */
@Test
public void testLoggingFailure() throws DatabaseException, IOException {

    /* Make a single replicated environment. */
    RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1);
    RepTestUtils.joinGroup(repEnvInfo);

    /*
     * Disable cleaning and CBVLSN updating, to control vlsn creation
     * explicitly.
     */
    Environment env = repEnvInfo[0].getEnv();
    EnvironmentMutableConfig config = env.getMutableConfig();
    config.setConfigParam("je.env.runCleaner", "false");
    env.setMutableConfig(config);
    LocalCBVLSNUpdater.setSuppressGroupDBUpdates(false);

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setTransactional(true);
    dbConfig.setAllowCreate(true);
    Database db = env.openDatabase(null, "foo", dbConfig);
    DatabaseEntry value = new DatabaseEntry(new byte[4]);

    EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
    LogManager logManager = DbInternal.getEnvironmentImpl(env).getLogManager();

    /*
     * Inject an exception into the next call to log() that is made
     * for a replicated log entry.
     */
    logManager.setDelayVLSNRegisterHook(new ForceException());

    VLSNIndex vlsnIndex = ((RepImpl) envImpl).getVLSNIndex();
    try {
        db.put(null, value, value);
        fail("Should throw exception");
    } catch (Exception expected) {
        assertTrue(
            "latest=" + vlsnIndex.getLatestAllocatedVal()
                + " last mapped=" + vlsnIndex.getRange().getLast().getSequence(),
            vlsnIndex.getLatestAllocatedVal() > vlsnIndex.getRange().getLast().getSequence());
    }

    try {
        VLSNIndex.AWAIT_CONSISTENCY_MS = 1000;
        envImpl.awaitVLSNConsistency();
        fail("Should throw and break out");
    } catch (DatabaseException expected) {
    }

    /* Before the fix, this test hung. */
}
@Test
public void testCleanAfterMinUtilizationChange() {
    open(true /*runCleaner*/);
    writeFiles(4 /*nActive*/, 3 /*nObsolete*/);
    expectNothingToClean();

    final EnvironmentConfig envConfig = env.getConfig();
    envConfig.setConfigParam(EnvironmentConfig.CLEANER_MIN_UTILIZATION, "90");
    env.setMutableConfig(envConfig);

    expectBackgroundCleaning();
    close();
}
public void close() {
    synchronized (lock) {
        try {
            for (Environment environment : environments.values()) {
                environment.sync();
                environment.close();
            }
        } catch (DatabaseException e) {
            throw new VoldemortException(e);
        }
    }
}
/** Forcefully clean the logs. */
@JmxOperation(description = "Forcefully run the cleaner threads")
public void cleanLogs() {
    synchronized (lock) {
        try {
            for (Environment environment : environments.values()) {
                environment.cleanLog();
            }
        } catch (DatabaseException e) {
            throw new VoldemortException(e);
        }
    }
}
/**
 * Set an attribute value for the given environment.
 *
 * @param targetEnv The target JE environment. May be null if the environment is not open.
 * @param attribute name/value pair
 */
public void setAttribute(Environment targetEnv, Attribute attribute)
    throws AttributeNotFoundException, InvalidAttributeValueException {

    if (attribute == null) {
        throw new AttributeNotFoundException("Attribute cannot be null");
    }

    /* Sanity check parameters. */
    String name = attribute.getName();
    Object value = attribute.getValue();
    if (name == null) {
        throw new AttributeNotFoundException("Attribute name cannot be null");
    }
    if (value == null) {
        throw new InvalidAttributeValueException(
            "Attribute value for attribute " + name + " cannot be null");
    }

    try {
        if (name.equals(ATT_SET_READ_ONLY)) {
            openConfig.setReadOnly(((Boolean) value).booleanValue());
        } else if (name.equals(ATT_SET_TRANSACTIONAL)) {
            openConfig.setTransactional(((Boolean) value).booleanValue());
        } else if (name.equals(ATT_SET_SERIALIZABLE)) {
            openConfig.setTxnSerializableIsolation(((Boolean) value).booleanValue());
        } else {
            /* Set the specified attribute if the environment is open. */
            if (targetEnv != null) {
                EnvironmentMutableConfig config = targetEnv.getMutableConfig();
                if (name.equals(ATT_CACHE_SIZE)) {
                    config.setCacheSize(((Long) value).longValue());
                    targetEnv.setMutableConfig(config);
                } else if (name.equals(ATT_CACHE_PERCENT)) {
                    config.setCachePercent(((Integer) value).intValue());
                    targetEnv.setMutableConfig(config);
                } else {
                    throw new AttributeNotFoundException("attribute " + name + " is not valid.");
                }
            } else {
                throw new AttributeNotFoundException("attribute " + name + " is not valid.");
            }
        }
    } catch (NumberFormatException e) {
        throw new InvalidAttributeValueException("attribute name=" + name);
    } catch (DatabaseException e) {
        throw new InvalidAttributeValueException("attribute name=" + name + e.getMessage());
    }
}
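/*
 * A hypothetical caller sketch (not from the original source): driving the
 * setAttribute method above through the standard javax.management.Attribute
 * type. The jeMonitor and openEnv names and the 64 MB cache size are
 * illustrative; note that ATT_CACHE_SIZE expects a Long, per the cast above.
 */
jeMonitor.setAttribute(openEnv, new Attribute(ATT_CACHE_SIZE, Long.valueOf(64L * 1024 * 1024)));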
public StorageEngine<ByteArray, byte[], byte[]> getStore(String storeName) {
    synchronized (lock) {
        try {
            Environment environment = getEnvironment(storeName);
            Database db = environment.openDatabase(null, storeName, databaseConfig);
            BdbRuntimeConfig runtimeConfig = new BdbRuntimeConfig(voldemortConfig);
            BdbStorageEngine engine = new BdbStorageEngine(storeName, environment, db, runtimeConfig);
            return engine;
        } catch (DatabaseException d) {
            throw new StorageInitializationException(d);
        }
    }
}
public static void main(String[] args) {
    Environment env = null;
    Database db = null;
    EnvironmentConfig envconfig = new EnvironmentConfig();
    envconfig.setAllowCreate(true);
    try {
        env = new Environment(new File("D://bdb"), envconfig);
        DatabaseConfig dbconfig = new DatabaseConfig();
        dbconfig.setAllowCreate(true);
        db = env.openDatabase(null, "dbac.db", dbconfig);

        String key = "mykey";
        DatabaseEntry thekey = new DatabaseEntry();
        thekey.setData(key.getBytes("utf-8"));
        Long value = Long.valueOf(123456);
        DatabaseEntry thevalue = new DatabaseEntry();
        EntryBinding<Long> myBinding = TupleBinding.getPrimitiveBinding(Long.class);
        myBinding.objectToEntry(value, thevalue);
        // LongBinding myLongBinding = (LongBinding) TupleBinding.getPrimitiveBinding(Long.class);
        // myLongBinding.objectToEntry(value, thevalue);
        db.put(null, thekey, thevalue);

        DatabaseEntry valueEntry = new DatabaseEntry();
        OperationStatus status = db.get(null, thekey, valueEntry, LockMode.DEFAULT);
        if (status == OperationStatus.SUCCESS) {
            // Long number = myLongBinding.entryToObject(valueEntry);
            Long number = myBinding.entryToObject(valueEntry);
            System.out.println(env.getDatabaseNames());
            System.out.println(number);
        }
    } catch (EnvironmentLockedException e) {
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (db != null) {
            try {
                db.close();
            } catch (DatabaseException e) {
                e.printStackTrace();
            }
        }
        if (env != null) {
            try {
                env.cleanLog();
                env.close();
            } catch (DatabaseException e) {
                e.printStackTrace();
            }
        }
    }
}
public BerkeleyStorage(File file, boolean readonly, boolean async) {
    if (file == null) {
        int pid = 0;
        try {
            pid = Integer.parseInt((new File("/proc/self")).getCanonicalFile().getName());
        } catch (NumberFormatException | IOException e) {
        }
        String path = "/tmp";
        String db_path = System.getenv("DB");
        if (db_path != null) {
            path = db_path;
        }
        file = new File(path + "/ringpaxos-db/" + pid);
        file.mkdirs();
    }

    EnvironmentConfig envConfig = new EnvironmentConfig();
    DatabaseConfig dbConfig = new DatabaseConfig();
    envConfig.setReadOnly(readonly);
    dbConfig.setReadOnly(readonly);
    envConfig.setAllowCreate(!readonly);
    dbConfig.setAllowCreate(!readonly);

    // performance settings
    envConfig.setTransactional(true);
    envConfig.setCacheMode(CacheMode.DEFAULT);
    // envConfig.setCacheSize(1000000 * 800); // 800M
    if (async) {
        dbConfig.setTransactional(false);
        envConfig.setDurability(Durability.COMMIT_NO_SYNC);
        dbConfig.setDeferredWrite(true);
    } else {
        dbConfig.setTransactional(true);
        envConfig.setDurability(Durability.COMMIT_SYNC);
        dbConfig.setDeferredWrite(false);
    }

    env = new Environment(file, envConfig);
    db = env.openDatabase(null, "paxosDB", dbConfig);
    classCatalogDb = env.openDatabase(null, "ClassCatalogDB", dbConfig);
    classCatalog = new StoredClassCatalog(classCatalogDb);

    keyBinding = TupleBinding.getPrimitiveBinding(Long.class);
    dataBinding = new SerialBinding<Decision>(classCatalog, Decision.class);
    ballotBinding = TupleBinding.getPrimitiveBinding(Integer.class);

    logger.info("BerkeleyStorage cache size: " + env.getMutableConfig().getCacheSize());
    logger.info("BerkeleyStorage durability: " + env.getMutableConfig().getDurability().getLocalSync());
    logger.info("BerkeleyStorage deferred write: " + db.getConfig().getDeferredWrite());
}
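/*
 * A minimal usage sketch (not from the original source), assuming the
 * keyBinding, dataBinding, and db fields and the Decision type shown above:
 * the tuple binding turns the instance id into a byte-ordered key, and the
 * serial binding (de)serializes the Decision via the class catalog. The
 * method names putDecision/getDecision are hypothetical.
 */
public void putDecision(Long instance, Decision decision) {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    keyBinding.objectToEntry(instance, key);
    dataBinding.objectToEntry(decision, value);
    db.put(null, key, value); // auto-commit write (or buffered, when deferred write is on)
}

public Decision getDecision(Long instance) {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    keyBinding.objectToEntry(instance, key);
    if (db.get(null, key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        return dataBinding.entryToObject(value);
    }
    return null; // no decision stored for this instance
}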
private void verifyDb(Hashtable dataMap, int dumpIndex) throws DatabaseException {
    DatabaseConfig config = new DatabaseConfig();
    config.setReadOnly(true);
    DbInternal.setUseExistingConfig(config, true);
    Database myDb = env.openDatabase(null, dbName + dumpIndex, config);
    Cursor cursor = myDb.openCursor(null, null);
    StringDbt foundKey = new StringDbt();
    StringDbt foundData = new StringDbt();
    OperationStatus status = cursor.getFirst(foundKey, foundData, LockMode.DEFAULT);
    while (status == OperationStatus.SUCCESS) {
        String foundKeyString = foundKey.getString();
        String foundDataString = foundData.getString();
        if (dataMap.get(foundKeyString) != null) {
            assertTrue(((String) dataMap.get(foundKeyString)).equals(foundDataString));
            dataMap.remove(foundKeyString);
        } else {
            fail("didn't find key in either map (" + foundKeyString + ")");
        }
        status = cursor.getNext(foundKey, foundData, LockMode.DEFAULT);
    }
    assertTrue(dataMap.size() == 0);
    cursor.close();
    myDb.close();
}
private static void closeEnv() {
    System.out.println("Closing env and databases");
    if (myDb != null) {
        try {
            myDb.close();
        } catch (DatabaseException e) {
            System.err.println("closeEnv: myDb: " + e.toString());
            e.printStackTrace();
        }
    }
    if (myClassDb != null) {
        try {
            myClassDb.close();
        } catch (DatabaseException e) {
            System.err.println("closeEnv: myClassDb: " + e.toString());
            e.printStackTrace();
        }
    }
    if (myEnv != null) {
        try {
            myEnv.close();
        } catch (DatabaseException e) {
            System.err.println("closeEnv: " + e.toString());
            e.printStackTrace();
        }
    }
}
/** Write data into the database. */
private void generateData(Environment master, int numTxns, Durability durability, boolean doCommit) {

    /* Write some data. */
    DatabaseEntry key = new DatabaseEntry();
    byte[] dataPadding = new byte[1000];
    DatabaseEntry data = new DatabaseEntry(dataPadding);

    TransactionConfig txnConfig = new TransactionConfig();
    txnConfig.setDurability(durability);

    for (int i = 0; i < numTxns; i++) {
        final Transaction txn = master.beginTransaction(null, txnConfig);
        // long keyPrefix = i << 10;
        // LongBinding.longToEntry(keyPrefix + i, key);
        LongBinding.longToEntry(i, key);
        db.put(txn, key, data);

        if (doCommit) {
            txn.commit();
        } else {
            txn.abort();
        }
    }
}
public ByteStoreBDB(File dir, String dbname, boolean ro) {
    this.dir = Files.initDirectory(dir);
    this.readonly = ro;
    settings = new SettingsJE();

    EnvironmentConfig bdb_eco = new EnvironmentConfig();
    bdb_eco.setReadOnly(ro);
    bdb_eco.setAllowCreate(!ro);
    bdb_eco.setTransactional(false);
    // bdb_eco.setDurability(Durability.COMMIT_NO_SYNC);
    if (ro) {
        bdb_eco.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false"); // disable log cleaner thread
        bdb_eco.setCacheMode(CacheMode.EVICT_LN);
    }
    JEUtil.mergeSystemProperties(bdb_eco);
    SettingsJE.updateEnvironmentConfig(settings, bdb_eco);

    bdb_env = new Environment(dir, bdb_eco);
    bdb_cfg = new DatabaseConfig();
    bdb_cfg.setReadOnly(ro);
    bdb_cfg.setAllowCreate(true);
    bdb_cfg.setDeferredWrite(true);
    SettingsJE.updateDatabaseConfig(settings, bdb_cfg);

    bdb = bdb_env.openDatabase(null, dbname, bdb_cfg);
    if (log.isDebugEnabled()) {
        log.debug(SettingsJE.dumpDebug(bdb));
    }
}
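/*
 * A companion sketch (not from the original source): because the database
 * above is opened with setDeferredWrite(true), buffered writes are not
 * durable until Database.sync() is called. A hypothetical flush method for
 * this class, assuming the bdb and readonly fields shown above:
 */
public void flush() {
    if (!readonly) {
        bdb.sync(); // flushes deferred writes to the JE log
    }
}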
/** Get a locker for a read or cursor operation. */
private static Locker getReadableLocker(Environment env, Locker locker, boolean readCommittedIsolation)
    throws DatabaseException {

    EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
    if (locker == null) {
        Transaction xaTxn = env.getThreadTransaction();
        if (xaTxn != null) {
            return DbInternal.getLocker(xaTxn);
        }
    }

    if (locker == null) {
        /* Non-transactional user operations use ThreadLocker. */
        locker = ThreadLocker.createThreadLocker(envImpl);
    } else {
        /*
         * Use the given locker. For read-committed, wrap the given
         * transactional locker in a special locker for that isolation
         * level.
         */
        if (readCommittedIsolation) {
            locker = ReadCommittedLocker.createReadCommittedLocker(envImpl, locker);
        }
    }
    return locker;
}
/**
 * Tests that when a file is opened with a lesser version than the current version, a new log file
 * is started for writing new log entries. This is important so that the new header version is
 * written even if no new log file is needed. If the new version were not written, an older
 * version of JE would not recognize that there had been a version change.
 */
public void testLesserVersionNotUpdated() throws DatabaseException, IOException {
    TestUtils.loadLog(getClass(), Utils.MIN_VERSION_NAME, envHome);
    File logFile = new File(envHome, TestUtils.LOG_FILE_NAME);
    long origFileSize = logFile.length();

    EnvironmentConfig envConfig = TestUtils.initEnvConfig();
    envConfig.setAllowCreate(false);
    envConfig.setTransactional(true);
    Environment env = new Environment(envHome, envConfig);
    env.sync();
    env.close();

    assertEquals(origFileSize, logFile.length());
}
/* Insert 100 records beginning with the beginKey. */
private void doWork(Environment master, String dbName, int beginKey) throws Exception {
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setTransactional(true);

    /* Insert/update the records of the database. */
    Database db = master.openDatabase(null, dbName, dbConfig);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    for (int i = 0; i < 100; i++) {
        IntegerBinding.intToEntry(beginKey + i, key);
        StringBinding.stringToEntry("herococo", data);
        db.put(null, key, data);
    }
    db.close();

    /*
     * Do a sync at the end of the stage to make sure master and
     * replica have the same data set.
     */
    VLSN commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
    RepTestUtils.checkNodeEquality(commitVLSN, false, repEnvInfo);
}
public void put(WebURL url) throws DatabaseException {
    /*
     * The key that is used for storing URLs determines the order
     * they are crawled. Lower key values result in earlier crawling.
     * Here our keys are 6 bytes. The first byte comes from the URL priority.
     * The second byte comes from the depth of crawl at which this URL is first found.
     * The remaining 4 bytes come from the docid of the URL. As a result,
     * URLs with lower priority numbers will be crawled earlier. If priority
     * numbers are the same, those found at lower depths will be crawled earlier.
     * If depth is also equal, those found earlier (therefore, smaller docid) will
     * be crawled earlier.
     */
    byte[] keyData = new byte[6];
    keyData[0] = url.getPriority();
    keyData[1] = (url.getDepth() > Byte.MAX_VALUE ? Byte.MAX_VALUE : (byte) url.getDepth());
    Util.putIntInByteArray(url.getDocid(), keyData, 2);

    DatabaseEntry value = new DatabaseEntry();
    webURLBinding.objectToEntry(url, value);
    Transaction txn;
    if (resumable) {
        txn = env.beginTransaction(null, null);
    } else {
        txn = null;
    }
    urlsDB.put(txn, new DatabaseEntry(keyData), value);
    if (resumable && txn != null) {
        txn.commit();
    }
}
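/*
 * Util.putIntInByteArray is not shown in this snippet. For the key ordering
 * described above to work, the docid must be written big-endian, so that the
 * byte-lexicographic comparison BDB applies to keys matches numeric order.
 * A hypothetical implementation consistent with that requirement:
 */
public static void putIntInByteArray(int value, byte[] buf, int offset) {
    buf[offset] = (byte) (value >>> 24); // most significant byte first
    buf[offset + 1] = (byte) (value >>> 16);
    buf[offset + 2] = (byte) (value >>> 8);
    buf[offset + 3] = (byte) value;
}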
public static void close() {
    long startTs = System.currentTimeMillis();
    try {
        if (database != null) {
            database.close(); // close() also syncs the database
        }
        env.cleanLog();
        env.close();
    } catch (Exception e) {
        // ignore
        logger.warn("close exception ", e);
    }
    logger.warn("close takes " + (System.currentTimeMillis() - startTs));
}