public void merge() throws Exception {
    LOG.info("start merge");
    Database crawldbDatabase = env.openDatabase(null, "crawldb", BerkeleyDBUtils.defaultDBConfig);

    /* Merge the fetch database into crawldb. */
    LOG.info("merge fetch database");
    Database fetchDatabase = env.openDatabase(null, "fetch", BerkeleyDBUtils.defaultDBConfig);
    Cursor fetchCursor = fetchDatabase.openCursor(null, null);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry value = new DatabaseEntry();
    while (fetchCursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        crawldbDatabase.put(null, key, value);
    }
    fetchCursor.close();
    fetchDatabase.close();

    /* Merge the link database into crawldb, keeping existing crawldb entries. */
    LOG.info("merge link database");
    Database linkDatabase = env.openDatabase(null, "link", BerkeleyDBUtils.defaultDBConfig);
    Cursor linkCursor = linkDatabase.openCursor(null, null);
    while (linkCursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
        /* Only insert links that are not already present in crawldb. */
        if (!(crawldbDatabase.get(null, key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS)) {
            crawldbDatabase.put(null, key, value);
        }
    }
    linkCursor.close();
    linkDatabase.close();

    LOG.info("end merge");
    crawldbDatabase.sync();
    crawldbDatabase.close();
    env.removeDatabase(null, "fetch");
    LOG.debug("remove fetch database");
    env.removeDatabase(null, "link");
    LOG.debug("remove link database");
}
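/*
 * The crawldbDatabase.sync() call above is only legal for deferred-write
 * databases, so BerkeleyDBUtils.defaultDBConfig is presumably configured
 * along these lines. This is a minimal sketch of the assumed helper, not the
 * project's actual code:
 */
public class BerkeleyDBUtils {
    public static final DatabaseConfig defaultDBConfig = createDefaultConfig();

    private static DatabaseConfig createDefaultConfig() {
        DatabaseConfig config = new DatabaseConfig();
        config.setAllowCreate(true);   // create the database if it does not exist
        config.setDeferredWrite(true); // required for Database.sync() to be allowed
        return config;
    }
}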
/**
 * This test checks for the bug described in SR11123. If an IN and its
 * child-subtree is deleted, an INDeleteInfo is written to the log. If there is
 * a BINDelta in the log for a BIN-child of the removed subtree (i.e.
 * compressed), then recovery will apply it to the compressed IN. Since the IN
 * has no data in it, that is not necessarily a problem. However,
 * reinstantiating the obsolete IN may cause a parent IN to split, which is not
 * allowed during IN recovery.
 *
 * <p>Here's the case:
 *
 * <pre>
 *                 IN1
 *        +---------+---------+
 *        |                   |
 *       IN2                 IN6
 *      /   \              /  |  \
 *   BIN3   BIN4       BIN7 BIN8 BIN9
 * </pre>
 *
 * <p>IN2 and the subtree below are compressed away. During recovery replay,
 * after the pass where INs and INDeleteInfos are processed, the in-memory
 * tree looks like this:
 *
 * <pre>
 *       IN1
 *        |
 *       IN6
 *      /  |  \
 *   BIN7 BIN8 BIN9
 * </pre>
 *
 * <p>However, let's assume that BINDeltas were written for BIN3 and BIN4
 * within the recovery part of the log, before the subtree was compressed.
 * We'll replay those BINDeltas in the following pass, and in the faulty
 * implementation, they cause the ghosts of BIN3 and BIN4 to be resurrected
 * and applied to IN6. Let's assume that the max node size is 4 -- we won't be
 * able to connect BIN3 and BIN4 because IN6 doesn't have the capacity, and we
 * don't expect to have to do splits.
 */
private void addData(Database db) throws DatabaseException {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();

    /* Populate a tree so there are 3 levels. */
    for (int i = 0; i < 140; i += 10) {
        IntegerBinding.intToEntry(i, key);
        IntegerBinding.intToEntry(i, data);
        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
    }

    CheckpointConfig ckptConfig = new CheckpointConfig();
    ckptConfig.setForce(true);
    env.checkpoint(ckptConfig);

    Tree tree = DbInternal.dbGetDatabaseImpl(db).getTree();
    com.sleepycat.je.tree.Key.DUMP_TYPE = com.sleepycat.je.tree.Key.DumpType.BINARY;
    com.sleepycat.je.tree.Key.DUMP_INT_BINDING = true;
    if (DEBUG) {
        tree.dump();
    }

    /*
     * Update a key on BIN3 and a key on BIN4, to create a reason for a
     * BINDelta. Force a BINDelta for BIN3 and BIN4 out to the log.
     */
    IntegerBinding.intToEntry(0, key);
    IntegerBinding.intToEntry(100, data);
    assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
    IntegerBinding.intToEntry(20, key);
    assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));

    EnvironmentImpl envImpl = DbInternal.envGetEnvironmentImpl(env);
    BIN bin = (BIN) tree.getFirstNode();
    bin.log(envImpl.getLogManager(), true, false, false, false, null);
    bin = tree.getNextBin(bin, false /* traverseWithinDupTree */);
    bin.log(envImpl.getLogManager(), true, false, false, false, null);
    bin.releaseLatch();

    /*
     * Delete all of the left hand side of the tree, so that the subtree
     * root headed by IN2 is compressed.
     */
    for (int i = 0; i < 50; i += 10) {
        IntegerBinding.intToEntry(i, key);
        assertEquals(OperationStatus.SUCCESS, db.delete(null, key));
    }

    /* Force a compression. */
    env.compress();
    if (DEBUG) {
        tree.dump();
    }
}
/**
 * Create a populated tree, checkpoint, insert one more record, force
 * eviction of the IN nodes, then checkpoint again.
 */
private void setupEvictedRoot(Database db) throws DatabaseException {
    setStepwiseStart();

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();

    /* Populate a tree so it grows to 2 levels, with 2 BINs. */
    for (int i = 0; i < 10; i++) {
        IntegerBinding.intToEntry(i, key);
        IntegerBinding.intToEntry(i, data);
        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
    }

    Trace.trace(DbInternal.getEnvironmentImpl(env), "After inserts");
    env.checkpoint(FORCE_CONFIG);

    /*
     * Add another record so that the eviction below will log
     * different versions of the IN nodes.
     */
    IntegerBinding.intToEntry(10, key);
    IntegerBinding.intToEntry(10, data);
    assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));

    /* Evict. */
    TestHook<Boolean> evictHook =
        new TestHook<Boolean>() {
            public void doIOHook() {
                throw new UnsupportedOperationException();
            }

            public void doHook() {
                throw new UnsupportedOperationException();
            }

            public Boolean getHookValue() {
                return Boolean.TRUE;
            }

            public void hookSetup() {
                throw new UnsupportedOperationException();
            }

            public void doHook(Boolean obj) {
                throw new UnsupportedOperationException();
            }
        };
    DbInternal.getEnvironmentImpl(env).getEvictor().setRunnableHook(evictHook);
    env.evictMemory();

    /* Checkpoint again. */
    env.checkpoint(FORCE_CONFIG);
}
private void writeFiles(final int nActive, final int nObsolete) {
    int key = 0;
    final DatabaseEntry keyEntry = new DatabaseEntry();
    final DatabaseEntry dataEntry = new DatabaseEntry(new byte[FILE_SIZE]);

    /* Write the requested number of active records, each with a distinct key. */
    for (int i = 0; i < nActive; i += 1) {
        IntegerBinding.intToEntry(key, keyEntry);
        db.put(null, keyEntry, dataEntry);
        key += 1;
    }

    /* Overwrite one key repeatedly so the earlier versions become obsolete. */
    IntegerBinding.intToEntry(key, keyEntry);
    for (int i = 0; i <= nObsolete; i += 1) {
        db.put(null, keyEntry, dataEntry);
    }

    env.checkpoint(new CheckpointConfig().setForce(true));
}
public void put(WebURL url) throws DatabaseException {
    /*
     * The key that is used for storing URLs determines the order in which
     * they are crawled. Lower key values result in earlier crawling.
     * Here our keys are 6 bytes. The first byte comes from the URL priority.
     * The second byte comes from the depth of crawl at which this URL is
     * first found. The remaining 4 bytes come from the docid of the URL. As
     * a result, URLs with lower priority numbers will be crawled earlier.
     * If priority numbers are the same, those found at lower depths will be
     * crawled earlier. If depth is also equal, those found earlier
     * (therefore, with a smaller docid) will be crawled earlier.
     */
    byte[] keyData = new byte[6];
    keyData[0] = url.getPriority();
    keyData[1] = (url.getDepth() > Byte.MAX_VALUE ? Byte.MAX_VALUE : (byte) url.getDepth());
    Util.putIntInByteArray(url.getDocid(), keyData, 2);

    DatabaseEntry value = new DatabaseEntry();
    webURLBinding.objectToEntry(url, value);
    Transaction txn;
    if (resumable) {
        txn = env.beginTransaction(null, null);
    } else {
        txn = null;
    }
    urlsDB.put(txn, new DatabaseEntry(keyData), value);
    if (resumable && txn != null) {
        txn.commit();
    }
}
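/*
 * A small illustration of the ordering the 6-byte key produces. JE's default
 * btree comparator orders keys as unsigned byte arrays, so the priority byte
 * dominates, then depth, then docid (assuming non-negative priority values).
 * compareKeys below is our illustration, not part of the snippet above:
 */
static int compareKeys(byte[] a, byte[] b) {
    for (int i = 0; i < Math.min(a.length, b.length); i++) {
        int cmp = (a[i] & 0xFF) - (b[i] & 0xFF); // unsigned byte comparison, like JE's default
        if (cmp != 0) {
            return cmp;
        }
    }
    return a.length - b.length;
}

// A key built from (priority=0, depth=2, docid=9) sorts before one built from
// (priority=1, depth=0, docid=1), so the lower-priority-number URL is fetched
// first regardless of depth and docid.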
/* Insert 100 records beginning with beginKey. */
private void doWork(Environment master, String dbName, int beginKey) throws Exception {
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    dbConfig.setTransactional(true);

    /* Insert/update the records of the database. */
    Database db = master.openDatabase(null, dbName, dbConfig);
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();
    for (int i = 0; i < 100; i++) {
        IntegerBinding.intToEntry(beginKey + i, key);
        StringBinding.stringToEntry("herococo", data);
        db.put(null, key, data);
    }
    db.close();

    /*
     * Do a sync at the end of the stage to make sure master and
     * replica have the same data set.
     */
    VLSN commitVLSN = RepTestUtils.syncGroupToLastCommit(repEnvInfo, repEnvInfo.length);
    RepTestUtils.checkNodeEquality(commitVLSN, false, repEnvInfo);
}
@Override
public void addRela(long gid, long[] pos) {
    // update index
    DatabaseEntry key = longToEntry(gid);
    DatabaseEntry data = addrToEntry(pos);
    RelationBDB.put(null, key, data);
}
/**
 * Populate a tree with 2 BINs, delete the contents of one BIN, compress
 * that BIN away, then checkpoint.
 */
private void setupWrittenByCompression(Database db) throws DatabaseException {
    setStepwiseStart();

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();

    /* Populate a tree so it grows to 2 levels, with 2 BINs. */
    for (int i = 0; i < 10; i++) {
        IntegerBinding.intToEntry(i, key);
        IntegerBinding.intToEntry(i, data);
        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
    }

    Trace.trace(DbInternal.getEnvironmentImpl(env), "After inserts");
    env.checkpoint(FORCE_CONFIG);
    if (DEBUG) {
        System.out.println(db.getStats(new StatsConfig()));
    }

    /* Now delete all of 1 BIN. */
    for (int i = 0; i < 5; i++) {
        IntegerBinding.intToEntry(i, key);
        assertEquals(OperationStatus.SUCCESS, db.delete(null, key));
    }

    /* Compress, removing a BIN. */
    env.compress();
    if (DEBUG) {
        System.out.println("After compress");
        System.out.println(db.getStats(new StatsConfig()));
    }

    /* Checkpoint again. */
    env.checkpoint(FORCE_CONFIG);
}
/**
 * Write a record to the store.
 *
 * @param obj the object to persist
 */
public static void insert(Object obj) {
    if (!Constants.IN_BDB) {
        MonitorLog.addStat(
            Constants.DATA_PERSISTENCE_LOG, new String[] {"Bdb Ignore"}, new Long[] {1L});
        return;
    }
    Long kIndex = keyIndex.getAndIncrement();
    try {
        byte[] vbytes = serialData(obj);
        byte[] kbytes = serialData("j" + kIndex);
        DatabaseEntry keyEntry = new DatabaseEntry(kbytes);
        DatabaseEntry valueEntry = new DatabaseEntry(vbytes);
        OperationStatus rtn = database.put(null, keyEntry, valueEntry);
        if (rtn != OperationStatus.SUCCESS) {
            logger.warn("write to bdb fail: " + rtn.name());
            MonitorLog.addStat(
                Constants.DATA_PERSISTENCE_LOG, new String[] {"Bdb Write Fail"}, new Long[] {1L});
        } else {
            MonitorLog.addStat(
                Constants.DATA_PERSISTENCE_LOG, new String[] {"Bdb Success"}, new Long[] {1L});
        }
    } catch (Exception e) {
        logger.error("write to bdb exception", e);
        MonitorLog.addStat(
            Constants.DATA_PERSISTENCE_LOG,
            new String[] {"Bdb Write Exception"},
            new Long[] {1L});
    }
}
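/*
 * serialData is not shown in the snippet above. A minimal sketch of what it
 * presumably does (standard Java serialization to a byte array); the name and
 * behavior are assumptions based on how insert() uses it:
 */
private static byte[] serialData(Object obj) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
        out.writeObject(obj); // obj must implement java.io.Serializable
    }
    return bytes.toByteArray();
}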
public void testTruncate() throws DatabaseException {
    SecondaryDatabase secDb = initDb();
    Database priDb = secDb.getPrimaryDatabase();

    Transaction txn = txnBegin();
    for (int i = 0; i < NUM_RECS; i += 1) {
        priDb.put(txn, entry(i), entry(i));
    }
    verifyRecords(txn, priDb, NUM_RECS, false);
    verifyRecords(txn, secDb, NUM_RECS, true);
    txnCommit(txn);

    secDb.close();
    priDb.close();

    txn = txnBegin();
    assertEquals(NUM_RECS, env.truncateDatabase(txn, "testDB", true));
    assertEquals(NUM_RECS, env.truncateDatabase(txn, "testSecDB", true));
    txnCommit(txn);

    secDb = initDb();
    priDb = secDb.getPrimaryDatabase();

    txn = txnBegin();
    verifyRecords(txn, priDb, 0, false);
    verifyRecords(txn, secDb, 0, true);
    txnCommit(txn);

    secDb.close();
    priDb.close();
}
public void testReadOnly() throws DatabaseException {
    SecondaryDatabase secDb = initDb();
    Database priDb = secDb.getPrimaryDatabase();
    OperationStatus status;

    Transaction txn = txnBegin();
    for (int i = 0; i < NUM_RECS; i += 1) {
        status = priDb.put(txn, entry(i), entry(i));
        assertSame(OperationStatus.SUCCESS, status);
    }

    /*
     * Secondaries can be opened without a key creator if the primary is
     * read only. openSecondary will specify a null key creator if the
     * readOnly param is false.
     */
    Database readOnlyPriDb = openDatabase(false, "testDB", true);
    SecondaryDatabase readOnlySecDb = openSecondary(readOnlyPriDb, true, "testSecDB", false, true);
    assertNull(readOnlySecDb.getSecondaryConfig().getKeyCreator());
    verifyRecords(txn, readOnlySecDb, NUM_RECS, true);

    txnCommit(txn);
    readOnlySecDb.close();
    readOnlyPriDb.close();
    secDb.close();
    priDb.close();
}
/** Write data into the database. */
private void generateData(
    Environment master, int numTxns, Durability durability, boolean doCommit) {

    /* Write some data. */
    DatabaseEntry key = new DatabaseEntry();
    byte[] dataPadding = new byte[1000];
    DatabaseEntry data = new DatabaseEntry(dataPadding);

    TransactionConfig txnConfig = new TransactionConfig();
    txnConfig.setDurability(durability);

    for (int i = 0; i < numTxns; i++) {
        final Transaction txn = master.beginTransaction(null, txnConfig);
        // long keyPrefix = i << 10;
        // LongBinding.longToEntry(keyPrefix + i, key);
        LongBinding.longToEntry(i, key);
        db.put(txn, key, data);
        if (doCommit) {
            txn.commit();
        } else {
            txn.abort();
        }
    }
}
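/*
 * An example invocation (a sketch; assumes a master replicated environment
 * and the conventions above): write 10 committed transactions that must be
 * fsynced on the master but not on replicas.
 */
generateData(
    master,
    10,
    new Durability(
        Durability.SyncPolicy.SYNC,
        Durability.SyncPolicy.NO_SYNC,
        Durability.ReplicaAckPolicy.SIMPLE_MAJORITY),
    true /* doCommit */);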
/** Check that primary put() writes to each secondary that is open. */
private void checkSecondaryUpdate(
    Transaction txn,
    Database priDb,
    int val,
    Database secDb,
    boolean expectSecDbVal,
    Database secDb2,
    boolean expectSecDb2Val)
    throws DatabaseException {

    OperationStatus status;
    DatabaseEntry data = new DatabaseEntry();
    int secVal = KEY_OFFSET + val;

    status = priDb.put(txn, entry(val), entry(val));
    assertSame(OperationStatus.SUCCESS, status);

    status = secDb.get(txn, entry(secVal), data, LockMode.DEFAULT);
    assertSame(expectSecDbVal ? OperationStatus.SUCCESS : OperationStatus.NOTFOUND, status);

    status = secDb2.get(txn, entry(secVal), data, LockMode.DEFAULT);
    assertSame(expectSecDb2Val ? OperationStatus.SUCCESS : OperationStatus.NOTFOUND, status);

    status = priDb.delete(txn, entry(val));
    assertSame(OperationStatus.SUCCESS, status);
}
/**
 * Add a dummy 'cap' entry at the given insertion key. Prevents 'seeks' to
 * queue heads from holding a lock on the last item of the 'preceding' queue.
 * See: http://sourceforge.net/tracker/index.php?func=detail&aid=1262665&group_id=73833&atid=539102
 *
 * @param origin key at which to insert the cap
 */
public void addCap(byte[] origin) {
    try {
        pendingUrisDB.put(null, new DatabaseEntry(origin), new DatabaseEntry(new byte[0]));
    } catch (DatabaseException e) {
        throw new RuntimeException(e);
    }
}
@Override
public void put(byte[] key, byte[] val) {
    if (bdb.put(null, new DatabaseEntry(key), new DatabaseEntry(val)) != opSuccess) {
        throw new RuntimeException("put fail");
    }
    bytesOut.addAndGet(key.length + val.length);
    puts.incrementAndGet();
}
public void lock() throws Exception {
    lockDatabase = env.openDatabase(null, "lock", BerkeleyDBUtils.defaultDBConfig);
    DatabaseEntry key = new DatabaseEntry("lock".getBytes("utf-8"));
    DatabaseEntry value = new DatabaseEntry("locked".getBytes("utf-8"));
    lockDatabase.put(null, key, value);
    lockDatabase.sync();
    lockDatabase.close();
}
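/*
 * A plausible counterpart for checking the lock written above. This is a
 * sketch only; isLocked() is hypothetical and not part of the original
 * snippet:
 */
public boolean isLocked() throws Exception {
    Database db = env.openDatabase(null, "lock", BerkeleyDBUtils.defaultDBConfig);
    try {
        DatabaseEntry key = new DatabaseEntry("lock".getBytes("utf-8"));
        DatabaseEntry value = new DatabaseEntry();
        // The lock record exists iff a previous lock() call wrote it.
        return db.get(null, key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS;
    } finally {
        db.close();
    }
}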
@Override
public synchronized void putBallot(Long instance, int ballot) {
    keyBinding.objectToEntry(instance, key);
    ballotBinding.objectToEntry(ballot, ballot_data);
    OperationStatus status = db.put(null, key, ballot_data);
    if (logger.isDebugEnabled()) {
        logger.debug("DB put ballot " + ballot + " for instance " + instance + " " + status.name());
    }
}
@Override
public synchronized void putDecision(Long instance, Decision decision) {
    keyBinding.objectToEntry(instance, key);
    dataBinding.objectToEntry(decision, data);
    OperationStatus status = db.put(null, key, data);
    if (logger.isDebugEnabled()) {
        logger.debug("DB put " + decision + " " + status.name());
    }
}
/**
 * Make a thread allocate a vlsn, but then fail before it's tracked by the
 * vlsn index. This happened in [#20919] when:
 *
 * 1. rep environment close was called
 * 2. the repNode was nulled out
 * 3. a concurrent writing thread got an NPE within its call to
 *    LogManager.log because the repNode was null; this thread exited after
 *    it had bumped the vlsn, but before it had entered the vlsn in the
 *    vlsnIndex
 * 4. rep environment close tried to do a checkpoint, but the checkpoint hung
 *
 * This fix works by having (3) invalidate the environment, and by having (4)
 * check for an invalidated environment.
 */
@Test
public void testLoggingFailure() throws DatabaseException, IOException {
    /* Make a single replicated environment. */
    RepEnvInfo[] repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 1);
    RepTestUtils.joinGroup(repEnvInfo);

    /*
     * Disable cleaning and CBVLSN updating, to control vlsn creation
     * explicitly.
     */
    Environment env = repEnvInfo[0].getEnv();
    EnvironmentMutableConfig config = env.getMutableConfig();
    config.setConfigParam("je.env.runCleaner", "false");
    env.setMutableConfig(config);
    LocalCBVLSNUpdater.setSuppressGroupDBUpdates(false);

    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setTransactional(true);
    dbConfig.setAllowCreate(true);
    Database db = env.openDatabase(null, "foo", dbConfig);
    DatabaseEntry value = new DatabaseEntry(new byte[4]);

    EnvironmentImpl envImpl = DbInternal.getEnvironmentImpl(env);
    LogManager logManager = DbInternal.getEnvironmentImpl(env).getLogManager();

    /*
     * Inject an exception into the next call to log() that is made
     * for a replicated log entry.
     */
    logManager.setDelayVLSNRegisterHook(new ForceException());

    VLSNIndex vlsnIndex = ((RepImpl) envImpl).getVLSNIndex();
    try {
        db.put(null, value, value);
        fail("Should throw exception");
    } catch (Exception expected) {
        assertTrue(
            "latest=" + vlsnIndex.getLatestAllocatedVal()
                + " last mapped=" + vlsnIndex.getRange().getLast().getSequence(),
            vlsnIndex.getLatestAllocatedVal() > vlsnIndex.getRange().getLast().getSequence());
    }

    try {
        VLSNIndex.AWAIT_CONSISTENCY_MS = 1000;
        envImpl.awaitVLSNConsistency();
        fail("Should throw and break out");
    } catch (DatabaseException expected) {
    }

    /* Before the fix, this test hung. */
}
public static void main(String[] args) {
    Environment env = null;
    Database db = null;
    EnvironmentConfig envconfig = new EnvironmentConfig();
    envconfig.setAllowCreate(true);
    try {
        env = new Environment(new File("D://bdb"), envconfig);
        DatabaseConfig dbconfig = new DatabaseConfig();
        dbconfig.setAllowCreate(true);
        db = env.openDatabase(null, "dbac.db", dbconfig);

        String key = "mykey";
        DatabaseEntry thekey = new DatabaseEntry();
        thekey.setData(key.getBytes("utf-8"));

        Long value = Long.valueOf(123456);
        DatabaseEntry thevalue = new DatabaseEntry();
        EntryBinding<Long> myBinding = TupleBinding.getPrimitiveBinding(Long.class);
        myBinding.objectToEntry(value, thevalue);
        // LongBinding myLongBinding = (LongBinding) TupleBinding.getPrimitiveBinding(Long.class);
        // myLongBinding.objectToEntry(value, thevalue);
        db.put(null, thekey, thevalue);

        DatabaseEntry valueEntry = new DatabaseEntry();
        OperationStatus status = db.get(null, thekey, valueEntry, LockMode.DEFAULT);
        if (status == OperationStatus.SUCCESS) {
            // Long number = myLongBinding.entryToObject(valueEntry);
            Long number = myBinding.entryToObject(valueEntry);
            System.out.println(env.getDatabaseNames());
            System.out.println(number);
        }
    } catch (EnvironmentLockedException e) {
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (db != null) {
            try {
                db.close();
            } catch (DatabaseException e) {
                e.printStackTrace();
            }
        }
        if (env != null) {
            try {
                env.cleanLog();
                env.close();
            } catch (DatabaseException e) {
                e.printStackTrace();
            }
        }
    }
}
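/*
 * The commented-out lines above hint at the simpler path: for primitive
 * wrappers, JE ships dedicated bindings with static helpers, so no binding
 * object or cast is needed. A small sketch (the putLong helper is ours):
 */
static void putLong(Database db, DatabaseEntry key, long value) throws DatabaseException {
    DatabaseEntry entry = new DatabaseEntry();
    LongBinding.longToEntry(value, entry); // static helper; no binding instance required
    db.put(null, key, entry);
    // Read side, symmetrically: long n = LongBinding.entryToLong(entry);
}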
private void setupWrittenBySplits(Database db) throws DatabaseException {
    setStepwiseStart();

    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();

    /* Create a tree and checkpoint. */
    IntegerBinding.intToEntry(0, key);
    IntegerBinding.intToEntry(0, data);
    assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
    env.checkpoint(FORCE_CONFIG);
    Trace.trace(DbInternal.getEnvironmentImpl(env), "After creation");

    /* Populate a tree so it splits. */
    for (int i = 1; i < 6; i++) {
        IntegerBinding.intToEntry(i, key);
        IntegerBinding.intToEntry(i, data);
        assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
    }
    Trace.trace(DbInternal.getEnvironmentImpl(env), "After inserts");
    env.checkpoint(FORCE_CONFIG);
}
/** Add a Map of publication information to the Database. */
private void addToDb(Transaction txn, Database db, Map<String, Map<String, Object>> fromServerMap)
    throws IOException, DatabaseException {
    for (Map.Entry<String, Map<String, Object>> entry : fromServerMap.entrySet()) {
        String pubMedId = entry.getKey();
        // System.err.println("adding to cache: " + pubMedId);
        DatabaseEntry key = new DatabaseEntry(pubMedId.getBytes());
        Map<String, Object> dataMap = entry.getValue();
        ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream();
        ObjectOutputStream serializer = new ObjectOutputStream(arrayOutputStream);
        serializer.writeObject(dataMap);
        serializer.flush(); // flush buffered object data before reading the byte array
        DatabaseEntry data = new DatabaseEntry(arrayOutputStream.toByteArray());
        db.put(txn, key, data);
    }
}
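/*
 * A matching read path for the cache entries written above (a sketch; the
 * method name getFromDb is hypothetical). It reverses the serialization:
 * look up by pubMedId, then deserialize the stored Map.
 */
@SuppressWarnings("unchecked")
private Map<String, Object> getFromDb(Transaction txn, Database db, String pubMedId)
    throws IOException, ClassNotFoundException, DatabaseException {
    DatabaseEntry key = new DatabaseEntry(pubMedId.getBytes());
    DatabaseEntry data = new DatabaseEntry();
    if (db.get(txn, key, data, LockMode.DEFAULT) != OperationStatus.SUCCESS) {
        return null; // not cached
    }
    ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(data.getData()));
    return (Map<String, Object>) in.readObject();
}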
public void testCursorDupAndCloseDb() throws DatabaseException {
    initEnv(false);
    DatabaseConfig dbConfig = new DatabaseConfig();
    dbConfig.setAllowCreate(true);
    Database myDb = exampleEnv.openDatabase(null, "fooDb", dbConfig);

    myDb.put(null, new StringDbt("blah"), new StringDbt("blort"));
    Cursor cursor = myDb.openCursor(null, null);
    OperationStatus status =
        cursor.getNext(new DatabaseEntry(), new DatabaseEntry(), LockMode.DEFAULT);
    assertEquals(OperationStatus.SUCCESS, status);
    Cursor cursorDup = cursor.dup(true);
    cursor.close();
    cursorDup.close();
    myDb.close();
}
/**
 * Install a checkpoint hook on the tree, checkpoint, then insert a record,
 * so the log contains: provisional BIN, IN, checkpoint start, LN.
 */
private void setupCreateNewTree(Database db) throws DatabaseException {
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry data = new DatabaseEntry();

    TestHook ckptHook = new CheckpointHook(env);
    DbInternal.getDatabaseImpl(db).getTree().setCkptHook(ckptHook);

    env.checkpoint(FORCE_CONFIG);

    /*
     * Create in the log:
     *   provisional BIN, IN, ckpt start, LN
     */
    IntegerBinding.intToEntry(1, key);
    IntegerBinding.intToEntry(1, data);
    assertEquals(OperationStatus.SUCCESS, db.put(null, key, data));
}
/** [#14966] */
public void testDirtyReadPartialGet() throws DatabaseException {
    SecondaryDatabase secDb = initDb();
    Database priDb = secDb.getPrimaryDatabase();
    DatabaseEntry data = new DatabaseEntry();
    DatabaseEntry key = new DatabaseEntry();
    DatabaseEntry secKey = new DatabaseEntry();
    OperationStatus status;

    /* Put a record. */
    Transaction txn = txnBegin();
    status = priDb.put(txn, entry(0), entry(0));
    assertSame(OperationStatus.SUCCESS, status);
    txnCommit(txn);

    /* Regular get. */
    status = secDb.get(null, entry(0 + KEY_OFFSET), key, data, LockMode.DEFAULT);
    assertSame(OperationStatus.SUCCESS, status);
    assertDataEquals(entry(0), key);
    assertDataEquals(entry(0), data);

    /* Dirty read returning no data. */
    data.setPartial(0, 0, true);
    status = secDb.get(null, entry(0 + KEY_OFFSET), key, data, LockMode.READ_UNCOMMITTED);
    assertSame(OperationStatus.SUCCESS, status);
    assertDataEquals(entry(0), key);
    assertEquals(0, data.getData().length);
    assertEquals(0, data.getSize());

    /* Dirty read returning partial data. */
    data.setPartial(0, 1, true);
    status = secDb.get(null, entry(0 + KEY_OFFSET), key, data, LockMode.READ_UNCOMMITTED);
    assertSame(OperationStatus.SUCCESS, status);
    assertDataEquals(entry(0), key);
    assertEquals(1, data.getData().length);
    assertEquals(1, data.getSize());

    secDb.close();
    priDb.close();
}
/**
 * Inserts a key/data pair into the database.
 *
 * @param key the key object to serialize and store
 * @param data the data object to serialize and store
 * @throws Exception if the put does not succeed
 */
public void insert(Object key, Object data) throws Exception {
    Serializer serializer = new Serializer();
    byte[] keyBytes = serializer.serialize(key);
    byte[] dataBytes = serializer.serialize(data);
    final DatabaseEntry keyEntry = new DatabaseEntry(keyBytes);
    final DatabaseEntry dataEntry = new DatabaseEntry(dataBytes);

    final Transaction transaction = myEnv.beginTransaction(null, null);
    try {
        final OperationStatus result = myDatabase.put(transaction, keyEntry, dataEntry);
        if (result != OperationStatus.SUCCESS) {
            throw new Exception("put failed with status " + result);
        }
        transaction.commit();
    } catch (Exception e) {
        /* Abort so the transaction is not left open on failure. */
        transaction.abort();
        throw e;
    }
}
public void testPopulate() throws DatabaseException {
    Database priDb = openDatabase(false, "testDB", false);
    Transaction txn = txnBegin();

    /* Test population of a newly created secondary database. */
    for (int i = 0; i < NUM_RECS; i += 1) {
        assertSame(OperationStatus.SUCCESS, priDb.put(txn, entry(i), entry(i)));
    }
    txnCommit(txn);

    SecondaryDatabase secDb = openSecondary(priDb, true, "testSecDB", true, false);
    txn = txnBegin();
    verifyRecords(txn, secDb, NUM_RECS, true);
    txnCommit(txn);

    /*
     * Clear the secondary and populate again, to test the case where an
     * existing database is opened, and therefore a write txn will only be
     * created in order to populate it.
     */
    Database secDbDetached = openDatabase(true, "testSecDB", false);
    secDb.close();
    txn = txnBegin();
    for (int i = 0; i < NUM_RECS; i += 1) {
        assertSame(OperationStatus.SUCCESS, secDbDetached.delete(txn, entry(i + KEY_OFFSET)));
    }
    verifyRecords(txn, secDbDetached, 0, true);
    txnCommit(txn);

    secDb = openSecondary(priDb, true, "testSecDB", true, false);
    txn = txnBegin();
    verifyRecords(txn, secDb, NUM_RECS, true);
    verifyRecords(txn, secDbDetached, NUM_RECS, true);
    txnCommit(txn);

    secDbDetached.close();
    secDb.close();
    priDb.close();
}
public void openEnv() {
    try {
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setReadOnly(readOnly);
        envConfig.setAllowCreate(!readOnly);
        env = new Environment(envHome, envConfig);

        if (sleepTime < 0) {
            DatabaseConfig dbConfig = new DatabaseConfig();
            dbConfig.setAllowCreate(!readOnly);
            Database db = env.openDatabase(null, "testDB", dbConfig);
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            for (int i = 1; i <= 50; i++) {
                IntegerBinding.intToEntry(i, key);
                StringBinding.stringToEntry("herococo", data);
                db.put(null, key, data);
            }
            db.close();
        } else {
            Thread.sleep(sleepTime);
        }
    } catch (EnvironmentLockedException e) {
        /*
         * Exit the process with value 4; the exception is expected in
         * this case, don't dump it out.
         */
        System.exit(4);
    } catch (Exception e) {
        /* Dump the unexpected exception, exit the process with value 5. */
        e.printStackTrace();
        System.exit(5);
    } finally {
        if (env != null) {
            env.close();
        }
    }
}
/** @see org.geogit.storage.ObjectDatabase#put(org.geogit.storage.ObjectWriter) */
@Override
protected boolean putInternal(final ObjectId id, final byte[] rawData, final boolean override)
    throws IOException {
    final byte[] rawKey = id.getRawValue();
    DatabaseEntry key = new DatabaseEntry(rawKey);
    DatabaseEntry data = new DatabaseEntry(rawData);

    OperationStatus status;
    if (override) {
        status = objectDb.put(txn.getTransaction(), key, data);
    } else {
        status = objectDb.putNoOverwrite(txn.getTransaction(), key, data);
    }
    final boolean didntExist = SUCCESS.equals(status);

    if (LOGGER.isLoggable(Level.FINER)) {
        if (!didntExist) {
            /* putNoOverwrite returned KEYEXIST: the blob was already stored. */
            LOGGER.finer("Key already exists in blob store, blob reused for id: " + id);
        }
    }
    return didntExist;
}
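/*
 * Why the status check works: Database.put() overwrites unconditionally,
 * while putNoOverwrite() returns OperationStatus.KEYEXIST rather than
 * SUCCESS when the key is already present. A minimal sketch (db is any open
 * non-duplicate Database; the helper name is ours):
 */
static boolean insertIfAbsent(Database db, byte[] rawKey, byte[] rawData)
    throws DatabaseException {
    DatabaseEntry key = new DatabaseEntry(rawKey);
    DatabaseEntry data = new DatabaseEntry(rawData);
    // SUCCESS -> the key was new; KEYEXIST -> an entry was already stored.
    return db.putNoOverwrite(null, key, data) == OperationStatus.SUCCESS;
}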
@Test
public void testEnvRecovery() {
    Logger logger = LoggerUtils.getLoggerFixedPrefix(this.getClass(), "test");
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream p = new PrintStream(baos);

        EnvironmentConfig envConfig = TestUtils.initEnvConfig();
        envConfig.setAllowCreate(true);
        envConfig.setConfigParam("je.env.startupThreshold", "0");
        env = new Environment(envHome, envConfig);
        env.printStartupInfo(p);

        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        Database db = env.openDatabase(null, "foo", dbConfig);
        DatabaseEntry key = new DatabaseEntry(new byte[1000]);
        DatabaseEntry data = new DatabaseEntry(new byte[1000]);
        for (int i = 0; i < 10; i += 1) {
            db.put(null, key, data);
        }
        db.close();
        env.close();

        env = new Environment(envHome, envConfig);
        env.printStartupInfo(p);
        logger.fine(baos.toString());
        env.close();
        env = null;
    } catch (Exception e) {
        fail(
            "This test succeeds as long as the printing of the report "
                + "does not cause a problem. Any exception is a failure.");
    }
}