public long getLength() {
    try {
        return urlsDB.count();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return -1;
}
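/*
 * The method above swallows the exception and signals failure with a -1
 * sentinel instead of propagating it. Below is a minimal sketch of how a
 * caller might treat that sentinel. The 'UrlQueue' type, the 'QueueMonitor'
 * class, and the 'hasPendingUrls' helper are hypothetical names used only
 * for illustration; they are not taken from the original source.
 */
public final class QueueMonitor {

    /** Returns true when the queue still has work, or when the count is unknown. */
    public static boolean hasPendingUrls(UrlQueue queue) {
        long length = queue.getLength();   // -1 means the count could not be read
        if (length < 0) {
            /*
             * Be conservative: treat an unreadable count as "work may remain"
             * rather than shutting processing down on a transient DB error.
             */
            return true;
        }
        return length > 0;
    }
}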
private void experienceLogFlushTask(String sleepTime, boolean flushBeforeCrash)
    throws Throwable {

    try {
        createRepEnvInfo(sleepTime);

        ReplicatedEnvironment master = RepTestUtils.joinGroup(repEnvInfo);
        long startTime = System.currentTimeMillis();
        StatsConfig stConfig = new StatsConfig();
        stConfig.setClear(true);

        /* Flush the existing dirty data before we do writes. */
        for (int i = 0; i < repEnvInfo.length; i++) {
            repEnvInfo[i].getEnv().sync();
            repEnvInfo[i].getEnv().getStats(stConfig);
        }

        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(true);
        dbConfig.setTransactional(true);

        Database db = master.openDatabase(null, dbName, dbConfig);
        DatabaseEntry key = new DatabaseEntry();
        DatabaseEntry data = new DatabaseEntry();
        for (int i = 1; i <= 100; i++) {
            IntegerBinding.intToEntry(i, key);
            StringBinding.stringToEntry(value, data);
            db.put(null, key, data);
        }

        assertTrue(System.currentTimeMillis() - startTime < 15000);
        Thread.sleep(15000);
        long endTime = System.currentTimeMillis();

        for (int i = 0; i < repEnvInfo.length; i++) {
            EnvironmentStats envStats =
                repEnvInfo[i].getEnv().getStats(stConfig);
            LogFlusher flusher = repEnvInfo[i].getRepNode().getLogFlusher();
            if (flushBeforeCrash) {
                /* Make sure the LogFlushTask has been invoked. */
                assertTrue(flusher.getFlushTask().scheduledExecutionTime() >
                           startTime);
                assertTrue(flusher.getFlushTask().scheduledExecutionTime() <
                           endTime);

                /*
                 * Since the log file size is not so big, we can't be sure
                 * that all the data will be written in the same log file,
                 * but we can be sure that a flush does happen.
                 */
                assertTrue(envStats.getNSequentialWrites() >= 1);
                assertTrue(envStats.getNLogFSyncs() == 1);
            } else {
                /*
                 * Make sure the LogFlushTask is not invoked after making
                 * the changes.
                 */
                assertTrue(flusher.getFlushTask().scheduledExecutionTime() <
                           startTime);
                assertTrue(envStats.getNSequentialWrites() == 0);
                assertTrue(envStats.getNLogFSyncs() == 0);
            }
            assertTrue(envStats.getNFSyncs() == 0);
        }

        File[] envHomes = new File[3];

        /* Close the replicas without doing a checkpoint. */
        for (int i = 0; i < repEnvInfo.length; i++) {
            envHomes[i] = repEnvInfo[i].getEnvHome();
            repEnvInfo[i].getRepImpl().abnormalClose();
        }

        /*
         * Open a read-only standalone Environment on the replicas to see
         * whether the data has been synced to the disk.
         */
        EnvironmentConfig newConfig = new EnvironmentConfig();
        newConfig.setAllowCreate(false);
        newConfig.setReadOnly(true);
        newConfig.setTransactional(true);

        for (int i = 0; i < repEnvInfo.length; i++) {
            Environment env = new Environment(envHomes[i], newConfig);
            dbConfig.setAllowCreate(false);
            dbConfig.setReadOnly(true);
            try {
                db = env.openDatabase(null, dbName, dbConfig);
            } catch (DatabaseNotFoundException e) {
                /*
                 * If the system crashes before the flush, the database is
                 * not synced to the disk, so this database can't be found
                 * at all, which is expected.
                 */
                assertFalse(flushBeforeCrash);
            }

            if (flushBeforeCrash) {
                assertTrue(db.count() == 100);
                for (int index = 1; index <= 100; index++) {
                    IntegerBinding.intToEntry(index, key);
                    OperationStatus status = db.get(null, key, data, null);
                    assertTrue(status == OperationStatus.SUCCESS);
                    assertEquals(value, StringBinding.entryToString(data));
                }
                db.close();
            }
            env.close();
        }
    } catch (Throwable t) {
        t.printStackTrace();
        throw t;
    }
}
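/*
 * The crash-recovery half of the test boils down to one step: reopen each
 * replica's environment directory as a read-only standalone Environment and
 * check whether the database and its 100 records reached disk. Below is a
 * minimal, self-contained sketch of just that step, using the same
 * com.sleepycat.je calls as the test. The class name 'CrashRecoveryCheck'
 * and the helper 'countSurvivingRecords' are illustrative only, not part of
 * the original source.
 */
import java.io.File;

import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseConfig;
import com.sleepycat.je.DatabaseNotFoundException;
import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentConfig;

public class CrashRecoveryCheck {

    /**
     * Opens a replica's environment home read-only, as the test does after
     * the abnormal close, and reports how many records survived.
     * Returns -1 when the database was never flushed to disk.
     */
    public static long countSurvivingRecords(File envHome, String dbName) {
        EnvironmentConfig envConfig = new EnvironmentConfig();
        envConfig.setAllowCreate(false);
        envConfig.setReadOnly(true);
        envConfig.setTransactional(true);

        DatabaseConfig dbConfig = new DatabaseConfig();
        dbConfig.setAllowCreate(false);
        dbConfig.setReadOnly(true);
        dbConfig.setTransactional(true);

        Environment env = new Environment(envHome, envConfig);
        try {
            Database db = env.openDatabase(null, dbName, dbConfig);
            try {
                return db.count();
            } finally {
                db.close();
            }
        } catch (DatabaseNotFoundException e) {
            /* No flush happened before the crash, so the database is absent. */
            return -1;
        } finally {
            env.close();
        }
    }
}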
@Override
public long count() {
    return bdb.count();
}
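/*
 * For context, a sketch of the kind of class this override presumably lives
 * in: a store backed by a com.sleepycat.je.Database held in a field named
 * 'bdb'. The 'RecordStore' interface, the class name, and the constructor
 * are assumptions for illustration; only the 'bdb' field and the count()
 * override come from the snippet above.
 */
import com.sleepycat.je.Database;

public class BdbRecordStore implements RecordStore {

    private final Database bdb;

    public BdbRecordStore(Database bdb) {
        this.bdb = bdb;
    }

    @Override
    public long count() {
        /* Delegate directly to Berkeley DB JE's record count. */
        return bdb.count();
    }
}

interface RecordStore {
    long count();
}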