/**
 * Build a log on the master that contains one or two cleaned (deleted-file) gaps,
 * either trailing the replication stream or interleaved with it.
 *
 * @param multipleGaps if true, create a second junk/cleaned region as well
 * @return the master environment
 */
private Environment setupLogWithCleanedGaps(boolean multipleGaps) throws Exception {
    db = null;
    repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3, makeEnvConfig());
    Environment master = RepTestUtils.joinGroup(repEnvInfo);
    int masterId = findMasterIndex(master);
    db = openDatabase(master);

    /* Seed the replication stream with some committed data. */
    generateData(master, 50, Durability.COMMIT_NO_SYNC, true);

    /*
     * Produce a low-utilization log on the master while the global cbvlsn is
     * pinned low: shut the replicas down (so the cbvlsn stops advancing) and
     * then pad the master's log with junk. The junk lands either entirely
     * past the last VLSN or (because RepGroupDB updates are unpredictable)
     * at least inside the active VLSN range.
     */
    closeReplicas(masterId);
    fillLogWithTraceMsgs(master, 50);
    if (multipleGaps) {
        Durability noAckDurability =
            new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC, ReplicaAckPolicy.NONE);
        /* Write more data */
        generateData(master, 50, noAckDurability, true);
        /* Make a second cleanup area of junk */
        fillLogWithTraceMsgs(master, 50);
    }

    CheckpointConfig ckptConfig = new CheckpointConfig();
    ckptConfig.setForce(true);
    master.checkpoint(ckptConfig);

    /* Two clearing reads: flush accumulated stats, then re-baseline. */
    EnvironmentStats envStats = master.getStats(clearConfig);
    envStats = master.getStats(clearConfig);

    /* Run the cleaner until a pass removes nothing more. */
    int cleanedTotal = 0;
    while (true) {
        final int cleanedNow = cleanLog(master);
        cleanedTotal += cleanedNow;
        master.checkpoint(ckptConfig);
        envStats = master.getStats(clearConfig);
        logger.info(
            "after cleaning, cleaner backlog = "
                + envStats.getCleanerBacklog()
                + " deletionBacklog="
                + envStats.getFileDeletionBacklog());
        if (cleanedNow <= 0) {
            break;
        }
    }
    assertTrue(cleanedTotal > 0);
    return master;
}
/**
 * Close the database and its environment.
 *
 * <p>Any iterators still open against this store are force-closed (with a warning)
 * before the database handle is closed.
 *
 * @param cleanLog if true, run the BerkeleyDB JE cleaner to completion (and force a
 *     checkpoint if any files were cleaned) before closing the environment.
 */
@Override
@SuppressWarnings("unchecked")
public void close(boolean cleanLog) {
    log.info("Closing db & env for: " + dir.getAbsolutePath());
    if (!openIterators.isEmpty()) {
        log.warn("closing " + openIterators.size() + " iterators on close");
        // Snapshot first: ClosableIterator.close() removes itself from openIterators,
        // so iterating the live collection would throw ConcurrentModificationException.
        for (Object e : openIterators.toArray(new Object[0])) {
            ((ClosableIterator<Map.Entry<byte[], byte[]>>) e).close();
        }
    }
    log.info("pages:gets=" + gets + " puts=" + puts + " in=" + bytesIn + " out=" + bytesOut);
    bdb.close();
    if (cleanLog) {
        /*
         * BUG FIX: Environment.getConfig() returns a clone, so calling
         * setConfigParam on it was a silent no-op and the cleaner thread was
         * never actually disabled. Mutable parameters must be changed through
         * getMutableConfig()/setMutableConfig().
         */
        com.sleepycat.je.EnvironmentMutableConfig mutableConfig = bdb_env.getMutableConfig();
        mutableConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CLEANER, "false");
        bdb_env.setMutableConfig(mutableConfig);
        // Drive the cleaner explicitly until a pass cleans zero files.
        int totalLogFiles = 0;
        int logFiles;
        do {
            logFiles = bdb_env.cleanLog();
            totalLogFiles += logFiles;
        } while (logFiles > 0);
        log.info("Total of " + totalLogFiles + " log files cleaned.");
        if (totalLogFiles > 0) {
            // A forced checkpoint is required before cleaned files can be deleted.
            CheckpointConfig force = new CheckpointConfig();
            force.setForce(true);
            bdb_env.checkpoint(force);
        }
    }
    bdb_env.close();
}
/**
 * Drive the old master through a log with cleaned gaps, restart the replicas,
 * and verify the old master is forced into a network restore when it rejoins.
 *
 * @param multiGaps whether the fixture log should contain multiple cleaned gaps
 */
private void doReplicaHasGapNetworkRestore(boolean multiGaps) throws Throwable {
    Durability noAckDurability =
        new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC, ReplicaAckPolicy.NONE);
    db = null;
    try {
        Environment master = setupLogWithCleanedGaps(multiGaps);
        int deadMasterId = findMasterIndex(master);

        /*
         * Write a record, so that we are sure that there will be a
         * network restore, because we have to cross a checkpoint.
         */
        generateData(master, 1, noAckDurability, false);
        master.checkpoint(new CheckpointConfig());

        EnvironmentStats masterStats = master.getStats(clearConfig);
        assertEquals(0, masterStats.getCleanerBacklog());
        if (multiGaps) {
            logger.info("Multigap: deletion backlog = " + masterStats.getFileDeletionBacklog());
        } else {
            assertEquals(0, masterStats.getFileDeletionBacklog());
        }

        db.close();
        db = null;
        repEnvInfo[deadMasterId].closeEnv();

        /* Start up the two replicas */
        openReplicas(deadMasterId);

        /* Start the node that had been the master */
        try {
            repEnvInfo[deadMasterId].openEnv();
            fail("Should be a network restore");
        } catch (InsufficientLogException ile) {
            /* Expected: recover via an explicit network restore, then reopen. */
            repEnvInfo[deadMasterId].closeEnv();
            NetworkRestoreConfig restoreConfig = new NetworkRestoreConfig();
            restoreConfig.setRetainLogFiles(true);
            new NetworkRestore().execute(ile, restoreConfig);
            repEnvInfo[deadMasterId].openEnv();
        }
        /* Check its last VLSN and size. */
    } catch (Throwable t) {
        t.printStackTrace();
        throw t;
    } finally {
        if (db != null) {
            db.close();
        }
        RepTestUtils.shutdownRepEnvs(repEnvInfo);
    }
}
/**
 * Invoke an operation for the given environment.
 *
 * @param targetEnv The target JE environment. May be null if the environment is not open.
 * @param actionName operation name.
 * @param params operation parameters. May be null.
 * @param signature operation signature. May be null.
 * @return the operation result, or null for operations with no result
 * @throws MBeanException wrapping any DatabaseException raised by the operation, or an
 *     IllegalArgumentException if actionName is not a recognized operation
 */
public Object invoke(
        Environment targetEnv, String actionName, Object[] params, String[] signature)
        throws MBeanException {

    /* Sanity checking. */
    if (actionName == null) {
        throw new IllegalArgumentException("actionName cannot be null");
    }

    try {
        if (targetEnv != null) {
            if (actionName.equals(OP_CLEAN)) {
                int numFiles = targetEnv.cleanLog();
                return Integer.valueOf(numFiles);
            } else if (actionName.equals(OP_EVICT)) {
                targetEnv.evictMemory();
                return null;
            } else if (actionName.equals(OP_CHECKPOINT)) {
                CheckpointConfig config = new CheckpointConfig();
                if ((params != null) && (params.length > 0)) {
                    Boolean force = (Boolean) params[0];
                    config.setForce(force.booleanValue());
                }
                targetEnv.checkpoint(config);
                return null;
            } else if (actionName.equals(OP_SYNC)) {
                targetEnv.sync();
                return null;
            } else if (actionName.equals(OP_ENV_STAT)) {
                return targetEnv.getStats(getStatsConfig(params));
            } else if (actionName.equals(OP_LOCK_STAT)) {
                return targetEnv.getLockStats(getStatsConfig(params));
            } else if (actionName.equals(OP_TXN_STAT)) {
                return targetEnv.getTransactionStats(getStatsConfig(params));
            } else if (actionName.equals(OP_DB_NAMES)) {
                return targetEnv.getDatabaseNames();
            } else if (actionName.equals(OP_DB_STAT)) {
                return getDatabaseStats(targetEnv, params);
            }
        }

        /*
         * BUG FIX: this used to RETURN the IllegalArgumentException object as
         * the operation result instead of throwing it, silently handing the
         * MBean client an exception instance. Throw it, wrapped per the
         * DynamicMBean.invoke contract.
         */
        throw new MBeanException(
            new IllegalArgumentException("actionName: " + actionName + " is not valid"));
    } catch (DatabaseException e) {
        /*
         * Add both the message and the exception for easiest
         * deciphering of the problem. Sometimes the original exception
         * stacktrace gets hidden in server logs.
         */
        throw new MBeanException(e, e.getMessage());
    }
}
/**
 * Populate the store so that utilization is controlled: nActive records that
 * stay live, plus repeated overwrites of one extra key so that exactly
 * nObsolete prior versions become obsolete (the final overwrite stays active).
 * Ends with a forced checkpoint so the log reflects the writes.
 */
private void writeFiles(final int nActive, final int nObsolete) {
    final DatabaseEntry keyEntry = new DatabaseEntry();
    final DatabaseEntry dataEntry = new DatabaseEntry(new byte[FILE_SIZE]);

    /* Distinct keys: every one of these records remains active. */
    int nextKey = 0;
    while (nextKey < nActive) {
        IntegerBinding.intToEntry(nextKey, keyEntry);
        db.put(null, keyEntry, dataEntry);
        nextKey++;
    }

    /*
     * Overwrite a single key nObsolete + 1 times: only the last version is
     * active, leaving nObsolete obsolete versions in the log.
     */
    IntegerBinding.intToEntry(nextKey, keyEntry);
    for (int rewrite = 0; rewrite <= nObsolete; rewrite++) {
        db.put(null, keyEntry, dataEntry);
    }

    final CheckpointConfig forceCkpt = new CheckpointConfig();
    forceCkpt.setForce(true);
    env.checkpoint(forceCkpt);
}