/**
 * Create a log that will have swathes of cleaned files that follow the replication stream, or are
 * intermingled in the replication stream.
 *
 * @param multipleGaps if true, create two cleaned-away junk regions separated by live data,
 *     rather than a single one
 * @return master the current master environment; side effects: {@code db} is left open on the
 *     master, the replicas (all nodes except the master) are left closed
 * @throws Exception if group setup, data generation, or cleaning fails
 */
private Environment setupLogWithCleanedGaps(boolean multipleGaps) throws Exception {
    db = null;
    /* Build a 3-node group and elect a master. */
    repEnvInfo = RepTestUtils.setupEnvInfos(envRoot, 3, makeEnvConfig());
    Environment master = RepTestUtils.joinGroup(repEnvInfo);
    int masterIdx = findMasterIndex(master);
    db = openDatabase(master);
    /* Write some data so there is a replication stream. */
    generateData(master, 50, Durability.COMMIT_NO_SYNC, true);
    /*
     * Make the master have a low-utilization log, and gate cleaning
     * with a non-updating global cbvlsn. Shut down the replicas so the
     * global cbvlsn remains low, and then fill the master with junk.
     * The junk will either entirely be to the right of the last VLSN,
     * or (since we can't predict RepGroupDB updates) at least within
     * the range of the active VLSN range.
     */
    closeReplicas(masterIdx);
    fillLogWithTraceMsgs(master, 50);
    if (multipleGaps) {
        Durability noAck =
            new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC, ReplicaAckPolicy.NONE);
        /* Write more data */
        generateData(master, 50, noAck, true);
        /* Make a second cleanup area of junk */
        fillLogWithTraceMsgs(master, 50);
    }
    /* Force a checkpoint so cleaned files become deletable. */
    CheckpointConfig cc = new CheckpointConfig();
    cc.setForce(true);
    master.checkpoint(cc);
    /*
     * Two consecutive clearing reads; the first result is discarded.
     * NOTE(review): presumably done so later stat reads reflect only
     * cleaner activity from this point on — confirm intent.
     */
    EnvironmentStats stats = master.getStats(clearConfig);
    stats = master.getStats(clearConfig);
    /* Clean the log */
    int totalCleaned = 0;
    int cleanedThisPass = 0;
    /* Repeat clean+checkpoint passes until a pass cleans nothing. */
    do {
        cleanedThisPass = cleanLog(master);
        totalCleaned += cleanedThisPass;
        master.checkpoint(cc);
        stats = master.getStats(clearConfig);
        logger.info(
            "after cleaning, cleaner backlog = "
                + stats.getCleanerBacklog()
                + " deletionBacklog="
                + stats.getFileDeletionBacklog());
    } while (cleanedThisPass > 0);
    /* The setup is only meaningful if cleaning actually happened. */
    assertTrue(totalCleaned > 0);
    return master;
}
/**
 * Run the cleaner once and assert that it performed no real cleaning: every
 * cleaner run must have been a probe run, i.e. non-probe runs == 0.
 */
private void expectNothingToClean() {
    env.cleanLog();
    final EnvironmentStats stats = env.getStats(null);
    final long probeRuns = stats.getNCleanerProbeRuns();
    final long allRuns = stats.getNCleanerRuns();
    /* Include both counters in the failure message for easy diagnosis. */
    final String msg = String.format("%d probes, %d non-probes", probeRuns, allRuns);
    assertEquals(msg, 0, allRuns - probeRuns);
}
/**
 * Fetch environment statistics for the named store.
 *
 * @param storeName name of the store whose environment is queried
 * @param fast if true, request only the cheaply-obtainable statistics
 * @return the environment statistics
 * @throws VoldemortException wrapping any underlying DatabaseException
 */
public EnvironmentStats getStats(String storeName, boolean fast) {
    final StatsConfig statsConfig = new StatsConfig();
    statsConfig.setFast(fast);
    try {
        return getEnvironment(storeName).getStats(statsConfig);
    } catch (DatabaseException e) {
        /* Preserve the cause for callers. */
        throw new VoldemortException(e);
    }
}
/**
 * Drive a scenario where the former master's log contains one or more cleaned
 * gaps, then verify that restarting it forces a network restore
 * (InsufficientLogException) and that the restore succeeds.
 *
 * @param multiGaps whether the log is set up with multiple cleaned gaps
 * @throws Throwable rethrows any test failure after printing its stack trace
 */
private void doReplicaHasGapNetworkRestore(boolean multiGaps) throws Throwable {
    Durability noAck =
        new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC, ReplicaAckPolicy.NONE);
    db = null;
    try {
        /* Replicas are left closed by this call; db is open on the master. */
        Environment master = setupLogWithCleanedGaps(multiGaps);
        int masterIdx = findMasterIndex(master);
        /*
         * Write a record, so that we are sure that there will be a
         * network restore, because we have to cross a checkpoint.
         */
        generateData(master, 1, noAck, false);
        CheckpointConfig cc = new CheckpointConfig();
        master.checkpoint(cc);
        EnvironmentStats stats = master.getStats(clearConfig);
        /* Cleaning finished in setup, so no backlog should remain. */
        assertEquals(0, stats.getCleanerBacklog());
        if (multiGaps) {
            /*
             * With multiple gaps the deletion backlog is not deterministic,
             * so it is only logged, not asserted.
             */
            logger.info("Multigap: deletion backlog = " + stats.getFileDeletionBacklog());
        } else {
            assertEquals(0, stats.getFileDeletionBacklog());
        }
        db.close();
        db = null;
        repEnvInfo[masterIdx].closeEnv();
        /* Start up the two replicas */
        openReplicas(masterIdx);
        /* Start the node that had been the master */
        try {
            repEnvInfo[masterIdx].openEnv();
            fail("Should be a network restore");
        } catch (InsufficientLogException ile) {
            /* Expected path: the old master's log has gaps; restore it. */
            repEnvInfo[masterIdx].closeEnv();
            NetworkRestore restore = new NetworkRestore();
            NetworkRestoreConfig config = new NetworkRestoreConfig();
            config.setRetainLogFiles(true);
            restore.execute(ile, config);
            /* Reopen after restore; this time it must succeed. */
            repEnvInfo[masterIdx].openEnv();
        }
        /* Check its last VLSN and size. */
    } catch (Throwable t) {
        /* Print before rethrow so the trace appears even if harness hides it. */
        t.printStackTrace();
        throw t;
    } finally {
        if (db != null) {
            db.close();
        }
        RepTestUtils.shutdownRepEnvs(repEnvInfo);
    }
}
/**
 * Wait up to 30 seconds for the background cleaner to run at least once.
 * Fails the test (after closing the environment) if no cleaner run is
 * observed within the timeout.
 */
private void expectBackgroundCleaning() {
    final long endTime = System.currentTimeMillis() + (30 * 1000);
    while (System.currentTimeMillis() < endTime) {
        final EnvironmentStats stats = env.getStats(null);
        if (stats.getNCleanerRuns() > 0) {
            return;
        }
        /*
         * Bug fix: the original loop busy-waited at full speed for up to 30
         * seconds, hammering getStats. Sleep briefly between polls, and stop
         * waiting if the thread is interrupted (restoring interrupt status).
         */
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
    close();
    fail("Cleaner did not run");
}
/**
 * Invoke an operation for the given environment.
 *
 * @param targetEnv The target JE environment. May be null if the environment is not open.
 * @param actionName operation name.
 * @param params operation parameters. May be null.
 * @param signature operation signature. May be null.
 * @return the operation result
 * @throws IllegalArgumentException if actionName is null or not a recognized operation
 * @throws MBeanException wrapping any DatabaseException from the operation
 */
public Object invoke(
        Environment targetEnv, String actionName, Object[] params, String[] signature)
        throws MBeanException {
    /* Sanity checking. */
    if (actionName == null) {
        throw new IllegalArgumentException("actionName cannot be null");
    }
    try {
        if (targetEnv != null) {
            if (actionName.equals(OP_CLEAN)) {
                int numFiles = targetEnv.cleanLog();
                /* valueOf instead of deprecated new Integer(int). */
                return Integer.valueOf(numFiles);
            } else if (actionName.equals(OP_EVICT)) {
                targetEnv.evictMemory();
                return null;
            } else if (actionName.equals(OP_CHECKPOINT)) {
                CheckpointConfig config = new CheckpointConfig();
                if ((params != null) && (params.length > 0)) {
                    Boolean force = (Boolean) params[0];
                    config.setForce(force.booleanValue());
                }
                targetEnv.checkpoint(config);
                return null;
            } else if (actionName.equals(OP_SYNC)) {
                targetEnv.sync();
                return null;
            } else if (actionName.equals(OP_ENV_STAT)) {
                return targetEnv.getStats(getStatsConfig(params));
            } else if (actionName.equals(OP_LOCK_STAT)) {
                return targetEnv.getLockStats(getStatsConfig(params));
            } else if (actionName.equals(OP_TXN_STAT)) {
                return targetEnv.getTransactionStats(getStatsConfig(params));
            } else if (actionName.equals(OP_DB_NAMES)) {
                return targetEnv.getDatabaseNames();
            } else if (actionName.equals(OP_DB_STAT)) {
                return getDatabaseStats(targetEnv, params);
            }
        }
        /*
         * Bug fix: the original code RETURNED this exception object as the
         * operation result instead of throwing it, so callers received an
         * IllegalArgumentException instance as a "successful" result. Throw
         * it so an unknown actionName is reported as an error, consistent
         * with the null-actionName check above.
         */
        throw new IllegalArgumentException("actionName: " + actionName + " is not valid");
    } catch (DatabaseException e) {
        /*
         * Add both the message and the exception for easiest
         * deciphering of the problem. Sometimes the original exception
         * stacktrace gets hidden in server logs.
         */
        throw new MBeanException(e, e.getMessage());
    }
}
/**
 * On the master, generate a log that has section A: a lot of records packed together section B: a
 * lot of junk that gets cleaned away, creating a gap in the log section C: a new section of data
 *
 * <p>Bring the replicas down after A is replicated, but before C is written. When the replicas
 * come up, they will have to be fed by the feeder from point A.
 */
@Test
public void testFeederHasGap() throws Throwable {
    final Durability noAck =
        new Durability(SyncPolicy.NO_SYNC, SyncPolicy.NO_SYNC, ReplicaAckPolicy.NONE);
    db = null;
    try {
        final Environment master = setupLogWithCleanedGaps(false);
        final int masterIdx = findMasterIndex(master);
        /*
         * Write a single record, and then junk, so that we are sure there
         * is a new VLSN, and that the replicas will have to sync up to
         * this point, across the gap of cleaned junk.
         */
        generateData(master, 1, noAck, false);
        final EnvironmentStats stats = master.getStats(clearConfig);
        assertEquals(0, stats.getCleanerBacklog());
        assertEquals(0, stats.getFileDeletionBacklog());
        /* Start up the two replicas */
        for (int i = 0; i < repEnvInfo.length; i++) {
            if (i == masterIdx) {
                continue;
            }
            repEnvInfo[i].openEnv();
            /* make sure we have up to date data */
            readData(repEnvInfo[i].getEnv(), 50);
        }
    } catch (Throwable t) {
        t.printStackTrace();
        throw t;
    } finally {
        if (db != null) {
            db.close();
        }
        RepTestUtils.shutdownRepEnvs(repEnvInfo);
    }
}
/**
 * Fetch fast (cheaply-obtainable) statistics for the given environment.
 *
 * @param environment the open JE environment to query
 * @return the environment's statistics gathered in fast mode
 */
private EnvironmentStats getStats(Environment environment) {
    final StatsConfig fastConfig = new StatsConfig();
    fastConfig.setFast(true);
    return environment.getStats(fastConfig);
}