/** Create the server reading configuration information from ebean.properties. */
public SpiEbeanServer createServer(String name) {
  ConfigBuilder b = new ConfigBuilder();
  ServerConfig config = b.build(name);
  return createServer(config);
}
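/*
 * A minimal sketch of the explicit-config path this factory method expands to,
 * assuming the createServer(ServerConfig) overload it delegates to; the "mydb"
 * server name is an illustrative assumption, not from the source.
 */
ServerConfig config = new ConfigBuilder().build("mydb"); // equivalent to createServer("mydb")
SpiEbeanServer server = createServer(config);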
private void mySetup(int stripeLength) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }
  new File(TEST_DIR).mkdirs(); // Make sure the data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);
  // Scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);
  // Do not use a map-reduce cluster for raiding
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  conf.set("raid.server.address", "localhost:" + MiniDFSCluster.getFreePort());
  conf.set("mapred.raid.http.address", "localhost:0");
  Utils.loadTestCodecs(
      conf, stripeLength, stripeLength, 1, 3, "/destraid", "/destraidrs", false, true);
  conf.setBoolean("dfs.permissions", false);
  // Make sure the initial parity replication is smaller than NUM_DATANODES
  conf.setInt(RaidNode.RAID_PARITY_INITIAL_REPL_KEY, 1);
  dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfsCluster.waitActive();
  fileSys = dfsCluster.getFileSystem();
  namenode = fileSys.getUri().toString();
  FileSystem.setDefaultUri(conf, namenode);
  mr = new MiniMRCluster(4, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();
  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
  conf.set(RaidNode.RAID_CHECKSUM_STORE_CLASS_KEY,
      "org.apache.hadoop.raid.LocalChecksumStore");
  conf.setBoolean(RaidNode.RAID_CHECKSUM_STORE_REQUIRED_KEY, true);
  conf.set(LocalChecksumStore.LOCAL_CHECK_STORE_DIR_KEY, CHECKSUM_STORE_DIR);
  conf.set(RaidNode.RAID_STRIPE_STORE_CLASS_KEY,
      "org.apache.hadoop.raid.LocalStripeStore");
  conf.set(LocalStripeStore.LOCAL_STRIPE_STORE_DIR_KEY, STRIPE_STORE_DIR);
  // Write out a raid policy file with one default (XOR) and one Reed-Solomon policy
  ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
  cb.addPolicy("RaidTest1", "/user/dhruba/raidtest", 1, 1);
  cb.addPolicy("RaidTest2", "/user/dhruba/raidtestrs", 1, 1, "rs");
  cb.persist();
}
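/*
 * Illustrative test skeleton (not from the source) showing how mySetup(...)
 * is typically paired with cluster teardown. The test name, the stripe length
 * of 3, and the body are assumptions; MiniMRCluster.shutdown() and
 * MiniDFSCluster.shutdown() are the standard Hadoop teardown calls.
 */
@Test
public void testRaidWithLocalRaidNode() throws Exception {
  mySetup(3); // three blocks per stripe; any small value works for this sketch
  try {
    // ... create files under /user/dhruba/raidtest and verify parity is generated ...
  } finally {
    if (mr != null) mr.shutdown();
    if (dfsCluster != null) dfsCluster.shutdown();
  }
}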
private Configuration createSomeRealConfiguration(
    final Random valueGenerator, final Set<String>... keySets) {
  final ConfigBuilder builder = ConfigBuilder.buildConfig(ConfigSource.EMPTY_SOURCE);
  if (keySets.length == 1) {
    // Leaf level: attach a random float value to every key
    for (final String key : keySets[0]) {
      builder.select(key).addValue(valueGenerator.nextFloat()).back();
    }
  } else if (keySets.length > 1) {
    // Inner level: recurse on the remaining key sets and nest the result under each key
    for (final String key : keySets[0]) {
      final Configuration tmpCfg = createSomeRealConfiguration(
          valueGenerator, Arrays.copyOfRange(keySets, 1, keySets.length));
      builder.select(key).addConfig(tmpCfg).back();
    }
  }
  return builder.getConfig();
}
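/*
 * Illustrative call (not from the source): three key sets yield a three-level
 * nested Configuration. The key names and seed are made up; java.util imports
 * (Random, HashSet, Arrays) are assumed.
 */
final Configuration cfg = createSomeRealConfiguration(
    new Random(42L),
    new HashSet<>(Arrays.asList("sectionA", "sectionB")), // level-1 keys
    new HashSet<>(Arrays.asList("groupX")),               // level-2 keys
    new HashSet<>(Arrays.asList("leaf1", "leaf2")));      // leaves get random floats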
@Test
public void testMerge_whenInstantiatedWithEmptyAndNotMergedWithNull_shouldReturnOther() {
  // run
  final Configuration mergedConfig =
      mergeStrategy.merge(ConfigBuilder.getEmptyConfiguration(), otherConfig);
  // assert
  assertSame(otherConfig, mergedConfig);
}
@Test
public void testMerge_whenMergedWithEmpty_shouldReturnBaseConfig() {
  final Configuration mergedConfig =
      mergeStrategy.merge(baseConfig, ConfigBuilder.getEmptyConfiguration());
  assertSame(baseConfig, mergedConfig);
}
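/*
 * A sketch of the short-circuit contract the two tests above pin down,
 * assuming the empty configuration is a singleton so an identity check
 * suffices (assertSame requires the exact same instance back). The class
 * name, the MergeStrategy interface shape, and deepMerge(...) are all
 * hypothetical.
 */
public final class EmptyAwareMergeStrategy implements MergeStrategy {
  @Override
  public Configuration merge(Configuration base, Configuration other) {
    if (base == ConfigBuilder.getEmptyConfiguration()) {
      return other; // empty base: return the other config unchanged
    }
    if (other == ConfigBuilder.getEmptyConfiguration()) {
      return base;  // empty other: return the base config unchanged
    }
    return deepMerge(base, other); // combine the two non-trivial configs
  }

  private Configuration deepMerge(Configuration base, Configuration other) {
    // Hypothetical combining step, not exercised by the tests above.
    throw new UnsupportedOperationException("sketch only");
  }
}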