/** Check whether the name-node can be started. */
private boolean canStartNameNode(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[] {}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  }
  stopNameNode(nn2);
  return true;
}
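// A hypothetical equivalent of canStartNameNode (not in the original class),
// shown only to illustrate that catching java.net.BindException directly
// behaves the same as the instanceof check above: a bind failure returns
// false and any other IOException still propagates to the caller.
private boolean canStartNameNodeAlt(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[] {}, conf);
  } catch (java.net.BindException be) {
    return false; // the requested port is already in use
  }
  stopNameNode(nn2);
  return true;
}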
/** Start the name-node. */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
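// A hypothetical JUnit-style test (not in the original class) sketching how the
// two helpers above combine into a port-conflict check. It assumes the `config`
// and `hdfsDir` fields set by startNameNode(), the stopNameNode() helper, and
// org.junit.Assert.assertFalse; the test name and assertion message are
// illustrative only.
public void testNameNodePortConflict() throws IOException {
  NameNode nn = startNameNode(); // binds RPC and HTTP to the ports now in `config`
  try {
    // A second configuration that inherits the first NameNode's bound ports
    // but writes its image to a separate name directory.
    Configuration conf2 = new Configuration(config);
    conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
    NameNode.format(conf2);
    // The RPC port is already taken, so the second NameNode must not start.
    assertFalse("NameNode started on a port that is already in use",
                canStartNameNode(conf2));
  } finally {
    stopNameNode(nn);
  }
}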
NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
  config = conf;
  // We do not need many handlers, since each thread simulates a handler
  // by calling name-node methods directly
  config.setInt("dfs.namenode.handler.count", 1);
  // set exclude file; the ${hadoop.tmp.dir} variable is expanded when the
  // value is read back below
  config.set("dfs.hosts.exclude", "${hadoop.tmp.dir}/dfs/hosts/exclude");
  File excludeFile = new File(config.get("dfs.hosts.exclude", "exclude"));
  if (!excludeFile.exists()) {
    File excludeDir = excludeFile.getParentFile();
    // mkdirs() returns false if the directory already exists, so only fail
    // when it is still missing afterwards
    if (!excludeDir.mkdirs() && !excludeDir.exists())
      throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
  }
  new FileOutputStream(excludeFile).close();

  // Start the NameNode
  String[] argv = new String[] {};
  nameNode = NameNode.createNameNode(argv, config);
}
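// A minimal, hypothetical illustration (not in the original class) of why the
// constructor re-reads "dfs.hosts.exclude" right after setting it: Hadoop's
// Configuration.get() expands ${...} variables such as ${hadoop.tmp.dir}, so
// the File built above already points at the resolved path.
static void printResolvedExcludePath() {
  Configuration conf = new Configuration();
  conf.set("dfs.hosts.exclude", "${hadoop.tmp.dir}/dfs/hosts/exclude");
  // Prints the expanded path, i.e. a location under hadoop.tmp.dir
  System.out.println(conf.get("dfs.hosts.exclude"));
}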