@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Run the mini ZK cluster out of a temp directory, on a freshly
  // allocated free port so concurrent test runs do not collide.
  ZK_DIR = new File(System.getProperty("java.io.tmpdir") + File.separator + "lily.zklocktest");
  ZK_CLIENT_PORT = NetUtils.getFreePort();

  ZK_CLUSTER = new MiniZooKeeperCluster();
  ZK_CLUSTER.setClientPort(ZK_CLIENT_PORT);
  ZK_CLUSTER.startup(ZK_DIR);
}
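// For context, a test in this fixture would connect to the mini cluster through
// the port chosen above. A minimal sketch, assuming a plain ZooKeeper client;
// the test name, the 30000 ms session timeout, and the no-op watcher are
// illustrative assumptions, not from the original source.
@Test
public void testConnectsToMiniCluster() throws Exception {
  ZooKeeper zk = new ZooKeeper("localhost:" + ZK_CLIENT_PORT, 30000, new Watcher() {
    @Override
    public void process(WatchedEvent event) {
      // No-op: the test only needs a live session.
    }
  });
  try {
    // The root znode always exists on a freshly started cluster.
    assertNotNull(zk.exists("/", false));
  } finally {
    zk.close();
  }
}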
private int startMaster() {
  Configuration conf = getConf();
  try {
    // If 'local', defer to LocalHBaseCluster instance. Starts master
    // and regionserver both in the one JVM.
    if (LocalHBaseCluster.isLocal(conf)) {
      final MiniZooKeeperCluster zooKeeperCluster = new MiniZooKeeperCluster();
      File zkDataPath = new File(conf.get(HConstants.ZOOKEEPER_DATA_DIR));
      int zkClientPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 0);
      if (zkClientPort == 0) {
        throw new IOException("No config value for " + HConstants.ZOOKEEPER_CLIENT_PORT);
      }
      zooKeeperCluster.setDefaultClientPort(zkClientPort);

      // Login the zookeeper server principal (if using security).
      ZKUtil.loginServer(conf, "hbase.zookeeper.server.keytab.file",
          "hbase.zookeeper.server.kerberos.principal", null);

      int clientPort = zooKeeperCluster.startup(zkDataPath);
      if (clientPort != zkClientPort) {
        String errorMsg = "Could not start ZK at requested port of " + zkClientPort
            + ". ZK was started at port: " + clientPort
            + ". Aborting as clients (e.g. shell) will not be able to find this ZK quorum.";
        System.err.println(errorMsg);
        throw new IOException(errorMsg);
      }
      conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));

      // Need to have the zk cluster shutdown when master is shutdown.
      // Run a subclass that does the zk cluster shutdown on its way out.
      LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1, 1,
          LocalHMaster.class, HRegionServer.class);
      ((LocalHMaster) cluster.getMaster(0)).setZKCluster(zooKeeperCluster);
      cluster.startup();
      waitOnMasterThreads(cluster);
    } else {
      HMaster master = HMaster.constructMaster(masterClass, conf);
      if (master.isStopped()) {
        LOG.info("Won't bring the Master up as a shutdown is requested");
        return -1;
      }
      master.start();
      master.join();
      if (master.isAborted()) {
        throw new RuntimeException("HMaster Aborted");
      }
    }
  } catch (Throwable t) {
    LOG.error("Failed to start master", t);
    return -1;
  }
  return 0;
}
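// The local branch above relies on a LocalHMaster subclass to tear the ZooKeeper
// cluster down when the master exits. A sketch of what that subclass could look
// like, inferred from the setZKCluster(...) call; the field name, the
// shutdown-in-run() placement, and the constructor's checked exceptions are
// assumptions and vary across HBase versions.
public static class LocalHMaster extends HMaster {
  private MiniZooKeeperCluster zkCluster = null;

  public LocalHMaster(Configuration conf)
      throws IOException, KeeperException, InterruptedException {
    super(conf);
  }

  @Override
  public void run() {
    super.run();
    // Stop the embedded ZooKeeper cluster once the master thread is done.
    if (this.zkCluster != null) {
      try {
        this.zkCluster.shutdown();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }

  void setZKCluster(MiniZooKeeperCluster zkCluster) {
    this.zkCluster = zkCluster;
  }
}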
public void preTest(HiveConf conf) throws Exception {
  if (zooKeeperCluster == null) {
    // Start a shared mini ZK cluster once, in a fresh temp directory.
    String tmpBaseDir = System.getProperty("test.tmp.dir");
    File tmpDir = Utilities.createTempDir(tmpBaseDir);

    zooKeeperCluster = new MiniZooKeeperCluster();
    zkPort = zooKeeperCluster.startup(tmpDir);
  }

  // Replace any client handle left over from a previous test.
  if (zooKeeper != null) {
    zooKeeper.close();
  }

  int sessionTimeout = (int) conf.getTimeVar(
      HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, TimeUnit.MILLISECONDS);
  zooKeeper = new ZooKeeper("localhost:" + zkPort, sessionTimeout, new Watcher() {
    @Override
    public void process(WatchedEvent arg0) {
    }
  });

  // Point Hive at the mini cluster.
  String zkServer = "localhost";
  conf.set("hive.zookeeper.quorum", zkServer);
  conf.set("hive.zookeeper.client.port", "" + zkPort);
}
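// A test built on this fixture can sanity-check the handle before exercising
// Hive's ZooKeeper-backed features. A small hypothetical helper, assuming direct
// access to the zooKeeper field created above.
public void assertZooKeeperAlive() throws Exception {
  assertNotNull("Expected a live session to the mini ZK cluster",
      zooKeeper.exists("/", false));
}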
private void shutDownMiniClusters() throws Exception {
  int numClusters = utilities.length;
  // Shut the HBase clusters down in reverse start order before stopping
  // the shared ZooKeeper cluster they all point at.
  for (int i = numClusters - 1; i >= 0; i--) {
    if (utilities[i] != null) {
      utilities[i].shutdownMiniCluster();
    }
  }
  if (miniZK != null) {
    miniZK.shutdown();
  }
}
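// For reference, the setup this teardown mirrors would start one shared
// MiniZooKeeperCluster and point every HBaseTestingUtility at it. A sketch
// assuming the utilities and miniZK fields used above; the method name
// startMiniClusters is hypothetical.
private void startMiniClusters(int numClusters) throws Exception {
  utilities = new HBaseTestingUtility[numClusters];
  for (int i = 0; i < numClusters; i++) {
    utilities[i] = new HBaseTestingUtility();
    if (i == 0) {
      // The first utility owns the shared ZooKeeper mini cluster.
      miniZK = utilities[0].startMiniZKCluster();
    } else {
      // The others reuse it so every cluster shares one quorum.
      utilities[i].setZkCluster(miniZK);
    }
    utilities[i].startMiniCluster();
  }
}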
public void tearDown() throws Exception {
  if (zooKeeperCluster != null) {
    zooKeeperCluster.shutdown();
    zooKeeperCluster = null;
  }
}
@Test
public void testHostRank() throws Exception {
  if (System.getProperty("prop.mapred.job.tracker") != null) {
    if (LOG.isInfoEnabled()) {
      LOG.info("testHostRank: skipping; this test only runs in local mode.");
    }
    return;
  }
  File jarTest = new File(System.getProperty("prop.jarLocation"));
  if (!jarTest.exists()) {
    fail("Could not find Giraph jar at location specified by 'prop.jarLocation'. "
        + "Make sure you built the main Giraph artifact.");
  }

  MiniHBaseCluster cluster = null;
  MiniZooKeeperCluster zkCluster = null;
  FileSystem fs = null;

  try {
    // Using the restart method allows us to avoid having the HBase
    // root directory overwritten by /home/$username.
    zkCluster = testUtil.startMiniZKCluster();
    testUtil.restartHBaseCluster(2);
    cluster = testUtil.getMiniHBaseCluster();

    final byte[] OL_BYTES = Bytes.toBytes("ol");
    final byte[] S_BYTES = Bytes.toBytes("s");
    final byte[] METADATA_BYTES = Bytes.toBytes("mtdt");
    final byte[] HR_BYTES = Bytes.toBytes("_hr_");
    final byte[] TAB = Bytes.toBytes(TABLE_NAME);

    Configuration conf = cluster.getConfiguration();
    HTableDescriptor desc = new HTableDescriptor(TAB);
    desc.addFamily(new HColumnDescriptor(OL_BYTES));
    desc.addFamily(new HColumnDescriptor(S_BYTES));
    desc.addFamily(new HColumnDescriptor(METADATA_BYTES));

    // Drop any leftover table from a previous run before recreating it.
    HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
    if (hbaseAdmin.isTableAvailable(TABLE_NAME)) {
      hbaseAdmin.disableTable(TABLE_NAME);
      hbaseAdmin.deleteTable(TABLE_NAME);
    }
    hbaseAdmin.createTable(desc);

    /*
     * Enter the initial data: edges (a,b), (b,c), (a,c)
     *   a = 1.0 - google
     *   b = 1.0 - yahoo
     *   c = 1.0 - bing
     */
    HTable table = new HTable(conf, TABLE_NAME);

    Put p1 = new Put(Bytes.toBytes("com.google.www"));
    p1.add(OL_BYTES, Bytes.toBytes("www.yahoo.com"), Bytes.toBytes("ab"));

    Put p2 = new Put(Bytes.toBytes("com.google.www"));
    p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("ac"));
    p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("invalid1"));
    p2.add(OL_BYTES, Bytes.toBytes("www.google.com"), Bytes.toBytes("invalid2"));

    Put p3 = new Put(Bytes.toBytes("com.yahoo.www"));
    p3.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("bc"));
    // p3.add(OL_BYTES, Bytes.toBytes(""), Bytes.toBytes("invalid4"));

    Put p4 = new Put(Bytes.toBytes("com.bing.www"));
    // TODO: Handle the case below; use the Apache commons-validator isValid method.
    p4.add(OL_BYTES, Bytes.toBytes("http://invalidurl"), Bytes.toBytes("invalid5"));
    p4.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

    Put p5 = new Put(Bytes.toBytes("dummy"));
    p5.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

    table.put(p1);
    table.put(p2);
    table.put(p3);
    table.put(p4);
    table.put(p5);

    // Set Giraph configuration: operate over HBase using the vertex I/O formats.
    conf.set(TableInputFormat.INPUT_TABLE, TABLE_NAME);
    conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE_NAME);

    // Start the Giraph job.
    GiraphJob giraphJob = new GiraphJob(conf, BspCase.getCallingMethodName());
    GiraphConfiguration giraphConf = giraphJob.getConfiguration();
    giraphConf.setZooKeeperConfiguration(cluster.getMaster().getZooKeeper().getQuorum());
    setupConfiguration(giraphJob);
    giraphConf.setComputationClass(LinkRankComputation.class);
    giraphConf.setMasterComputeClass(LinkRankVertexMasterCompute.class);
    giraphConf.setOutEdgesClass(ByteArrayEdges.class);
    giraphConf.setVertexInputFormatClass(Nutch2HostInputFormat.class);
    giraphConf.setVertexOutputFormatClass(Nutch2HostOutputFormat.class);
    giraphConf.setInt("giraph.linkRank.superstepCount", 10);
    giraphConf.setInt("giraph.linkRank.scale", 10);
    giraphConf.set("giraph.linkRank.family", "mtdt");
    giraphConf.set("giraph.linkRank.qualifier", "_hr_");
    giraphConf.setVertexInputFilterClass(HostRankVertexFilter.class);
    assertTrue(giraphJob.run(true));

    if (LOG.isInfoEnabled()) {
      LOG.info("Giraph job successful. Checking output qualifier.");
    }

    // Check the results against the expected scores.
    HashMap<String, Double> expectedValues = new HashMap<String, Double>();
    expectedValues.put("com.google.www", 1.3515060339386287d);
    expectedValues.put("com.yahoo.www", 4.144902009567587d);
    expectedValues.put("com.bing.www", 9.063893290511482d);

    for (String key : expectedValues.keySet()) {
      Result result = table.get(new Get(Bytes.toBytes(key)));
      byte[] calculatedScoreByte = result.getValue(METADATA_BYTES, HR_BYTES);
      assertNotNull(calculatedScoreByte);
      assertTrue(calculatedScoreByte.length > 0);
      Assert.assertEquals("Scores are not the same",
          expectedValues.get(key), Bytes.toDouble(calculatedScoreByte), DELTA);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (zkCluster != null) {
      zkCluster.shutdown();
    }
    // Clean test files (recursive delete).
    if (fs != null) {
      fs.delete(hbaseRootdir, true);
    }
  }
}
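// Note: HBaseAdmin, HTable, and Put.add(...) above are the pre-1.0 HBase client
// API. On HBase 1.0+ the same table setup would go through a Connection; a sketch
// under that assumption (Put.add becomes Put.addColumn on the newer API).
Connection connection = ConnectionFactory.createConnection(conf);
Admin admin = connection.getAdmin();
TableName tableName = TableName.valueOf(TABLE_NAME);
if (admin.tableExists(tableName)) {
  admin.disableTable(tableName);
  admin.deleteTable(tableName);
}
Table table = connection.getTable(tableName);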
@AfterClass
public static void tearDownAfterClass() throws Exception {
  if (ZK_CLUSTER != null) {
    ZK_CLUSTER.shutdown();
  }
}