/** @throws java.lang.Exception */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  admin = new ReplicationAdmin(conf);
}
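// A matching class-level teardown is assumed but not shown; a minimal sketch,
// assuming `admin` and `TEST_UTIL` are the class fields used in setUpBeforeClass():
@AfterClass
public static void tearDownAfterClass() throws Exception {
  if (admin != null) {
    admin.close(); // release the ReplicationAdmin's ZK connection
  }
  TEST_UTIL.shutdownMiniZKCluster(); // stop the mini ZK cluster started in setup
}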
@Before
public void setup() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  conf = TEST_UTIL.getConfiguration();
  // Use a different ZK wrapper instance for each test.
  zkw = new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);
  LOG.debug(zkw.baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.splitLogZNode) != -1);
  LOG.debug(zkw.splitLogZNode + " created");
  stopped = false;
  resetCounters();
  // By default, let the test manage the error as before, so the server does not
  // appear dead from the master's point of view, only from the split-log one.
  Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
  Mockito.when(master.getServerManager()).thenReturn(sm);
  to = 4000;
  conf.setInt("hbase.splitlog.manager.timeout", to);
  conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
  conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
  to = to + 4 * 100;
}
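// A per-test teardown is assumed to mirror the setup above; a minimal sketch,
// assuming `zkw`, `stopped`, and `TEST_UTIL` are the fields initialized in setup():
@After
public void teardown() throws Exception {
  stopped = true;
  if (zkw != null) {
    zkw.close(); // close the per-test ZooKeeperWatcher
  }
  TEST_UTIL.shutdownMiniZKCluster(); // stop the per-test mini ZK cluster
}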
@Before
public void before() throws Exception {
  testingUtility = new HBaseTestingUtility();
  testingUtility.startMiniZKCluster();
  testingUtility.startMiniCluster(1);
  hTable = testingUtility.createTable(TABLE, CF);
}
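// A matching per-test teardown is assumed; a minimal sketch, assuming `hTable`
// and `testingUtility` are the fields set in before():
@After
public void after() throws Exception {
  if (hTable != null) {
    hTable.close();
  }
  // shutdownMiniCluster() also stops the mini ZK cluster this utility started.
  testingUtility.shutdownMiniCluster();
}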
@Test
public void testRPCException() throws Exception {
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.MASTER_PORT, "0");
  HMaster hm = new HMaster(conf);
  HServerAddress hma = hm.getMasterAddress();
  try {
    HMasterInterface inf =
        (HMasterInterface)
            HBaseRPC.getProxy(
                HMasterInterface.class,
                HBaseRPCProtocolVersion.versionID,
                hma.getInetSocketAddress(),
                conf,
                100);
    inf.isMasterRunning();
    fail();
  } catch (RemoteException ex) {
    assertTrue(
        ex.getMessage()
            .startsWith(
                "org.apache.hadoop.hbase.ipc.ServerNotRunningException: Server is not running yet"));
  } catch (Throwable t) {
    fail("Unexpected throwable: " + t);
  }
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start up our mini cluster on top of an 0.92 root.dir that has data from
  // a 0.92 hbase run -- it has a table with 100 rows in it -- and see if
  // we can migrate from 0.92.
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
  // Untar our test dir.
  File untar = untar(new File(testdir.toString()));
  // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
  Configuration conf = TEST_UTIL.getConfiguration();
  FsShell shell = new FsShell(conf);
  FileSystem fs = FileSystem.get(conf);
  // Find where hbase will root itself, so we can copy the filesystem there.
  Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
  if (!fs.isDirectory(hbaseRootDir.getParent())) {
    // mkdir first
    fs.mkdirs(hbaseRootDir.getParent());
  }
  doFsCommand(shell, new String[] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
  // Windows fix: the tgz file has the .META. directory renamed to -META-, since
  // the original is an illegal name under Windows, so we rename it back. See
  // src/test/data//TestMetaMigrationConvertingToPB.README and
  // https://issues.apache.org/jira/browse/HBASE-6821
  doFsCommand(
      shell,
      new String[] {
        "-mv",
        new Path(hbaseRootDir, "-META-").toString(),
        new Path(hbaseRootDir, ".META.").toString()
      });
  // See what's in minihdfs.
  doFsCommand(shell, new String[] {"-lsr", "/"});
  TEST_UTIL.startMiniHBaseCluster(1, 1);
  // Assert we are running against the copied-up filesystem. The copied-up
  // rootdir should have had a table named 'TestTable' in it. Assert it is
  // present.
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
  ResultScanner scanner = t.getScanner(new Scan());
  int count = 0;
  while (scanner.next() != null) {
    count++;
  }
  // Assert that we find all 100 rows that are in the data we loaded. If so,
  // the migration must have succeeded.
  Assert.assertEquals(ROW_COUNT, count);
  scanner.close();
  t.close();
}
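// A class-level teardown is assumed to release the stacked mini clusters; a
// minimal sketch (HBaseTestingUtility.shutdownMiniCluster() stops the HBase,
// DFS, and ZK mini clusters that the setup above started individually):
@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniCluster();
}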
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  conf = TEST_UTIL.getConfiguration();
  zkw = new ZooKeeperWatcher(conf, "split-log-manager-tests", null);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);
  LOG.debug(zkw.baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.splitLogZNode);
  assertTrue(ZKUtil.checkExists(zkw, zkw.splitLogZNode) != -1);
  LOG.debug(zkw.splitLogZNode + " created");
  stopped = false;
  resetCounters();
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1 = HBaseConfiguration.create();
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  // Smaller block size and capacity to trigger more operations and test them.
  conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
  conf1.setInt("replication.source.size.capacity", 1024);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setStrings(
      CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  new ZooKeeperWatcher(conf1, "cluster1", null, true);

  conf2 = new Configuration(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");

  conf3 = new Configuration(conf1);
  conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);
  new ZooKeeperWatcher(conf2, "cluster2", null, true); // was mislabeled "cluster3"

  utility3 = new HBaseTestingUtility(conf3);
  utility3.setZkCluster(miniZK);
  new ZooKeeperWatcher(conf3, "cluster3", null, true);

  table = new HTableDescriptor(tableName);
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  fam = new HColumnDescriptor(noRepfamName);
  table.addFamily(fam);
}
@SuppressWarnings("resource") private void startMiniClusters(int numClusters) throws Exception { Random random = new Random(); utilities = new HBaseTestingUtility[numClusters]; configurations = new Configuration[numClusters]; for (int i = 0; i < numClusters; i++) { Configuration conf = new Configuration(baseConfiguration); conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + i + random.nextInt()); HBaseTestingUtility utility = new HBaseTestingUtility(conf); if (i == 0) { utility.startMiniZKCluster(); miniZK = utility.getZkCluster(); } else { utility.setZkCluster(miniZK); } utility.startMiniCluster(); utilities[i] = utility; configurations[i] = conf; new ZooKeeperWatcher(conf, "cluster" + i, null, true); } }
@Test
public void testRPCException() throws Exception {
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.MASTER_PORT, "0");
  HMaster hm = new HMaster(conf);
  ServerName sm = hm.getServerName();
  InetSocketAddress isa = new InetSocketAddress(sm.getHostname(), sm.getPort());
  int i = 0;
  // Retry the RPC a few times; we have seen SocketTimeoutExceptions if we
  // try to connect too soon.
  while (i < 20) {
    try {
      MasterMonitorProtocol inf =
          (MasterMonitorProtocol)
              HBaseClientRPC.getProxy(MasterMonitorProtocol.class, isa, conf, 100 * 10);
      inf.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance());
      fail();
    } catch (ServiceException ex) {
      IOException ie = ProtobufUtil.getRemoteException(ex);
      if (!(ie instanceof SocketTimeoutException)) {
        if (ie.getMessage()
            .startsWith(
                "org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet")) {
          return; // got the expected exception; test passes
        }
      } else {
        System.err.println("Got SocketTimeoutException. Will retry.");
      }
    } catch (Throwable t) {
      fail("Unexpected throwable: " + t);
    }
    Thread.sleep(100);
    i++;
  }
  fail();
}
@Before
public void setUp() throws Exception {
  File tempDir = Files.createTempDir();
  tempDir.deleteOnExit();
  htu = HBaseTestingUtility.createLocalHTU();
  try {
    htu.cleanupTestDir();
    htu.startMiniZKCluster();
    htu.startMiniHBaseCluster(1, 1);
    try {
      htu.deleteTable(Bytes.toBytes(tableName));
    } catch (Exception e) {
      Log.info(" - no table " + tableName + " found");
    }
    htu.createTable(Bytes.toBytes(tableName), colFam);
    dao =
        new Hbase1OffsetStore.Builder()
            .setHbaseConfiguration(htu.getConfiguration())
            .setOffsetTable(tableName)
            .build();
  } catch (Exception e1) {
    throw new RuntimeException(e1);
  }
  KOM =
      new KafkaOffsetManager.Builder()
          .setOffsetManager(dao)
          .setKafkaBrokerList("localhost:" + kafkaRule.kafkaBrokerPort())
          .setGroupID(testGroupID)
          .setTopic(testTopicName)
          .build();
}
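// A matching per-test teardown is assumed; a minimal sketch, assuming `htu` is
// the field initialized in setUp():
@After
public void tearDown() throws Exception {
  htu.shutdownMiniHBaseCluster();
  htu.shutdownMiniZKCluster();
  htu.cleanupTestDir();
}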
/** @throws java.lang.Exception */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  // Smaller log roll size to trigger more events.
  conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to re-get conf1 in case the zk cluster location is different from the default.
  conf1 = utility1.getConfiguration();
  zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
  admin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);
  zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);

  admin.addPeer("2", utility2.getClusterKey());
  LOG.info("Setup second Zk");

  CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  fam = new HColumnDescriptor(noRepfamName);
  table.addFamily(fam);

  HBaseAdmin admin1 = new HBaseAdmin(conf1);
  HBaseAdmin admin2 = new HBaseAdmin(conf2);
  admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  htable1 = new HTable(conf1, tableName);
  htable1.setWriteBufferSize(1024);
  htable2 = new HTable(conf2, tableName);
}
@BeforeClass
public static void beforeClass() throws Exception {
  // Set this down so tests run quicker.
  UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
  UTIL.startMiniZKCluster();
}
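// A matching class-level teardown is assumed; a minimal sketch using the same
// `UTIL` field:
@AfterClass
public static void afterClass() throws Exception {
  UTIL.shutdownMiniZKCluster();
}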
@Test
public void testReducerNumEstimation() throws Exception {
  // Skip this test for Tez: Tez uses a different mechanism.
  // The equivalent test is in TestTezAutoParallelism.
  Assume.assumeTrue("Skip this test for TEZ", Util.isMapredExecType(cluster.getExecType()));
  // Use the estimation.
  Configuration conf = HBaseConfiguration.create(new Configuration());
  HBaseTestingUtility util = new HBaseTestingUtility(conf);
  int clientPort = util.startMiniZKCluster().getClientPort();
  util.startMiniHBaseCluster(1, 1);

  String query = "a = load '/passwd';" + "b = group a by $0;" + "store b into 'output';";
  PigServer ps = new PigServer(cluster.getExecType(), cluster.getProperties());
  PhysicalPlan pp = Util.buildPp(ps, query);
  MROperPlan mrPlan = Util.buildMRPlan(pp, pc);
  pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
  pc.getConf().setProperty("pig.exec.reducers.max", "10");
  pc.getConf().setProperty(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
  ConfigurationValidator.validatePigProperties(pc.getProperties());
  conf = ConfigurationUtil.toConfiguration(pc.getProperties());
  JobControlCompiler jcc = new JobControlCompiler(pc, conf);
  JobControl jc = jcc.compile(mrPlan, "Test");
  Job job = jc.getWaitingJobs().get(0);
  long reducer =
      Math.min(
          (long) Math.ceil(new File("test/org/apache/pig/test/data/passwd").length() / 100.0), 10);
  Util.assertParallelValues(-1, -1, reducer, reducer, job.getJobConf());

  // Use the PARALLEL keyword; it overrides the estimated reducer number.
  query = "a = load '/passwd';" + "b = group a by $0 PARALLEL 2;" + "store b into 'output';";
  pp = Util.buildPp(ps, query);
  mrPlan = Util.buildMRPlan(pp, pc);
  pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
  pc.getConf().setProperty("pig.exec.reducers.max", "10");
  ConfigurationValidator.validatePigProperties(pc.getProperties());
  conf = ConfigurationUtil.toConfiguration(pc.getProperties());
  jcc = new JobControlCompiler(pc, conf);
  jc = jcc.compile(mrPlan, "Test");
  job = jc.getWaitingJobs().get(0);
  Util.assertParallelValues(-1, 2, -1, 2, job.getJobConf());

  final byte[] COLUMNFAMILY = Bytes.toBytes("pig");
  util.createTable(Bytes.toBytesBinary("test_table"), COLUMNFAMILY);

  // The estimation does not take effect for non-DFS inputs, or when the input
  // files do not exist, as is the case with hbase.
  query =
      "a = load 'hbase://test_table' using org.apache.pig.backend.hadoop.hbase.HBaseStorage('c:f1 c:f2');"
          + "b = group a by $0 ;"
          + "store b into 'output';";
  pp = Util.buildPp(ps, query);
  mrPlan = Util.buildMRPlan(pp, pc);
  pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
  pc.getConf().setProperty("pig.exec.reducers.max", "10");
  ConfigurationValidator.validatePigProperties(pc.getProperties());
  conf = ConfigurationUtil.toConfiguration(pc.getProperties());
  jcc = new JobControlCompiler(pc, conf);
  jc = jcc.compile(mrPlan, "Test");
  job = jc.getWaitingJobs().get(0);
  Util.assertParallelValues(-1, -1, 1, 1, job.getJobConf());

  util.deleteTable(Bytes.toBytesBinary("test_table"));
  // In HBase 0.90.1 and above we can use util.shutdownMiniHBaseCluster() here instead.
  MiniHBaseCluster hbc = util.getHBaseCluster();
  if (hbc != null) {
    hbc.shutdown();
    hbc.join();
  }
  util.shutdownMiniZKCluster();
}
@Before
public void before() throws Exception {
  UTIL.startMiniZKCluster();
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf1.setStrings(
      CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSource.class.getName());

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to re-get conf1 in case the zk cluster location is different from the default.
  conf1 = utility1.getConfiguration();
  replicationAdmin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf2.setStrings(
      CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSink.class.getName());

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);
  replicationAdmin.addPeer("2", utility2.getClusterKey());
  LOG.info("Setup second Zk");

  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf2);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  htable1 = utility1.getConnection().getTable(TABLE_NAME);
  htable2 = utility2.getConnection().getTable(TABLE_NAME);
}
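// A class-level teardown is assumed to mirror this setup; a minimal sketch,
// assuming the fields initialized above (tables and admin closed before the
// clusters are brought down):
@AfterClass
public static void tearDownAfterClass() throws Exception {
  htable1.close();
  htable2.close();
  replicationAdmin.close();
  utility2.shutdownMiniCluster();
  utility1.shutdownMiniCluster();
}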
/** @throws java.lang.Exception */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniZKCluster();
}
@BeforeClass
public static void before() throws Exception {
  HTU.startMiniZKCluster();
}
@Test
public void testHostRank() throws Exception {
  if (System.getProperty("prop.mapred.job.tracker") != null) {
    if (LOG.isInfoEnabled()) {
      LOG.info("testHBaseInputOutput: Ignore this test if not local mode.");
    }
    return;
  }
  File jarTest = new File(System.getProperty("prop.jarLocation"));
  if (!jarTest.exists()) {
    fail(
        "Could not find Giraph jar at "
            + "location specified by 'prop.jarLocation'. "
            + "Make sure you built the main Giraph artifact.");
  }
  MiniHBaseCluster cluster = null;
  MiniZooKeeperCluster zkCluster = null;
  FileSystem fs = null;
  try {
    // Using the restart method allows us to avoid having the hbase
    // root directory overwritten by /home/$username.
    zkCluster = testUtil.startMiniZKCluster();
    testUtil.restartHBaseCluster(2);
    cluster = testUtil.getMiniHBaseCluster();

    final byte[] OL_BYTES = Bytes.toBytes("ol");
    final byte[] S_BYTES = Bytes.toBytes("s");
    final byte[] METADATA_BYTES = Bytes.toBytes("mtdt");
    final byte[] HR_BYTES = Bytes.toBytes("_hr_");
    final byte[] TAB = Bytes.toBytes(TABLE_NAME);

    Configuration conf = cluster.getConfiguration();
    HTableDescriptor desc = new HTableDescriptor(TAB);
    desc.addFamily(new HColumnDescriptor(OL_BYTES));
    desc.addFamily(new HColumnDescriptor(S_BYTES));
    desc.addFamily(new HColumnDescriptor(METADATA_BYTES));
    HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
    if (hbaseAdmin.isTableAvailable(TABLE_NAME)) {
      hbaseAdmin.disableTable(TABLE_NAME);
      hbaseAdmin.deleteTable(TABLE_NAME);
    }
    hbaseAdmin.createTable(desc);

    // Enter the initial data: edges (a,b), (b,c), (a,c) with
    // a = 1.0 (google), b = 1.0 (yahoo), c = 1.0 (bing).
    HTable table = new HTable(conf, TABLE_NAME);

    Put p1 = new Put(Bytes.toBytes("com.google.www"));
    p1.add(OL_BYTES, Bytes.toBytes("www.yahoo.com"), Bytes.toBytes("ab"));

    Put p2 = new Put(Bytes.toBytes("com.google.www"));
    p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("ac"));
    p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("invalid1"));
    p2.add(OL_BYTES, Bytes.toBytes("www.google.com"), Bytes.toBytes("invalid2"));

    Put p3 = new Put(Bytes.toBytes("com.yahoo.www"));
    p3.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("bc"));
    // p3.add(OL_BYTES, Bytes.toBytes(""), Bytes.toBytes("invalid4"));

    Put p4 = new Put(Bytes.toBytes("com.bing.www"));
    // TODO: Handle the case below; use the Apache commons isValid method.
    p4.add(OL_BYTES, Bytes.toBytes("http://invalidurl"), Bytes.toBytes("invalid5"));
    p4.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

    Put p5 = new Put(Bytes.toBytes("dummy"));
    p5.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

    table.put(p1);
    table.put(p2);
    table.put(p3);
    table.put(p4);
    table.put(p5);

    // Set Giraph configuration: operate over HBase using vertex I/O formats.
    conf.set(TableInputFormat.INPUT_TABLE, TABLE_NAME);
    conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE_NAME);

    // Start the giraph job.
    GiraphJob giraphJob = new GiraphJob(conf, BspCase.getCallingMethodName());
    GiraphConfiguration giraphConf = giraphJob.getConfiguration();
    giraphConf.setZooKeeperConfiguration(cluster.getMaster().getZooKeeper().getQuorum());
    setupConfiguration(giraphJob);
    giraphConf.setComputationClass(LinkRankComputation.class);
    giraphConf.setMasterComputeClass(LinkRankVertexMasterCompute.class);
    giraphConf.setOutEdgesClass(ByteArrayEdges.class);
    giraphConf.setVertexInputFormatClass(Nutch2HostInputFormat.class);
    giraphConf.setVertexOutputFormatClass(Nutch2HostOutputFormat.class);
    giraphConf.setInt("giraph.linkRank.superstepCount", 10);
    giraphConf.setInt("giraph.linkRank.scale", 10);
    giraphConf.set("giraph.linkRank.family", "mtdt");
    giraphConf.set("giraph.linkRank.qualifier", "_hr_");
    giraphConf.setVertexInputFilterClass(HostRankVertexFilter.class);
    assertTrue(giraphJob.run(true));

    if (LOG.isInfoEnabled()) {
      LOG.info("Giraph job successful. Checking output qualifier.");
    }

    // Check the results.
    Result result;
    byte[] calculatedScoreByte;
    HashMap<String, Double> expectedValues = new HashMap<String, Double>();
    expectedValues.put("com.google.www", 1.3515060339386287d);
    expectedValues.put("com.yahoo.www", 4.144902009567587d);
    expectedValues.put("com.bing.www", 9.063893290511482d);

    for (String key : expectedValues.keySet()) {
      result = table.get(new Get(key.getBytes()));
      calculatedScoreByte = result.getValue(METADATA_BYTES, HR_BYTES);
      assertNotNull(calculatedScoreByte);
      assertTrue(calculatedScoreByte.length > 0);
      Assert.assertEquals(
          "Scores are not the same",
          expectedValues.get(key),
          Bytes.toDouble(calculatedScoreByte),
          DELTA);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (zkCluster != null) {
      zkCluster.shutdown();
    }
    // Clean test files.
    if (fs != null) {
      fs.delete(hbaseRootdir, true);
    }
  }
}