/**
 * Assert that getSplitEditFilesSorted returns files in expected order and that it skips
 * moved-aside files.
 *
 * @throws IOException
 */
@Test
public void testGetSplitEditFilesSorted() throws IOException {
  FileSystem fs = FileSystem.get(util.getConfiguration());
  Path regiondir = util.getDataTestDir("regiondir");
  fs.delete(regiondir, true);
  fs.mkdirs(regiondir);
  Path recoverededits = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
  String first = WALSplitter.formatRecoveredEditsFileName(-1);
  createFile(fs, recoverededits, first);
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(0));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(1));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(11));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(2));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(50));
  String last = WALSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
  createFile(fs, recoverededits, last);
  // A moved-aside file: a recovered-edits name with a timestamp suffix. It must be skipped.
  createFile(fs, recoverededits,
    Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());
  final Configuration walConf = new Configuration(util.getConfiguration());
  FSUtils.setRootDir(walConf, regiondir);
  (new WALFactory(walConf, null, "dummyLogName")).getWAL(new byte[] {}, null);
  NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
  // Eight files were created; the moved-aside file is excluded from the sorted set.
  assertEquals(7, files.size());
  assertEquals(files.pollFirst().getName(), first);
  assertEquals(files.pollLast().getName(), last);
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(0));
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(1));
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(2));
  assertEquals(files.pollFirst().getName(), WALSplitter.formatRecoveredEditsFileName(11));
}
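/*
 * A minimal sketch of the createFile helper the test above assumes; the actual
 * helper is not shown in this section and may differ. It simply creates an
 * empty file with the given name under the given directory.
 */
private void createFile(final FileSystem fs, final Path testdir, final String name)
    throws IOException {
  FSDataOutputStream fdos = fs.create(new Path(testdir, name), true);
  fdos.close();
}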
@Test
public void testNoSuchTable() throws IOException {
  final String name = "testNoSuchTable";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up old tests if any detritus is lying around.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  assertNull("There shouldn't be any HTD for this table", htds.get("NoSuchTable"));
}
@Test
public void testReadingHTDFromFS() throws IOException {
  final String name = "testReadingHTDFromFS";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  HTableDescriptor htd = new HTableDescriptor(name);
  Path rootdir = UTIL.getDataTestDir(name);
  createHTDInFS(fs, rootdir, htd);
  HTableDescriptor htd2 =
    FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
  assertTrue(htd.equals(htd2));
}
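/*
 * A minimal sketch of the createHTDInFS helper assumed by the tests in this
 * section; the real helper is not shown here and may differ. It persists the
 * table descriptor under rootdir via the FSTableDescriptors write path.
 */
private void createHTDInFS(final FileSystem fs, final Path rootdir, final HTableDescriptor htd)
    throws IOException {
  FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
}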
@Test
public void testUpdates() throws IOException {
  final String name = "testUpdates";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up old tests if any detritus is lying around.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(name);
  htds.add(htd);
  htds.add(htd);
  htds.add(htd);
}
@Test
public void testReadingArchiveDirectoryFromFS() throws IOException {
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  try {
    new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
      .get(HConstants.HFILE_ARCHIVE_DIRECTORY);
    fail("Shouldn't be able to read a table descriptor for the archive directory.");
  } catch (IOException e) {
    LOG.debug("Correctly got error when reading a table descriptor from the archive directory: "
      + e.getMessage());
  }
}
@Test
public void testHTableDescriptors() throws IOException, InterruptedException {
  final String name = "testHTableDescriptors";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Clean up old tests if any debris is lying around.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  final int count = 10;
  // Write out table infos.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    createHTDInFS(fs, rootdir, htd);
  }
  FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
    @Override
    public HTableDescriptor get(byte[] tablename)
        throws TableExistsException, FileNotFoundException, IOException {
      LOG.info(Bytes.toString(tablename) + ", cachehits=" + this.cachehits);
      return super.get(tablename);
    }
  };
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  // Update the table infos.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    htd.addFamily(new HColumnDescriptor("" + i));
    FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
  }
  // Wait a while so the mod time we write is for sure different.
  Thread.sleep(100);
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  assertEquals(count * 4, htds.invocations);
  assertTrue("expected=" + (count * 2) + ", actual=" + htds.cachehits,
    htds.cachehits >= (count * 2));
  assertTrue(htds.get(HConstants.ROOT_TABLE_NAME) != null);
  assertEquals(htds.invocations, count * 4 + 1);
  assertTrue("expected=" + ((count * 2) + 1) + ", actual=" + htds.cachehits,
    htds.cachehits >= ((count * 2) + 1));
}
@Test
public void testSequenceidAdvancesOnTableInfo() throws IOException {
  Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
  HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
  int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
  Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
  // Assert we cleaned up the old file.
  assertFalse(fs.exists(p0));
  int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
  assertEquals(i0 + 1, i1);
  Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
  // Assert we cleaned up the old file.
  assertFalse(fs.exists(p1));
  int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
  assertEquals(i1 + 1, i2);
}
@Test
public void testCreateAndUpdate() throws IOException {
  Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
  HTableDescriptor htd = new HTableDescriptor("testCreate");
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
  assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
  FileStatus[] statuses = fs.listStatus(testdir);
  assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
  for (int i = 0; i < 10; i++) {
    FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
  }
  statuses = fs.listStatus(testdir);
  assertTrue(statuses.length == 1);
  Path tmpTableDir = new Path(FSUtils.getTablePath(testdir, htd.getName()), ".tmp");
  statuses = fs.listStatus(tmpTableDir);
  assertTrue(statuses.length == 0);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(conf);
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(
    CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(CFA));
  htd.addFamily(new HColumnDescriptor(CFB));
  admin.createTable(htd);
  expectedRows1 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_1, 1.0);
  expectedRows2 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_2, 0.5);
}
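/*
 * A minimal sketch of the insertData helper used above, assuming TABLE is a
 * TableName and COLUMN_1/COLUMN_2 are "family:qualifier" strings. The sketch
 * writes rows containing the given column with the given probability and
 * returns the number of rows written; the real helper is not shown in this
 * section and its signature and row layout may differ.
 */
static int insertData(Configuration conf, TableName tableName, String column, double prob)
    throws IOException {
  Random rng = new Random();
  int count = 0;
  HTable table = new HTable(conf, tableName);
  // Split "family:qualifier" into its two parts.
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
  for (int i = 0; i < 100; i++) {
    if (rng.nextDouble() > prob) {
      continue;
    }
    Put put = new Put(Bytes.toBytes("row" + i));
    put.add(famAndQf[0], famAndQf[1], Bytes.toBytes("value" + i));
    table.put(put);
    count++;
  }
  table.flushCommits();
  table.close();
  return count;
}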
@Test
public void testHostRank() throws Exception {
  if (System.getProperty("prop.mapred.job.tracker") != null) {
    if (LOG.isInfoEnabled()) {
      LOG.info("testHBaseInputOutput: Ignore this test if not local mode.");
    }
    return;
  }
  File jarTest = new File(System.getProperty("prop.jarLocation"));
  if (!jarTest.exists()) {
    fail("Could not find Giraph jar at location specified by 'prop.jarLocation'. "
      + "Make sure you built the main Giraph artifact.");
  }
  MiniHBaseCluster cluster = null;
  MiniZooKeeperCluster zkCluster = null;
  FileSystem fs = null;
  try {
    // Using the restart method allows us to avoid having the hbase
    // root directory overwritten by /home/$username.
    zkCluster = testUtil.startMiniZKCluster();
    testUtil.restartHBaseCluster(2);
    cluster = testUtil.getMiniHBaseCluster();

    final byte[] OL_BYTES = Bytes.toBytes("ol");
    final byte[] S_BYTES = Bytes.toBytes("s");
    final byte[] METADATA_BYTES = Bytes.toBytes("mtdt");
    final byte[] HR_BYTES = Bytes.toBytes("_hr_");
    final byte[] TAB = Bytes.toBytes(TABLE_NAME);

    Configuration conf = cluster.getConfiguration();
    HTableDescriptor desc = new HTableDescriptor(TAB);
    desc.addFamily(new HColumnDescriptor(OL_BYTES));
    desc.addFamily(new HColumnDescriptor(S_BYTES));
    desc.addFamily(new HColumnDescriptor(METADATA_BYTES));
    HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
    if (hbaseAdmin.isTableAvailable(TABLE_NAME)) {
      hbaseAdmin.disableTable(TABLE_NAME);
      hbaseAdmin.deleteTable(TABLE_NAME);
    }
    hbaseAdmin.createTable(desc);

    // Enter the initial data: edges (a,b), (b,c), (a,c), with
    // a = 1.0 (google), b = 1.0 (yahoo), c = 1.0 (bing).
    HTable table = new HTable(conf, TABLE_NAME);

    Put p1 = new Put(Bytes.toBytes("com.google.www"));
    p1.add(OL_BYTES, Bytes.toBytes("www.yahoo.com"), Bytes.toBytes("ab"));

    Put p2 = new Put(Bytes.toBytes("com.google.www"));
    p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("ac"));
    p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("invalid1"));
    p2.add(OL_BYTES, Bytes.toBytes("www.google.com"), Bytes.toBytes("invalid2"));

    Put p3 = new Put(Bytes.toBytes("com.yahoo.www"));
    p3.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("bc"));
    // p3.add(OL_BYTES, Bytes.toBytes(""), Bytes.toBytes("invalid4"));

    Put p4 = new Put(Bytes.toBytes("com.bing.www"));
    // TODO: Handle the case below; use the Apache isValid method.
    p4.add(OL_BYTES, Bytes.toBytes("http://invalidurl"), Bytes.toBytes("invalid5"));
    p4.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

    Put p5 = new Put(Bytes.toBytes("dummy"));
    p5.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

    table.put(p1);
    table.put(p2);
    table.put(p3);
    table.put(p4);
    table.put(p5);

    // Set the Giraph configuration to operate over HBase using the vertex I/O formats.
    conf.set(TableInputFormat.INPUT_TABLE, TABLE_NAME);
    conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE_NAME);

    // Start the Giraph job.
    GiraphJob giraphJob = new GiraphJob(conf, BspCase.getCallingMethodName());
    GiraphConfiguration giraphConf = giraphJob.getConfiguration();
    giraphConf.setZooKeeperConfiguration(cluster.getMaster().getZooKeeper().getQuorum());
    setupConfiguration(giraphJob);
    giraphConf.setComputationClass(LinkRankComputation.class);
    giraphConf.setMasterComputeClass(LinkRankVertexMasterCompute.class);
    giraphConf.setOutEdgesClass(ByteArrayEdges.class);
    giraphConf.setVertexInputFormatClass(Nutch2HostInputFormat.class);
    giraphConf.setVertexOutputFormatClass(Nutch2HostOutputFormat.class);
    giraphConf.setInt("giraph.linkRank.superstepCount", 10);
    giraphConf.setInt("giraph.linkRank.scale", 10);
    giraphConf.set("giraph.linkRank.family", "mtdt");
    giraphConf.set("giraph.linkRank.qualifier", "_hr_");
    giraphConf.setVertexInputFilterClass(HostRankVertexFilter.class);
    assertTrue(giraphJob.run(true));

    if (LOG.isInfoEnabled()) {
      LOG.info("Giraph job successful. Checking output qualifier.");
    }

    // Check the results.
    Map<String, Double> expectedValues = new HashMap<String, Double>();
    expectedValues.put("com.google.www", 1.3515060339386287d);
    expectedValues.put("com.yahoo.www", 4.144902009567587d);
    expectedValues.put("com.bing.www", 9.063893290511482d);
    for (Map.Entry<String, Double> entry : expectedValues.entrySet()) {
      String key = entry.getKey();
      Result result = table.get(new Get(key.getBytes()));
      byte[] calculatedScoreByte = result.getValue(METADATA_BYTES, HR_BYTES);
      assertNotNull(calculatedScoreByte);
      assertTrue(calculatedScoreByte.length > 0);
      Assert.assertEquals("Scores are not the same",
        entry.getValue(), Bytes.toDouble(calculatedScoreByte), DELTA);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (zkCluster != null) {
      zkCluster.shutdown();
    }
    // Clean test files.
    if (fs != null) {
      fs.delete(hbaseRootdir, true);
    }
  }
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
  REST_TEST_UTIL.shutdownServletContainer();
  TEST_UTIL.shutdownMiniCluster();
}