@Test
public void createTableTest() throws IOException, InterruptedException {
  String testName = "createTableTest";
  String nsName = prefix + "_" + testName;
  LOG.info(testName);

  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(nsName + ":my_table"));
  HColumnDescriptor colDesc = new HColumnDescriptor("my_cf");
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    fail("Expected no namespace exists exception");
  } catch (NamespaceNotFoundException ex) {
    // expected: the namespace does not exist yet
  }

  // create the table in a new namespace
  admin.createNamespace(NamespaceDescriptor.create(nsName).build());
  admin.createTable(desc);
  TEST_UTIL.waitTableAvailable(desc.getTableName().getName(), 10000);
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  assertTrue(fs.exists(
      new Path(master.getMasterFileSystem().getRootDir(),
          new Path(HConstants.BASE_NAMESPACE_DIR,
              new Path(nsName, desc.getTableName().getQualifierAsString())))));
  assertEquals(1, admin.listTables().length);

  // verify a non-empty namespace can't be removed
  try {
    admin.deleteNamespace(nsName);
    fail("Expected non-empty namespace constraint exception");
  } catch (Exception ex) {
    LOG.info("Caught expected exception: " + ex);
  }

  // sanity check: write to and read from the table
  Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
  Put p = new Put(Bytes.toBytes("row1"));
  p.addColumn(Bytes.toBytes("my_cf"), Bytes.toBytes("my_col"), Bytes.toBytes("value1"));
  table.put(p);
  // flush and read from disk to make sure directory changes are working
  admin.flush(desc.getTableName());
  Get g = new Get(Bytes.toBytes("row1"));
  assertTrue(table.exists(g));

  // normal case of removing a namespace: delete the table first
  TEST_UTIL.deleteTable(desc.getTableName());
  admin.deleteNamespace(nsName);
}
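/*
 * The fields used above (TEST_UTIL, admin, master, prefix, LOG) are not declared in this
 * excerpt. Below is a minimal sketch of the fixture they imply, assuming a standard
 * HBaseTestingUtility-backed mini-cluster; the class name, prefix value, and cluster size
 * are assumptions, not the original declarations.
 */
private static final Log LOG = LogFactory.getLog(TestNamespace.class);
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static HMaster master;
private static Admin admin;
private static String prefix = "TestNamespace";

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // start a single-node mini cluster and grab the handles the tests rely on
  TEST_UTIL.startMiniCluster(1);
  admin = TEST_UTIL.getHBaseAdmin();
  master = TEST_UTIL.getHBaseCluster().getMaster();
}

@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniCluster();
}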
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); generateRandomStartKeys(5); util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAME, FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir final Path storePath = new Path( FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), new Path( admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(), Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // put some data in it and flush to create a storefile Put p = new Put(Bytes.toBytes("test")); p.addColumn(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1")); table.put(p); admin.flush(TABLE_NAME); assertEquals(1, util.countRows(table)); quickPoll( new Callable<Boolean>() { @Override public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; } }, 5000); // Generate a bulk load file with more rows conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME); runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator); // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals( "LoadIncrementalHFiles should put expected data in table", expectedRows + 1, util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); // minor compactions shouldn't get rid of the file admin.compact(TABLE_NAME); try { quickPoll( new Callable<Boolean>() { @Override public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; } }, 5000); throw new IOException("SF# = " + fs.listStatus(storePath).length); } catch (AssertionError ae) { // this is expected behavior } // a major compaction should work though admin.majorCompact(TABLE_NAME); quickPoll( new Callable<Boolean>() { @Override public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; } }, 5000); } finally { util.shutdownMiniCluster(); } }
@Test
public void testRun() throws Exception {
  TableName tn = TableName.valueOf(tableName);
  byte[] mobValueBytes = new byte[100];

  // get the path where mob files lie in
  Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, family);

  Put put = new Put(Bytes.toBytes(row));
  put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes);
  Put put2 = new Put(Bytes.toBytes(row + "ignore"));
  put2.addColumn(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes);
  table.mutate(put);
  table.mutate(put2);
  table.flush();
  admin.flush(tn);

  FileStatus[] fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
  // check the generation of a mob file
  assertEquals(1, fileStatuses.length);
  String mobFile1 = fileStatuses[0].getPath().getName();

  Configuration configuration = new Configuration(TEST_UTIL.getConfiguration());
  configuration.setFloat(MobConstants.MOB_SWEEP_TOOL_COMPACTION_RATIO, 0.6f);
  configuration.setStrings(TableInputFormat.INPUT_TABLE, tableName);
  configuration.setStrings(TableInputFormat.SCAN_COLUMN_FAMILY, family);
  configuration.setStrings(SweepJob.WORKING_VISITED_DIR_KEY, "jobWorkingNamesDir");
  configuration.setStrings(SweepJob.WORKING_FILES_DIR_KEY, "compactionFileDir");
  configuration.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
      JavaSerialization.class.getName());
  configuration.set(SweepJob.WORKING_VISITED_DIR_KEY, "compactionVisitedDir");
  configuration.setLong(MobConstants.MOB_SWEEP_TOOL_COMPACTION_START_DATE,
      System.currentTimeMillis() + 24 * 3600 * 1000);

  ZooKeeperWatcher zkw = new ZooKeeperWatcher(configuration, "1", new DummyMobAbortable());
  TableName lockName = MobUtils.getTableLockName(tn);
  String znode = ZKUtil.joinZNode(zkw.tableLockZNode, lockName.getNameAsString());
  configuration.set(SweepJob.SWEEP_JOB_ID, "1");
  configuration.set(SweepJob.SWEEP_JOB_TABLE_NODE, znode);
  ServerName serverName = SweepJob.getCurrentServerName(configuration);
  configuration.set(SweepJob.SWEEP_JOB_SERVERNAME, serverName.toString());

  TableLockManager tableLockManager =
      TableLockManager.createTableLockManager(configuration, zkw, serverName);
  TableLock lock = tableLockManager.writeLock(lockName, "Run sweep tool");
  lock.acquire();
  try {
    // use the same counter when mocking
    Counter counter = new GenericCounter();
    Reducer<Text, KeyValue, Writable, Writable>.Context ctx = mock(Reducer.Context.class);
    when(ctx.getConfiguration()).thenReturn(configuration);
    when(ctx.getCounter(Matchers.any(SweepCounter.class))).thenReturn(counter);
    when(ctx.nextKey()).thenReturn(true).thenReturn(false);
    when(ctx.getCurrentKey()).thenReturn(new Text(mobFile1));

    byte[] refBytes = Bytes.toBytes(mobFile1);
    long valueLength = refBytes.length;
    byte[] newValue = Bytes.add(Bytes.toBytes(valueLength), refBytes);
    KeyValue kv2 = new KeyValue(Bytes.toBytes(row), Bytes.toBytes(family), Bytes.toBytes(qf), 1,
        KeyValue.Type.Put, newValue);
    List<KeyValue> list = new ArrayList<KeyValue>();
    list.add(kv2);
    when(ctx.getValues()).thenReturn(list);

    SweepReducer reducer = new SweepReducer();
    reducer.run(ctx);
  } finally {
    lock.release();
  }

  // a new mob file is generated; the old one has been archived
  FileStatus[] fileStatuses2 = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath);
  assertEquals(1, fileStatuses2.length);
  String mobFile2 = fileStatuses2[0].getPath().getName();
  assertEquals(false, mobFile2.equalsIgnoreCase(mobFile1));

  // test the sequence file
  String workingPath = configuration.get(SweepJob.WORKING_VISITED_DIR_KEY);
  FileStatus[] statuses = TEST_UTIL.getTestFileSystem().listStatus(new Path(workingPath));
  Set<String> files = new TreeSet<String>();
  for (FileStatus st : statuses) {
    files.addAll(
        getKeyFromSequenceFile(TEST_UTIL.getTestFileSystem(), st.getPath(), configuration));
  }
  assertEquals(1, files.size());
  assertEquals(true, files.contains(mobFile1));
}
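/*
 * Hedged sketch of the getKeyFromSequenceFile helper referenced above; its definition is not
 * part of this excerpt. Since the test registers JavaSerialization for keys, a plausible
 * implementation reads each key out of the visited-files SequenceFile as a plain String.
 */
private Set<String> getKeyFromSequenceFile(FileSystem fs, Path path, Configuration conf)
    throws Exception {
  Set<String> keys = new TreeSet<String>();
  SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
  try {
    // with JavaSerialization configured, keys deserialize to Strings
    Object key = reader.next((Object) null);
    while (key != null) {
      keys.add((String) key);
      key = reader.next((Object) null);
    }
  } finally {
    reader.close();
  }
  return keys;
}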