@Test
public void testStartStopRow() throws Exception {
  final TableName TABLENAME1 = TableName.valueOf("testStartStopRow1");
  final TableName TABLENAME2 = TableName.valueOf("testStartStopRow2");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN1 = Bytes.toBytes("c1");
  final byte[] ROW0 = Bytes.toBytesBinary("\\x01row0");
  final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1");
  final byte[] ROW2 = Bytes.toBytesBinary("\\x01row2");

  Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
  Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

  // put rows into the first table
  Put p = new Put(ROW0);
  p.addColumn(FAMILY, COLUMN1, COLUMN1);
  t1.put(p);
  p = new Put(ROW1);
  p.addColumn(FAMILY, COLUMN1, COLUMN1);
  t1.put(p);
  p = new Put(ROW2);
  p.addColumn(FAMILY, COLUMN1, COLUMN1);
  t1.put(p);

  CopyTable copy = new CopyTable();
  assertEquals(
      0,
      ToolRunner.run(
          new Configuration(TEST_UTIL.getConfiguration()),
          copy,
          new String[] {
            "--new.name=" + TABLENAME2,
            "--startrow=\\x01row1",
            "--stoprow=\\x01row2",
            TABLENAME1.getNameAsString()
          }));

  // verify the data was copied into table 2:
  // row1 exists; row0 and row2 do not (the stop row is exclusive)
  Get g = new Get(ROW1);
  Result r = t2.get(g);
  assertEquals(1, r.size());
  assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));

  g = new Get(ROW0);
  r = t2.get(g);
  assertEquals(0, r.size());
  g = new Get(ROW2);
  r = t2.get(g);
  assertEquals(0, r.size());

  t1.close();
  t2.close();
  TEST_UTIL.deleteTable(TABLENAME1);
  TEST_UTIL.deleteTable(TABLENAME2);
}
public void putBatch(Optional<List<Request>> putRequests, boolean optimize) {
  if (!valid) {
    Logger.error("CANNOT PUT! NO VALID CONNECTION");
    return;
  }
  List<Put> puts = new ArrayList<>();
  if (putRequests.isPresent() && !putRequests.get().isEmpty()) {
    String tableName = putRequests.get().get(0).table;
    putRequests
        .get()
        .forEach(
            pr ->
                pr.getPut()
                    .ifPresent(
                        p -> {
                          if (optimize) {
                            // Skipping the WAL trades durability for write throughput.
                            p.setDurability(Durability.SKIP_WAL);
                          }
                          puts.add(p);
                        }));
    try {
      final Table table = connection.getTable(TableName.valueOf(tableName));
      if (optimize && table instanceof HTable) {
        // Buffer puts client-side instead of flushing per call (HTable-specific, pre-1.0 API).
        ((HTable) table).setAutoFlush(false, true);
      }
      table.put(puts);
      table.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
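// NOTE (editor's sketch, not from the original source): on HBase 1.0+ clients the
// idiomatic replacement for HTable#setAutoFlush-style batching is a BufferedMutator,
// which buffers mutations client-side and flushes them in bulk. "connection" is
// assumed from the snippet above; the method name is hypothetical.
public void putBatchBuffered(String tableName, List<Put> puts) throws IOException {
  try (BufferedMutator mutator =
      connection.getBufferedMutator(TableName.valueOf(tableName))) {
    mutator.mutate(puts); // queued client-side
    mutator.flush();      // sent to the region servers in batches
  }
}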
private void runTestOnTable(Table table)
    throws IOException, InterruptedException, ClassNotFoundException {
  Job job = null;
  try {
    LOG.info("Before map/reduce startup");
    job = new Job(table.getConfiguration(), "process column contents");
    job.setNumReduceTasks(1);
    Scan scan = new Scan();
    scan.addFamily(INPUT_FAMILY);
    TableMapReduceUtil.initTableMapperJob(
        table.getName(),
        scan,
        MultithreadedTableMapper.class,
        ImmutableBytesWritable.class,
        Put.class,
        job);
    MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class);
    MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS);
    TableMapReduceUtil.initTableReducerJob(
        table.getName().getNameAsString(), IdentityTableReducer.class, job);
    FileOutputFormat.setOutputPath(job, new Path("test"));
    LOG.info("Started " + table.getName());
    assertTrue(job.waitForCompletion(true));
    LOG.info("After map/reduce completion");

    // verify map-reduce results
    verify(table.getName());
  } finally {
    table.close();
    if (job != null) {
      FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir")));
    }
  }
}
@Test
public void testTableWithCFNameStartWithUnderScore() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("cfNameStartWithUnderScore");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  String family = "_cf";
  Path familyDir = new Path(dir, family);

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  Configuration conf = util.getConfiguration();
  String tableName = "mytable_cfNameStartWithUnderScore";
  Table table = util.createTable(TableName.valueOf(tableName), family);
  HFileTestUtil.createHFile(
      conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family), QUALIFIER, from, to, 1000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  String[] args = {dir.toString(), tableName};
  try {
    loader.run(args);
    assertEquals(1000, util.countRows(table));
  } finally {
    if (null != table) {
      table.close();
    }
  }
}
private void verify(TableName tableName) throws IOException {
  Table table = UTIL.getConnection().getTable(tableName);
  boolean verified = false;
  long pause = UTIL.getConfiguration().getLong("hbase.client.pause", 5 * 1000);
  int numRetries = UTIL.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
  for (int i = 0; i < numRetries; i++) {
    try {
      LOG.info("Verification attempt #" + i);
      verifyAttempt(table);
      verified = true;
      break;
    } catch (NullPointerException e) {
      // If here, a cell was empty. Presume it's because updates came in
      // after the scanner had been opened. Wait a while and retry.
      LOG.debug("Verification attempt failed: " + e.getMessage());
    }
    try {
      Thread.sleep(pause);
    } catch (InterruptedException e) {
      // continue
    }
  }
  assertTrue(verified);
  table.close();
}
/** @throws java.lang.Exception */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster();
  TEST_UTIL.startMiniMapReduceCluster();
  Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), Bytes.toBytes(COL_FAM));
  writeRows(table);
  table.close();
}
private void runTest(
    String testName,
    HTableDescriptor htd,
    BloomType bloomType,
    boolean preCreateTable,
    byte[][] tableSplitKeys,
    byte[][][] hfileRanges)
    throws Exception {
  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(FAMILY));

  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(
        util.getConfiguration(),
        fs,
        new Path(familyDir, "hfile_" + hfileIdx++),
        FAMILY,
        QUALIFIER,
        from,
        to,
        1000);
  }
  int expectedRows = hfileIdx * 1000;

  if (preCreateTable) {
    util.getHBaseAdmin().createTable(htd, tableSplitKeys);
  }

  final TableName tableName = htd.getTableName();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String[] args = {dir.toString(), tableName.toString()};
  loader.run(args);

  Table table = new HTable(util.getConfiguration(), tableName);
  try {
    assertEquals(expectedRows, util.countRows(table));
  } finally {
    table.close();
  }

  // verify that the staging folder has been cleaned up
  Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
  if (fs.exists(stagingBasePath)) {
    FileStatus[] files = fs.listStatus(stagingBasePath);
    for (FileStatus file : files) {
      assertTrue(
          "Folder=" + file.getPath() + " is not cleaned up.",
          !file.getPath().getName().equals("DONOTERASE"));
    }
  }

  util.deleteTable(tableName);
}
public static void main(String[] args) throws Exception {
  conf.set("hbase.zookeeper.quorum", "hadoop271.itversity.com");
  conf.set("hbase.zookeeper.property.clientPort", "2181");
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf("demo"));

  Scan scan1 = new Scan();
  ResultScanner scanner1 = table.getScanner(scan1);
  for (Result res : scanner1) {
    System.out.println(Bytes.toString(res.getRow()));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
  }
  scanner1.close();

  Put put = new Put("3".getBytes());
  put.addColumn("cf1".getBytes(), "column1".getBytes(), "value1".getBytes());
  put.addColumn("cf1".getBytes(), "column2".getBytes(), "value2".getBytes());
  table.put(put);

  Get get = new Get("3".getBytes());
  Result getResult = table.get(get);
  System.out.println("Printing columns for rowkey 3");
  System.out.println(Bytes.toString(getResult.getValue("cf1".getBytes(), "column1".getBytes())));
  System.out.println(Bytes.toString(getResult.getValue("cf1".getBytes(), "column2".getBytes())));

  scanner1 = table.getScanner(scan1);
  System.out.println("Before Delete");
  for (Result res : scanner1) {
    System.out.println(Bytes.toString(res.getRow()));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
  }
  scanner1.close();

  Delete del = new Delete("3".getBytes());
  table.delete(del);

  System.out.println("After Delete");
  scanner1 = table.getScanner(scan1);
  for (Result res : scanner1) {
    System.out.println(Bytes.toString(res.getRow()));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
  }
  scanner1.close();

  table.close();
  connection.close();
}
// vv HushHTablePoolProvider
public void putTable(Table table, boolean quiet) throws IOException {
  if (table != null) {
    try {
      table.close();
    } catch (Throwable t) {
      if (!quiet) throw t;
    }
  }
}
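// NOTE (editor's sketch, not from the original source): putTable is the release
// half of a pool-style table provider; a caller would pair it with whatever
// getTable method the provider exposes. The "provider" object here is hypothetical.
Table table = provider.getTable(TableName.valueOf("mytable"));
try {
  table.put(new Put(Bytes.toBytes("row1"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
} finally {
  provider.putTable(table, true); // quiet=true: swallow errors from close()
}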
public void put(String tablename, Put p) {
  try {
    final Table table = connection.getTable(TableName.valueOf(tablename));
    table.put(p);
    table.close();
  } catch (IOException e) {
    e.printStackTrace();
  }
}
private static void cleanup() {
  try {
    table.close();
    connection.close();
  } catch (IOException e) {
    System.out.println("Error while cleaning up the database connection: " + e);
    System.exit(-1);
  }
}
@Test
public void testTTL() throws Exception {
  TableName tableName = TableName.valueOf("testTTL");
  if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
    TEST_UTIL.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor hcd = new HColumnDescriptor(F).setMaxVersions(10).setTimeToLive(1);
  desc.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
  Table t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
  long now = EnvironmentEdgeManager.currentTime();
  ManualEnvironmentEdge me = new ManualEnvironmentEdge();
  me.setValue(now);
  EnvironmentEdgeManagerTestHelper.injectEdge(me);

  // 2s in the past
  long ts = now - 2000;

  // Set the TTL override to 3s
  Put p = new Put(R);
  p.setAttribute("ttl", new byte[] {});
  p.add(F, tableName.getName(), Bytes.toBytes(3000L));
  t.put(p);

  p = new Put(R);
  p.add(F, Q, ts, Q);
  t.put(p);
  p = new Put(R);
  p.add(F, Q, ts + 1, Q);
  t.put(p);

  // these two should be expired but for the override
  // (their ts was 2s in the past)
  Get g = new Get(R);
  g.setMaxVersions(10);
  Result r = t.get(g);
  // still there?
  assertEquals(2, r.size());

  TEST_UTIL.flush(tableName);
  TEST_UTIL.compact(tableName, true);

  g = new Get(R);
  g.setMaxVersions(10);
  r = t.get(g);
  // still there?
  assertEquals(2, r.size());

  // roll time forward 2s.
  me.setValue(now + 2000);
  // now verify that the data eventually does expire
  g = new Get(R);
  g.setMaxVersions(10);
  r = t.get(g);
  // should be gone now
  assertEquals(0, r.size());
  t.close();
}
/**
 * Confirm ImportTsv via data in an online table.
 *
 * @param dataAvailable whether the imported rows are expected to be present
 */
private static void validateTable(
    Configuration conf, TableName tableName, String family, int valueMultiplier,
    boolean dataAvailable) throws IOException {
  LOG.debug("Validating table.");
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(tableName);
  boolean verified = false;
  long pause = conf.getLong("hbase.client.pause", 5 * 1000);
  int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
  for (int i = 0; i < numRetries; i++) {
    try {
      Scan scan = new Scan();
      // Scan entire family.
      scan.addFamily(Bytes.toBytes(family));
      if (dataAvailable) {
        ResultScanner resScanner = table.getScanner(scan);
        for (Result res : resScanner) {
          LOG.debug("Getting results " + res.size());
          assertTrue(res.size() == 2);
          List<Cell> kvs = res.listCells();
          assertTrue(CellUtil.matchingRow(kvs.get(0), Bytes.toBytes("KEY")));
          assertTrue(CellUtil.matchingRow(kvs.get(1), Bytes.toBytes("KEY")));
          assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier)));
          assertTrue(
              CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier)));
          // Only one result set is expected, so let it loop.
          verified = true;
        }
      } else {
        ResultScanner resScanner = table.getScanner(scan);
        Result[] next = resScanner.next(2);
        assertEquals(0, next.length);
        verified = true;
      }
      break;
    } catch (NullPointerException e) {
      // If here, a cell was empty. Presume it's because updates came in
      // after the scanner had been opened. Wait a while and retry.
    }
    try {
      Thread.sleep(pause);
    } catch (InterruptedException e) {
      // continue
    }
  }
  table.close();
  connection.close();
  assertTrue(verified);
}
@Override
protected void closeHTable() {
  if (table != null) {
    try {
      table.close();
    } catch (Exception e) {
      LOG.error("Error in closing the table " + table.getName(), e);
    }
  }
}
public static void verifyMobRowCount(
    final HBaseTestingUtility util, final TableName tableName, long expectedRows)
    throws IOException {
  Connection connection = ConnectionFactory.createConnection(util.getConfiguration());
  Table table = connection.getTable(tableName);
  try {
    assertEquals(expectedRows, countMobRows(table));
  } finally {
    // close the connection as well as the table
    table.close();
    connection.close();
  }
}
public void doScan() throws IOException {
  Table tableRef = connection.getTable(TableName.valueOf(table));
  Scan scan = new Scan();
  ResultScanner scanner = tableRef.getScanner(scan);
  long now = System.currentTimeMillis();
  if (verbose) System.out.println("Starting scan");
  for (Result res : scanner) {
    if (verbose) System.out.println(res);
  }
  if (verbose) System.out.printf("Scan finished: %d ms\n\n", System.currentTimeMillis() - now);
  scanner.close();
  tableRef.close();
}
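// NOTE (editor's sketch, not from the original source): for anything beyond a
// smoke test, the full-table scan above is usually narrowed and tuned. All calls
// below are standard HBase 1.x client API; the family and row keys are placeholders.
Scan scan = new Scan();
scan.addFamily(Bytes.toBytes("cf1"));       // only fetch one family
scan.setStartRow(Bytes.toBytes("row-000")); // inclusive
scan.setStopRow(Bytes.toBytes("row-999"));  // exclusive
scan.setCaching(500);                       // rows fetched per RPC round-trip
scan.setCacheBlocks(false);                 // don't pollute the block cache on a one-off scan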
/**
 * Returns the number of rows in a given table. HBase must be up and the table should be present
 * (will wait for timeout for a while otherwise).
 *
 * @return # of rows in the specified table
 */
protected int tableRowCount(Configuration conf, TableName table) throws IOException {
  Table t = TEST_UTIL.getConnection().getTable(table);
  Scan st = new Scan();
  ResultScanner rst = t.getScanner(st);
  int count = 0;
  for (@SuppressWarnings("unused") Result rt : rst) {
    count++;
  }
  rst.close();
  t.close();
  return count;
}
@Test(timeout = 60000)
public void testExceptionFromCoprocessorDuringPut() throws Exception {
  // set configuration to indicate which cp should be loaded
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BuggyRegionObserver.class.getName());
  conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
  TEST_UTIL.startMiniCluster(2);
  try {
    // When we try to write to TEST_TABLE, the buggy coprocessor will
    // cause a NullPointerException, which will cause the regionserver (which
    // hosts the region we attempted to write to) to abort.
    final byte[] TEST_FAMILY = Bytes.toBytes("aaa");

    Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, TEST_FAMILY);
    TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);

    // Note which regionServer will abort (after the put is attempted).
    final HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);

    try {
      final byte[] ROW = Bytes.toBytes("aaa");
      Put put = new Put(ROW);
      put.add(TEST_FAMILY, ROW, ROW);
      table.put(put);
    } catch (IOException e) {
      // The region server is going to be aborted.
      // We may get an exception if we retry,
      // which is not guaranteed.
    }

    // Wait 10 seconds for the regionserver to abort: expected result is that
    // it will abort.
    boolean aborted = false;
    for (int i = 0; i < 10; i++) {
      aborted = regionServer.isAborted();
      if (aborted) {
        break;
      }
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        fail("InterruptedException while waiting for regionserver zk node to be deleted.");
      }
    }
    Assert.assertTrue("The region server should have aborted", aborted);
    table.close();
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
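// NOTE (editor's sketch, not from the original source): the BuggyRegionObserver
// wired in above lives in the HBase test sources. An observer with the same effect
// could look roughly like this (HBase 1.x observer API): it throws a
// NullPointerException from prePut, which aborts the hosting region server when
// hbase.coprocessor.abortonerror is set to true.
public static class BuggyRegionObserver extends BaseRegionObserver {
  @Override
  public void prePut(
      ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
      Durability durability) throws IOException {
    Integer i = null;
    i = i + 1; // deliberate NPE on unboxing
  }
}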
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseClientHelper.loadDefaultConfiguration();

  Connection connection = ConnectionFactory.createConnection(conf);
  try {
    Table table = connection.getTable(TableName.valueOf("testtable"));
    try {
      // 1 Put
      Put p = new Put(Bytes.toBytes("row1"));
      p.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
      table.put(p);

      // 2 Get
      Get g = new Get(Bytes.toBytes("row1"));
      Result r = table.get(g);
      byte[] value = r.getValue(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
      String valueStr = Bytes.toString(value);
      System.out.println("GET: " + valueStr);

      // 3 Scan
      Scan s = new Scan();
      s.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
      ResultScanner scanner = table.getScanner(s);
      try {
        for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
          System.out.println("Found row: " + rr);
        }
        // The other approach is to use a foreach loop. Scanners are iterable!
        // for (Result rr : scanner) {
        //   System.out.println("Found row: " + rr);
        // }
      } finally {
        scanner.close();
      }

      // Close your table and cluster connection.
    } finally {
      if (table != null) table.close();
    }
  } finally {
    connection.close();
  }
}
@Test
public void testBaseCases() throws Exception {
  TableName tableName = TableName.valueOf("baseCases");
  if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
    TEST_UTIL.deleteTable(tableName);
  }
  Table t = TEST_UTIL.createTable(tableName, F, 1);

  // set the version override to 2
  Put p = new Put(R);
  p.setAttribute("versions", new byte[] {});
  p.add(F, tableName.getName(), Bytes.toBytes(2));
  t.put(p);

  long now = EnvironmentEdgeManager.currentTime();

  // insert 2 versions
  p = new Put(R);
  p.add(F, Q, now, Q);
  t.put(p);
  p = new Put(R);
  p.add(F, Q, now + 1, Q);
  t.put(p);
  Get g = new Get(R);
  g.setMaxVersions(10);
  Result r = t.get(g);
  assertEquals(2, r.size());

  TEST_UTIL.flush(tableName);
  TEST_UTIL.compact(tableName, true);

  // both versions are still visible even after a flush/compaction
  g = new Get(R);
  g.setMaxVersions(10);
  r = t.get(g);
  assertEquals(2, r.size());

  // insert a 3rd version
  p = new Put(R);
  p.add(F, Q, now + 2, Q);
  t.put(p);
  g = new Get(R);
  g.setMaxVersions(10);
  r = t.get(g);
  // still only two versions visible
  assertEquals(2, r.size());

  t.close();
}
/**
 * Tests (1) merging a region that is not online, (2) merging a region with itself, and (3)
 * merging unknown regions. The cases are combined into one test so that we don't have to create
 * many tables, and each check is simple.
 */
@Test
public void testMerge() throws Exception {
  LOG.info("Starting testMerge");
  final TableName tableName = TableName.valueOf("testMerge");

  try {
    // Create table and load data.
    Table table = createTableAndLoadData(master, tableName);

    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    List<HRegionInfo> regions = regionStates.getRegionsOfTable(tableName);
    // Fake offline one region
    HRegionInfo a = regions.get(0);
    HRegionInfo b = regions.get(1);
    regionStates.regionOffline(a);
    try {
      // Merge offline region. Region a is offline here
      admin.mergeRegions(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false);
      fail("Offline regions should not be able to merge");
    } catch (IOException ie) {
      System.out.println(ie);
      assertTrue(
          "Exception should mention regions not online",
          StringUtils.stringifyException(ie).contains("regions not online")
              && ie instanceof MergeRegionException);
    }

    try {
      // Merge the same region: b and b.
      admin.mergeRegions(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true);
      fail("A region should not be able to merge with itself, even forcefully");
    } catch (IOException ie) {
      assertTrue(
          "Exception should mention merging a region with itself",
          StringUtils.stringifyException(ie).contains("region to itself")
              && ie instanceof MergeRegionException);
    }

    try {
      // Merge unknown regions
      admin.mergeRegions(Bytes.toBytes("-f1"), Bytes.toBytes("-f2"), true);
      fail("Unknown regions should not be able to merge");
    } catch (IOException ie) {
      assertTrue("UnknownRegionException should be thrown", ie instanceof UnknownRegionException);
    }
    table.close();
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
/**
 * Not really restarting the master. We simulate a restart by clearing the new region state,
 * since it is not persisted and would be lost after a master restart.
 */
@Test
public void testMergeAndRestartingMaster() throws Exception {
  LOG.info("Starting testMergeAndRestartingMaster");
  final TableName tableName = TableName.valueOf("testMergeAndRestartingMaster");

  // Create table and load data.
  Table table = createTableAndLoadData(master, tableName);

  try {
    MyMasterRpcServices.enabled.set(true);

    // Merge 1st and 2nd region
    mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);
  } finally {
    MyMasterRpcServices.enabled.set(false);
  }

  table.close();
}
public void put(Optional<Request> putRequest) {
  if (!valid) {
    Logger.error("CANNOT PUT! NO VALID CONNECTION");
    return;
  }
  putRequest.ifPresent(
      pr ->
          pr.getPut()
              .ifPresent(
                  p -> {
                    try {
                      final Table table = connection.getTable(TableName.valueOf(pr.table));
                      table.put(p);
                      table.close();
                    } catch (IOException e) {
                      e.printStackTrace();
                    }
                  }));
}
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();

  HBaseHelper helper = HBaseHelper.getHelper(conf);
  helper.dropTable("testtable");
  helper.createTable("testtable", "colfam1");
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf("testtable"));
  List<Put> puts = new ArrayList<Put>();

  // vv PutListErrorExample2
  Put put1 = new Put(Bytes.toBytes("row1"));
  put1.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
  puts.add(put1);
  Put put2 = new Put(Bytes.toBytes("row2"));
  put2.addColumn(Bytes.toBytes("BOGUS"), Bytes.toBytes("qual1"), Bytes.toBytes("val2"));
  puts.add(put2);
  Put put3 = new Put(Bytes.toBytes("row2"));
  put3.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val3"));
  puts.add(put3);
  /*[*/ Put put4 = new Put(Bytes.toBytes("row2"));
  puts.add(put4); /*]*/ // co PutListErrorExample2-1-AddErrorPut Add a put with no content at all to the list.

  /*[*/ try { /*]*/
    table.put(puts);
  /*[*/ } catch (Exception e) {
    System.err.println("Error: " + e);
    // table.flushCommits(); // todo: FIX!
  } /*]*/ // co PutListErrorExample2-2-Catch Catch the local exception and commit the queued updates.
  // ^^ PutListErrorExample2

  table.close();
  connection.close();
  helper.close();
}
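// NOTE (editor's sketch, not from the original source): the generic catch above
// hides which mutations actually failed. The HBase client reports per-mutation
// failures via RetriesExhaustedWithDetailsException, which can be unpacked like
// this ("table" and "puts" are assumed from the example above):
try {
  table.put(puts);
} catch (RetriesExhaustedWithDetailsException e) {
  for (int i = 0; i < e.getNumExceptions(); i++) {
    System.err.println(
        "Failed row: " + Bytes.toString(e.getRow(i).getRow()) + ", cause: " + e.getCause(i));
  }
}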
@Test
public void testWholesomeMerge() throws Exception {
  LOG.info("Starting testWholesomeMerge");
  final TableName tableName = TableName.valueOf("testWholesomeMerge");

  // Create table and load data.
  Table table = createTableAndLoadData(master, tableName);

  // Merge 1st and 2nd region
  mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);

  // Merge 2nd and 3rd region
  PairOfSameType<HRegionInfo> mergedRegions =
      mergeRegionsAndVerifyRegionNum(master, tableName, 1, 2, INITIAL_REGION_NUM - 2);

  verifyRowCount(table, ROWSIZE);

  // Randomly choose one of the two merged regions
  HRegionInfo hri =
      RandomUtils.nextBoolean() ? mergedRegions.getFirst() : mergedRegions.getSecond();
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  AssignmentManager am = cluster.getMaster().getAssignmentManager();
  RegionStates regionStates = am.getRegionStates();
  long start = EnvironmentEdgeManager.currentTime();
  while (!regionStates.isRegionInState(hri, State.MERGED)) {
    assertFalse(
        "Timed out waiting for the merged region to be in state MERGED",
        EnvironmentEdgeManager.currentTime() - start > 60000);
    Thread.sleep(500);
  }

  // We should not be able to assign it again
  am.assign(hri, true);
  assertFalse("A merged region can't be assigned", regionStates.isRegionInTransition(hri));
  assertTrue(regionStates.isRegionInState(hri, State.MERGED));

  // We should not be able to unassign it either
  am.unassign(hri, null);
  assertFalse("A merged region can't be unassigned", regionStates.isRegionInTransition(hri));
  assertTrue(regionStates.isRegionInState(hri, State.MERGED));

  table.close();
}
@Test
public void test() throws IOException, InterruptedException {
  testUtil
      .getHBaseAdmin()
      .createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
  Table table = testUtil.createTable(tableName, families);
  table.put(
      new Put(Bytes.toBytes("k")).addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")));

  MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  Region region = null;
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region r : hrs.getOnlineRegions(tableName)) {
      region = r;
      break;
    }
  }
  assertNotNull(region);
  Thread.sleep(2000);
  RegionStoreSequenceIds ids =
      testUtil
          .getHBaseCluster()
          .getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
  // This will be the sequenceid just before that of the earliest edit in memstore.
  long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
  assertTrue(storeSequenceId > 0);
  testUtil.getHBaseAdmin().flush(tableName);
  Thread.sleep(2000);
  ids =
      testUtil
          .getHBaseCluster()
          .getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertTrue(
      ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
      ids.getLastFlushedSequenceId() > storeSequenceId);
  assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
  table.close();
}
/**
 * Added by linjy on 2016-01-06. Saves the per-day, per-city job-posting counts produced by the
 * analysis into HBase.
 *
 * @param resultLists the aggregated results; each map holds totalcount, city, positionname and
 *     createdate
 */
private void saveDayCityPCount(List<Map<String, String>> resultLists) {
  // String dateTime = DateUtils.getYest0day("yyyyMMdd");
  try {
    Connection conn = HBaseUtils.getConnection();
    Table table = conn.getTable(TableName.valueOf("SalaryInfoResult".getBytes()));
    logger.info("Start saving per-day, per-city job counts. Result size: " + resultLists.size());
    List<Put> puts = new ArrayList<Put>();
    for (Map<String, String> map : resultLists) {
      String totalCount = map.get("totalcount");
      String city = map.get("city");
      String positionName = map.get("positionname");
      String createDate = map.get("createdate");
      String rowKey = createDate + getPostitionNameSX(positionName) + city;
      Put put = new Put(rowKey.getBytes());
      put.addColumn(
          "ResultInfoFamily".getBytes(), "resultCount".getBytes(), totalCount.getBytes());
      put.addColumn("ResultInfoFamily".getBytes(), "city".getBytes(), city.getBytes());
      put.addColumn(
          "ResultInfoFamily".getBytes(), "positionName".getBytes(), positionName.getBytes());
      put.addColumn(
          "ResultInfoFamily".getBytes(), "createDate".getBytes(), createDate.getBytes());
      put.addColumn(
          "ResultInfoFamily".getBytes(),
          "insertDate".getBytes(),
          (DateUtils.getDateFormat(new Date(), "yyyyMMdd")).getBytes());
      put.addColumn("ResultInfoFamily".getBytes(), "flag".getBytes(), "0".getBytes());
      puts.add(put);
      // System.out.println(positionName + " " + rowKey);
    }
    // insert the data
    table.put(puts);
    // closing the table flushes any buffered writes
    table.close();
    logger.info("Finished saving per-day, per-city job counts.");
  } catch (Exception e) {
    e.printStackTrace();
    logger.error("Error writing result data to HBase: " + e.getMessage());
  }
}
/**
 * Write a random data file and a non-file in a dir with a valid family name but not part of the
 * table families. We should be able to bulk-load without getting the unmatched family exception.
 * HBASE-13037/HBASE-13227
 */
private void testNonHfileFolder(String tableName, boolean preCreateTable) throws Exception {
  Path dir = util.getDataTestDirOnTestFS(tableName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs);

  Path familyDir = new Path(dir, Bytes.toString(FAMILY));
  HFileTestUtil.createHFile(
      util.getConfiguration(),
      fs,
      new Path(familyDir, "hfile_0"),
      FAMILY,
      QUALIFIER,
      Bytes.toBytes("begin"),
      Bytes.toBytes("end"),
      500);
  createRandomDataFile(fs, new Path(familyDir, "012356789"), 16 * 1024);

  final String NON_FAMILY_FOLDER = "_logs";
  Path nonFamilyDir = new Path(dir, NON_FAMILY_FOLDER);
  fs.mkdirs(nonFamilyDir);
  fs.mkdirs(new Path(nonFamilyDir, "non-file"));
  createRandomDataFile(fs, new Path(nonFamilyDir, "012356789"), 16 * 1024);

  Table table = null;
  try {
    if (preCreateTable) {
      table = util.createTable(TableName.valueOf(tableName), FAMILY);
    } else {
      table = util.getConnection().getTable(TableName.valueOf(tableName));
    }

    final String[] args = {dir.toString(), tableName};
    new LoadIncrementalHFiles(util.getConfiguration()).run(args);
    assertEquals(500, util.countRows(table));
  } finally {
    if (table != null) {
      table.close();
    }
    fs.delete(dir, true);
  }
}
protected HRegionInfo createRegion(
    Configuration conf, final Table htbl, byte[] startKey, byte[] endKey) throws IOException {
  Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
@Test
public void testAclTableEntries() throws Exception {
  String userTestNamespace = "userTestNsp";
  Table acl = UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME);
  try {
    ListMultimap<String, TablePermission> perms =
        AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
    for (Map.Entry<String, TablePermission> entry : perms.entries()) {
      LOG.debug(entry);
    }
    assertEquals(6, perms.size());

    // Grant and check state in ACL table
    grantOnNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE);

    Result result = acl.get(new Get(Bytes.toBytes(userTestNamespace)));
    assertTrue(result != null);
    perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
    assertEquals(7, perms.size());
    List<TablePermission> namespacePerms = perms.get(userTestNamespace);
    assertTrue(perms.containsKey(userTestNamespace));
    assertEquals(1, namespacePerms.size());
    assertEquals(TEST_NAMESPACE, namespacePerms.get(0).getNamespace());
    assertEquals(null, namespacePerms.get(0).getFamily());
    assertEquals(null, namespacePerms.get(0).getQualifier());
    assertEquals(1, namespacePerms.get(0).getActions().length);
    assertEquals(Permission.Action.WRITE, namespacePerms.get(0).getActions()[0]);

    // Revoke and check state in ACL table
    revokeFromNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE);

    perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
    assertEquals(6, perms.size());
  } finally {
    acl.close();
  }
}