/**
 * Performs a full scan of a catalog table.
 *
 * @param catalogTracker tracker providing connections to the catalog tables
 * @param visitor Visitor invoked against each row.
 * @param startrow Where to start the scan. Pass null to begin the scan at the first row.
 * @param scanRoot True if we are to scan <code>-ROOT-</code> rather than <code>.META.</code>, the
 *     default (pass false to scan .META.)
 * @throws IOException
 */
static void fullScan(CatalogTracker catalogTracker, final Visitor visitor,
    final byte[] startrow, final boolean scanRoot) throws IOException {
  Scan scan = new Scan();
  if (startrow != null) scan.setStartRow(startrow);
  if (startrow == null && !scanRoot) {
    int caching = catalogTracker.getConnection().getConfiguration()
        .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
    scan.setCaching(caching);
  }
  scan.addFamily(HConstants.CATALOG_FAMILY);
  HTable metaTable = scanRoot ? getRootHTable(catalogTracker) : getMetaHTable(catalogTracker);
  ResultScanner scanner = metaTable.getScanner(scan);
  try {
    Result data;
    while ((data = scanner.next()) != null) {
      if (data.isEmpty()) continue;
      // Break if visit returns false.
      if (!visitor.visit(data)) break;
    }
  } finally {
    scanner.close();
    metaTable.close();
  }
}
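// A minimal usage sketch for fullScan above, assuming an already-started CatalogTracker
// named "catalogTracker" and that Visitor declares boolean visit(Result). The visitor here
// just counts .META. rows; returning true keeps the scan going.
static int countMetaRows(CatalogTracker catalogTracker) throws IOException {
  final int[] rows = {0};
  fullScan(catalogTracker, new Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      rows[0]++;
      return true; // keep scanning; return false to stop early
    }
  }, null, false); // null start row, scan .META. rather than -ROOT-
  return rows[0];
}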
public static String get(String keyspace, String rowKey, String column, long timestamp)
    throws Exception {
  String columnValue = null;
  HTable htable = new HTable(keyspace);
  try {
    Get get = new Get(rowKey.getBytes());
    get = get.setTimeStamp(timestamp);
    get = get.setMaxVersions();
    Result res = htable.get(get);
    KeyValue[] data = res.raw();
    for (int i = 0; i < data.length; i++) {
      KeyValue d = data[i];
      String family = new String(d.getFamily());
      String qualifier = new String(d.getQualifier());
      if (qualifier.equalsIgnoreCase(column)) {
        columnValue = new String(d.getValue());
        System.out.println(d.toString() + " Family:" + family + " Qualifier:" + qualifier
            + " Value:" + columnValue);
        break;
      }
    }
  } finally {
    htable.close(); // avoid leaking the table handle
  }
  return columnValue;
}
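// Hedged usage sketch for get(...) above: the table name "users" and the column "email"
// are illustrative assumptions, not part of the original snippet.
public static void getExample() throws Exception {
  String value = get("users", "row-001", "email", System.currentTimeMillis());
  System.out.println(value == null ? "column not found" : "email = " + value);
}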
@Override
public Event intercept(Event event) {
  Map<String, String> headers = event.getHeaders();
  String fileName = headers.get("file");
  String fileType = getFileType(new String(event.getBody()));
  Configuration conf = HBaseConfiguration.create();
  HTable table = null;
  try {
    table = new HTable(conf, "fs");
    Put put = new Put(Bytes.toBytes(fileType + "_" + fileName));
    put.add(Bytes.toBytes("fn"), Bytes.toBytes("ST"), Bytes.toBytes("PICKED"));
    table.put(put);
  } catch (IOException e) {
    // RetriesExhaustedWithDetailsException and InterruptedIOException are both
    // IOException subclasses, so a single catch covers the put as well.
    e.printStackTrace();
  } finally {
    if (table != null) {
      try {
        table.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
  return event;
}
@Test
public void bulkLoadHFileTest() throws Exception {
  String testName = TestRegionObserverInterface.class.getName() + ".bulkLoadHFileTest";
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest");
  Configuration conf = util.getConfiguration();
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
        tableName, new Boolean[] {false, false});

    FileSystem fs = util.getTestFileSystem();
    final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(A));

    createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);

    // Bulk load
    new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName));

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
        tableName, new Boolean[] {true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
/*
 * (non-Javadoc)
 *
 * @see com.activequant.archive.IArchiveWriter#commit()
 */
public void commit() throws IOException {
  synchronized (puts) {
    htable.put(puts);
    puts.clear();
    htable.flushCommits();
  }
}
@Test
public void testRowMutation() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
        tableName, new Boolean[] {false, false, false, false, false});

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);

    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);

    RowMutations arm = new RowMutations(ROW);
    arm.add(put);
    arm.add(delete);
    table.mutateRow(arm);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
        tableName, new Boolean[] {false, false, true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
@Test
public void testCheckAndPutHooks() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Put p = new Put(Bytes.toBytes(0));
    p.add(A, A, A);
    table.put(p);
    table.flushCommits();

    p = new Put(Bytes.toBytes(0));
    p.add(A, A, A);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
        tableName, new Boolean[] {false, false, false});
    table.checkAndPut(Bytes.toBytes(0), A, A, A, p);
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
        tableName, new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
private void putAndWait(byte[] row, byte[] fam, HTable source, HTable... targets)
    throws Exception {
  Put put = new Put(row);
  put.add(fam, row, row);
  source.put(put);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    boolean replicatedToAll = true;
    for (HTable target : targets) {
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        replicatedToAll = false;
        break;
      } else {
        assertArrayEquals(res.value(), row);
      }
    }
    if (replicatedToAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
/*
 * Adds data to a table (suitable for a fixed table with a known set of column families).
 *
 * @rowKey row key
 * @tableName table name
 * @column1 columns of the first column family
 * @value1 values for the first column family's columns
 * @column2 columns of the second column family
 * @value2 values for the second column family's columns
 */
public static void addData(String rowKey, String tableName,
    String[] column1, String[] value1, String[] column2, String[] value2) throws IOException {
  Put put = new Put(Bytes.toBytes(rowKey)); // set the row key
  // HTable handles record-level operations such as put/get/delete/scan
  HTable table = new HTable(conf, Bytes.toBytes(tableName));
  // Fetch all column families of the table
  HColumnDescriptor[] columnFamilies = table.getTableDescriptor().getColumnFamilies();
  for (int i = 0; i < columnFamilies.length; i++) {
    String familyName = columnFamilies[i].getNameAsString(); // column family name
    if (familyName.equals("article")) { // put data into the "article" family
      for (int j = 0; j < column1.length; j++) {
        put.add(Bytes.toBytes(familyName), Bytes.toBytes(column1[j]), Bytes.toBytes(value1[j]));
      }
    }
    if (familyName.equals("author")) { // put data into the "author" family
      for (int j = 0; j < column2.length; j++) {
        put.add(Bytes.toBytes(familyName), Bytes.toBytes(column2[j]), Bytes.toBytes(value2[j]));
      }
    }
  }
  table.put(put);
  System.out.println("add data Success!");
}
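// Hedged usage sketch for addData above: the families "article" and "author" match the names
// the method checks for; the table name "blog" and the column/value pairs are illustrative
// assumptions.
public static void addDataExample() throws IOException {
  addData("row-1", "blog",
      new String[] {"title", "content"}, new String[] {"Hello HBase", "First post"},
      new String[] {"name", "nickname"}, new String[] {"Alice", "ali"});
}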
private void checkRow(byte[] row, int count, HTable... tables) throws IOException {
  Get get = new Get(row);
  for (HTable table : tables) {
    Result res = table.get(get);
    assertEquals(count, res.size());
  }
}
private void deleteAndWait(byte[] row, HTable source, HTable... targets) throws Exception {
  Delete del = new Delete(row);
  source.delete(del);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (HTable target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
/**
 * @param args unused
 * @author Nagamallikarjuna
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  HTable table = new HTable(conf, "stocks");
  File file = new File("/home/naga/bigdata/hadoop-1.0.3/daily");
  BufferedReader br = new BufferedReader(new FileReader(file));
  String line = br.readLine();
  Put data = null;
  while (line != null) {
    String parts[] = line.trim().split("\\t");
    if (parts.length == 9) {
      String key = parts[1] + ":" + parts[2];
      data = new Put(key.getBytes());
      data.add("cf".getBytes(), "exchange".getBytes(), parts[0].getBytes());
      data.add("cf".getBytes(), "open".getBytes(), parts[3].getBytes());
      data.add("cf".getBytes(), "high".getBytes(), parts[4].getBytes());
      data.add("cf".getBytes(), "low".getBytes(), parts[5].getBytes());
      data.add("cf".getBytes(), "close".getBytes(), parts[6].getBytes());
      data.add("cf".getBytes(), "volume".getBytes(), parts[7].getBytes());
      data.add("cf".getBytes(), "adj_close".getBytes(), parts[8].getBytes());
      table.put(data);
    }
    line = br.readLine();
  }
  br.close();
  table.close();
}
private void verify(String tableName) throws IOException {
  HTable table = new HTable(new Configuration(UTIL.getConfiguration()), tableName);
  boolean verified = false;
  long pause = UTIL.getConfiguration().getLong("hbase.client.pause", 5 * 1000);
  int numRetries = UTIL.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
  for (int i = 0; i < numRetries; i++) {
    try {
      LOG.info("Verification attempt #" + i);
      verifyAttempt(table);
      verified = true;
      break;
    } catch (NullPointerException e) {
      // If here, a cell was empty. Presume it's because updates came in
      // after the scanner had been opened. Wait a while and retry.
      LOG.debug("Verification attempt failed: " + e.getMessage());
    }
    try {
      Thread.sleep(pause);
    } catch (InterruptedException e) {
      // continue
    }
  }
  assertTrue(verified);
  table.close();
}
static int insertData(Configuration conf, TableName tableName, String column, double prob)
    throws IOException {
  Random rng = new Random();
  int count = 0;
  HTable table = new HTable(conf, tableName);
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
  for (byte b1 = 'a'; b1 < 'z'; b1++) {
    for (byte b2 = 'a'; b2 < 'z'; b2++) {
      for (byte b3 = 'a'; b3 < 'z'; b3++) {
        if (rng.nextDouble() < prob) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.add(famAndQf[0], famAndQf[1], k);
          table.put(put);
          count++;
        }
      }
    }
  }
  table.flushCommits();
  table.close();
  return count;
}
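// Hedged usage sketch for insertData above: the table name "sampled", the "f:q"
// family:qualifier pair, and the 10% sampling probability are illustrative assumptions.
static void insertDataExample(Configuration conf) throws IOException {
  int inserted = insertData(conf, TableName.valueOf("sampled"), "f:q", 0.1);
  System.out.println("inserted " + inserted + " rows");
}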
@Test
public void insert_rowkey_prefix_date() throws IOException {
  System.out.println(errorTable);
  errorTable.setAutoFlushTo(false);
  int count = 10000000;
  long t1 = System.currentTimeMillis();
  for (int i = 0; i < count; i++) {
    String uuid = UUID.randomUUID().toString().replaceAll("-", "").substring(0, 8);
    Put put = new Put(Bytes.toBytes("20150705" + "_" + uuid));
    put.add(fBytes, Bytes.toBytes("stacktrace"),
        Bytes.toBytes("java.io.IOException:file not found" + UUID.randomUUID().toString()));
    errorTable.put(put);
    if (i % 10000 == 0) {
      errorTable.flushCommits();
    }
  }
  errorTable.flushCommits();
  long t2 = System.currentTimeMillis();
  // Report the number of puts actually written (the original printed the size of an
  // unused buffer list, which was always zero).
  System.out.println("count=" + count + ",t2-t1=" + (t2 - t1));
  // errorTable.close();
}
public void fillTable(String table, int startRow, int endRow, int numCols, int pad,
    boolean setTimestamp, boolean random, String... colfams) throws IOException {
  HTable tbl = new HTable(conf, table);
  Random rnd = new Random();
  for (int row = startRow; row <= endRow; row++) {
    for (int col = 0; col < numCols; col++) {
      Put put = new Put(Bytes.toBytes("row-" + padNum(row, pad)));
      for (String cf : colfams) {
        String colName = "col-" + padNum(col, pad);
        String val = "val-" + (random ? Integer.toString(rnd.nextInt(numCols))
            : padNum(row, pad) + "." + padNum(col, pad));
        if (setTimestamp) {
          put.add(Bytes.toBytes(cf), Bytes.toBytes(colName), col, Bytes.toBytes(val));
        } else {
          put.add(Bytes.toBytes(cf), Bytes.toBytes(colName), Bytes.toBytes(val));
        }
      }
      tbl.put(put);
    }
  }
  tbl.close();
}
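// Hedged usage sketch for fillTable above: fills rows 1..10 of an assumed table "testtable"
// with 5 columns per row in families "colfam1" and "colfam2", padded to 2 digits,
// deterministic values, no explicit timestamps.
public void fillTableExample() throws IOException {
  fillTable("testtable", 1, 10, 5, 2, false, false, "colfam1", "colfam2");
}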
public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
  Configuration config = HBaseConfiguration.create();
  config.set("hbase.zookeeper.quorum", "distillery");
  HTable meadcam = new HTable(config, "meadcam");
  if (args[0].equals("delta")) {
    MeadRows meadrows = new MeadRows(meadcam);
    System.out.println("Average Delta: " + averageDeltas(meadrows));
  }
  if (args[0].equals("get")) {
    MeadRow row = new MeadRow(meadcam, args[1]);
    System.out.println("==== " + row.getBrewId() + " ====");
    System.out.println("Date:\t " + row.getDate());
    System.out.println("Delta:\t " + row.getDelta());
    System.out.println("RMS:\t " + row.getRMS());
  }
  if (args[0].equals("latest")) {
    System.out.println("Getting latest...");
    String brewId = args[1];
    // long minutes = Long.parseLong(args[2]);
    MeadRows meadrows = new MeadRows(meadcam, brewId);
    System.out.println("Average: " + Float.toString(averageDeltas(meadrows)));
  }
  meadcam.close();
}
protected ArrayList<HRegionInfo> getRegionInfoList(String tableName) throws IOException {
  Set<HRegionInfo> hRegionInfoSet = new TreeSet<>();
  try (HTable table = new HTable(conf, tableName)) {
    hRegionInfoSet.addAll(table.getRegionLocations().keySet());
  }
  return new ArrayList<>(hRegionInfoSet);
}
@Test
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName, new Boolean[] {false, false, false});

    table.increment(inc);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName, new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
public void flush() throws IOException {
  if (!buffer.isEmpty()) {
    htable.put(buffer);
    buffer.clear();
  }
  htable.flushCommits();
}
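// A minimal sketch of the buffering counterpart to flush() above, assuming "buffer" is a
// List<Put> field and "batchSize" an int threshold; both names are assumptions, not from
// the original snippet. Puts accumulate locally and are pushed once the threshold is hit.
public void bufferedPut(Put put) throws IOException {
  buffer.add(put);
  if (buffer.size() >= batchSize) {
    flush(); // drain the local buffer and force a client-side commit
  }
}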
@Test
public void testAppendHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Append app = new Append(Bytes.toBytes(0));
    app.add(A, A, A);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
        tableName, new Boolean[] {false, false, false});

    table.append(app);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
        tableName, new Boolean[] {true, true, true});
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
/**
 * @param t Table to use (will be closed when done).
 * @param p Put to apply.
 * @throws IOException
 */
private static void put(final HTable t, final Put p) throws IOException {
  try {
    t.put(p);
  } finally {
    t.close();
  }
}
protected void batchUpdate(DeleteBuffer kvBuff, boolean flushCommits) throws HiveException {
  try {
    HTable htable = HTableFactory.getHTable(configMap);
    // Disable auto flush when specified so in the config map
    if (disableAutoFlush) htable.setAutoFlushTo(false);
    // Overwrite the write buffer size when config map specifies to do so
    if (writeBufferSizeBytes > 0) htable.setWriteBufferSize(writeBufferSizeBytes);

    numDeleteRecords += kvBuff.deleteList.size();
    if (kvBuff.deleteList.size() > 0)
      LOG.info(" Doing Batch Delete " + kvBuff.deleteList.size()
          + " records; Total delete records = " + numDeleteRecords + " ; Start = "
          + (new String(kvBuff.deleteList.get(0).getRow())) + " ; End = "
          + (new String(kvBuff.deleteList.get(kvBuff.deleteList.size() - 1).getRow())));
    else
      LOG.info(" Doing Batch Delete with ZERO 0 records");

    getReporter()
        .getCounter(BatchDeleteUDAFCounter.NUMBER_OF_SUCCESSFUL_DELETES)
        .increment(kvBuff.deleteList.size());
    getReporter().getCounter(BatchDeleteUDAFCounter.NUMBER_OF_BATCH_OPERATIONS).increment(1);

    htable.delete(kvBuff.deleteList);
    // Flush after the deletes have been issued; the original flushed before the delete,
    // which leaves the batch sitting in the client buffer when auto flush is disabled.
    if (flushCommits) htable.flushCommits();
    kvBuff.deleteList.clear();
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
public static void main(String[] args) {
  Integer id = Integer.parseInt(args[0]);
  System.out.println(id);
  byte[] k = new byte[5];
  k[0] = (byte) 1;
  byte[] col = getColIdbyte(id);
  for (int i = 0; i < 4; i++) {
    k[i + 1] = col[i];
  }
  System.out.println(Bytes.toStringBinary(k));
  Get get = new Get(k);
  // get.addColumn(Bytes.toBytes("A:i"));
  HTable table;
  try {
    table = new HTable(hconf, "new");
    Result result = table.get(get);
    KeyValue[] g = result.raw();
    for (int i = 0; i < g.length; i++) {
      System.out.println(Bytes.toStringBinary(g[i].getValue()));
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
}
public static void main(String[] args) throws Exception {
  if (args.length < 2) {
    throw new Exception("Table name or start key not specified.");
  }
  Configuration conf = HBaseConfiguration.create();
  HTable table = new HTable(conf, args[0]);
  String startKey = args[1];

  TimeCounter executeTimer = new TimeCounter();
  executeTimer.begin();
  executeTimer.enter();

  Expression exp = ExpressionFactory.eq(
      ExpressionFactory.toLong(
          ExpressionFactory.toString(ExpressionFactory.columnValue("family", "longStr2"))),
      ExpressionFactory.constant(Long.parseLong("99")));
  ExpressionFilter expressionFilter = new ExpressionFilter(exp);
  Scan scan = new Scan(Bytes.toBytes(startKey), expressionFilter);
  int count = 0;
  ResultScanner scanner = table.getScanner(scan);
  Result r = scanner.next();
  while (r != null) {
    count++;
    r = scanner.next();
  }
  System.out.println("++ Scanning finished with count : " + count + " ++");
  scanner.close();

  executeTimer.leave();
  executeTimer.end();
  System.out.println("++ Time cost for scanning: " + executeTimer.getTimeString() + " ++");
}
/**
 * Scans multiple rows.
 *
 * @param tableName table name
 * @param start_rowkey start row key
 * @param stop_rowkey stop row key
 * @return list of rows
 */
public ArrayList<HbaseRow> scanRows(String tableName, String start_rowkey, String stop_rowkey) {
  ResultScanner rowstmp = null;
  ArrayList<HbaseRow> rows = null;
  try {
    Scan scan = new Scan();
    scan.setStartRow(Bytes.toBytes(start_rowkey));
    scan.setStopRow(Bytes.toBytes(stop_rowkey));
    HTable table = new HTable(conf, Bytes.toBytes(tableName));
    rowstmp = table.getScanner(scan);
    rows = new ArrayList<>();
    for (Result rowtmp : rowstmp) {
      HbaseRow row = new HbaseRow();
      row.rowkey = Bytes.toString(rowtmp.getRow());
      for (Cell cell : rowtmp.listCells()) {
        HbaseColumn col = new HbaseColumn(cell);
        row.cols.add(col);
      }
      rows.add(row);
    }
  } catch (Exception e) {
    logger.error("scanRows failed", e);
  } finally {
    if (rowstmp != null) { // guard against NPE when getScanner itself failed
      rowstmp.close();
    }
  }
  return rows;
}
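// Hedged usage sketch for scanRows above: the table name "logs" and the date-prefixed
// row-key range are illustrative assumptions.
public void scanRowsExample() {
  ArrayList<HbaseRow> rows = scanRows("logs", "20150701", "20150801");
  for (HbaseRow row : rows) {
    System.out.println(row.rowkey + " has " + row.cols.size() + " cells");
  }
}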
@Override
protected void updateMeta(final byte[] oldRegion1, final byte[] oldRegion2, HRegion newRegion)
    throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    if (Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
      latestRegion = null;
    }
    Delete delete = new Delete(regionsToDelete[r]);
    table.delete(delete);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  newRegion.getRegionInfo().setOffline(true);

  Put put = new Put(newRegion.getRegionName());
  put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      Writables.getBytes(newRegion.getRegionInfo()));
  table.put(put);

  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
  }
}
/*
 * (non-Javadoc)
 *
 * @see com.hazelcast.core.MapStore#storeAll(java.util.Map)
 */
@Override
public void storeAll(Map<String, String> pairs) {
  HTable table = null;
  try {
    List<Put> puts = new ArrayList<Put>(pairs.size());
    for (Map.Entry<String, String> pair : pairs.entrySet()) {
      try {
        byte[] rowId = prefixDate ? IdUtil.bucketizeId(pair.getKey())
            : Bytes.toBytes(pair.getKey());
        Put p = new Put(rowId);
        if (outputFormatType == StoreFormatType.SMILE) {
          p.add(family, qualifier, jsonSmileConverter.convertToSmile(pair.getValue()));
        } else {
          p.add(family, qualifier, Bytes.toBytes(pair.getValue()));
        }
        puts.add(p);
      } catch (NumberFormatException nfe) {
        LOG.error("Encountered bad key: " + pair.getKey(), nfe);
      }
    }
    table = (HTable) pool.getTable(tableName);
    table.setAutoFlush(false);
    table.put(puts);
    table.flushCommits();
  } catch (IOException e) {
    LOG.error("Error during puts", e);
  } finally {
    if (table != null) {
      pool.putTable(table);
    }
  }
}
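// A minimal sketch of the single-entry MapStore counterpart to storeAll above. It is an
// assumption about how store(K, V) could be implemented in this class, not code from the
// original snippet: it simply delegates so both paths share one write routine.
@Override
public void store(String key, String value) {
  storeAll(java.util.Collections.singletonMap(key, value));
}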
public Blog(String blogid) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  table = new HTable(conf, "blogs");

  // 1. Get the row whose row key is blogid from above
  Get g = new Get(Bytes.toBytes(blogid));
  Result r = table.get(g);

  // 2. Extract the rowkey, blog text (column "body") and blog title
  // (column "meta:title")
  key = r.getRow();
  keyStr = Bytes.toString(key);
  blogText = Bytes.toString(r.getValue(Bytes.toBytes("body"), Bytes.toBytes("")));
  blogTitle = Bytes.toString(r.getValue(Bytes.toBytes("meta"), Bytes.toBytes("title")));
  Long reverseTimestamp = Long.parseLong(keyStr.substring(4));
  Long epoch = Math.abs(reverseTimestamp - Long.MAX_VALUE);
  dateOfPost = new Date(epoch);

  // Get an iterator for the comments
  Scan s = new Scan();
  s.addFamily(Bytes.toBytes("comment"));
  // Use a PrefixFilter
  PrefixFilter filter = new PrefixFilter(key);
  s.setFilter(filter);
  scanner = table.getScanner(s);
  resultIterator = scanner.iterator();
}
/**
 * @param t Table to use (will be closed when done).
 * @param g Get to run
 * @throws IOException
 */
private static Result get(final HTable t, final Get g) throws IOException {
  try {
    return t.get(g);
  } finally {
    t.close();
  }
}
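// Hedged usage sketch combining the one-shot put(...) and get(...) helpers above, assuming
// both live in the same class; the table name "t" and row key are illustrative assumptions.
// Each helper closes the HTable it is handed, so a fresh instance is created per call.
private static void helperExample(Configuration conf) throws IOException {
  put(new HTable(conf, "t"), new Put(Bytes.toBytes("row-1")));
  Result r = get(new HTable(conf, "t"), new Get(Bytes.toBytes("row-1")));
  System.out.println("cells: " + r.size());
}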