@Before
public void setUp() throws Exception {
  TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(namespace).build());
  try (Table table = TEST_UTIL.createTable(
      tableName, new String[] {Bytes.toString(TEST_FAMILY), Bytes.toString(TEST_FAMILY_2)})) {
    TEST_UTIL.waitTableEnabled(tableName);
    List<Put> puts = new ArrayList<Put>(5);
    Put put_1 = new Put(TEST_ROW);
    put_1.addColumn(TEST_FAMILY, Q1, value1);
    Put put_2 = new Put(TEST_ROW_2);
    put_2.addColumn(TEST_FAMILY, Q2, value2);
    Put put_3 = new Put(TEST_ROW_3);
    put_3.addColumn(TEST_FAMILY_2, Q1, value1);
    puts.add(put_1);
    puts.add(put_2);
    puts.add(put_3);
    table.put(puts);
  }
  assertEquals(1, AccessControlLists.getTablePermissions(conf, tableName).size());
  try {
    assertEquals(
        1, AccessControlClient.getUserPermissions(connection, tableName.toString()).size());
  } catch (Throwable e) {
    LOG.error("Error during call of AccessControlClient.getUserPermissions. ", e);
  }
  // setupOperations();
}
@Test(timeout = 120000)
public void testChangeTable() throws Exception {
  HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][] {f}, null);

  // basic test: it should work.
  Put p = new Put(row);
  p.addColumn(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  // Add a CF, it should work.
  HTableDescriptor bHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  HColumnDescriptor hcd = new HColumnDescriptor(row);
  hdt.addFamily(hcd);
  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.getHBaseAdmin().modifyTable(hdt.getTableName(), hdt);
  HTU.getHBaseAdmin().enableTable(hdt.getTableName());
  HTableDescriptor nHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  Assert.assertEquals(
      "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1,
      nHdt.getColumnFamilies().length);

  p = new Put(row);
  p.addColumn(row, row, row);
  table.put(p);

  g = new Get(row);
  r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  Admin admin = HTU.getHBaseAdmin();
  nHdt = admin.getTableDescriptor(hdt.getTableName());
  Assert.assertEquals(
      "fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1,
      nHdt.getColumnFamilies().length);

  admin.disableTable(hdt.getTableName());
  admin.deleteTable(hdt.getTableName());
  admin.close();
}
@Test
public void testStartStopRow() throws Exception {
  final TableName TABLENAME1 = TableName.valueOf("testStartStopRow1");
  final TableName TABLENAME2 = TableName.valueOf("testStartStopRow2");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN1 = Bytes.toBytes("c1");
  final byte[] ROW0 = Bytes.toBytesBinary("\\x01row0");
  final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1");
  final byte[] ROW2 = Bytes.toBytesBinary("\\x01row2");

  Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
  Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

  // put rows into the first table
  Put p = new Put(ROW0);
  p.addColumn(FAMILY, COLUMN1, COLUMN1);
  t1.put(p);
  p = new Put(ROW1);
  p.addColumn(FAMILY, COLUMN1, COLUMN1);
  t1.put(p);
  p = new Put(ROW2);
  p.addColumn(FAMILY, COLUMN1, COLUMN1);
  t1.put(p);

  CopyTable copy = new CopyTable();
  assertEquals(
      0,
      ToolRunner.run(
          new Configuration(TEST_UTIL.getConfiguration()),
          copy,
          new String[] {
            "--new.name=" + TABLENAME2,
            "--startrow=\\x01row1",
            "--stoprow=\\x01row2",
            TABLENAME1.getNameAsString()
          }));

  // verify the data was copied into table 2
  // row1 exists; row0 and row2 do not
  Get g = new Get(ROW1);
  Result r = t2.get(g);
  assertEquals(1, r.size());
  assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));

  g = new Get(ROW0);
  r = t2.get(g);
  assertEquals(0, r.size());

  g = new Get(ROW2);
  r = t2.get(g);
  assertEquals(0, r.size());

  t1.close();
  t2.close();
  TEST_UTIL.deleteTable(TABLENAME1);
  TEST_UTIL.deleteTable(TABLENAME2);
}
public void addData() throws IOException {
  List<Put> list = new ArrayList<Put>();

  Put put1 = new Put(("row1").getBytes());
  put1.addColumn(columnFamily1.getBytes(), "title".getBytes(), "title".getBytes());
  put1.addColumn(columnFamily1.getBytes(), "content".getBytes(), "content".getBytes());
  put1.addColumn(columnFamily2.getBytes(), "user".getBytes(), "user".getBytes());
  put1.addColumn(columnFamily2.getBytes(), "time".getBytes(), "time".getBytes());
  list.add(put1);

  Put put2 = new Put(("row2").getBytes());
  put2.addColumn(columnFamily1.getBytes(), "thumbUrl".getBytes(), "title".getBytes());
  put2.addColumn(columnFamily1.getBytes(), "author".getBytes(), "content".getBytes());
  put2.addColumn(columnFamily2.getBytes(), "age".getBytes(), "user".getBytes());
  list.add(put2);

  Put put3 = new Put(("row3").getBytes());
  put3.addColumn(columnFamily1.getBytes(), "title".getBytes(), "title".getBytes());
  put3.addColumn(columnFamily1.getBytes(), "author".getBytes(), "content".getBytes());
  put3.addColumn(columnFamily2.getBytes(), "age".getBytes(), "user".getBytes());
  put3.addColumn(columnFamily2.getBytes(), "time".getBytes(), "time".getBytes());
  list.add(put3);

  table.put(list);
  System.out.println("Data added successfully!");
}
@VisibleForTesting
Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
  p.addColumn(
      HConstants.CATALOG_FAMILY,
      HConstants.SERVER_QUALIFIER,
      Bytes.toBytes(sn.getHostAndPort()));
  p.addColumn(
      HConstants.CATALOG_FAMILY,
      HConstants.STARTCODE_QUALIFIER,
      Bytes.toBytes(sn.getStartcode()));
  p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
  return p;
}
/**
 * Import all of the ways from the parsed XML into the HBase table as segments. Ways are OSM
 * values which consist of a list of nodes. Segments are custom values we use which represent a
 * single node and its neighbors.
 */
private static void import_ways() {
  System.out.println("Importing ways (segments)...");
  Table segmentTable = Util.get_table("segment");
  if (segmentTable == null) {
    System.err.println("Segment table failed to load.");
    return;
  }

  int counter = 0;
  int batch = 100;
  List<Put> puts = new ArrayList<>();
  for (Way way : ways) {
    Node previousNode = null;
    for (Node node : way.getNodes()) {
      if (previousNode == null) {
        previousNode = node;
        continue;
      }
      Put p = new Put(Bytes.toBytes(previousNode.getGeohash()));
      p.addColumn(
          NODE,
          Bytes.toBytes(node.getGeohash()),
          Bytes.toBytes(String.valueOf(way.getTagsAsSerializedJSON())));
      puts.add(p);

      p = new Put(Bytes.toBytes(node.getGeohash()));
      p.addColumn(
          NODE,
          Bytes.toBytes(previousNode.getGeohash()),
          Bytes.toBytes(String.valueOf(way.getTagsAsSerializedJSON())));
      puts.add(p);

      previousNode = node;
    }
    counter += 1;
    if (counter % batch == 0) {
      try {
        System.out.print("\rBatch " + counter + " / " + ways.size());
        segmentTable.put(puts);
        puts.clear();
      } catch (IOException e) {
        System.out.println("Segment put failed");
        e.printStackTrace();
      }
    }
  }

  try {
    segmentTable.put(puts);
  } catch (IOException e) {
    System.out.println("Segment put failed");
    e.printStackTrace();
  }
  System.out.println("Added all segments!");
}
public static void main(String[] args) throws Exception {
  // vv CellScannerExample
  Put put = new Put(Bytes.toBytes("testrow"));
  put.addColumn(Bytes.toBytes("fam-1"), Bytes.toBytes("qual-1"), Bytes.toBytes("val-1"));
  put.addColumn(Bytes.toBytes("fam-1"), Bytes.toBytes("qual-2"), Bytes.toBytes("val-2"));
  put.addColumn(Bytes.toBytes("fam-2"), Bytes.toBytes("qual-3"), Bytes.toBytes("val-3"));

  CellScanner scanner = put.cellScanner();
  while (scanner.advance()) {
    Cell cell = scanner.current();
    System.out.println("Cell: " + cell);
  }
  // ^^ CellScannerExample
}
@Test
public void testMultipleCellsInOneFamilyAreConverted() {
  byte[] row = dataHelper.randomData("rk-");
  byte[] family = dataHelper.randomData("f1");
  byte[] qualifier1 = dataHelper.randomData("qual1");
  byte[] qualifier2 = dataHelper.randomData("qual2");
  byte[] value1 = dataHelper.randomData("v1");
  byte[] value2 = dataHelper.randomData("v2");
  long timestamp1 = 1L;
  long timestamp2 = 2L;

  Put hbasePut = new Put(row);
  hbasePut.addColumn(family, qualifier1, timestamp1, value1);
  hbasePut.addColumn(family, qualifier2, timestamp2, value2);

  MutateRowRequest.Builder rowMutationBuilder = adapter.adapt(hbasePut);
  Assert.assertArrayEquals(row, rowMutationBuilder.getRowKey().toByteArray());
  Assert.assertEquals(2, rowMutationBuilder.getMutationsCount());

  Mutation mutation = rowMutationBuilder.getMutations(0);
  Assert.assertEquals(MutationCase.SET_CELL, mutation.getMutationCase());
  SetCell setCell = mutation.getSetCell();
  Assert.assertArrayEquals(family, setCell.getFamilyNameBytes().toByteArray());
  Assert.assertArrayEquals(qualifier1, setCell.getColumnQualifier().toByteArray());
  Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(timestamp1), setCell.getTimestampMicros());
  Assert.assertArrayEquals(value1, setCell.getValue().toByteArray());

  Mutation mod2 = rowMutationBuilder.getMutations(1);
  SetCell setCell2 = mod2.getSetCell();
  Assert.assertArrayEquals(family, setCell2.getFamilyNameBytes().toByteArray());
  Assert.assertArrayEquals(qualifier2, setCell2.getColumnQualifier().toByteArray());
  Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(timestamp2), setCell2.getTimestampMicros());
  Assert.assertArrayEquals(value2, setCell2.getValue().toByteArray());
}
@Test(timeout = 30000)
public void testCreateDeleteTable() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][] {f}, null);

  Put p = new Put(row);
  p.addColumn(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    // But if we ask for stale we will get it
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
    SlowMeCopro.cdl.get().countDown();
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
}
public static Put getSinglePut(
    String rowId, String columnFamily, String columnQualifier, String value) {
  Put put = new Put(Bytes.toBytes(rowId));
  put.addColumn(
      Bytes.toBytes(columnFamily), Bytes.toBytes(columnQualifier), Bytes.toBytes(value));
  return put;
}
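// Hedged usage sketch for getSinglePut(): the connection setup, the "demo" table, and the
// "cf1" family below are illustrative assumptions, not part of the helper above.
public static void writeGreeting() throws IOException {
  try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Table demo = connection.getTable(TableName.valueOf("demo"))) {
    // Writes a single cell: row "row-1", column cf1:greeting, value "hello".
    demo.put(getSinglePut("row-1", "cf1", "greeting", "hello"));
  }
}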
@Override
public void setLogPosition(String queueId, String filename, long position) {
  try {
    byte[] rowKey = queueIdToRowKey(queueId);
    // Check that the log exists. addLog() must have been called before setLogPosition().
    Get checkLogExists = new Get(rowKey);
    checkLogExists.addColumn(CF_QUEUE, Bytes.toBytes(filename));
    if (!replicationTable.exists(checkLogExists)) {
      String errMsg =
          "Could not set position of non-existent log from queueId="
              + queueId
              + ", filename="
              + filename;
      abortable.abort(errMsg, new ReplicationException(errMsg));
      return;
    }
    // Update the log offset if it exists
    Put walAndOffset = new Put(rowKey);
    walAndOffset.addColumn(CF_QUEUE, Bytes.toBytes(filename), Bytes.toBytes(position));
    safeQueueUpdate(walAndOffset);
  } catch (IOException | ReplicationException e) {
    String errMsg =
        "Failed writing log position queueId=" + queueId + " filename=" + filename
            + " position=" + position;
    abortable.abort(errMsg, e);
  }
}
@Test
public void testSingleCellIsConverted() {
  byte[] row = dataHelper.randomData("rk-");
  byte[] family = dataHelper.randomData("f");
  byte[] qualifier = dataHelper.randomData("qual");
  byte[] value = dataHelper.randomData("v1");
  long timestamp = 2L;

  Put hbasePut = new Put(row);
  hbasePut.addColumn(family, qualifier, timestamp, value);

  MutateRowRequest.Builder rowMutationBuilder = adapter.adapt(hbasePut);
  Assert.assertArrayEquals(row, rowMutationBuilder.getRowKey().toByteArray());
  Assert.assertEquals(1, rowMutationBuilder.getMutationsCount());

  Mutation mutation = rowMutationBuilder.getMutations(0);
  Assert.assertEquals(MutationCase.SET_CELL, mutation.getMutationCase());
  SetCell setCell = mutation.getSetCell();
  Assert.assertArrayEquals(family, setCell.getFamilyNameBytes().toByteArray());
  Assert.assertArrayEquals(qualifier, setCell.getColumnQualifier().toByteArray());
  Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(timestamp), setCell.getTimestampMicros());
  Assert.assertArrayEquals(value, setCell.getValue().toByteArray());
}
private void loadData(Table table) throws IOException {
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
}
public static void updateExistingEventsToEndAtLastLocation(
    Table VTEvent_Table, long imo, VesselLocation lastlocation) throws IOException {
  // Update existing events that started BEFORE the first new location and end after it,
  // so that they end at the last location.
  // Find existing events that started BEFORE the first new location and end after it.
  Scan getEventStartedBeforeAndEndAfter = new Scan();
  getEventStartedBeforeAndEndAfter
      .setStartRow(
          Bytes.toBytes(
              LpadNum(imo, 7)
                  + LpadNum(Long.MAX_VALUE - lastlocation.recordtime, 19)
                  + "0000000000"))
      .setStopRow(Bytes.toBytes(LpadNum(imo, 7) + LpadNum(Long.MAX_VALUE, 19) + "9999999999"))
      .addColumn(details, exittime);
  getEventStartedBeforeAndEndAfter.setCaching(100);

  Filter ExistTimeValuefilter =
      new ValueFilter(
          CompareFilter.CompareOp.GREATER,
          new BinaryComparator(
              Bytes.toBytes(new DateTime(lastlocation.recordtime).toString(rawformatter))));
  getEventStartedBeforeAndEndAfter.setFilter(ExistTimeValuefilter);

  ResultScanner Result_eventcross = VTEvent_Table.getScanner(getEventStartedBeforeAndEndAfter);
  List<Put> puts = new ArrayList<Put>();
  for (Result res : Result_eventcross) {
    // Vessel event table
    // rowkey: imo(7) + timestamp(19, descending) + polygonid(8)
    // qualifiers: entrytime, entrycoordinates, exittime, exitcoordinates, destination
    byte[] rowkey = res.getRow();
    Put updateevent = new Put(rowkey);
    updateevent.addColumn(
        details,
        exittime,
        Bytes.toBytes(new DateTime(lastlocation.recordtime).toString(rawformatter)));
    updateevent.addColumn(details, coordinates, Bytes.toBytes(lastlocation.coordinates));
    updateevent.addColumn(details, destination, Bytes.toBytes(lastlocation.destination));
    puts.add(updateevent);
  }
  Result_eventcross.close();
  VTEvent_Table.put(puts);
}
public Optional<Put> getPut() {
  if (valid()) {
    Put p = new Put(key);
    columns.forEach(c -> p.addColumn(c.family.getBytes(), c.qualifier.getBytes(), c.value));
    return Optional.of(p);
  }
  return Optional.empty();
}
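// Hedged usage sketch for getPut(): "rowRecord" stands for an instance of the enclosing class
// and "mutator" for a pre-configured BufferedMutator; both are assumptions for illustration,
// only getPut() itself comes from the source above.
rowRecord.getPut().ifPresent(put -> {
  try {
    mutator.mutate(put); // only records that pass valid() produce a Put and get written
  } catch (IOException e) {
    throw new UncheckedIOException(e);
  }
});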
public static void main(String[] args) throws Exception {
  conf.set("hbase.zookeeper.quorum", "hadoop271.itversity.com");
  conf.set("hbase.zookeeper.property.clientPort", "2181");
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf("demo"));

  Scan scan1 = new Scan();
  ResultScanner scanner1 = table.getScanner(scan1);
  for (Result res : scanner1) {
    System.out.println(Bytes.toString(res.getRow()));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
  }
  scanner1.close();

  Put put = new Put("3".getBytes());
  put.addColumn("cf1".getBytes(), "column1".getBytes(), "value1".getBytes());
  put.addColumn("cf1".getBytes(), "column2".getBytes(), "value2".getBytes());
  table.put(put);

  Get get = new Get("3".getBytes());
  Result getResult = table.get(get);
  System.out.println("Printing columns for rowkey 3");
  System.out.println(Bytes.toString(getResult.getValue("cf1".getBytes(), "column1".getBytes())));
  System.out.println(Bytes.toString(getResult.getValue("cf1".getBytes(), "column2".getBytes())));

  scanner1 = table.getScanner(scan1);
  System.out.println("Before Delete");
  for (Result res : scanner1) {
    System.out.println(Bytes.toString(res.getRow()));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
  }
  scanner1.close();

  Delete del = new Delete("3".getBytes());
  table.delete(del);

  System.out.println("After Delete");
  scanner1 = table.getScanner(scan1);
  for (Result res : scanner1) {
    System.out.println(Bytes.toString(res.getRow()));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column1".getBytes())));
    System.out.println(Bytes.toString(res.getValue("cf1".getBytes(), "column2".getBytes())));
  }
  scanner1.close();

  table.close();
  connection.close();
}
/** Test copy of table from sourceTable to targetTable, copying all rows from family a. */
@Test
public void testRenameFamily() throws Exception {
  TableName sourceTable = TableName.valueOf("sourceTable");
  TableName targetTable = TableName.valueOf("targetTable");

  byte[][] families = {FAMILY_A, FAMILY_B};

  Table t = TEST_UTIL.createTable(sourceTable, families);
  Table t2 = TEST_UTIL.createTable(targetTable, families);

  Put p = new Put(ROW1);
  p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data11"));
  p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data12"));
  p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data13"));
  t.put(p);

  p = new Put(ROW2);
  p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Dat21"));
  p.addColumn(FAMILY_A, QUALIFIER, Bytes.toBytes("Data22"));
  p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23"));
  t.put(p);

  long currentTime = System.currentTimeMillis();
  String[] args =
      new String[] {
        "--new.name=" + targetTable,
        "--families=a:b",
        "--all.cells",
        "--starttime=" + (currentTime - 100000),
        "--endtime=" + (currentTime + 100000),
        "--versions=1",
        sourceTable.getNameAsString()
      };
  assertNull(t2.get(new Get(ROW1)).getRow());

  assertTrue(runCopy(args));

  assertNotNull(t2.get(new Get(ROW1)).getRow());
  Result res = t2.get(new Get(ROW1));
  byte[] b1 = res.getValue(FAMILY_B, QUALIFIER);
  assertEquals("Data13", new String(b1));

  assertNotNull(t2.get(new Get(ROW2)).getRow());
  res = t2.get(new Get(ROW2));
  b1 = res.getValue(FAMILY_A, QUALIFIER);
  // Data from the family of B is not copied
  assertNull(b1);
}
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();

  HBaseHelper helper = HBaseHelper.getHelper(conf);
  helper.dropTable("testtable");
  helper.createTable("testtable", "colfam1");

  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf("testtable"));

  List<Put> puts = new ArrayList<Put>();

  // vv PutListErrorExample2
  Put put1 = new Put(Bytes.toBytes("row1"));
  put1.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
  puts.add(put1);
  Put put2 = new Put(Bytes.toBytes("row2"));
  put2.addColumn(Bytes.toBytes("BOGUS"), Bytes.toBytes("qual1"), Bytes.toBytes("val2"));
  puts.add(put2);
  Put put3 = new Put(Bytes.toBytes("row2"));
  put3.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val3"));
  puts.add(put3);
  /*[*/ Put put4 = new Put(Bytes.toBytes("row2"));
  puts.add(put4); /*]*/ // co PutListErrorExample2-1-AddErrorPut Add a put with no content at all to the list.

  /*[*/ try {
    /*]*/ table.put(puts);
    /*[*/
  } catch (Exception e) {
    System.err.println("Error: " + e);
    // table.flushCommits(); // todo: FIX!
    /*]*/ // co PutListErrorExample2-2-Catch Catch local exception and commit queued updates.
    /*[*/
  } /*]*/
  // ^^ PutListErrorExample2
  table.close();
  connection.close();
  helper.close();
}
private void putData(Region region, int startRow, int numRows, byte[] qf, byte[]... families)
    throws IOException {
  for (int i = startRow; i < startRow + numRows; i++) {
    Put put = new Put(Bytes.toBytes("" + i));
    put.setDurability(Durability.SKIP_WAL);
    for (byte[] family : families) {
      put.addColumn(family, qf, null);
    }
    region.put(put);
  }
}
public static void main(String[] args) throws Exception {
  // vv FingerprintExample
  Put put = new Put(Bytes.toBytes("testrow"));
  put.addColumn(Bytes.toBytes("fam-1"), Bytes.toBytes("qual-1"), Bytes.toBytes("val-1"));
  put.addColumn(Bytes.toBytes("fam-1"), Bytes.toBytes("qual-2"), Bytes.toBytes("val-2"));
  put.addColumn(Bytes.toBytes("fam-2"), Bytes.toBytes("qual-3"), Bytes.toBytes("val-3"));

  String id =
      String.format(
          "Hostname: %s, App: %s",
          InetAddress.getLocalHost().getHostName(), System.getProperty("sun.java.command"));
  put.setId(id);

  System.out.println("Put.size: " + put.size());
  System.out.println("Put.id: " + put.getId());
  System.out.println("Put.fingerprint: " + put.getFingerprint());
  System.out.println("Put.toMap: " + put.toMap());
  System.out.println("Put.toJSON: " + put.toJSON());
  System.out.println("Put.toString: " + put.toString());
  // ^^ FingerprintExample
}
@Override
public void addLog(String queueId, String filename) throws ReplicationException {
  try {
    if (!checkQueueExists(queueId)) {
      // Each queue will have an Owner, OwnerHistory, and a collection of [WAL:offset] key values
      Put putNewQueue = new Put(Bytes.toBytes(buildQueueRowKey(queueId)));
      putNewQueue.addColumn(CF_QUEUE, COL_QUEUE_OWNER, serverNameBytes);
      putNewQueue.addColumn(CF_QUEUE, COL_QUEUE_OWNER_HISTORY, EMPTY_STRING_BYTES);
      putNewQueue.addColumn(CF_QUEUE, Bytes.toBytes(filename), INITIAL_OFFSET_BYTES);
      replicationTable.put(putNewQueue);
    } else {
      // Otherwise simply add the new log and offset as a new column
      Put putNewLog = new Put(queueIdToRowKey(queueId));
      putNewLog.addColumn(CF_QUEUE, Bytes.toBytes(filename), INITIAL_OFFSET_BYTES);
      safeQueueUpdate(putNewLog);
    }
  } catch (IOException | ReplicationException e) {
    String errMsg = "Failed adding log queueId=" + queueId + " filename=" + filename;
    abortable.abort(errMsg, e);
  }
}
private void populateTable(Table tbl) throws IOException {
  byte[] values = {'A', 'B', 'C', 'D'};
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < values.length; i++) {
    for (int j = 0; j < values.length; j++) {
      Put put = new Put(new byte[] {values[i], values[j]});
      put.addColumn(Bytes.toBytes("fam"), new byte[] {}, new byte[] {values[i], values[j]});
      puts.add(put);
    }
  }
  tbl.put(puts);
}
static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
    throws Exception {
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i < labelExps.length; i++) {
    Put put = new Put(Bytes.toBytes("row" + (i + 1)));
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO);
    put.setCellVisibility(new CellVisibility(labelExps[i]));
    puts.add(put);
  }
  Table table = TEST_UTIL.createTable(tableName, TEST_FAMILY);
  table.put(puts);
  return table;
}
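// Hedged companion sketch: reading back rows written by createTableAndWriteDataWithLabels()
// under cell-visibility authorizations. The table name, the "secret"/"confidential" labels,
// and the assumption that those labels are already defined on the cluster are illustrative;
// only the helper above comes from the source.
static void scanWithAuthorizations() throws Exception {
  try (Table labeled = createTableAndWriteDataWithLabels(
          TableName.valueOf("labelsTest"), "secret", "secret|confidential");
      ResultScanner scanner = labeled.getScanner(
          new Scan().setAuthorizations(new Authorizations("secret")))) {
    // Only cells whose visibility expression is satisfied by the "secret" authorization return.
    for (Result result : scanner) {
      System.out.println(Bytes.toString(result.getRow()));
    }
  }
}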
public void prepareTestData() throws Exception {
  try {
    util.getHBaseAdmin().disableTable(TABLE);
    util.getHBaseAdmin().deleteTable(TABLE);
  } catch (Exception e) {
    // ignore table not found
  }
  table = util.createTable(TABLE, FAM);
  {
    Put put = new Put(ROW);
    put.addColumn(FAM, A, Bytes.add(B, C)); // B, C are friends of A
    put.addColumn(FAM, B, Bytes.add(D, E, F)); // D, E, F are friends of B
    put.addColumn(FAM, C, G); // G is a friend of C
    table.put(put);
    rowSize = put.size();
  }
  Put put = new Put(ROW2);
  put.addColumn(FAM, D, E);
  put.addColumn(FAM, F, G);
  table.put(put);
  row2Size = put.size();
}
private void doCopyTableTest(boolean bulkload) throws Exception {
  final TableName TABLENAME1 = TableName.valueOf("testCopyTable1");
  final TableName TABLENAME2 = TableName.valueOf("testCopyTable2");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN1 = Bytes.toBytes("c1");

  try (Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
      Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY)) {
    // put rows into the first table
    for (int i = 0; i < 10; i++) {
      Put p = new Put(Bytes.toBytes("row" + i));
      p.addColumn(FAMILY, COLUMN1, COLUMN1);
      t1.put(p);
    }

    CopyTable copy = new CopyTable();
    int code;
    if (bulkload) {
      code =
          ToolRunner.run(
              new Configuration(TEST_UTIL.getConfiguration()),
              copy,
              new String[] {
                "--new.name=" + TABLENAME2.getNameAsString(),
                "--bulkload",
                TABLENAME1.getNameAsString()
              });
    } else {
      code =
          ToolRunner.run(
              new Configuration(TEST_UTIL.getConfiguration()),
              copy,
              new String[] {
                "--new.name=" + TABLENAME2.getNameAsString(), TABLENAME1.getNameAsString()
              });
    }
    assertEquals("copy job failed", 0, code);

    // verify the data was copied into table 2
    for (int i = 0; i < 10; i++) {
      Get g = new Get(Bytes.toBytes("row" + i));
      Result r = t2.get(g);
      assertEquals(1, r.size());
      assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));
    }
  } finally {
    TEST_UTIL.deleteTable(TABLENAME1);
    TEST_UTIL.deleteTable(TABLENAME2);
  }
}
/**
 * Update a record in the database. Any field/value pairs in the specified values HashMap will be
 * written into the record with the specified record key, overwriting any existing values with
 * the same field name.
 *
 * @param table The name of the table
 * @param key The record key of the record to write
 * @param values A HashMap of field/value pairs to update in the record
 * @return Zero on success, a non-zero error code on error
 */
@Override
public Status update(String table, String key, HashMap<String, ByteIterator> values) {
  // if this is a "new" table, init HTable object. Else, use existing one
  if (!tableName.equals(table)) {
    currentTable = null;
    try {
      getHTable(table);
      tableName = table;
    } catch (IOException e) {
      System.err.println("Error accessing HBase table: " + e);
      return Status.ERROR;
    }
  }

  if (debug) {
    System.out.println("Setting up put for key: " + key);
  }
  Put p = new Put(Bytes.toBytes(key));
  p.setDurability(durability);
  for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
    byte[] value = entry.getValue().toArray();
    if (debug) {
      System.out.println(
          "Adding field/value "
              + entry.getKey()
              + "/"
              + Bytes.toStringBinary(value)
              + " to put request");
    }
    p.addColumn(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value);
  }

  try {
    if (clientSideBuffering) {
      Preconditions.checkNotNull(bufferedMutator);
      bufferedMutator.mutate(p);
    } else {
      currentTable.put(p);
    }
  } catch (IOException e) {
    if (debug) {
      System.err.println("Error doing put: " + e);
    }
    return Status.ERROR;
  } catch (ConcurrentModificationException e) {
    // do nothing for now...hope this is rare
    return Status.ERROR;
  }

  return Status.OK;
}
/**
 * Attempt to claim the given queue with a checkAndPut on the OWNER column. We check that the
 * recently killed server is still the OWNER before we claim it.
 *
 * @param queue The queue that we are trying to claim
 * @param originalServer The server that originally owned the queue
 * @return Whether we successfully claimed the queue
 * @throws IOException
 */
private boolean attemptToClaimQueue(Result queue, String originalServer) throws IOException {
  Put putQueueNameAndHistory = new Put(queue.getRow());
  putQueueNameAndHistory.addColumn(CF_QUEUE, COL_QUEUE_OWNER, Bytes.toBytes(serverName));
  String newOwnerHistory =
      buildClaimedQueueHistory(
          Bytes.toString(queue.getValue(CF_QUEUE, COL_QUEUE_OWNER_HISTORY)), originalServer);
  putQueueNameAndHistory.addColumn(
      CF_QUEUE, COL_QUEUE_OWNER_HISTORY, Bytes.toBytes(newOwnerHistory));
  RowMutations claimAndRenameQueue = new RowMutations(queue.getRow());
  claimAndRenameQueue.add(putQueueNameAndHistory);
  // Attempt to claim ownership for this queue by checking if the current OWNER is the original
  // server. If it is not then another RS has already claimed it. If it is we set ourselves as
  // the new owner and update the queue's history
  boolean success =
      replicationTable.checkAndMutate(
          queue.getRow(),
          CF_QUEUE,
          COL_QUEUE_OWNER,
          CompareFilter.CompareOp.EQUAL,
          Bytes.toBytes(originalServer),
          claimAndRenameQueue);
  return success;
}
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseClientHelper.loadDefaultConfiguration();

  Connection connection = ConnectionFactory.createConnection(conf);
  try {
    Table table = connection.getTable(TableName.valueOf("testtable"));
    try {
      // 1 Put
      Put p = new Put(Bytes.toBytes("row1"));
      p.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
      table.put(p);

      // 2 Get
      Get g = new Get(Bytes.toBytes("row1"));
      Result r = table.get(g);
      byte[] value = r.getValue(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
      String valueStr = Bytes.toString(value);
      System.out.println("GET: " + valueStr);

      // 3 Scan
      Scan s = new Scan();
      s.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"));
      ResultScanner scanner = table.getScanner(s);
      try {
        for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
          System.out.println("Found row: " + rr);
        }
        // The other approach is to use a foreach loop. Scanners are iterable!
        // for (Result rr : scanner) {
        //   System.out.println("Found row: " + rr);
        // }
      } finally {
        scanner.close();
      }

      // Close your table and cluster connection.
    } finally {
      if (table != null) table.close();
    }
  } finally {
    connection.close();
  }
}
// reduce(Object, Iterable, Context) is called for each <key, (collection of values)> pair in
// the sorted inputs.
@Override
protected void reduce(Text key, Iterable<IntWritable> values, Context context)
    throws IOException, InterruptedException {
  // Iterable: allows an object to be the target of the "foreach" statement.
  int sum = 0;
  for (IntWritable val : values) {
    sum += val.get();
  }
  String[] keyStr = key.toString().split(":");
  Put p = new Put(Bytes.toBytes(Integer.parseInt(keyStr[0]))); // keyStr[0] = id as row
  p.addColumn(Bytes.toBytes("count"), Bytes.toBytes(keyStr[1]), Bytes.toBytes(sum));
  // Put.addColumn(byte[] family, byte[] qualifier, byte[] value):
  // "count" as family, Y/N as qualifier, sum as value.
  context.write(new ImmutableBytesWritable(p.getRow()), p);
}
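// Hedged driver sketch showing how a reducer like the one above is typically wired up so its
// (ImmutableBytesWritable, Put) output lands in HBase via TableOutputFormat. The class names
// CountMapper and CountReducer, the target table "counts", and the input path are assumptions
// for illustration; only the reduce() method above comes from the source.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class CountDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "count-to-hbase");
    job.setJarByClass(CountDriver.class);
    job.setMapperClass(CountMapper.class); // hypothetical mapper emitting <Text "id:Y", IntWritable 1>
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    // Binds the reducer to the "counts" table and configures TableOutputFormat on the job.
    TableMapReduceUtil.initTableReducerJob("counts", CountReducer.class, job);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}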
@Test
public void testRetry() {
  byte[] row = dataHelper.randomData("rk-");
  byte[] family1 = dataHelper.randomData("f1");
  byte[] qualifier1 = dataHelper.randomData("qual1");
  byte[] value1 = dataHelper.randomData("v1");

  Put hbasePut = new Put(row, System.currentTimeMillis());
  hbasePut.addColumn(family1, qualifier1, value1);

  MutateRowRequest.Builder rowMutationBuilder = adapter.adapt(hbasePut);
  MutateRowRequest request = rowMutationBuilder.build();

  // Is the Put retryable?
  Assert.assertTrue(BigtableDataGrpcClient.IS_RETRYABLE_MUTATION.apply(request));
}