private void deleteFamily(String family, String row, long version) throws IOException { Delete del = new Delete(Bytes.toBytes(row)); del.deleteFamily(Bytes.toBytes(family + "_ROWCOL"), version); del.deleteFamily(Bytes.toBytes(family + "_ROW"), version); del.deleteFamily(Bytes.toBytes(family + "_NONE"), version); region.delete(del, null, true); }
@Test public void testRowMutation() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation"); HTable table = util.createTable(tableName, new byte[][] {A, B, C}); try { verifyMethodResult( SimpleRegionObserver.class, new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"}, tableName, new Boolean[] {false, false, false, false, false}); Put put = new Put(ROW); put.add(A, A, A); put.add(B, B, B); put.add(C, C, C); Delete delete = new Delete(ROW); delete.deleteColumn(A, A); delete.deleteColumn(B, B); delete.deleteColumn(C, C); RowMutations arm = new RowMutations(ROW); arm.add(put); arm.add(delete); table.mutateRow(arm); verifyMethodResult( SimpleRegionObserver.class, new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"}, tableName, new Boolean[] {false, false, true, true, true}); } finally { util.deleteTable(tableName); table.close(); } }
@Override public Object terminatePartial(AggregationBuffer agg) throws HiveException { DeleteBuffer myagg = (DeleteBuffer) agg; ArrayList<List<String>> ret = new ArrayList<List<String>>(); ArrayList tname = new ArrayList<String>(); tname.add(configMap.get(HTableFactory.TABLE_NAME_TAG)); tname.add(configMap.get(HTableFactory.ZOOKEEPER_QUORUM_TAG)); for (Map.Entry<String, String> entry : configMap.entrySet()) { if (!entry.getKey().equals(HTableFactory.TABLE_NAME_TAG) && !entry.getKey().equals(HTableFactory.ZOOKEEPER_QUORUM_TAG)) { tname.add(entry.getKey()); } } ret.add(tname); for (Delete theDelete : myagg.deleteList) { ArrayList<String> kvList = new ArrayList<String>(); kvList.add(new String(theDelete.getRow())); ret.add(kvList); } return ret; }
@Override protected void updateMeta(final byte[] oldRegion1, final byte[] oldRegion2, HRegion newRegion) throws IOException { byte[][] regionsToDelete = {oldRegion1, oldRegion2}; for (int r = 0; r < regionsToDelete.length; r++) { Delete delete = new Delete(regionsToDelete[r]); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER); root.delete(delete, null, true); if (LOG.isDebugEnabled()) { LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r])); } } HRegionInfo newInfo = newRegion.getRegionInfo(); newInfo.setOffline(true); Put put = new Put(newRegion.getRegionName()); put.add( HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(newInfo)); root.put(put); if (LOG.isDebugEnabled()) { LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName())); } }
public void delete( final TransactionState transactionState, final Delete delete, final boolean bool_addLocation) throws IOException { SingleVersionDeleteNotSupported.validateDelete(delete); if (bool_addLocation) addLocation(transactionState, super.getRegionLocation(delete.getRow())); final String regionName = super.getRegionLocation(delete.getRow()).getRegionInfo().getRegionNameAsString(); Batch.Call<TrxRegionService, DeleteTransactionalResponse> callable = new Batch.Call<TrxRegionService, DeleteTransactionalResponse>() { ServerRpcController controller = new ServerRpcController(); BlockingRpcCallback<DeleteTransactionalResponse> rpcCallback = new BlockingRpcCallback<DeleteTransactionalResponse>(); @Override public DeleteTransactionalResponse call(TrxRegionService instance) throws IOException { org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos .DeleteTransactionalRequest.Builder builder = DeleteTransactionalRequest.newBuilder(); builder.setTransactionId(transactionState.getTransactionId()); builder.setRegionName(ByteString.copyFromUtf8(regionName)); MutationProto m1 = ProtobufUtil.toMutation(MutationType.DELETE, delete); builder.setDelete(m1); instance.delete(controller, builder.build(), rpcCallback); return rpcCallback.get(); } }; byte[] row = delete.getRow(); DeleteTransactionalResponse result = null; try { int retryCount = 0; boolean retry = false; do { Iterator<Map.Entry<byte[], DeleteTransactionalResponse>> it = super.coprocessorService(TrxRegionService.class, row, row, callable) .entrySet() .iterator(); if (it.hasNext()) { result = it.next().getValue(); retry = false; } if (result == null || result.getException().contains("closing region")) { Thread.sleep(TransactionalTable.delay); retry = true; transactionState.setRetried(true); retryCount++; } } while (retryCount < TransactionalTable.retries && retry == true); } catch (Throwable t) { if (LOG.isErrorEnabled()) LOG.error("ERROR while calling delete ", t); throw new IOException("ERROR while calling coprocessor ", t); } if (result == null) throw new IOException(retryErrMsg); else if (result.hasException()) throw new IOException(result.getException()); }
/** * Delete the specified column. * * @param tableName table name * * @param rowKey row key * * @param familyName column family name * * @param columnName column name */ public static void deleteColumn( String tableName, String rowKey, String familyName, String columnName) throws IOException { HTable table = new HTable(conf, Bytes.toBytes(tableName)); Delete deleteColumn = new Delete(Bytes.toBytes(rowKey)); deleteColumn.deleteColumns(Bytes.toBytes(familyName), Bytes.toBytes(columnName)); table.delete(deleteColumn); table.close(); System.out.println(familyName + ":" + columnName + " is deleted!"); }
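A minimal usage sketch for the helper above; the table, row key, family, and column names are invented for illustration, and conf is assumed to be initialized elsewhere in the class.

// Hypothetical caller of deleteColumn(); all names are made up.
public static void main(String[] args) throws IOException {
  // Removes every version of info:age from row "user-001" in user_table,
  // leaving the rest of the row untouched (deleteColumns marks all versions).
  deleteColumn("user_table", "user-001", "info", "age");
}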
@Override public void process(long now, HRegion region, List<Mutation> mutations, WALEdit walEdit) throws IOException { // Override the time to avoid a race condition in the unit test caused by // an inaccurate timer on some machines now = myTimer.getAndIncrement(); // Scan both rows List<Cell> kvs1 = new ArrayList<Cell>(); List<Cell> kvs2 = new ArrayList<Cell>(); doScan(region, new Scan(row1, row1), kvs1); doScan(region, new Scan(row2, row2), kvs2); // Assert swapped if (swapped) { assertEquals(rowSize, kvs2.size()); assertEquals(row2Size, kvs1.size()); } else { assertEquals(rowSize, kvs1.size()); assertEquals(row2Size, kvs2.size()); } swapped = !swapped; // Add and delete keyvalues List<List<Cell>> kvs = new ArrayList<List<Cell>>(); kvs.add(kvs1); kvs.add(kvs2); byte[][] rows = new byte[][] {row1, row2}; for (int i = 0; i < kvs.size(); ++i) { for (Cell kv : kvs.get(i)) { // Delete from the current row and add to the other row Delete d = new Delete(rows[i]); KeyValue kvDelete = new KeyValue( rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv.getTimestamp(), KeyValue.Type.Delete); d.addDeleteMarker(kvDelete); Put p = new Put(rows[1 - i]); KeyValue kvAdd = new KeyValue( rows[1 - i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), now, CellUtil.cloneValue(kv)); p.add(kvAdd); mutations.add(d); walEdit.add(kvDelete); mutations.add(p); walEdit.add(kvAdd); } } }
/** @param d Delete to clone. */ public Delete(final Delete d) { this.row = d.getRow(); this.ts = d.getTimeStamp(); this.familyMap.putAll(d.getFamilyCellMap()); this.durability = d.durability; for (Map.Entry<String, byte[]> entry : d.getAttributesMap().entrySet()) { this.setAttribute(entry.getKey(), entry.getValue()); } }
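A short sketch of how the copy constructor might be used, e.g. to reuse a prepared Delete as a template; the row, families, and attribute name below are invented.

// Hypothetical illustration of cloning a Delete; names are made up.
byte[] row = Bytes.toBytes("row-1");
Delete template = new Delete(row);
template.deleteColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"));
template.setAttribute("origin", Bytes.toBytes("template"));

// The copy starts with the same row, timestamp, family map, durability, and attributes.
// Note the family map is copied shallowly, so the per-family cell lists are shared.
Delete copy = new Delete(template);
copy.deleteFamily(Bytes.toBytes("cf2")); // adds a new family entry only to the copy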
public void addKey(byte[] key) throws HiveException { Delete theDelete = new Delete(key); // Disable WAL writes when specified in config map if (disableWAL) theDelete.setDurability(Durability.SKIP_WAL); deleteList.add(theDelete); getReporter().getCounter(BatchDeleteUDAFCounter.DELETE_ADDED).increment(1); System.out.println("Added delete: " + new String(key)); }
public void removeSingleElement(Object key, Element value) { final Delete delete = new Delete(ByteArraySerializer.fromObject(key)); delete.deleteColumns(Bytes.toBytes(VALUES), (byte[]) value.getId()); try { backingTable.delete(delete); } catch (IOException e) { LOG.severe("Cannot delete from backing table"); e.printStackTrace(); } }
/** * Delete a column. * * @param tableName table name * @param rowKey row key * @param familyName column family name * @param columnName column name */ public void deleteColumn(String tableName, String rowKey, String familyName, String columnName) { try { HTable table = new HTable(conf, Bytes.toBytes(tableName)); Delete deleteColumn = new Delete(Bytes.toBytes(rowKey)); deleteColumn.deleteColumns(Bytes.toBytes(familyName), Bytes.toBytes(columnName)); table.delete(deleteColumn); table.close(); } catch (Exception e) { logger.error("deleteColumn failed", e); } }
@Override protected void undoState(Iterable<byte[]> rows, int size) throws IOException { List<Delete> deletes = Lists.newArrayListWithCapacity(size); for (byte[] row : rows) { Delete delete = new Delete(keyDistributor.getDistributedKey(row)); delete.deleteColumns(QueueEntryRow.COLUMN_FAMILY, stateColumnName); deletes.add(delete); } hTable.delete(deletes); hTable.flushCommits(); }
protected void doReconstructionLog( final Path oldCoreLogFile, final long minSeqId, final long maxSeqId, final Progressable reporter) throws UnsupportedEncodingException, IOException { Path trxPath = new Path(oldCoreLogFile.getParent(), THLog.HREGION_OLD_THLOGFILE_NAME); // We can ignore doing anything with the Trx Log table, it is // not transactional. if (super.getTableDesc().getNameAsString().equals(HBaseBackedTransactionLogger.TABLE_NAME)) { return; } THLogRecoveryManager recoveryManager = new THLogRecoveryManager(this); Map<Long, WALEdit> committedTransactionsById = recoveryManager.getCommitsFromLog(trxPath, minSeqId, reporter); if (committedTransactionsById != null && committedTransactionsById.size() > 0) { LOG.debug("found " + committedTransactionsById.size() + " COMMITTED transactions to recover."); for (Entry<Long, WALEdit> entry : committedTransactionsById.entrySet()) { LOG.debug( "Writing " + entry.getValue().size() + " updates for transaction " + entry.getKey()); WALEdit b = entry.getValue(); for (KeyValue kv : b.getKeyValues()) { // FIXME need to convert these into puts and deletes. Not sure this is // the right way. // Could probably combine multiple KV's into a single put/delete. // Also timestamps? if (kv.getType() == KeyValue.Type.Put.getCode()) { Put put = new Put(kv.getRow()); put.add(kv); super.put(put); } else if (kv.isDelete()) { Delete del = new Delete(kv.getRow()); if (kv.isDeleteFamily()) { del.deleteFamily(kv.getFamily()); } else if (kv.isDeleteType()) { del.deleteColumn(kv.getFamily(), kv.getQualifier()); } super.delete(del, null, true); } } } LOG.debug("Flushing cache"); // We must trigger a cache flush, // otherwise we would ignore the log on a subsequent failure if (!super.flushcache()) { LOG.warn("Did not flush cache"); } } }
@Override public void removeLog(String queueId, String filename) { try { byte[] rowKey = queueIdToRowKey(queueId); Delete delete = new Delete(rowKey); delete.addColumns(CF_QUEUE, Bytes.toBytes(filename)); safeQueueUpdate(delete); } catch (IOException | ReplicationException e) { String errMsg = "Failed removing log queueId=" + queueId + " filename=" + filename; abortable.abort(errMsg, e); } }
public void deleteTableColoumnQualifier(String rowName, String qualifierName, String family) { Delete del = new Delete(Bytes.toBytes(rowName)); try { del = del.deleteColumn(Bytes.toBytes(family), Bytes.toBytes(qualifierName)); table.delete(del); System.out.println("Column " + rowName + ":" + qualifierName + " deleted"); } catch (IOException e) { System.out.println("Exception deleting column qualifier"); } }
public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); HTable hTable = new HTable(conf, "HBaseSamples"); Delete delete = new Delete(toBytes("rowToDelete")); hTable.delete(delete); Delete delete1 = new Delete(toBytes("anotherRow")); delete1.deleteColumns(toBytes("metrics"), toBytes("loan")); hTable.delete(delete1); hTable.close(); }
/** * Deletes daughters references in offlined split parent. * * @param catalogTracker * @param parent Parent row we're to remove daughter reference from * @throws NotAllMetaRegionsOnlineException * @throws IOException */ public static void deleteDaughtersReferencesInParent( CatalogTracker catalogTracker, final HRegionInfo parent) throws NotAllMetaRegionsOnlineException, IOException { Delete delete = new Delete(parent.getRegionName()); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER); deleteFromMetaTable(catalogTracker, delete); LOG.info( "Deleted daughters references, qualifier=" + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) + " and qualifier=" + Bytes.toStringBinary(HConstants.SPLITB_QUALIFIER) + ", from parent " + parent.getRegionNameAsString()); }
/** * Transactional version of {@link HTable#delete(Delete)} * * @param transactionState Identifier of the transaction * @see HTable#delete(Delete) * @throws IOException */ public void delete(TransactionState transactionState, Delete delete) throws IOException { final long startTimestamp = transactionState.getStartTimestamp(); boolean issueGet = false; final Put deleteP = new Put(delete.getRow(), startTimestamp); final Get deleteG = new Get(delete.getRow()); Map<byte[], List<KeyValue>> fmap = delete.getFamilyMap(); if (fmap.isEmpty()) { issueGet = true; } for (List<KeyValue> kvl : fmap.values()) { for (KeyValue kv : kvl) { switch (KeyValue.Type.codeToType(kv.getType())) { case DeleteColumn: deleteP.add(kv.getFamily(), kv.getQualifier(), startTimestamp, null); break; case DeleteFamily: deleteG.addFamily(kv.getFamily()); issueGet = true; break; case Delete: if (kv.getTimestamp() == HConstants.LATEST_TIMESTAMP) { deleteP.add(kv.getFamily(), kv.getQualifier(), startTimestamp, null); break; } else { throw new UnsupportedOperationException( "Cannot delete specific versions on Snapshot Isolation."); } } } } if (issueGet) { Result result = this.get(deleteG); for (Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> entryF : result.getMap().entrySet()) { byte[] family = entryF.getKey(); for (Entry<byte[], NavigableMap<Long, byte[]>> entryQ : entryF.getValue().entrySet()) { byte[] qualifier = entryQ.getKey(); deleteP.add(family, qualifier, null); } } } transactionState.addRow( new RowKeyFamily(delete.getRow(), getTableName(), deleteP.getFamilyMap())); put(deleteP); }
@Override public List<Mutation> toRowMutations() { // TODO: change to List<Mutation> once it implements Row List<Mutation> mutations = new ArrayList<Mutation>(3); if (deleteRow != null) { // Include only the deleteRow mutation if present because it takes precedence over all others mutations.add(deleteRow); } else { // Because we cannot enforce a not null constraint on a KV column (since we don't know if // the row exists when // we upsert it), we instead add a KV that is always empty. This allows us to imitate SQL // semantics given the // way HBase works. setValues.add( SchemaUtil.getEmptyColumnFamily(getColumnFamilies()), QueryConstants.EMPTY_COLUMN_BYTES, ts, ByteUtil.EMPTY_BYTE_ARRAY); mutations.add(setValues); if (!unsetValues.isEmpty()) { mutations.add(unsetValues); } } return mutations; }
public static void main(String[] args) throws IOException { conf = HBaseConfiguration.create(); HTable table = new HTable(conf, "testtable"); byte[] row = Bytes.toBytes("myrow-1"); byte[] family = Bytes.toBytes("colfam1"); byte[] qual = Bytes.toBytes("q1"); Delete delete = new Delete(row); delete.deleteFamily(Bytes.toBytes("colfam1")); delete.deleteColumn(family, qual); table.delete(delete); System.out.println("deleted"); table.close(); }
/** * Helper method for a {@link KeyValueBuilder} that catches an IOException from a {@link Delete} when * adding a {@link KeyValue} generated by the KeyValueBuilder. * * @throws RuntimeException if there is an IOException thrown from the underlying {@link Delete} */ @SuppressWarnings("javadoc") public static void deleteQuietly(Delete delete, KeyValueBuilder builder, KeyValue kv) { try { delete.addDeleteMarker(kv); } catch (IOException e) { throw new RuntimeException( "KeyValue Builder " + builder + " created an invalid kv: " + kv + "!", e); } }
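A hedged call-site sketch for deleteQuietly; the row, family, and qualifier are invented, and builder stands in for whatever KeyValueBuilder instance the caller already holds (it is only used in the error message).

// Hypothetical usage; names and the builder variable are assumptions.
byte[] row = Bytes.toBytes("row-1");
Delete delete = new Delete(row);
KeyValue marker = new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("col"),
    HConstants.LATEST_TIMESTAMP, KeyValue.Type.DeleteColumn);
// Any IOException raised by Delete.addDeleteMarker() surfaces as a RuntimeException,
// so the call site needs no checked-exception handling.
deleteQuietly(delete, builder, marker);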
@SuppressWarnings("deprecation") @Override public void rollbackRow(byte[] row, long startId, Integer lockId) throws IOException { byte[] family = DominoConst.INNER_FAMILY; Get get = new Get(row); get.setTimeStamp(startId); get.addFamily(family); Result r = region.get(get, lockId); if (r == null || r.isEmpty()) return; byte[] colBytes = r.getValue(family, DominoConst.COLUMNS_COL); if (colBytes == null || colBytes.length == 0) return; Delete del = new Delete(row); Columns cols = new Columns(colBytes); for (Column col : cols.cols) { del.deleteColumn(col.family, col.qualifier, startId); } del.deleteColumn(family, DominoConst.COLUMNS_COL, startId); del.deleteColumn(family, DominoConst.STATUS_COL, startId); mutateRow(del, lockId); }
@TimeDepend @Test public void testMaxVersion2() throws Exception { recreateTable(); fillData(); Delete delete = new Delete(rowKey_ForTest); delete.deleteColumn(ColumnFamilyNameBytes, QName1, 3L); table.delete(delete); Get get = new Get(rowKey_ForTest); get.addColumn(ColumnFamilyNameBytes, QName1); get.setMaxVersions(1); Result result = table.get(get); Assert.assertEquals(1, result.raw().length); get.setTimeStamp(3L); result = table.get(get); Assert.assertEquals(0, result.raw().length); get.setTimeStamp(2L); result = table.get(get); Assert.assertEquals(1, result.raw().length); get.setTimeStamp(1L); result = table.get(get); Assert.assertEquals(1, result.raw().length); get.setTimeStamp(0L); result = table.get(get); Assert.assertEquals(0, result.raw().length); get.setTimeRange(1, 4); result = table.get(get); Assert.assertEquals(1, result.raw().length); recreateTable(); }
private static void deleteTable(byte[] tableName) { HTablePool pool = new HTablePool(); HTableInterface table = null; try { table = pool.getTable(tableName); Scan scan = new Scan(); ResultScanner scanner = table.getScanner(scan); for (Result r : scanner) { Delete delete = new Delete(r.getRow()); NavigableMap<byte[], NavigableMap<byte[], byte[]>> map = r.getNoVersionMap(); for (Map.Entry<byte[], NavigableMap<byte[], byte[]>> family : map.entrySet()) { delete.deleteFamily(family.getKey()); } table.delete(delete); } Delete delete = new Delete(new byte[0]); table.delete(delete); } catch (Exception e) { // ignore } finally { if (table != null) pool.putTable(table); } }
/** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return Status.OK on success, Status.ERROR on error */ @Override public Status delete(String table, String key) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } if (debug) { System.out.println("Doing delete for key: " + key); } final Delete d = new Delete(Bytes.toBytes(key)); d.setDurability(durability); try { if (clientSideBuffering) { Preconditions.checkNotNull(bufferedMutator); bufferedMutator.mutate(d); } else { currentTable.delete(d); } } catch (IOException e) { if (debug) { System.err.println("Error doing delete: " + e); } return Status.ERROR; } return Status.OK; }
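A hypothetical call site for the method above; db is assumed to be an initialized instance of this binding, and the table and key are invented.

// Hypothetical usage; the method returns either Status.OK or Status.ERROR.
Status status = db.delete("usertable", "user42");
if (status != Status.OK) {
  System.err.println("delete of user42 failed: " + status);
}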
@Override public void setValue(PColumn column, byte[] byteValue) { deleteRow = null; byte[] family = column.getFamilyName().getBytes(); byte[] qualifier = column.getName().getBytes(); PDataType type = column.getDataType(); // Check null, since some types have no byte representation for null if (byteValue == null || byteValue.length == 0) { if (!column.isNullable()) { throw new ConstraintViolationException( name.getString() + "." + column.getName().getString() + " may not be null"); } removeIfPresent(setValues, family, qualifier); unsetValues.deleteColumns(family, qualifier, ts); } else { Integer byteSize = column.getByteSize(); if (type.isFixedWidth()) { // TODO: handle multi-byte characters if (byteValue.length != byteSize) { throw new ConstraintViolationException( name.getString() + "." + column.getName().getString() + " must be " + byteSize + " bytes (" + type.toObject(byteValue) + ")"); } } else if (byteSize != null && byteValue.length > byteSize) { throw new ConstraintViolationException( name.getString() + "." + column.getName().getString() + " may not exceed " + byteSize + " bytes (" + type.toObject(byteValue) + ")"); } removeIfPresent(unsetValues, family, qualifier); setValues.add(family, qualifier, ts, byteValue); } }
/** @param d Delete to clone. */ public Delete(final Delete d) { this.row = d.getRow(); this.ts = d.getTimeStamp(); this.lockId = d.getLockId(); this.familyMap.putAll(d.getFamilyMap()); }
/** * Test transactional delete operations. * * @throws Exception */ @Test public void testValidTransactionalDelete() throws Exception { try (HTable hTable = createTable( Bytes.toBytes("TestValidTransactionalDelete"), new byte[][] {TestBytes.family, TestBytes.family2})) { TransactionAwareHTable txTable = new TransactionAwareHTable(hTable); TransactionContext txContext = new TransactionContext(new InMemoryTxSystemClient(txManager), txTable); txContext.start(); Put put = new Put(TestBytes.row); put.add(TestBytes.family, TestBytes.qualifier, TestBytes.value); put.add(TestBytes.family2, TestBytes.qualifier, TestBytes.value2); txTable.put(put); txContext.finish(); txContext.start(); Result result = txTable.get(new Get(TestBytes.row)); txContext.finish(); byte[] value = result.getValue(TestBytes.family, TestBytes.qualifier); assertArrayEquals(TestBytes.value, value); value = result.getValue(TestBytes.family2, TestBytes.qualifier); assertArrayEquals(TestBytes.value2, value); // test full row delete txContext.start(); Delete delete = new Delete(TestBytes.row); txTable.delete(delete); txContext.finish(); txContext.start(); result = txTable.get(new Get(TestBytes.row)); txContext.finish(); assertTrue(result.isEmpty()); // test column delete // load 10 rows txContext.start(); int rowCount = 10; for (int i = 0; i < rowCount; i++) { Put p = new Put(Bytes.toBytes("row" + i)); for (int j = 0; j < 10; j++) { p.add(TestBytes.family, Bytes.toBytes(j), TestBytes.value); } txTable.put(p); } txContext.finish(); // verify loaded rows txContext.start(); for (int i = 0; i < rowCount; i++) { Get g = new Get(Bytes.toBytes("row" + i)); Result r = txTable.get(g); assertFalse(r.isEmpty()); for (int j = 0; j < 10; j++) { assertArrayEquals(TestBytes.value, r.getValue(TestBytes.family, Bytes.toBytes(j))); } } txContext.finish(); // delete odd columns from odd rows and even columns from even rows txContext.start(); for (int i = 0; i < rowCount; i++) { Delete d = new Delete(Bytes.toBytes("row" + i)); for (int j = 0; j < 10; j++) { if (i % 2 == j % 2) { LOG.info("Deleting row={}, column={}", i, j); d.deleteColumns(TestBytes.family, Bytes.toBytes(j)); } } txTable.delete(d); } txContext.finish(); // verify deleted columns txContext.start(); for (int i = 0; i < rowCount; i++) { Get g = new Get(Bytes.toBytes("row" + i)); Result r = txTable.get(g); assertEquals(5, r.size()); for (Map.Entry<byte[], byte[]> entry : r.getFamilyMap(TestBytes.family).entrySet()) { int col = Bytes.toInt(entry.getKey()); LOG.info("Got row={}, col={}", i, col); // each row should only have the opposite mod (odd=even, even=odd) assertNotEquals(i % 2, col % 2); assertArrayEquals(TestBytes.value, entry.getValue()); } } txContext.finish(); // test family delete // load 10 rows txContext.start(); for (int i = 0; i < rowCount; i++) { Put p = new Put(Bytes.toBytes("famrow" + i)); p.add(TestBytes.family, TestBytes.qualifier, TestBytes.value); p.add(TestBytes.family2, TestBytes.qualifier2, TestBytes.value2); txTable.put(p); } txContext.finish(); // verify all loaded rows txContext.start(); for (int i = 0; i < rowCount; i++) { Get g = new Get(Bytes.toBytes("famrow" + i)); Result r = txTable.get(g); assertEquals(2, r.size()); assertArrayEquals(TestBytes.value, r.getValue(TestBytes.family, TestBytes.qualifier)); assertArrayEquals(TestBytes.value2, r.getValue(TestBytes.family2, TestBytes.qualifier2)); } txContext.finish(); // delete family1 for even rows, family2 for odd rows txContext.start(); for (int i = 0; i < rowCount; i++) { Delete d = new Delete(Bytes.toBytes("famrow" + i)); d.deleteFamily((i % 2 == 0) ? TestBytes.family : TestBytes.family2); txTable.delete(d); } txContext.finish(); // verify deleted families txContext.start(); for (int i = 0; i < rowCount; i++) { Get g = new Get(Bytes.toBytes("famrow" + i)); Result r = txTable.get(g); assertEquals(1, r.size()); if (i % 2 == 0) { assertNull(r.getValue(TestBytes.family, TestBytes.qualifier)); assertArrayEquals(TestBytes.value2, r.getValue(TestBytes.family2, TestBytes.qualifier2)); } else { assertArrayEquals(TestBytes.value, r.getValue(TestBytes.family, TestBytes.qualifier)); assertNull(r.getValue(TestBytes.family2, TestBytes.qualifier2)); } } txContext.finish(); } }
@Test public void testRegionObserver() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver"); // recreate table every time in order to reset the status of the // coprocessor. HTable table = util.createTable(tableName, new byte[][] {A, B, C}); try { verifyMethodResult( SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDelete", "hadPostStartRegionOperation", "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" }, tableName, new Boolean[] {false, false, false, false, false, false, false, false}); Put put = new Put(ROW); put.add(A, A, A); put.add(B, B, B); put.add(C, C, C); table.put(put); verifyMethodResult( SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete", "hadPostStartRegionOperation", "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" }, TEST_TABLE, new Boolean[] {false, false, true, true, true, true, false, true, true, true}); verifyMethodResult( SimpleRegionObserver.class, new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"}, tableName, new Integer[] {1, 1, 0, 0}); Get get = new Get(ROW); get.addColumn(A, A); get.addColumn(B, B); get.addColumn(C, C); table.get(get); verifyMethodResult( SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDelete", "hadPrePreparedDeleteTS" }, tableName, new Boolean[] {true, true, true, true, false, false}); Delete delete = new Delete(ROW); delete.deleteColumn(A, A); delete.deleteColumn(B, B); delete.deleteColumn(C, C); table.delete(delete); verifyMethodResult( SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS" }, tableName, new Boolean[] {true, true, true, true, true, true, true, true}); } finally { util.deleteTable(tableName); table.close(); } verifyMethodResult( SimpleRegionObserver.class, new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"}, tableName, new Integer[] {1, 1, 1, 1}); }
@BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); context = JAXBContext.newInstance( CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class); marshaller = context.createMarshaller(); unmarshaller = context.createUnmarshaller(); client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); if (!admin.tableExists(TABLE)) { HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); htd.addFamily(new HColumnDescriptor(FAMILIES[1])); admin.createTable(htd); HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE); // Insert first half for (byte[] ROW : ROWS_ONE) { Put p = new Put(ROW); for (byte[] QUALIFIER : QUALIFIERS_ONE) { p.add(FAMILIES[0], QUALIFIER, VALUES[0]); } table.put(p); } for (byte[] ROW : ROWS_TWO) { Put p = new Put(ROW); for (byte[] QUALIFIER : QUALIFIERS_TWO) { p.add(FAMILIES[1], QUALIFIER, VALUES[1]); } table.put(p); } // Insert second half (reverse families) for (byte[] ROW : ROWS_ONE) { Put p = new Put(ROW); for (byte[] QUALIFIER : QUALIFIERS_ONE) { p.add(FAMILIES[1], QUALIFIER, VALUES[0]); } table.put(p); } for (byte[] ROW : ROWS_TWO) { Put p = new Put(ROW); for (byte[] QUALIFIER : QUALIFIERS_TWO) { p.add(FAMILIES[0], QUALIFIER, VALUES[1]); } table.put(p); } // Delete the second qualifier from all rows and families for (byte[] ROW : ROWS_ONE) { Delete d = new Delete(ROW); d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]); d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]); table.delete(d); } for (byte[] ROW : ROWS_TWO) { Delete d = new Delete(ROW); d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]); d.deleteColumns(FAMILIES[1], QUALIFIERS_TWO[1]); table.delete(d); } colsPerRow -= 2; // Delete the second rows from both groups, one column at a time for (byte[] QUALIFIER : QUALIFIERS_ONE) { Delete d = new Delete(ROWS_ONE[1]); d.deleteColumns(FAMILIES[0], QUALIFIER); d.deleteColumns(FAMILIES[1], QUALIFIER); table.delete(d); } for (byte[] QUALIFIER : QUALIFIERS_TWO) { Delete d = new Delete(ROWS_TWO[1]); d.deleteColumns(FAMILIES[0], QUALIFIER); d.deleteColumns(FAMILIES[1], QUALIFIER); table.delete(d); } numRows -= 2; } }