/**
 * Removes the catalog columns for the two regions that were merged and stores the
 * new, offlined merged region's HRegionInfo back into the catalog region (root).
 */
@Override
protected void updateMeta(final byte[] oldRegion1, final byte[] oldRegion2,
    HRegion newRegion) throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    Delete delete = new Delete(regionsToDelete[r]);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    root.delete(delete, null, true);

    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }

  HRegionInfo newInfo = newRegion.getRegionInfo();
  newInfo.setOffline(true);

  Put put = new Put(newRegion.getRegionName());
  put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      Writables.getBytes(newInfo));
  root.put(put);

  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
  }
}
/**
 * Deletes the specified column (all versions) from a row.
 *
 * @param tableName  table name
 * @param rowKey     row key
 * @param familyName column family name
 * @param columnName column qualifier
 */
public static void deleteColumn(String tableName, String rowKey, String familyName,
    String columnName) throws IOException {
  HTable table = new HTable(conf, Bytes.toBytes(tableName));
  try {
    Delete deleteColumn = new Delete(Bytes.toBytes(rowKey));
    deleteColumn.deleteColumns(Bytes.toBytes(familyName), Bytes.toBytes(columnName));
    table.delete(deleteColumn);
    System.out.println(familyName + ":" + columnName + " is deleted!");
  } finally {
    table.close();
  }
}
/**
 * Deletes a column (all versions) from a row, logging any failure.
 *
 * @param tableName  table name
 * @param rowKey     row key
 * @param familyName column family name
 * @param columnName column qualifier
 */
public void deleteColumn(String tableName, String rowKey, String familyName, String columnName) {
  try {
    HTable table = new HTable(conf, Bytes.toBytes(tableName));
    try {
      Delete deleteColumn = new Delete(Bytes.toBytes(rowKey));
      deleteColumn.deleteColumns(Bytes.toBytes(familyName), Bytes.toBytes(columnName));
      table.delete(deleteColumn);
    } finally {
      table.close();
    }
  } catch (Exception e) {
    logger.error("deleteColumn failed", e);
  }
}
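A short, self-contained sketch of how either helper above might be exercised, with a Get to confirm the cell is gone afterwards. This is illustrative only, assuming a table "user_table" with family "info"; all names here are hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteColumnCheckSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical table/row/family/qualifier names, for illustration only.
    HTable table = new HTable(HBaseConfiguration.create(), "user_table");
    try {
      // Remove all versions of info:email for one row, as the helpers above do.
      Delete delete = new Delete(Bytes.toBytes("row-0001"));
      delete.deleteColumns(Bytes.toBytes("info"), Bytes.toBytes("email"));
      table.delete(delete);

      // Confirm the cell no longer comes back.
      Result result = table.get(new Get(Bytes.toBytes("row-0001")));
      System.out.println("info:email present? "
          + result.containsColumn(Bytes.toBytes("info"), Bytes.toBytes("email")));
    } finally {
      table.close();
    }
  }
}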
public void removeSingleElement(Object key, Element value) {
  final Delete delete = new Delete(ByteArraySerializer.fromObject(key));
  delete.deleteColumns(Bytes.toBytes(VALUES), (byte[]) value.getId());
  try {
    backingTable.delete(delete);
  } catch (IOException e) {
    LOG.severe("Cannot delete from backing table");
    e.printStackTrace();
  }
}
@Override
protected void undoState(Iterable<byte[]> rows, int size) throws IOException {
  List<Delete> deletes = Lists.newArrayListWithCapacity(size);
  for (byte[] row : rows) {
    Delete delete = new Delete(keyDistributor.getDistributedKey(row));
    delete.deleteColumns(QueueEntryRow.COLUMN_FAMILY, stateColumnName);
    deletes.add(delete);
  }
  hTable.delete(deletes);
  hTable.flushCommits();
}
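The snippet above builds one Delete per row and submits them in a single batched call. A minimal standalone sketch of that batching pattern with the plain client API follows; the table, family, and qualifier names are hypothetical.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchColumnDeleteSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical table and column names, for illustration only.
    HTable table = new HTable(HBaseConfiguration.create(), "queue_state");
    try {
      List<Delete> deletes = new ArrayList<Delete>();
      for (int i = 0; i < 100; i++) {
        // Each Delete clears all versions of one state column for one row.
        Delete delete = new Delete(Bytes.toBytes("row-" + i));
        delete.deleteColumns(Bytes.toBytes("q"), Bytes.toBytes("state"));
        deletes.add(delete);
      }
      // Submits the whole batch at once instead of one RPC per Delete.
      table.delete(deletes);
      table.flushCommits();
    } finally {
      table.close();
    }
  }
}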
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  HTable hTable = new HTable(conf, "HBaseSamples");

  // Delete an entire row.
  Delete delete = new Delete(toBytes("rowToDelete"));
  hTable.delete(delete);

  // Delete all versions of a single column in another row.
  Delete delete1 = new Delete(toBytes("anotherRow"));
  delete1.deleteColumns(toBytes("metrics"), toBytes("loan"));
  hTable.delete(delete1);

  hTable.close();
}
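The example above uses deleteColumns, which removes every stored version of the metrics:loan cell. A minimal sketch of the contrast with deleteColumn, which removes only the most recent version, is below; it reuses the same illustrative table and cell names.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteVersionsSketch {
  public static void main(String[] args) throws Exception {
    HTable table = new HTable(HBaseConfiguration.create(), "HBaseSamples");
    try {
      // Removes only the newest version of metrics:loan; older versions stay readable.
      Delete latestOnly = new Delete(Bytes.toBytes("anotherRow"));
      latestOnly.deleteColumn(Bytes.toBytes("metrics"), Bytes.toBytes("loan"));
      table.delete(latestOnly);

      // Removes all versions of metrics:loan, as in the example above.
      Delete allVersions = new Delete(Bytes.toBytes("anotherRow"));
      allVersions.deleteColumns(Bytes.toBytes("metrics"), Bytes.toBytes("loan"));
      table.delete(allVersions);
    } finally {
      table.close();
    }
  }
}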
/**
 * Deletes daughters references in offlined split parent.
 *
 * @param catalogTracker
 * @param parent Parent row we're to remove daughter reference from
 * @throws NotAllMetaRegionsOnlineException
 * @throws IOException
 */
public static void deleteDaughtersReferencesInParent(CatalogTracker catalogTracker,
    final HRegionInfo parent) throws NotAllMetaRegionsOnlineException, IOException {
  Delete delete = new Delete(parent.getRegionName());
  delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  deleteFromMetaTable(catalogTracker, delete);
  LOG.info("Deleted daughters references, qualifier="
      + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER)
      + " and qualifier=" + Bytes.toStringBinary(HConstants.SPLITB_QUALIFIER)
      + ", from parent " + parent.getRegionNameAsString());
}
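The same pattern, several qualifiers dropped from one row with a single Delete, works against ordinary user tables as well, and all deletions in one Delete are applied atomically to that row. A minimal sketch follows; the table name, row key, family, and qualifiers are hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiQualifierDeleteSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical table/row/family/qualifier names, for illustration only.
    HTable table = new HTable(HBaseConfiguration.create(), "parent_links");
    try {
      // One Delete can carry several column deletions for the same row.
      Delete delete = new Delete(Bytes.toBytes("parent-row"));
      delete.deleteColumns(Bytes.toBytes("info"), Bytes.toBytes("splitA"));
      delete.deleteColumns(Bytes.toBytes("info"), Bytes.toBytes("splitB"));
      table.delete(delete);
    } finally {
      table.close();
    }
  }
}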
@Override
public void setValue(PColumn column, byte[] byteValue) {
  deleteRow = null;
  byte[] family = column.getFamilyName().getBytes();
  byte[] qualifier = column.getName().getBytes();
  PDataType type = column.getDataType();
  // Check null, since some types have no byte representation for null
  if (byteValue == null || byteValue.length == 0) {
    if (!column.isNullable()) {
      throw new ConstraintViolationException(
          name.getString() + "." + column.getName().getString() + " may not be null");
    }
    removeIfPresent(setValues, family, qualifier);
    unsetValues.deleteColumns(family, qualifier, ts);
  } else {
    Integer byteSize = column.getByteSize();
    if (type.isFixedWidth()) { // TODO: handle multi-byte characters
      if (byteValue.length != byteSize) {
        throw new ConstraintViolationException(
            name.getString() + "." + column.getName().getString() + " must be "
                + byteSize + " bytes (" + type.toObject(byteValue) + ")");
      }
    } else if (byteSize != null && byteValue.length > byteSize) {
      throw new ConstraintViolationException(
          name.getString() + "." + column.getName().getString() + " may not exceed "
              + byteSize + " bytes (" + type.toObject(byteValue) + ")");
    }
    removeIfPresent(unsetValues, family, qualifier);
    setValues.add(family, qualifier, ts, byteValue);
  }
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class,
      ScannerModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (!admin.tableExists(TABLE)) {
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
    htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
    admin.createTable(htd);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
    // Insert first half
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Insert second half (reverse families)
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Delete the second qualifier from all rows and families
    for (byte[] ROW : ROWS_ONE) {
      Delete d = new Delete(ROW);
      d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
      d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
      table.delete(d);
    }
    for (byte[] ROW : ROWS_TWO) {
      Delete d = new Delete(ROW);
      d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
      d.deleteColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
      table.delete(d);
    }
    colsPerRow -= 2;
    // Delete the second rows from both groups, one column at a time
    for (byte[] QUALIFIER : QUALIFIERS_ONE) {
      Delete d = new Delete(ROWS_ONE[1]);
      d.deleteColumns(FAMILIES[0], QUALIFIER);
      d.deleteColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    for (byte[] QUALIFIER : QUALIFIERS_TWO) {
      Delete d = new Delete(ROWS_TWO[1]);
      d.deleteColumns(FAMILIES[0], QUALIFIER);
      d.deleteColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    numRows -= 2;
  }
}
/**
 * Test transactional delete operations.
 *
 * @throws Exception
 */
@Test
public void testValidTransactionalDelete() throws Exception {
  try (HTable hTable = createTable(Bytes.toBytes("TestValidTransactionalDelete"),
      new byte[][] {TestBytes.family, TestBytes.family2})) {
    TransactionAwareHTable txTable = new TransactionAwareHTable(hTable);
    TransactionContext txContext =
        new TransactionContext(new InMemoryTxSystemClient(txManager), txTable);

    txContext.start();
    Put put = new Put(TestBytes.row);
    put.add(TestBytes.family, TestBytes.qualifier, TestBytes.value);
    put.add(TestBytes.family2, TestBytes.qualifier, TestBytes.value2);
    txTable.put(put);
    txContext.finish();

    txContext.start();
    Result result = txTable.get(new Get(TestBytes.row));
    txContext.finish();
    byte[] value = result.getValue(TestBytes.family, TestBytes.qualifier);
    assertArrayEquals(TestBytes.value, value);
    value = result.getValue(TestBytes.family2, TestBytes.qualifier);
    assertArrayEquals(TestBytes.value2, value);

    // test full row delete
    txContext.start();
    Delete delete = new Delete(TestBytes.row);
    txTable.delete(delete);
    txContext.finish();

    txContext.start();
    result = txTable.get(new Get(TestBytes.row));
    txContext.finish();
    assertTrue(result.isEmpty());

    // test column delete
    // load 10 rows
    txContext.start();
    int rowCount = 10;
    for (int i = 0; i < rowCount; i++) {
      Put p = new Put(Bytes.toBytes("row" + i));
      for (int j = 0; j < 10; j++) {
        p.add(TestBytes.family, Bytes.toBytes(j), TestBytes.value);
      }
      txTable.put(p);
    }
    txContext.finish();

    // verify loaded rows
    txContext.start();
    for (int i = 0; i < rowCount; i++) {
      Get g = new Get(Bytes.toBytes("row" + i));
      Result r = txTable.get(g);
      assertFalse(r.isEmpty());
      for (int j = 0; j < 10; j++) {
        assertArrayEquals(TestBytes.value, r.getValue(TestBytes.family, Bytes.toBytes(j)));
      }
    }
    txContext.finish();

    // delete odds columns from odd rows and even columns from even rows
    txContext.start();
    for (int i = 0; i < rowCount; i++) {
      Delete d = new Delete(Bytes.toBytes("row" + i));
      for (int j = 0; j < 10; j++) {
        if (i % 2 == j % 2) {
          LOG.info("Deleting row={}, column={}", i, j);
          d.deleteColumns(TestBytes.family, Bytes.toBytes(j));
        }
      }
      txTable.delete(d);
    }
    txContext.finish();

    // verify deleted columns
    txContext.start();
    for (int i = 0; i < rowCount; i++) {
      Get g = new Get(Bytes.toBytes("row" + i));
      Result r = txTable.get(g);
      assertEquals(5, r.size());
      for (Map.Entry<byte[], byte[]> entry : r.getFamilyMap(TestBytes.family).entrySet()) {
        int col = Bytes.toInt(entry.getKey());
        LOG.info("Got row={}, col={}", i, col);
        // each row should only have the opposite mod (odd=even, even=odd)
        assertNotEquals(i % 2, col % 2);
        assertArrayEquals(TestBytes.value, entry.getValue());
      }
    }
    txContext.finish();

    // test family delete
    // load 10 rows
    txContext.start();
    for (int i = 0; i < rowCount; i++) {
      Put p = new Put(Bytes.toBytes("famrow" + i));
      p.add(TestBytes.family, TestBytes.qualifier, TestBytes.value);
      p.add(TestBytes.family2, TestBytes.qualifier2, TestBytes.value2);
      txTable.put(p);
    }
    txContext.finish();

    // verify all loaded rows
    txContext.start();
    for (int i = 0; i < rowCount; i++) {
      Get g = new Get(Bytes.toBytes("famrow" + i));
      Result r = txTable.get(g);
      assertEquals(2, r.size());
      assertArrayEquals(TestBytes.value, r.getValue(TestBytes.family, TestBytes.qualifier));
      assertArrayEquals(TestBytes.value2, r.getValue(TestBytes.family2, TestBytes.qualifier2));
    }
    txContext.finish();

    // delete family1 for even rows, family2 for odd rows
    txContext.start();
    for (int i = 0; i < rowCount; i++) {
      Delete d = new Delete(Bytes.toBytes("famrow" + i));
      d.deleteFamily((i % 2 == 0) ? TestBytes.family : TestBytes.family2);
      txTable.delete(d);
    }
    txContext.finish();

    // verify deleted families
    txContext.start();
    for (int i = 0; i < rowCount; i++) {
      Get g = new Get(Bytes.toBytes("famrow" + i));
      Result r = txTable.get(g);
      assertEquals(1, r.size());
      if (i % 2 == 0) {
        assertNull(r.getValue(TestBytes.family, TestBytes.qualifier));
        assertArrayEquals(TestBytes.value2, r.getValue(TestBytes.family2, TestBytes.qualifier2));
      } else {
        assertArrayEquals(TestBytes.value, r.getValue(TestBytes.family, TestBytes.qualifier));
        assertNull(r.getValue(TestBytes.family2, TestBytes.qualifier2));
      }
    }
    txContext.finish();
  }
}
@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Scan scan, final RegionScanner s) throws IOException {
  byte[] isUngroupedAgg = scan.getAttribute(BaseScannerRegionObserver.UNGROUPED_AGG);
  if (isUngroupedAgg == null) {
    return s;
  }

  final ScanProjector p = ScanProjector.deserializeProjectorFromScan(scan);
  final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
  RegionScanner theScanner = s;
  if (p != null || j != null) {
    theScanner =
        new HashJoinRegionScanner(s, p, j, ScanUtil.getTenantId(scan), c.getEnvironment());
  }
  final RegionScanner innerScanner = theScanner;

  byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
  PTable projectedTable = null;
  List<Expression> selectExpressions = null;
  byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
  boolean isUpsert = false;
  boolean isDelete = false;
  byte[] deleteCQ = null;
  byte[] deleteCF = null;
  byte[][] values = null;
  byte[] emptyCF = null;
  ImmutableBytesWritable ptr = null;
  if (upsertSelectTable != null) {
    isUpsert = true;
    projectedTable = deserializeTable(upsertSelectTable);
    selectExpressions =
        deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
    values = new byte[projectedTable.getPKColumns().size()][];
    ptr = new ImmutableBytesWritable();
  } else {
    byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
    isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
    if (!isDelete) {
      deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
      deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
    }
    emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
  }

  int batchSize = 0;
  long ts = scan.getTimeRange().getMax();
  HRegion region = c.getEnvironment().getRegion();
  List<Mutation> mutations = Collections.emptyList();
  if (isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null) {
    // TODO: size better
    mutations = Lists.newArrayListWithExpectedSize(1024);
    batchSize = c.getEnvironment().getConfiguration()
        .getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
  }
  Aggregators aggregators = ServerAggregators.deserialize(
      scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS),
      c.getEnvironment().getConfiguration());
  Aggregator[] rowAggregators = aggregators.getAggregators();
  boolean hasMore;
  boolean hasAny = false;
  MultiKeyValueTuple result = new MultiKeyValueTuple();
  if (logger.isInfoEnabled()) {
    logger.info("Starting ungrouped coprocessor scan " + scan);
  }
  long rowCount = 0;
  region.startRegionOperation();
  try {
    do {
      List<Cell> results = new ArrayList<Cell>();
      // Results are potentially returned even when the return value of s.next is false
      // since this is an indication of whether or not there are more values after the
      // ones returned
      hasMore = innerScanner.nextRaw(results);
      if (!results.isEmpty()) {
        rowCount++;
        result.setKeyValues(results);
        try {
          if (isDelete) {
            // FIXME: the version of the Delete constructor without the lock args was
            // introduced in 0.94.4, thus if we try to use it here we can no longer use
            // the 0.94.2 version of the client.
            Cell firstKV = results.get(0);
            Delete delete = new Delete(firstKV.getRowArray(), firstKV.getRowOffset(),
                firstKV.getRowLength(), ts);
            mutations.add(delete);
          } else if (isUpsert) {
            Arrays.fill(values, null);
            int i = 0;
            List<PColumn> projectedColumns = projectedTable.getColumns();
            for (; i < projectedTable.getPKColumns().size(); i++) {
              Expression expression = selectExpressions.get(i);
              if (expression.evaluate(result, ptr)) {
                values[i] = ptr.copyBytes();
                // If SortOrder from expression in SELECT doesn't match the
                // column being projected into then invert the bits.
                if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                  SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                }
              }
            }
            projectedTable.newKey(ptr, values);
            PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
            for (; i < projectedColumns.size(); i++) {
              Expression expression = selectExpressions.get(i);
              if (expression.evaluate(result, ptr)) {
                PColumn column = projectedColumns.get(i);
                Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                // We are guaranteed that the two column will have the same type.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                    expression.getMaxLength(), expression.getScale(),
                    column.getMaxLength(), column.getScale())) {
                  throw new ValueTypeIncompatibleException(column.getDataType(),
                      column.getMaxLength(), column.getScale());
                }
                column.getDataType().coerceBytes(ptr, value, expression.getDataType(),
                    expression.getMaxLength(), expression.getScale(),
                    expression.getSortOrder(), column.getMaxLength(), column.getScale(),
                    column.getSortOrder());
                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                row.setValue(column, bytes);
              }
            }
            for (Mutation mutation : row.toRowMutations()) {
              mutations.add(mutation);
            }
          } else if (deleteCF != null && deleteCQ != null) {
            // No need to search for delete column, since we project only it
            // if no empty key value is being set
            if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
              Delete delete = new Delete(results.get(0).getRowArray(),
                  results.get(0).getRowOffset(), results.get(0).getRowLength());
              delete.deleteColumns(deleteCF, deleteCQ, ts);
              mutations.add(delete);
            }
          }
          if (emptyCF != null) {
            /*
             * If we've specified an emptyCF, then we need to insert an empty
             * key value "retroactively" for any key value that is visible at
             * the timestamp that the DDL was issued. Key values that are not
             * visible at this timestamp will not ever be projected up to
             * scans past this timestamp, so don't need to be considered.
             * We insert one empty key value per row per timestamp.
             */
            Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
            for (Cell kv : results) {
              long kvts = kv.getTimestamp();
              if (!timeStamps.contains(kvts)) {
                Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
                    ByteUtil.EMPTY_BYTE_ARRAY);
                mutations.add(put);
              }
            }
          }
          // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
          if (!mutations.isEmpty() && batchSize > 0 && mutations.size() % batchSize == 0) {
            commitBatch(region, mutations, indexUUID);
            mutations.clear();
          }
        } catch (ConstraintViolationException e) {
          // Log and ignore in count
          logger.error("Failed to create row in " + region.getRegionNameAsString()
              + " with values " + SchemaUtil.toString(values), e);
          continue;
        }
        aggregators.aggregate(rowAggregators, result);
        hasAny = true;
      }
    } while (hasMore);
  } finally {
    innerScanner.close();
    region.closeRegionOperation();
  }

  if (logger.isInfoEnabled()) {
    logger.info("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan);
  }

  if (!mutations.isEmpty()) {
    commitBatch(region, mutations, indexUUID);
  }

  final boolean hadAny = hasAny;
  KeyValue keyValue = null;
  if (hadAny) {
    byte[] value = aggregators.toBytes(rowAggregators);
    keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
        SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
  }
  final KeyValue aggKeyValue = keyValue;

  RegionScanner scanner = new BaseRegionScanner() {
    private boolean done = !hadAny;

    @Override
    public HRegionInfo getRegionInfo() {
      return innerScanner.getRegionInfo();
    }

    @Override
    public boolean isFilterDone() {
      return done;
    }

    @Override
    public void close() throws IOException {
      innerScanner.close();
    }

    @Override
    public boolean next(List<Cell> results) throws IOException {
      if (done) return false;
      done = true;
      results.add(aggKeyValue);
      return false;
    }

    @Override
    public long getMaxResultSize() {
      return scan.getMaxResultSize();
    }
  };
  return scanner;
}