/** * Creates a HBase {@link Increment} from a Storm {@link Tuple} * * @param tuple The {@link Tuple} * @param increment The amount to increment the counter by * @return {@link Increment} */ public Increment getIncrementFromTuple(final Tuple tuple, final long increment) { byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField)); Increment inc = new Increment(rowKey); inc.setWriteToWAL(writeToWAL); if (columnFamilies.size() > 0) { for (String cf : columnFamilies.keySet()) { byte[] cfBytes = Bytes.toBytes(cf); for (String cq : columnFamilies.get(cf)) { byte[] val; try { val = Bytes.toBytes(tuple.getStringByField(cq)); } catch (IllegalArgumentException ex) { // if cq isn't a tuple field, use cq for counter instead of tuple // value val = Bytes.toBytes(cq); } inc.addColumn(cfBytes, val, increment); } } } return inc; }
/**
 * Verifies that the pre/post increment coprocessor hooks fire exactly once around a
 * {@code table.increment} call: all three observer flags are false before the increment
 * and true after it.
 */
@Test
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  HTable table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);
    // Hooks must not have fired yet.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false});

    table.increment(inc);

    // All three hooks fire as a result of the single increment.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true});
  } finally {
    // Nested try/finally so the table handle is closed even when deleteTable throws;
    // the original skipped table.close() in that case, leaking the connection.
    try {
      util.deleteTable(tableName);
    } finally {
      table.close();
    }
  }
}
/**
 * Converts a Thrift {@link TIncrement} into a native {@link Increment}.
 *
 * @param tincrement the Thrift version of an increment
 * @return the equivalent {@link Increment}, or {@code null} when the Thrift column cannot
 *     be split into exactly a family and a qualifier
 */
public static Increment incrementFromThrift(TIncrement tincrement) {
  Increment increment = new Increment(tincrement.getRow());
  byte[][] familyAndQualifier = KeyValue.parseColumn(tincrement.getColumn());
  // A well-formed column splits into exactly {family, qualifier}; anything else is rejected.
  if (familyAndQualifier.length != 2) {
    return null;
  }
  increment.addColumn(familyAndQualifier[0], familyAndQualifier[1], tincrement.getAmmount());
  return increment;
}
/** * Increment the counter for the given family and column by the specified amount * * <p>If the family and column already exist in the Increment the counter value is incremented by * the specified amount rather than overridden, as it is in HBase's {@link * Increment#addColumn(byte[], byte[], long)} method * * @param inc The {@link Increment} to update * @param family The column family * @param qualifier The column qualifier * @param amount The amount to increment the counter by */ public static void addIncrement( Increment inc, final byte[] family, final byte[] qualifier, final Long amount) { NavigableMap<byte[], Long> set = inc.getFamilyMap().get(family); if (set == null) { set = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR); } // If qualifier exists, increment amount Long counter = set.get(qualifier); if (counter == null) { counter = 0L; } set.put(qualifier, amount + counter); inc.getFamilyMap().put(family, set); }
@Test public void testIncrement() throws Exception { byte[] row1 = Bytes.toBytes("row1"); byte[] col1 = Bytes.toBytes("col1"); byte[] col2 = Bytes.toBytes("col2"); byte[] col3 = Bytes.toBytes("col3"); // Setting up region final WALFactory wals = new WALFactory(CONF, null, "TestIncrement"); byte[] tableName = Bytes.toBytes("TestIncrement"); final WAL wal = wals.getWAL(tableName); HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT); // col1: amount = 1, 1 write back to WAL Increment inc1 = new Increment(row1); inc1.addColumn(FAMILY, col1, 1); Result res = region.increment(inc1); assertEquals(1, res.size()); assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1))); verifyWALCount(wals, wal, 1); // col1: amount = 0, 0 write back to WAL inc1 = new Increment(row1); inc1.addColumn(FAMILY, col1, 0); res = region.increment(inc1); assertEquals(1, res.size()); assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1))); verifyWALCount(wals, wal, 1); // col1: amount = 0, col2: amount = 0, col3: amount = 0 // 0 write back to WAL inc1 = new Increment(row1); inc1.addColumn(FAMILY, col1, 0); inc1.addColumn(FAMILY, col2, 0); inc1.addColumn(FAMILY, col3, 0); res = region.increment(inc1); assertEquals(3, res.size()); assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1))); assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col2))); assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col3))); verifyWALCount(wals, wal, 1); // col1: amount = 5, col2: amount = 4, col3: amount = 3 // 1 write back to WAL inc1 = new Increment(row1); inc1.addColumn(FAMILY, col1, 5); inc1.addColumn(FAMILY, col2, 4); inc1.addColumn(FAMILY, col3, 3); res = region.increment(inc1); assertEquals(3, res.size()); assertEquals(6, Bytes.toLong(res.getValue(FAMILY, col1))); assertEquals(4, Bytes.toLong(res.getValue(FAMILY, col2))); assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3))); verifyWALCount(wals, wal, 2); }
/**
 * Use PreIncrement hook of BaseRegionObserver to overcome deficiencies in Increment
 * implementation (HBASE-10254): 1) Lack of recognition and identification of when the key value
 * to increment doesn't exist 2) Lack of the ability to set the timestamp of the updated key
 * value. Works the same as existing region.increment(), except assumes there is a single column
 * to increment and uses Phoenix LONG encoding.
 *
 * @author jtaylor
 * @since 3.0.0
 */
@Override
public Result preIncrement(
    final ObserverContext<RegionCoprocessorEnvironment> e, final Increment increment)
    throws IOException {
  RegionCoprocessorEnvironment env = e.getEnvironment();
  // We need to set this to prevent region.increment from being called
  e.bypass();
  e.complete();
  HRegion region = env.getRegion();
  byte[] row = increment.getRow();
  TimeRange tr = increment.getTimeRange();
  region.startRegionOperation();
  try {
    // Row lock taken for the whole read-modify-write; released in the inner finally.
    Integer lid = region.getLock(null, row, true);
    try {
      long maxTimestamp = tr.getMax();
      if (maxTimestamp == HConstants.LATEST_TIMESTAMP) {
        // Pin an open-ended time range to "now" so the Get below is bounded.
        maxTimestamp = EnvironmentEdgeManager.currentTimeMillis();
        tr = new TimeRange(tr.getMin(), maxTimestamp);
      }
      // Read back every family/qualifier the Increment touches within the time range.
      Get get = new Get(row);
      get.setTimeRange(tr.getMin(), tr.getMax());
      for (Map.Entry<byte[], NavigableMap<byte[], Long>> entry :
          increment.getFamilyMap().entrySet()) {
        byte[] cf = entry.getKey();
        for (byte[] cq : entry.getValue().keySet()) {
          get.addColumn(cf, cq);
        }
      }
      Result result = region.get(get);
      if (result.isEmpty()) {
        // Sequence row absent: surface SEQUENCE_UNDEFINED to the client instead of
        // silently creating state (deficiency #1 in the Javadoc above).
        return getErrorResult(
            row, maxTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
      }
      // Decode the sequence's current value, increment step, and cache size columns
      // using Phoenix's fixed-width LONG/INTEGER codecs.
      KeyValue currentValueKV = Sequence.getCurrentValueKV(result);
      KeyValue incrementByKV = Sequence.getIncrementByKV(result);
      KeyValue cacheSizeKV = Sequence.getCacheSizeKV(result);
      long value =
          PDataType.LONG
              .getCodec()
              .decodeLong(currentValueKV.getBuffer(), currentValueKV.getValueOffset(), null);
      long incrementBy =
          PDataType.LONG
              .getCodec()
              .decodeLong(incrementByKV.getBuffer(), incrementByKV.getValueOffset(), null);
      int cacheSize =
          PDataType.INTEGER
              .getCodec()
              .decodeInt(cacheSizeKV.getBuffer(), cacheSizeKV.getValueOffset(), null);
      // Advance by a whole cache batch so clients can hand out cacheSize values locally.
      value += incrementBy * cacheSize;
      byte[] valueBuffer = new byte[PDataType.LONG.getByteSize()];
      PDataType.LONG.getCodec().encodeLong(value, valueBuffer, 0);
      Put put = new Put(row, currentValueKV.getTimestamp());
      // Hold timestamp constant for sequences, so that clients always only see the latest value
      // regardless of when they connect.
      KeyValue newCurrentValueKV =
          KeyValueUtil.newKeyValue(
              row,
              currentValueKV.getFamily(),
              currentValueKV.getQualifier(),
              currentValueKV.getTimestamp(),
              valueBuffer);
      put.add(newCurrentValueKV);
      // batchMutate with the already-held lock id avoids re-acquiring the row lock.
      @SuppressWarnings("unchecked")
      Pair<Mutation, Integer>[] mutations = new Pair[1];
      mutations[0] = new Pair<Mutation, Integer>(put, lid);
      region.batchMutate(mutations);
      // Return the read Result with the current-value cell swapped for the new one.
      return Sequence.replaceCurrentValueKV(result, newCurrentValueKV);
    } finally {
      region.releaseRowLock(lid);
    }
  } catch (Throwable t) {
    // throwIOException always throws; the return is only to satisfy the compiler.
    ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
    return null; // Impossible
  } finally {
    region.closeRegionOperation();
  }
}