Example No. 1
 /**
  * Copies the bounds of the given {@link TimeRange} onto the {@link Scan}, wrapping the checked
  * IOException declared by Scan#setTimeRange as an unchecked exception.
  */
 public static void setTimeRange(Scan scan, TimeRange range) {
   try {
     scan.setTimeRange(range.getMin(), range.getMax());
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
 }
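A minimal caller sketch, assuming an HBase release where the two-argument TimeRange constructor is public (the timestamps and method name are hypothetical):

 // Sketch only: restrict a Scan to the last hour using the helper above.
 static void scanLastHour(Scan scan, long nowMillis) throws IOException {
   TimeRange lastHour = new TimeRange(nowMillis - 3_600_000L, nowMillis); // [min, max), in ms
   setTimeRange(scan, lastHour); // delegates to scan.setTimeRange(min, max)
 }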
Example No. 2
  /**
   * Translates an HBase {@link TimeRange} into a Bigtable timestamp-range {@link RowFilter},
   * converting between the two systems' time units.
   */
  private RowFilter createTimeRangeFilter(TimeRange timeRange) {
    TimestampRange.Builder rangeBuilder = TimestampRange.newBuilder();

    // Convert the lower bound from HBase's time unit to Bigtable's microsecond unit.
    long lowerBound =
        BigtableConstants.BIGTABLE_TIMEUNIT.convert(
            timeRange.getMin(), BigtableConstants.HBASE_TIMEUNIT);
    rangeBuilder.setStartTimestampMicros(lowerBound);

    // An unbounded HBase max (Long.MAX_VALUE) is mapped to an unset end timestamp,
    // i.e. an open-ended Bigtable range.
    if (timeRange.getMax() != Long.MAX_VALUE) {
      long upperBound =
          BigtableConstants.BIGTABLE_TIMEUNIT.convert(
              timeRange.getMax(), BigtableConstants.HBASE_TIMEUNIT);
      rangeBuilder.setEndTimestampMicros(upperBound);
    }

    return RowFilter.newBuilder().setTimestampRangeFilter(rangeBuilder).build();
  }
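For reference, a small sketch of the unit conversion the two convert(...) calls perform, assuming HBASE_TIMEUNIT is TimeUnit.MILLISECONDS and BIGTABLE_TIMEUNIT is TimeUnit.MICROSECONDS (java.util.concurrent.TimeUnit):

  // 1,500 ms in HBase's unit becomes 1,500,000 µs in Bigtable's unit.
  long hbaseMillis = 1_500L;
  long bigtableMicros = TimeUnit.MICROSECONDS.convert(hbaseMillis, TimeUnit.MILLISECONDS); // 1_500_000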
Example No. 3
 /**
  * Transactional version of {@link HTable#get(Get)}.
  *
  * @param transactionState identifier of the transaction; its start timestamp bounds the read
  * @param get the {@link Get} request to execute within the transaction
  * @see HTable#get(Get)
  * @throws IOException if the underlying read fails
  */
 public Result get(TransactionState transactionState, final Get get) throws IOException {
   final long readTimestamp = transactionState.getStartTimestamp();
   final Get tsget = new Get(get.getRow());
   TimeRange timeRange = get.getTimeRange();
   long startTime = timeRange.getMin();
   // Cap the upper bound at the transaction's start timestamp; +1 because TimeRange's max is exclusive.
   long endTime = Math.min(timeRange.getMax(), readTimestamp + 1);
   //      int maxVersions = get.getMaxVersions();
   tsget
       .setTimeRange(startTime, endTime)
       .setMaxVersions((int) (versionsAvg + CACHE_VERSIONS_OVERHEAD));
   Map<byte[], NavigableSet<byte[]>> kvs = get.getFamilyMap();
   for (Map.Entry<byte[], NavigableSet<byte[]>> entry : kvs.entrySet()) {
     byte[] family = entry.getKey();
     NavigableSet<byte[]> qualifiers = entry.getValue();
     if (qualifiers == null || qualifiers.isEmpty()) {
       tsget.addFamily(family);
     } else {
       for (byte[] qualifier : qualifiers) {
         tsget.addColumn(family, qualifier);
       }
     }
   }
   //      Result result;
   //      Result filteredResult;
   //      do {
   //         result = super.get(tsget);
   //         filteredResult = filter(super.get(tsget), readTimestamp, maxVersions);
   //      } while (!result.isEmpty() && filteredResult == null);
   getsPerformed++;
   Result result =
       filter(
           transactionState,
           super.get(tsget),
           readTimestamp,
           (int) (versionsAvg + CACHE_VERSIONS_OVERHEAD));
   return result == null ? new Result() : result;
   //      Scan scan = new Scan(get);
   //      scan.setRetainDeletesInOutput(true);
   //      ResultScanner rs = this.getScanner(transactionState, scan);
   //      Result r = rs.next();
   //      if (r == null) {
   //         r = new Result();
   //      }
   //      return r;
 }
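A hypothetical caller sketch; txManager and txTable are placeholders for the surrounding library's transaction manager and for the transactional table class that defines the get(TransactionState, Get) overload above:

 // Sketch only: read a row within a transaction's snapshot.
 TransactionState tx = txManager.beginTransaction();            // placeholder API
 Result row = txTable.get(tx, new Get(Bytes.toBytes("row-1"))); // the method shown above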
Example No. 4
 /**
  * Use the preIncrement hook of BaseRegionObserver to overcome deficiencies in the Increment
  * implementation (HBASE-10254):
  * 1) lack of recognition and identification of when the key value to increment doesn't exist;
  * 2) lack of the ability to set the timestamp of the updated key value.
  * Works the same as the existing region.increment(), except it assumes there is a single column
  * to increment and uses Phoenix LONG encoding.
  *
  * @author jtaylor
  * @since 3.0.0
  */
 @Override
 public Result preIncrement(
     final ObserverContext<RegionCoprocessorEnvironment> e, final Increment increment)
     throws IOException {
   RegionCoprocessorEnvironment env = e.getEnvironment();
   // bypass() prevents the default region.increment() from running; complete() skips any
   // remaining coprocessors in the chain.
   e.bypass();
   e.complete();
   HRegion region = env.getRegion();
   byte[] row = increment.getRow();
   TimeRange tr = increment.getTimeRange();
   region.startRegionOperation();
   try {
     Integer lid = region.getLock(null, row, true);
     try {
       long maxTimestamp = tr.getMax();
       if (maxTimestamp == HConstants.LATEST_TIMESTAMP) {
         maxTimestamp = EnvironmentEdgeManager.currentTimeMillis();
         tr = new TimeRange(tr.getMin(), maxTimestamp);
       }
       Get get = new Get(row);
       get.setTimeRange(tr.getMin(), tr.getMax());
       for (Map.Entry<byte[], NavigableMap<byte[], Long>> entry :
           increment.getFamilyMap().entrySet()) {
         byte[] cf = entry.getKey();
         for (byte[] cq : entry.getValue().keySet()) {
           get.addColumn(cf, cq);
         }
       }
       Result result = region.get(get);
       if (result.isEmpty()) {
         return getErrorResult(
             row, maxTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
       }
       KeyValue currentValueKV = Sequence.getCurrentValueKV(result);
       KeyValue incrementByKV = Sequence.getIncrementByKV(result);
       KeyValue cacheSizeKV = Sequence.getCacheSizeKV(result);
       long value =
           PDataType.LONG
               .getCodec()
               .decodeLong(currentValueKV.getBuffer(), currentValueKV.getValueOffset(), null);
       long incrementBy =
           PDataType.LONG
               .getCodec()
               .decodeLong(incrementByKV.getBuffer(), incrementByKV.getValueOffset(), null);
       int cacheSize =
           PDataType.INTEGER
               .getCodec()
               .decodeInt(cacheSizeKV.getBuffer(), cacheSizeKV.getValueOffset(), null);
       value += incrementBy * cacheSize;
       byte[] valueBuffer = new byte[PDataType.LONG.getByteSize()];
       PDataType.LONG.getCodec().encodeLong(value, valueBuffer, 0);
       // Hold the timestamp constant for sequences, so that clients always see only the latest
       // value regardless of when they connect.
       Put put = new Put(row, currentValueKV.getTimestamp());
       KeyValue newCurrentValueKV =
           KeyValueUtil.newKeyValue(
               row,
               currentValueKV.getFamily(),
               currentValueKV.getQualifier(),
               currentValueKV.getTimestamp(),
               valueBuffer);
       put.add(newCurrentValueKV);
       @SuppressWarnings("unchecked")
       Pair<Mutation, Integer>[] mutations = new Pair[1];
       mutations[0] = new Pair<Mutation, Integer>(put, lid);
       region.batchMutate(mutations);
       return Sequence.replaceCurrentValueKV(result, newCurrentValueKV);
     } finally {
       region.releaseRowLock(lid);
     }
   } catch (Throwable t) {
     ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
     return null; // Impossible
   } finally {
     region.closeRegionOperation();
   }
 }
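For context, a hedged sketch of the single-column client-side Increment this hook intercepts (the method, table, and column names are hypothetical; Phoenix issues this request internally for sequences):

 // Sketch only: the hook above bypasses region.increment() and derives the delta
 // from the increment-by and cache-size cells stored in the sequence row, so the
 // amount passed here is not what gets applied.
 static Result bumpSequence(HTableInterface table, byte[] row, byte[] family, byte[] qualifier)
     throws IOException {
   Increment inc = new Increment(row);
   inc.addColumn(family, qualifier, 1L);
   return table.increment(inc);
 }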