Example 1
  // Retrieves the row from the table and checks that it exists and has not been flagged as deleted
  protected Result getRow(
      RecordId recordId, Long version, int numberOfVersions, List<FieldType> fields)
      throws RecordException {
    Result result;
    Get get = new Get(recordId.toBytes());
    get.setFilter(REAL_RECORDS_FILTER);

    try {
      // Add the columns for the fields to get
      addFieldsToGet(get, fields);

      if (version != null)
        get.setTimeRange(0, version + 1); // Only retrieve data at or before this version (upper bound is exclusive)
      get.setMaxVersions(numberOfVersions);

      // Retrieve the data from the repository
      result = recordTable.get(get);

      if (result == null || result.isEmpty()) throw new RecordNotFoundException(recordId);

    } catch (IOException e) {
      throw new RecordException(
          "Exception occurred while retrieving record '" + recordId + "' from HBase table", e);
    }
    return result;
  }
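
A distilled sketch of the time-range handling above. The helper name and the HTableInterface handle are hypothetical; the point is that setTimeRange's upper bound is exclusive, which is why the method passes version + 1.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;

// Hypothetical helper distilling the pattern above: fetch up to `numVersions`
// versions of `row`, none newer than `version`. setTimeRange's upper bound is
// exclusive, hence the `version + 1`.
static Result getVersionsUpTo(HTableInterface table, byte[] row, long version, int numVersions)
    throws IOException {
  Get get = new Get(row);
  get.setTimeRange(0, version + 1); // effectively [0, version]
  get.setMaxVersions(numVersions);  // cap the number of versions returned
  return table.get(get);
}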
Example 2
  @TimeDepend
  @Test
  public void testGetWith_Ts() throws Exception {
    recreateTable();
    fillData();

    Get get = new Get(rowKey_ForTest);
    get.addColumn(ColumnFamilyNameBytes, QName1);
    get.setMaxVersions(3);

    get.setTimeStamp(3L);
    Result result = table.get(get);
    Assert.assertEquals(1, result.raw().length);

    get.setTimeStamp(2L);
    result = table.get(get);
    Assert.assertEquals(1, result.raw().length);

    get.setTimeStamp(1L);
    result = table.get(get);
    Assert.assertEquals(1, result.raw().length);

    get.setTimeStamp(0L);
    result = table.get(get);
    Assert.assertEquals(0, result.raw().length);

    get.setTimeRange(1, 4);
    result = table.get(get);
    Assert.assertEquals(3, result.raw().length);

    recreateTable();
  }
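
A companion sketch, assuming the same fixture as the test above (table, rowKey_ForTest, ColumnFamilyNameBytes, QName1, and that fillData() has written versions at timestamps 1 through 3), making the relationship between setTimeStamp and setTimeRange explicit.

  // Sketch: setTimeStamp(ts) is shorthand for the one-tick half-open range
  // [ts, ts + 1), which is why setTimeRange(1, 4) above matches exactly the
  // three versions written at timestamps 1, 2 and 3.
  @Test
  public void testTimeStampEqualsOneTickRange() throws Exception {
    Get byStamp = new Get(rowKey_ForTest);
    byStamp.addColumn(ColumnFamilyNameBytes, QName1);
    byStamp.setTimeStamp(2L);     // only the cell written at timestamp 2

    Get byRange = new Get(rowKey_ForTest);
    byRange.addColumn(ColumnFamilyNameBytes, QName1);
    byRange.setTimeRange(2L, 3L); // equivalent: [2, 3) contains only 2

    Assert.assertEquals(table.get(byStamp).raw().length,
                        table.get(byRange).raw().length);
  }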
Example 3
 @SuppressWarnings("deprecation")
 @Override
 public DResult get(Get get, long startId) throws IOException {
   if (get.hasFamilies()) get.addFamily(DominoConst.INNER_FAMILY);
    get.setTimeRange(0, startId + 1); // half-open [0, startId + 1): includes versions up to startId
   get.setMaxVersions();
   Result preRead = region.get(get);
   List<KeyValue> status = preRead.getColumn(DominoConst.INNER_FAMILY, DominoConst.STATUS_COL);
   if (status == null || status.size() == 0) {
     Result ret = MVCC.handleResult(this, getTrxMetaTable(), preRead, startId, null);
     return new DResult(ret, null);
   }
   Integer lockId = region.getLock(null, get.getRow(), true);
   try {
     Result r =
         MVCC.handleResult(this, getTrxMetaTable(), region.get(get, lockId), startId, lockId);
     return new DResult(r, null);
   } catch (TransactionOutOfDateException oode) {
     return new DResult(null, oode.getMessage());
   } catch (InvalidRowStatusException e) {
     return new DResult(null, e.getMessage());
   } finally {
     region.releaseRowLock(lockId);
   }
 }
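
The time-range setup above is the core of the snapshot read; a minimal sketch of just that step (method name hypothetical):

// A transaction that started at `startId` must not observe anything newer,
// so the read window is the half-open range [0, startId + 1), i.e. every
// version up to and including startId; the MVCC layer then picks the
// visible one.
static Get snapshotGet(byte[] row, long startId) throws IOException {
  Get get = new Get(row);
  get.setTimeRange(0, startId + 1); // includes startId itself
  get.setMaxVersions();             // keep all versions inside the window
  return get;
}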
Example 4
  /**
   * Transactional version of {@link HTable#get(Get)}.
   *
   * @param transactionState identifier of the transaction
   * @param get the Get operation to execute within the transaction
   * @return the result, filtered to the versions visible to the transaction
   * @see HTable#get(Get)
   * @throws IOException if the underlying read fails
   */
 public Result get(TransactionState transactionState, final Get get) throws IOException {
   final long readTimestamp = transactionState.getStartTimestamp();
   final Get tsget = new Get(get.getRow());
   TimeRange timeRange = get.getTimeRange();
   long startTime = timeRange.getMin();
   long endTime = Math.min(timeRange.getMax(), readTimestamp + 1);
   //      int maxVersions = get.getMaxVersions();
   tsget
       .setTimeRange(startTime, endTime)
       .setMaxVersions((int) (versionsAvg + CACHE_VERSIONS_OVERHEAD));
   Map<byte[], NavigableSet<byte[]>> kvs = get.getFamilyMap();
   for (Map.Entry<byte[], NavigableSet<byte[]>> entry : kvs.entrySet()) {
     byte[] family = entry.getKey();
     NavigableSet<byte[]> qualifiers = entry.getValue();
     if (qualifiers == null || qualifiers.isEmpty()) {
       tsget.addFamily(family);
     } else {
       for (byte[] qualifier : qualifiers) {
         tsget.addColumn(family, qualifier);
       }
     }
   }
   //      Result result;
   //      Result filteredResult;
   //      do {
   //         result = super.get(tsget);
   //         filteredResult = filter(super.get(tsget), readTimestamp, maxVersions);
   //      } while (!result.isEmpty() && filteredResult == null);
   getsPerformed++;
   Result result =
       filter(
           transactionState,
           super.get(tsget),
           readTimestamp,
           (int) (versionsAvg + CACHE_VERSIONS_OVERHEAD));
   return result == null ? new Result() : result;
   //      Scan scan = new Scan(get);
   //      scan.setRetainDeletesInOutput(true);
   //      ResultScanner rs = this.getScanner(transactionState, scan);
   //      Result r = rs.next();
   //      if (r == null) {
   //         r = new Result();
   //      }
   //      return r;
 }
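
The essential step above is clamping the caller's time range to the transaction's visibility window; a minimal sketch of just that intersection (helper name hypothetical):

import org.apache.hadoop.hbase.io.TimeRange;

// Intersect the requested range with [0, readTimestamp + 1) so the read
// never sees data committed after the transaction started.
static TimeRange clampToSnapshot(TimeRange requested, long readTimestamp) throws IOException {
  long endTime = Math.min(requested.getMax(), readTimestamp + 1);
  return new TimeRange(requested.getMin(), endTime);
}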
Example 5
 @Override
 public GetBuilder setTimeRange(long minStamp, long maxStamp) throws IOException {
   get.setTimeRange(minStamp, maxStamp);
   return this;
 }
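
A hypothetical usage sketch of the builder: each setter returns this, so time-range configuration chains with other settings. The factory and build() names below are assumptions, not part of the example's API.

 // Hypothetical fluent usage; only setTimeRange is taken from the example above.
 static Get buildRangedGet(byte[] rowKey) throws IOException {
   return newGetBuilder(rowKey)  // assumed factory for the builder
       .setTimeRange(0L, 100L)   // half-open: matches timestamps 0..99
       .build();                 // assumed terminal method producing the Get
 }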
Example 6
 /**
  * Use the preIncrement hook of BaseRegionObserver to overcome deficiencies in the Increment
  * implementation (HBASE-10254):
  * 1) lack of recognition and identification of when the key value to increment doesn't exist, and
  * 2) lack of the ability to set the timestamp of the updated key value.
  * Works the same as the existing region.increment(), except it assumes there is a single column
  * to increment and uses Phoenix LONG encoding.
  *
  * @author jtaylor
  * @since 3.0.0
  */
 @Override
 public Result preIncrement(
     final ObserverContext<RegionCoprocessorEnvironment> e, final Increment increment)
     throws IOException {
   RegionCoprocessorEnvironment env = e.getEnvironment();
   // We need to set this to prevent region.increment from being called
   e.bypass();
   e.complete();
   HRegion region = env.getRegion();
   byte[] row = increment.getRow();
   TimeRange tr = increment.getTimeRange();
   region.startRegionOperation();
   try {
     Integer lid = region.getLock(null, row, true);
     try {
       long maxTimestamp = tr.getMax();
       if (maxTimestamp == HConstants.LATEST_TIMESTAMP) {
         maxTimestamp = EnvironmentEdgeManager.currentTimeMillis();
         tr = new TimeRange(tr.getMin(), maxTimestamp);
       }
       Get get = new Get(row);
       get.setTimeRange(tr.getMin(), tr.getMax());
       for (Map.Entry<byte[], NavigableMap<byte[], Long>> entry :
           increment.getFamilyMap().entrySet()) {
         byte[] cf = entry.getKey();
         for (byte[] cq : entry.getValue().keySet()) {
           get.addColumn(cf, cq);
         }
       }
       Result result = region.get(get);
       if (result.isEmpty()) {
         return getErrorResult(
             row, maxTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
       }
       KeyValue currentValueKV = Sequence.getCurrentValueKV(result);
       KeyValue incrementByKV = Sequence.getIncrementByKV(result);
       KeyValue cacheSizeKV = Sequence.getCacheSizeKV(result);
       long value =
           PDataType.LONG
               .getCodec()
               .decodeLong(currentValueKV.getBuffer(), currentValueKV.getValueOffset(), null);
       long incrementBy =
           PDataType.LONG
               .getCodec()
               .decodeLong(incrementByKV.getBuffer(), incrementByKV.getValueOffset(), null);
       int cacheSize =
           PDataType.INTEGER
               .getCodec()
               .decodeInt(cacheSizeKV.getBuffer(), cacheSizeKV.getValueOffset(), null);
       value += incrementBy * cacheSize;
       byte[] valueBuffer = new byte[PDataType.LONG.getByteSize()];
       PDataType.LONG.getCodec().encodeLong(value, valueBuffer, 0);
       Put put = new Put(row, currentValueKV.getTimestamp());
       // Hold timestamp constant for sequences, so that clients always only see the latest value
       // regardless of when they connect.
       KeyValue newCurrentValueKV =
           KeyValueUtil.newKeyValue(
               row,
               currentValueKV.getFamily(),
               currentValueKV.getQualifier(),
               currentValueKV.getTimestamp(),
               valueBuffer);
       put.add(newCurrentValueKV);
       @SuppressWarnings("unchecked")
       Pair<Mutation, Integer>[] mutations = new Pair[1];
       mutations[0] = new Pair<Mutation, Integer>(put, lid);
       region.batchMutate(mutations);
       return Sequence.replaceCurrentValueKV(result, newCurrentValueKV);
     } finally {
       region.releaseRowLock(lid);
     }
   } catch (Throwable t) {
     ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
     return null; // Impossible
   } finally {
     region.closeRegionOperation();
   }
 }
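
The arithmetic in the middle of the hook, restated with plain HBase Bytes instead of the Phoenix codec. A sketch, assuming the cell stores an 8-byte big-endian long; the helper name is hypothetical.

import org.apache.hadoop.hbase.util.Bytes;

// Allocating incrementBy * cacheSize at once hands the client a whole batch
// of sequence values, so it only returns to the server every cacheSize calls.
static byte[] nextBatchValue(byte[] currentValue, long incrementBy, int cacheSize) {
  long next = Bytes.toLong(currentValue) + incrementBy * cacheSize;
  return Bytes.toBytes(next);
}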
Example 7
  /**
   * Override the preAppend for checkAndPut and checkAndDelete, as we need the ability to a) set the
   * TimeRange for the Get being done and b) return something back to the client to indicate
   * success/failure
   */
  @SuppressWarnings("deprecation")
  @Override
  public Result preAppend(
      final ObserverContext<RegionCoprocessorEnvironment> e, final Append append)
      throws IOException {
    byte[] opBuf = append.getAttribute(OPERATION_ATTRIB);
    if (opBuf == null) {
      return null;
    }
    Op op = Op.values()[opBuf[0]];

    long clientTimestamp = HConstants.LATEST_TIMESTAMP;
    byte[] clientTimestampBuf = append.getAttribute(MAX_TIMERANGE_ATTRIB);
    if (clientTimestampBuf != null) {
      clientTimestamp = Bytes.toLong(clientTimestampBuf);
    }
    boolean hadClientTimestamp = (clientTimestamp != HConstants.LATEST_TIMESTAMP);
    if (hadClientTimestamp) {
      // Prevent race condition of creating two sequences at the same timestamp
      // by looking for a sequence at or after the timestamp at which it'll be
      // created.
      if (op == Op.CREATE_SEQUENCE) {
        clientTimestamp++;
      }
    } else {
      clientTimestamp = EnvironmentEdgeManager.currentTimeMillis();
      clientTimestampBuf = Bytes.toBytes(clientTimestamp);
    }

    RegionCoprocessorEnvironment env = e.getEnvironment();
    // We need to set this to prevent region.append from being called
    e.bypass();
    e.complete();
    HRegion region = env.getRegion();
    byte[] row = append.getRow();
    region.startRegionOperation();
    try {
      Integer lid = region.getLock(null, row, true);
      try {
        KeyValue keyValue = append.getFamilyMap().values().iterator().next().iterator().next();
        byte[] family = keyValue.getFamily();
        byte[] qualifier = keyValue.getQualifier();

        Get get = new Get(row);
        get.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimestamp);
        get.addColumn(family, qualifier);
        Result result = region.get(get);
        if (result.isEmpty()) {
          if (op == Op.DROP_SEQUENCE || op == Op.RESET_SEQUENCE) {
            return getErrorResult(
                row, clientTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
          }
        } else {
          if (op == Op.CREATE_SEQUENCE) {
            return getErrorResult(
                row, clientTimestamp, SQLExceptionCode.SEQUENCE_ALREADY_EXIST.getErrorCode());
          }
        }
        Mutation m = null;
        switch (op) {
          case RESET_SEQUENCE:
            KeyValue currentValueKV = result.raw()[0];
            long expectedValue =
                PDataType.LONG
                    .getCodec()
                    .decodeLong(append.getAttribute(CURRENT_VALUE_ATTRIB), 0, null);
            long value =
                PDataType.LONG
                    .getCodec()
                    .decodeLong(currentValueKV.getBuffer(), currentValueKV.getValueOffset(), null);
            // Timestamp should match exactly, or we may have the wrong sequence
            if (expectedValue != value || currentValueKV.getTimestamp() != clientTimestamp) {
              return new Result(
                  Collections.singletonList(
                      KeyValueUtil.newKeyValue(
                          row,
                          PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES,
                          QueryConstants.EMPTY_COLUMN_BYTES,
                          currentValueKV.getTimestamp(),
                          ByteUtil.EMPTY_BYTE_ARRAY)));
            }
            m = new Put(row, currentValueKV.getTimestamp());
            m.getFamilyMap().putAll(append.getFamilyMap());
            break;
          case DROP_SEQUENCE:
            m = new Delete(row, clientTimestamp, null);
            break;
          case CREATE_SEQUENCE:
            m = new Put(row, clientTimestamp);
            m.getFamilyMap().putAll(append.getFamilyMap());
            break;
        }
        if (!hadClientTimestamp) {
          for (List<KeyValue> kvs : m.getFamilyMap().values()) {
            for (KeyValue kv : kvs) {
              kv.updateLatestStamp(clientTimestampBuf);
            }
          }
        }
        @SuppressWarnings("unchecked")
        Pair<Mutation, Integer>[] mutations = new Pair[1];
        mutations[0] = new Pair<Mutation, Integer>(m, lid);
        region.batchMutate(mutations);
        long serverTimestamp = MetaDataUtil.getClientTimeStamp(m);
        // Return result with single KeyValue. The only piece of information
        // the client cares about is the timestamp, which is the timestamp of
        // when the mutation was actually performed (useful in the case of .
        return new Result(
            Collections.singletonList(
                KeyValueUtil.newKeyValue(
                    row,
                    PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES,
                    QueryConstants.EMPTY_COLUMN_BYTES,
                    serverTimestamp,
                    SUCCESS_VALUE)));
      } finally {
        region.releaseRowLock(lid);
      }
    } catch (Throwable t) {
      ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
      return null; // Impossible
    } finally {
      region.closeRegionOperation();
    }
  }
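
A sketch of the existence probe at the heart of the hook (wrapper name hypothetical): a sequence row is visible "as of" clientTimestamp iff a cell exists in the half-open window [MIN_TABLE_TIMESTAMP, clientTimestamp).

  // Hypothetical wrapper around the Get issued above.
  static boolean sequenceExistsAsOf(HRegion region, byte[] row, byte[] family,
      byte[] qualifier, long clientTimestamp) throws IOException {
    Get get = new Get(row);
    get.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimestamp);
    get.addColumn(family, qualifier);
    return !region.get(get).isEmpty(); // empty result == no sequence as of that time
  }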
Example 8
  private Result filter(
      TransactionState state, Result result, long startTimestamp, int localVersions)
      throws IOException {
    if (result == null) {
      return null;
    }
    List<KeyValue> kvs = result.list();
    if (kvs == null) {
      return result;
    }
    Map<ByteArray, Map<ByteArray, Integer>> occurrences =
        new HashMap<TransactionalTable.ByteArray, Map<ByteArray, Integer>>();
    Map<ByteArray, Map<ByteArray, Long>> minTimestamp =
        new HashMap<TransactionalTable.ByteArray, Map<ByteArray, Long>>();
    List<KeyValue> nonDeletes = new ArrayList<KeyValue>();
    List<KeyValue> filtered = new ArrayList<KeyValue>();
    Map<ByteArray, Set<ByteArray>> read = new HashMap<ByteArray, Set<ByteArray>>();
    DeleteTracker tracker = new DeleteTracker();
    for (KeyValue kv : kvs) {
      ByteArray family = new ByteArray(kv.getFamily());
      ByteArray qualifier = new ByteArray(kv.getQualifier());
      Set<ByteArray> readQualifiers = read.get(family);
      if (readQualifiers == null) {
        readQualifiers = new HashSet<TransactionalTable.ByteArray>();
        read.put(family, readQualifiers);
      } else if (readQualifiers.contains(qualifier)) continue;
      //         RowKey rk = new RowKey(kv.getRow(), getTableName());
      if (state.tsoclient.validRead(kv.getTimestamp(), startTimestamp)) {
        if (!tracker.addDeleted(kv)) nonDeletes.add(kv);
        // Read valid value
        readQualifiers.add(qualifier);

        //         statistics
        //        elementsGotten++;
        Map<ByteArray, Integer> occurrencesCols = occurrences.get(family);
        Integer times = null;
        if (occurrencesCols != null) {
          times = occurrencesCols.get(qualifier);
        }
        if (times != null) {
          //          elementsRead += times;
          versionsAvg = times > versionsAvg ? times : alpha * versionsAvg + (1 - alpha) * times;
          //          extraVersionsAvg = times > extraVersionsAvg ? times : alpha * extraVersionsAvg + (1 - alpha) * times;
        } else {
          //          elementsRead++;
          versionsAvg = alpha * versionsAvg + (1 - alpha);
          //          extraVersionsAvg = alpha * extraVersionsAvg + (1 - alpha);
        }
      } else {
        Map<ByteArray, Integer> occurrencesCols = occurrences.get(family);
        Map<ByteArray, Long> minTimestampCols = minTimestamp.get(family);
        if (occurrencesCols == null) {
          occurrencesCols = new HashMap<TransactionalTable.ByteArray, Integer>();
          minTimestampCols = new HashMap<TransactionalTable.ByteArray, Long>();
          occurrences.put(family, occurrencesCols);
          minTimestamp.put(family, minTimestampCols);
        }
        Integer times = occurrencesCols.get(qualifier);
        Long timestamp = minTimestampCols.get(qualifier);
        if (times == null) {
          times = 0;
          timestamp = kv.getTimestamp();
        }
        times++;
        timestamp = Math.min(timestamp, kv.getTimestamp());
        if (times == localVersions) {
          // We need to fetch more versions
          Get get = new Get(kv.getRow());
          get.addColumn(kv.getFamily(), kv.getQualifier());
          get.setMaxVersions(localVersions);
          Result r;
          GOTRESULT:
          do {
            extraGetsPerformed++;
            get.setTimeRange(0, timestamp);
            r = this.get(get);
            List<KeyValue> list = r.list();
            if (list == null) break;
            for (KeyValue t : list) {
              times++;
              timestamp = Math.min(timestamp, t.getTimestamp());
              //                     rk = new RowKey(kv.getRow(), getTableName());
              if (state.tsoclient.validRead(t.getTimestamp(), startTimestamp)) {
                if (!tracker.addDeleted(t)) nonDeletes.add(t);
                readQualifiers.add(qualifier);
                elementsGotten++;
                elementsRead += times;
                versionsAvg =
                    times > versionsAvg ? times : alpha * versionsAvg + (1 - alpha) * times;
                extraVersionsAvg =
                    times > extraVersionsAvg
                        ? times
                        : alpha * extraVersionsAvg + (1 - alpha) * times;
                break GOTRESULT;
              }
            }
          } while (r.size() == localVersions);
        } else {
          occurrencesCols.put(qualifier, times);
          minTimestampCols.put(qualifier, timestamp);
        }
      }
    }
    for (KeyValue kv : nonDeletes) {
      if (!tracker.isDeleted(kv)) {
        filtered.add(kv);
      }
    }
    //      cacheVersions = (int) versionsAvg;
    if (filtered.isEmpty()) {
      return null;
    }
    return new Result(filtered);
  }
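
A sketch of the backward-paging idiom inside the loop above (helper name hypothetical): when the newest localVersions versions of a column all fail the snapshot check, the next page of strictly older versions is fetched by shrinking the exclusive upper bound to the oldest timestamp seen so far.

import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;

static Result olderVersions(HTableInterface table, KeyValue kv,
    long oldestSeen, int localVersions) throws IOException {
  Get get = new Get(kv.getRow());
  get.addColumn(kv.getFamily(), kv.getQualifier());
  get.setMaxVersions(localVersions);
  get.setTimeRange(0, oldestSeen); // strictly older than anything already read
  return table.get(get);
}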