コード例 #1
0
ファイル: TestHFileEncryption.java プロジェクト: mringg/hbase
 /**
  * Reads the block at {@code pos}, unpacks (decrypts/decompresses) it, and verifies that its
  * payload is the int sequence {@code 0..size-1}.
  *
  * @param pos offset of the block in the file
  * @param ctx the {@link HFileContext} used to unpack the block
  * @param hbr the reader used to fetch the block
  * @param size number of ints expected in the block payload
  * @return the on-disk size of the block including its header, i.e. the offset delta to the
  *     next block
  * @throws IOException if the block cannot be read
  */
 private long readAndVerifyBlock(long pos, HFileContext ctx, HFileBlock.FSReaderImpl hbr, int size)
     throws IOException {
   HFileBlock b = hbr.readBlockData(pos, -1, -1, false);
   assertEquals(0, HFile.getChecksumFailuresCount());
   b.sanityCheck();
   // Blocks come off disk packed (encrypted/compressed); unpack before inspecting payload.
   assertFalse(b.isUnpacked());
   b = b.unpack(ctx, hbr);
   LOG.info(
       "Read a block at "
           + pos
           + " with"
           + " onDiskSizeWithHeader="
           + b.getOnDiskSizeWithHeader()
           // Fixed label: was duplicated as "uncompressedSizeWithoutHeader" even though the
           // value logged here is the on-disk size without header.
           + " onDiskSizeWithoutHeader="
           + b.getOnDiskSizeWithoutHeader()
           + " uncompressedSizeWithoutHeader="
           + b.getUncompressedSizeWithoutHeader());
   DataInputStream dis = b.getByteStream();
   for (int i = 0; i < size; i++) {
     int read = dis.readInt();
     if (read != i) {
       fail("Block data corrupt at element " + i);
     }
   }
   return b.getOnDiskSizeWithHeader();
 }
コード例 #2
0
ファイル: HFileWriterV2.java プロジェクト: RonghaiMa/hbase
 /**
  * Adds the most recently written HFile block to the block cache.
  *
  * @param offset the offset of the block we want to cache. Used to determine the cache key.
  */
 private void doCacheOnWrite(long offset) {
   // Cache-on-write is never applied to data blocks written during a compaction,
   // so this write can safely be treated as a non-compaction write.
   final boolean isCompaction = false;
   final HFileBlock blockForCache =
       blockEncoder.diskToCacheFormat(fsBlockWriter.getBlockForCaching(), isCompaction);
   final BlockCacheKey cacheKey =
       new BlockCacheKey(
           name, offset, blockEncoder.getEncodingInCache(), blockForCache.getBlockType());
   cacheConf.getBlockCache().cacheBlock(cacheKey, blockForCache);
 }