Example 1
  /** Clean up the current block */
  private void finishBlock() throws IOException {
    if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0) return;

    long startTimeNs = System.nanoTime();
    // Update the first data block offset for scanning.
    if (firstDataBlockOffset == -1) {
      firstDataBlockOffset = outputStream.getPos();
    }
    // Update the last data block offset
    lastDataBlockOffset = outputStream.getPos();
    fsBlockWriter.writeHeaderAndData(outputStream);
    int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader();
    // Generate a shorter fake key to put into the index block. For example, consider a block
    // boundary between the keys "the quick brown fox" and "the who test text". We can use "the r"
    // as the key for the index block entry since it is > all entries in the previous block and
    // <= all entries in subsequent blocks.
    if (comparator instanceof KeyComparator) {
      byte[] fakeKey =
          ((KeyComparator) comparator).getShortMidpointKey(lastKeyOfPreviousBlock, firstKeyInBlock);
      if (comparator.compare(fakeKey, firstKeyInBlock) > 0) {
        throw new IOException(
            "Unexpected getShortMidpointKey result, fakeKey:"
                + Bytes.toStringBinary(fakeKey)
                + ", firstKeyInBlock:"
                + Bytes.toStringBinary(firstKeyInBlock));
      }
      if (lastKeyOfPreviousBlock != null
          && comparator.compare(lastKeyOfPreviousBlock, fakeKey) >= 0) {
        throw new IOException(
            "Unexpected getShortMidpointKey result, lastKeyOfPreviousBlock:"
                + Bytes.toStringBinary(lastKeyOfPreviousBlock)
                + ", fakeKey:"
                + Bytes.toStringBinary(fakeKey));
      }
      dataBlockIndexWriter.addEntry(fakeKey, lastDataBlockOffset, onDiskSize);
    } else {
      dataBlockIndexWriter.addEntry(firstKeyInBlock, lastDataBlockOffset, onDiskSize);
    }
    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
    HFile.offerWriteLatency(System.nanoTime() - startTimeNs);
    if (cacheConf.shouldCacheDataOnWrite()) {
      doCacheOnWrite(lastDataBlockOffset);
    }
  }
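
The comment above describes the key-shortening trick, but the actual getShortMidpointKey implementation lives inside the comparator. The following is a minimal, self-contained sketch of the idea (hypothetical code, not the HBase KeyComparator implementation): return a key that is strictly greater than the last key of the previous block, less than or equal to the first key of the current block, and as short as possible.

import java.util.Arrays;

public class ShortMidpointKeySketch {

  static byte[] shortMidpointKey(byte[] left, byte[] right) {
    if (left == null) {
      // No previous block: the first key of the current block is the only safe choice.
      return right;
    }
    int minLen = Math.min(left.length, right.length);
    int diffIdx = 0;
    while (diffIdx < minLen && left[diffIdx] == right[diffIdx]) {
      diffIdx++;
    }
    if (diffIdx == minLen) {
      // left is a prefix of right: no shorter key is > left and <= right, so fall back to right.
      return right;
    }
    int leftByte = left[diffIdx] & 0xFF;
    int rightByte = right[diffIdx] & 0xFF;
    if (leftByte >= rightByte) {
      // Keys arrived out of order; fall back defensively to the first key of the block.
      return right;
    }
    // Truncate after the first differing byte and bump it by one: the result is > left,
    // <= right, and usually much shorter than either key.
    byte[] mid = Arrays.copyOf(left, diffIdx + 1);
    mid[diffIdx] = (byte) (leftByte + 1);
    return mid;
  }

  public static void main(String[] args) {
    byte[] mid = shortMidpointKey("the quick brown fox".getBytes(), "the who test text".getBytes());
    System.out.println(new String(mid)); // prints "the r", as in the comment above
  }
}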
Example 2
  /** Gives inline block writers an opportunity to contribute blocks. */
  private void writeInlineBlocks(boolean closing) throws IOException {
    for (InlineBlockWriter ibw : inlineBlockWriters) {
      while (ibw.shouldWriteBlock(closing)) {
        long offset = outputStream.getPos();
        boolean cacheThisBlock = ibw.getCacheOnWrite();
        ibw.writeInlineBlock(fsBlockWriter.startWriting(ibw.getInlineBlockType()));
        fsBlockWriter.writeHeaderAndData(outputStream);
        ibw.blockWritten(
            offset,
            fsBlockWriter.getOnDiskSizeWithHeader(),
            fsBlockWriter.getUncompressedSizeWithoutHeader());
        totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();

        if (cacheThisBlock) {
          doCacheOnWrite(offset);
        }
      }
    }
  }
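
For reference, this is the contract that writeInlineBlocks relies on, reconstructed purely from the call sites above; the method names and parameter order mirror the loop, but the real InlineBlockWriter interface may differ in detail (DataOutput and BlockType are the types used by fsBlockWriter.startWriting).

  /** Sketch of the inline-block-writer contract as used by writeInlineBlocks(boolean). */
  interface InlineBlockWriterContract {
    /** True if the writer has buffered enough data (or the file is closing) to emit a block now. */
    boolean shouldWriteBlock(boolean closing);

    /** The block type to hand to fsBlockWriter.startWriting(...). */
    BlockType getInlineBlockType();

    /** Serialize the buffered data into the stream opened by startWriting(). */
    void writeInlineBlock(DataOutput out) throws IOException;

    /** Callback with the block's final placement, so the writer can record it in its own index. */
    void blockWritten(long offset, int onDiskSize, int uncompressedSize);

    /** Whether blocks produced by this writer should also be handed to doCacheOnWrite(). */
    boolean getCacheOnWrite();
  }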
Example 3
  /** Clean up the current block */
  private void finishBlock() throws IOException {
    if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0) return;

    // Update the first data block offset for scanning.
    if (firstDataBlockOffset == -1) {
      firstDataBlockOffset = outputStream.getPos();
    }
    // Update the last data block offset
    lastDataBlockOffset = outputStream.getPos();
    fsBlockWriter.writeHeaderAndData(outputStream);
    int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader();

    byte[] indexKey = comparator.calcIndexKey(lastKeyOfPreviousBlock, firstKeyInBlock);
    dataBlockIndexWriter.addEntry(indexKey, lastDataBlockOffset, onDiskSize);
    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
    if (cacheConf.shouldCacheDataOnWrite()) {
      doCacheOnWrite(lastDataBlockOffset);
    }
  }
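
Relative to Example 1, the fake-key generation and its sanity checks have been pushed down into the comparator, so finishBlock only asks for the index key. A hypothetical calcIndexKey in that spirit (reusing the shortMidpointKey sketch shown after Example 1; the real comparator implementation may differ) could look like this:

  byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock) {
    if (lastKeyOfPreviousBlock == null) {
      // First block of the file: there is no previous key to shorten against.
      return firstKeyInBlock;
    }
    return ShortMidpointKeySketch.shortMidpointKey(lastKeyOfPreviousBlock, firstKeyInBlock);
  }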
Example 4
  private int writeBlock(FSDataOutputStream os, HFileContext fileContext, int size)
      throws IOException {
    HFileBlock.Writer hbw = new HFileBlock.Writer(null, fileContext);
    DataOutputStream dos = hbw.startWriting(BlockType.DATA);
    for (int j = 0; j < size; j++) {
      dos.writeInt(j);
    }
    hbw.writeHeaderAndData(os);
    LOG.info(
        "Wrote a block at "
            + os.getPos()
            + " with"
            + " onDiskSizeWithHeader="
            + hbw.getOnDiskSizeWithHeader()
            + " onDiskSizeWithoutHeader="
            + hbw.getOnDiskSizeWithoutHeader()
            + " uncompressedSizeWithoutHeader="
            + hbw.getUncompressedSizeWithoutHeader());
    return hbw.getOnDiskSizeWithHeader();
  }
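
A hypothetical caller in the same test class (writeBlocks is not part of the original, and the usual java.util imports are assumed): it writes a few blocks back to back and records where each one starts, so the test can later seek back and re-read them.

  private List<Long> writeBlocks(FSDataOutputStream os, HFileContext fileContext) throws IOException {
    List<Long> blockOffsets = new ArrayList<>();
    for (int i = 1; i <= 5; i++) {
      // Remember the start offset before writeBlock() advances the stream.
      blockOffsets.add(os.getPos());
      writeBlock(os, fileContext, 1000 * i);
    }
    return blockOffsets;
  }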
Example 5
  @Override
  public void close() throws IOException {
    if (outputStream == null) {
      return;
    }
    // Save data block encoder metadata in the file info.
    blockEncoder.saveMetadata(this);
    // Write out the end of the data blocks and any remaining inline blocks, then the meta data
    // blocks, followed by the data block index, the meta block index, and the file info.

    finishBlock();
    writeInlineBlocks(true);

    FixedFileTrailer trailer = new FixedFileTrailer(2, HFileReaderV2.MAX_MINOR_VERSION);

    // Write out the metadata blocks if any.
    if (!metaNames.isEmpty()) {
      for (int i = 0; i < metaNames.size(); ++i) {
        // store the beginning offset
        long offset = outputStream.getPos();
        // write the metadata content
        DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META);
        metaData.get(i).write(dos);

        fsBlockWriter.writeHeaderAndData(outputStream);
        totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();

        // Add the new meta block to the meta index.
        metaBlockIndexWriter.addEntry(
            metaNames.get(i), offset, fsBlockWriter.getOnDiskSizeWithHeader());
      }
    }

    // Load-on-open section.

    // Data block index.
    //
    // In version 2, this section of the file starts with the root level data
    // block index. We call a function that writes intermediate-level blocks
    // first, then root level, and returns the offset of the root level block
    // index.

    long rootIndexOffset = dataBlockIndexWriter.writeIndexBlocks(outputStream);
    trailer.setLoadOnOpenOffset(rootIndexOffset);

    // Meta block index.
    metaBlockIndexWriter.writeSingleLevelIndex(
        fsBlockWriter.startWriting(BlockType.ROOT_INDEX), "meta");
    fsBlockWriter.writeHeaderAndData(outputStream);
    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();

    if (this.includeMemstoreTS) {
      appendFileInfo(MAX_MEMSTORE_TS_KEY, Bytes.toBytes(maxMemstoreTS));
      appendFileInfo(KEY_VALUE_VERSION, Bytes.toBytes(KEY_VALUE_VER_WITH_MEMSTORE));
    }

    // File info
    writeFileInfo(trailer, fsBlockWriter.startWriting(BlockType.FILE_INFO));
    fsBlockWriter.writeHeaderAndData(outputStream);
    totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();

    // Load-on-open data supplied by higher levels, e.g. Bloom filters.
    for (BlockWritable w : additionalLoadOnOpenData) {
      fsBlockWriter.writeBlock(w, outputStream);
      totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
    }

    // Now finish off the trailer.
    trailer.setNumDataIndexLevels(dataBlockIndexWriter.getNumLevels());
    trailer.setUncompressedDataIndexSize(dataBlockIndexWriter.getTotalUncompressedSize());
    trailer.setFirstDataBlockOffset(firstDataBlockOffset);
    trailer.setLastDataBlockOffset(lastDataBlockOffset);
    trailer.setComparatorClass(comparator.getClass());
    trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries());

    finishClose(trailer);

    fsBlockWriter.release();
  }
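
For orientation, the section order that close() lays down, read off directly from the calls above:

  // data blocks ............. written earlier, one per finishBlock()
  // final inline blocks ..... writeInlineBlocks(true): whatever the inline block writers still hold
  // meta blocks ............. one per entry in metaNames / metaData
  // data block index ........ intermediate-level blocks, then the root level; the root offset
  //                           returned by writeIndexBlocks() becomes the trailer's load-on-open offset
  // meta block index ........ writeSingleLevelIndex(..., "meta")
  // file info ............... writeFileInfo(trailer, ...)
  // additional load-on-open data, e.g. Bloom filter metadata (additionalLoadOnOpenData)
  // fixed file trailer ...... finishClose(trailer)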