  /**
   * Move a finalized block file (and its metadata file) from {@code f}
   * into the block-id based subdirectory under the finalized directory.
   *
   * @return the block file at its new location
   */
  File addBlock(Block b, File f) throws IOException {
   File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
   if (!blockDir.exists()) {
     if (!blockDir.mkdirs()) {
       throw new IOException("Failed to mkdirs " + blockDir);
     }
   }
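    // moveBlockFiles relocates both the block file and its companion
    // metadata (.meta) file into blockDir.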
   File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
   File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
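    // Charge the volume's DFS-used counter for both the data bytes and
    // the metadata file just moved in.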
   dfsUsage.incDfsUsed(b.getNumBytes() + metaFile.length());
   return blockFile;
 }
  /**
   * Find the number of bytes in the block that match its CRC.
   *
   * <p>This algorithm assumes that data corruption caused by an unexpected
   * datanode shutdown occurs only in the last CRC chunk, so it verifies
   * only that chunk. As a side effect, the block file is truncated to the
   * validated length if it contains trailing bytes not covered by a CRC.
   *
   * @param blockFile the block file
   * @param genStamp generation stamp of the block
   * @return the number of valid bytes
   */
  private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
    DataInputStream checksumIn = null;
    InputStream blockIn = null;
    try {
      final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
      long blockFileLen = blockFile.length();
      long metaFileLen = metaFile.length();
      int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
      if (!blockFile.exists()
          || blockFileLen == 0
          || !metaFile.exists()
          || metaFileLen < crcHeaderLen) {
        return 0;
      }
      checksumIn =
          new DataInputStream(
              new BufferedInputStream(
                  new FileInputStream(metaFile), HdfsConstants.IO_FILE_BUFFER_SIZE));

      // Read and validate the common metadata header (version plus
      // checksum type and parameters).
      final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(checksumIn, metaFile);
      int bytesPerChecksum = checksum.getBytesPerChecksum();
      int checksumSize = checksum.getChecksumSize();
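      // The chunk count is bounded by whichever file is shorter: the data
      // implies ceil(blockFileLen / bytesPerChecksum) chunks, while the meta
      // file holds (metaFileLen - crcHeaderLen) / checksumSize checksums.
      // E.g. with bytesPerChecksum = 512 and checksumSize = 4, a 1000-byte
      // block implies 2 chunks, but if only one checksum was flushed past
      // the header, numChunks = 1.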
      long numChunks =
          Math.min(
              (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
              (metaFileLen - crcHeaderLen) / checksumSize);
      if (numChunks == 0) {
        return 0;
      }
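      // Position both streams at the last chunk: skip all but the final
      // checksum in the meta stream, and all but the final chunk's data in
      // the block stream.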
      IOUtils.skipFully(checksumIn, (numChunks - 1) * checksumSize);
      blockIn = new FileInputStream(blockFile);
      long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;
      IOUtils.skipFully(blockIn, lastChunkStartPos);
      int lastChunkSize = (int) Math.min(bytesPerChecksum, blockFileLen - lastChunkStartPos);
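      // Buffer layout: chunk data in [0, lastChunkSize), followed by the
      // stored checksum in [lastChunkSize, lastChunkSize + checksumSize).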
      byte[] buf = new byte[lastChunkSize + checksumSize];
      checksumIn.readFully(buf, lastChunkSize, checksumSize);
      IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

      checksum.update(buf, 0, lastChunkSize);
      long validFileLength;
      if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
        validFileLength = lastChunkStartPos + lastChunkSize;
      } else { // last chunk is corrupt
        validFileLength = lastChunkStartPos;
      }

      // truncate if extra bytes are present without CRC
      if (blockFile.length() > validFileLength) {
        RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
        try {
          // truncate blockFile
          blockRAF.setLength(validFileLength);
        } finally {
          blockRAF.close();
        }
      }

      return validFileLength;
    } catch (IOException e) {
      FsDatasetImpl.LOG.warn("Failed to validate integrity of " + blockFile, e);
      return 0;
    } finally {
      IOUtils.closeStream(checksumIn);
      IOUtils.closeStream(blockIn);
    }
  }
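
  // Illustrative sketch only (not part of the original class): one way a
  // recovery path could use validateIntegrityAndSetLength after an unclean
  // shutdown. recoverBlock is a hypothetical helper; the real call sites
  // live in the datanode's volume/block-pool recovery code.
  private long recoverBlock(Block b, File blockFile) {
    // Trust only the bytes whose checksums verify; the block file is
    // truncated to that length as a side effect of validation.
    long validLength = validateIntegrityAndSetLength(blockFile, b.getGenerationStamp());
    b.setNumBytes(validLength);
    return validLength;
  }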