 /**
  * Get the blocks in the specified range, including only complete (finalized) blocks.
  * Blocks are fetched from the namenode if they are not already cached.
  */
 private synchronized List<LocatedBlock> getFinalizedBlockRange(long offset, long length)
     throws IOException {
   assert (locatedBlocks != null) : "locatedBlocks is null";
   List<LocatedBlock> blockRange = new ArrayList<LocatedBlock>();
   // search cached blocks first
   int blockIdx = locatedBlocks.findBlock(offset);
   if (blockIdx < 0) { // block is not cached
     blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
   }
   long remaining = length;
   long curOff = offset;
   while (remaining > 0) {
     LocatedBlock blk = null;
      if (blockIdx < locatedBlocks.locatedBlockCount()) {
        blk = locatedBlocks.get(blockIdx);
      }
      if (blk == null || curOff < blk.getStartOffset()) {
        // the requested offset is not covered by the cached blocks;
        // fetch the missing range from the namenode and retry this offset
        LocatedBlocks newBlocks =
            DFSClient.callGetBlockLocations(dfsClient.namenode, src, curOff, remaining);
        locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
        continue;
      }
     assert curOff >= blk.getStartOffset() : "Block not found";
     blockRange.add(blk);
      // number of bytes of this block that fall inside the requested range
      long bytesInBlock = blk.getStartOffset() + blk.getBlockSize() - curOff;
      remaining -= bytesInBlock;
      curOff += bytesInBlock;
     blockIdx++;
   }
   return blockRange;
 }
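  /*
   * Hedged usage sketch, not part of the original class: sums the on-disk
   * sizes of the finalized blocks that cover a byte range, relying only on
   * the contract above (the returned blocks are contiguous and complete).
   * The helper name is an assumption for illustration.
   */
  private synchronized long countFinalizedBlockBytes(long offset, long length)
      throws IOException {
    long total = 0;
    for (LocatedBlock blk : getFinalizedBlockRange(offset, length)) {
      total += blk.getBlockSize(); // each returned block is complete
    }
    return total;
  }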
  /**
   * TC11: Racing rename.
   *
   * @throws Exception if the test fails
   */
  public void testTC11() throws Exception {
    final Path p = new Path("/TC11/foo");
    System.out.println("p=" + p);

    // a. Create file and write one block of data. Close file.
    final int len1 = (int) BLOCK_SIZE;
    {
      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
      AppendTestUtil.write(out, 0, len1);
      out.close();
    }

    // b. Reopen file in "append" mode. Append half block of data.
    FSDataOutputStream out = fs.append(p);
    final int len2 = (int) (BLOCK_SIZE / 2);
    AppendTestUtil.write(out, len1, len2);
    out.hflush();

    // c. Rename file to file.new.
    final Path pnew = new Path(p + ".new");
    assertTrue(fs.rename(p, pnew));

    // d. Close file handle that was opened in (b).
    try {
      out.close();
      fail("close() should throw an exception");
    } catch (Exception e) {
      AppendTestUtil.LOG.info("GOOD!", e);
    }

    // shorten the lease period so lease recovery kicks in, then wait for it
    cluster.setLeasePeriod(1000, 1000);
    AppendTestUtil.sleep(5000);

    // check block sizes
    final long len = fs.getFileStatus(pnew).getLen();
    final LocatedBlocks locatedblocks =
        fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
    final int numblock = locatedblocks.locatedBlockCount();
    for (int i = 0; i < numblock; i++) {
      final LocatedBlock lb = locatedblocks.get(i);
      final Block blk = lb.getBlock();
      final long size = lb.getBlockSize();
      if (i < numblock - 1) {
        // every block except the last one must be full
        assertEquals(BLOCK_SIZE, size);
      }
      for (DatanodeInfo datanodeinfo : lb.getLocations()) {
        final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
        final Block metainfo = dn.data.getStoredBlock(blk.getBlockId());
        assertEquals(size, metainfo.getNumBytes());
      }
    }
  }
  private BlockInfo createBlockInfo(Path file, LocatedBlock b) {
    DatanodeInfo[] locations = b.getLocations();
    String[] hosts = new String[locations.length];
    String[] names = new String[locations.length];
    for (int i = 0; i < locations.length; ++i) {
      DatanodeInfo d = locations[i];
      hosts[i] = d.getHost();
      names[i] = d.getName();
    }

    BlockLocation loc = new BlockLocation(names, hosts, b.getStartOffset(), b.getBlockSize());
    return new BlockInfo(loc, file);
  }
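  /*
   * Illustrative companion (assumed, not in the original source): applies
   * createBlockInfo to every block of a file, given a LocatedBlocks handle
   * obtained elsewhere, e.g. from a getBlockLocations call as in the tests
   * above.
   */
  private List<BlockInfo> createBlockInfos(Path file, LocatedBlocks blocks) {
    List<BlockInfo> infos = new ArrayList<BlockInfo>(blocks.locatedBlockCount());
    for (LocatedBlock b : blocks.getLocatedBlocks()) {
      infos.add(createBlockInfo(file, b));
    }
    return infos;
  }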
  /**
   * Read bytes starting from the specified position.
   *
   * @param position position in the file to start reading from
   * @param buffer read buffer
   * @param offset offset into buffer
   * @param length number of bytes to read
   * @return actual number of bytes read
   * @throws IOException if an I/O error occurs
   */
  @Override
  public int read(long position, byte[] buffer, int offset, int length) throws IOException {
    // sanity checks
    dfsClient.checkOpen();
    if (closed) {
      throw new IOException("Stream closed");
    }
    failures = 0;
    long filelen = getFileLength();
    if ((position < 0) || (position >= filelen)) {
      return -1;
    }
    int realLen = length;
    if ((position + length) > filelen) {
      realLen = (int) (filelen - position);
    }

    // determine the block and byte range within the block
    // corresponding to position and realLen
    List<LocatedBlock> blockRange = getBlockRange(position, realLen);
    int remaining = realLen;
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap =
        new HashMap<ExtendedBlock, Set<DatanodeInfo>>();
    for (LocatedBlock blk : blockRange) {
      long targetStart = position - blk.getStartOffset();
      long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
      try {
        fetchBlockByteRange(
            blk, targetStart, targetStart + bytesToRead - 1, buffer, offset, corruptedBlockMap);
      } finally {
        // Check and report if any block replicas are corrupted.
        // BlockMissingException may be caught if all block replicas are
        // corrupted.
        reportCheckSumFailure(corruptedBlockMap, blk.getLocations().length);
      }

      remaining -= bytesToRead;
      position += bytesToRead;
      offset += bytesToRead;
    }
    assert remaining == 0 : "Wrong number of bytes read.";
    if (dfsClient.stats != null) {
      dfsClient.stats.incrementBytesRead(realLen);
    }
    return realLen;
  }
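  /*
   * Hedged caller sketch (an assumed helper, not part of this class): the
   * positional read above returns at most the number of bytes left in the
   * file and never moves the stream's seek pointer, so a caller that needs
   * exactly buffer.length bytes loops until the buffer is full. Assumes
   * java.io.EOFException is imported.
   */
  private void readFullyAt(long position, byte[] buffer) throws IOException {
    int done = 0;
    while (done < buffer.length) {
      int n = read(position + done, buffer, done, buffer.length - done);
      if (n < 0) {
        throw new EOFException("EOF after " + done + " of " + buffer.length + " bytes");
      }
      done += n;
    }
  }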
  /**
   * TC7: Corrupted replicas are present.
   *
   * @throws Exception if the test fails
   */
  public void testTC7() throws Exception {
    final short repl = 2;
    final Path p = new Path("/TC7/foo");
    System.out.println("p=" + p);

    // a. Create file with replication factor of 2. Write half block of data. Close file.
    final int len1 = (int) (BLOCK_SIZE / 2);
    {
      FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
      AppendTestUtil.write(out, 0, len1);
      out.close();
    }
    DFSTestUtil.waitReplication(fs, p, repl);

    // b. Log into one datanode that has one replica of this block.
    //   Find the block file on this datanode and truncate it to zero size.
    final LocatedBlocks locatedblocks =
        fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
    assertEquals(1, locatedblocks.locatedBlockCount());
    final LocatedBlock lb = locatedblocks.get(0);
    final Block blk = lb.getBlock();
    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final FSDataset data = (FSDataset) dn.getFSDataset();
    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
    raf.setLength(0);
    raf.close();

    // c. Open file in "append mode".  Append a new block worth of data. Close file.
    final int len2 = (int) BLOCK_SIZE;
    {
      FSDataOutputStream out = fs.append(p);
      AppendTestUtil.write(out, len1, len2);
      out.close();
    }

    // d. Reopen file and read two blocks worth of data.
    AppendTestUtil.check(fs, p, len1 + len2);
  }
  /**
   * Get the block at the specified position. Fetch it from the namenode if not cached.
   *
   * @param offset offset into the file; the block containing this offset is returned
   * @param updatePosition whether to update the current stream position
   * @return the located block containing the offset
   * @throws IOException if the offset is out of range or the namenode lookup fails
   */
  private synchronized LocatedBlock getBlockAt(long offset, boolean updatePosition)
      throws IOException {
    assert (locatedBlocks != null) : "locatedBlocks is null";

    final LocatedBlock blk;

    // check offset
    if (offset < 0 || offset >= getFileLength()) {
      throw new IOException(
          "offset < 0 || offset > getFileLength(), offset="
              + offset
              + ", updatePosition="
              + updatePosition
              + ", locatedBlocks="
              + locatedBlocks);
    } else if (offset >= locatedBlocks.getFileLength()) {
      // offset to the portion of the last block,
      // which is not known to the name-node yet;
      // getting the last block
      blk = locatedBlocks.getLastLocatedBlock();
    } else {
      // search cached blocks first
      int targetBlockIdx = locatedBlocks.findBlock(offset);
      if (targetBlockIdx < 0) { // block is not cached
        targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
        // fetch more blocks
        LocatedBlocks newBlocks =
            DFSClient.callGetBlockLocations(dfsClient.namenode, src, offset, prefetchSize);
        assert (newBlocks != null) : "Could not find target position " + offset;
        locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
      }
      blk = locatedBlocks.get(targetBlockIdx);
    }

    // update current position
    if (updatePosition) {
      pos = offset;
      blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1;
      currentLocatedBlock = blk;
    }
    return blk;
  }
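  /*
   * Sketch of the resulting invariant (an assumption for illustration, not
   * original code): after getBlockAt(offset, true), the cached block spans
   * [currentLocatedBlock.getStartOffset(), blockEnd], so a sequential read
   * can test membership before consulting the namenode again.
   */
  private synchronized boolean offsetWithinCurrentBlock(long offset) {
    return currentLocatedBlock != null
        && offset >= currentLocatedBlock.getStartOffset()
        && offset <= blockEnd;
  }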
  /** Removes a block from the har part file. */
  private void removeHarParityBlock(int block) throws IOException {
    Path harPath = new Path(RAID_PATH, HAR_NAME);
    FileStatus[] listPaths = dfs.listStatus(harPath);

    boolean deleted = false;

    for (FileStatus f : listPaths) {
      if (f.getPath().getName().startsWith("part-")) {
        final Path partPath = new Path(f.getPath().toUri().getPath());
        final LocatedBlocks partBlocks =
            dfs.getClient().namenode.getBlockLocations(partPath.toString(), 0, f.getLen());

        if (partBlocks.locatedBlockCount() <= block) {
          throw new IOException(
              "invalid har block " + block + ": the part file has only "
                  + partBlocks.locatedBlockCount() + " blocks");
        }

        final LocatedBlock partBlock = partBlocks.get(block);
        removeAndReportBlock(dfs, partPath, partBlock);
        LOG.info(
            "removed block "
                + block
                + "/"
                + partBlocks.locatedBlockCount()
                + " of file "
                + partPath.toString()
                + " block size "
                + partBlock.getBlockSize());
        deleted = true;
        break;
      }
    }

    if (!deleted) {
      throw new IOException("cannot find part file in " + harPath.toString());
    }
  }
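  /*
   * Hedged companion sketch (an assumed helper, not in the original test):
   * counts the blocks of the first har part file, i.e. the valid index range
   * for removeHarParityBlock. It uses only the calls already exercised above.
   */
  private int countHarParityBlocks() throws IOException {
    Path harPath = new Path(RAID_PATH, HAR_NAME);
    for (FileStatus f : dfs.listStatus(harPath)) {
      if (f.getPath().getName().startsWith("part-")) {
        final Path partPath = new Path(f.getPath().toUri().getPath());
        return dfs.getClient().namenode
            .getBlockLocations(partPath.toString(), 0, f.getLen())
            .locatedBlockCount();
      }
    }
    throw new IOException("cannot find part file in " + harPath);
  }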