/** Check that DFS handles corrupted blocks properly. */
 @Test
 public void testFileCorruption() throws Exception {
   MiniDFSCluster cluster = null;
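   // Utility that creates, and later verifies, 20 small test files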
   DFSTestUtil util =
       new DFSTestUtil.Builder().setName("TestFileCorruption").setNumFiles(20).build();
   try {
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     // Now deliberately remove the blocks
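      // Locate the finalized block directory of the third datanode (index 2) for this block pool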
     File storageDir = cluster.getInstanceStorageDir(2, 0);
     String bpid = cluster.getNamesystem().getBlockPoolId();
     File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
     assertTrue("data directory does not exist", data_dir.exists());
     File[] blocks = data_dir.listFiles();
     assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
     for (int idx = 0; idx < blocks.length; idx++) {
       if (!blocks[idx].getName().startsWith("blk_")) {
         continue;
       }
       System.out.println("Deliberately removing file " + blocks[idx].getName());
       assertTrue("Cannot remove file.", blocks[idx].delete());
     }
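      // The files should still verify correctly using the replicas on the remaining datanodes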
     assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
     util.cleanup(fs, "/srcdat");
   } finally {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 }
   /**
    * Test the case where a replica is reported corrupt while it is not in the blocksMap. Make sure
    * that an ArrayIndexOutOfBoundsException is not thrown. See HADOOP-4351.
    *
    * <p>TODO HOPS: This test fails because it tries to remove a non-existent replica. Calling
    * findAndMarkBlockAsCorrupt from a DataNode that does not store any replica for this specific
    * block leads to a "tuple did not exist" exception. The reason is that
    * BlockManager.removeStoredBlock is called with a node that does not store a replica, and hence
    * the delete cannot succeed during commit.
    */
  @Test
  public void testArrayOutOfBoundsException() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      final Path FILE_PATH = new Path("/tmp.txt");
      final long FILE_LEN = 1L;
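       // Create a one-byte file replicated on both datanodes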
      DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);

      // get the block
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      File storageDir = cluster.getInstanceStorageDir(0, 0);
      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      assertTrue("Data directory does not exist", dataDir.exists());
      ExtendedBlock blk = getBlock(bpid, dataDir);
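       // The block may be in either of the datanode's storage directories; fall back to the second one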
      if (blk == null) {
        storageDir = cluster.getInstanceStorageDir(0, 1);
        dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        blk = getBlock(bpid, dataDir);
      }
       assertFalse(
           "Data directory does not contain any blocks or there was an IO error", blk == null);

      // start a third datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);

      // report corrupted block by the third datanode
      DatanodeRegistration dnR =
          DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());

      // Get the storage id of one of the storages on the datanode
      String storageId =
          cluster
              .getNamesystem()
              .getBlockManager()
              .getDatanodeManager()
              .getDatanode(dataNode.getDatanodeId())
              .getStorageInfos()[0]
              .getStorageID();

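       // Report the block as corrupt from the new datanode, which holds no replica of it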
      cluster
          .getNamesystem()
          .getBlockManager()
          .findAndMarkBlockAsCorrupt(blk, new DatanodeInfo(dnR), storageId, "some test reason");

       // open the file; this should not throw an ArrayIndexOutOfBoundsException
      fs.open(FILE_PATH);

      // clean up
      fs.delete(FILE_PATH, false);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }