Example #1
 /*
  * This test attempts to upgrade the datanode from federation
  * version -35 to a later version.
  * This test is for a non-federation cluster with a single namenode.
  */
 public void testNonFederationClusterUpgradeAfterFederationVersion() throws Exception {
   File[] baseDirs;
   UpgradeUtilities.initialize();
   for (int numDirs = 1; numDirs <= 2; numDirs++) {
     conf = new Configuration();
     conf.setInt("dfs.datanode.scan.period.hours", -1);
     conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
     String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
     String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
     log("DataNode upgrade with federation layout version in current", numDirs);
     UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
     try {
       cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
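        // Stamp the DataNode storage with the federation layout version (-35);
        // the REGULAR start below must then upgrade it to the current layout.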
       UpgradeUtilities.createVersionFile(
           DATA_NODE,
           baseDirs,
           new StorageInfo(
               FSConstants.FEDERATION_VERSION,
               UpgradeUtilities.getCurrentNamespaceID(cluster),
               UpgradeUtilities.getCurrentFsscTime(cluster)),
           cluster.getNameNode().getNamespaceID());
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       checkResult(DATA_NODE, dataNodeDirs, 0, false);
     } finally {
       if (cluster != null) cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     }
   }
 }
Example #2
  /** Test the update of NeededReplications for the appended block. */
  @Test(timeout = 60000)
  public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    DistributedFileSystem fileSystem = null;
    try {
      // create a file.
      fileSystem = cluster.getFileSystem();
      Path f = new Path("/testAppend");
      FSDataOutputStream create = fileSystem.create(f, (short) 2);
      create.write("/testAppend".getBytes());
      create.close();

      // Append to the file.
      FSDataOutputStream append = fileSystem.append(f);
      append.write("/testAppend".getBytes());
      append.close();

      // Start a new datanode
      cluster.startDataNodes(conf, 1, true, null, null);

      // Check for replications
      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }
      cluster.shutdown();
    }
  }
Example #3
 /**
  * Attempts to start a DataNode with the given operation. Starting the DataNode should throw an
  * exception.
  */
 void startDataNodeShouldFail(StartupOption operation) {
   try {
     cluster.startDataNodes(conf, 1, false, operation, null); // should fail
     throw new AssertionError("DataNode should have failed to start");
   } catch (Exception expected) {
     // expected
     assertFalse(cluster.isDataNodeUp());
   }
 }
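
Example #5 below calls a complementary startNameNodeShouldFail helper that is not shown in this listing. A minimal sketch of what it presumably looks like, by analogy with startDataNodeShouldFail above (the cluster and conf fields come from the surrounding test class; the real helper may differ):

  void startNameNodeShouldFail(StartupOption operation) {
    try {
      // should fail: the storage state prepared by the test is invalid for this operation
      cluster = new MiniDFSCluster(conf, 0, operation);
      throw new AssertionError("NameNode should have failed to start");
    } catch (Exception expected) {
      // expected
    }
  }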
Example #4
  /*
   * This test attempts to upgrade the datanode from federation version -35 to
   * a later version. This test is for a federation cluster with 2 namenodes.
   * It changes the layout version and ctime.
   */
  public void testFederationClusterUpgradeAfterFederationVersionWithCTimeChange() throws Exception {
    File[] baseDirs;
    Configuration baseConf = new Configuration();
    UpgradeUtilities.initialize(2, baseConf, true);
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      conf = new Configuration();
      conf.setInt("dfs.datanode.scan.period.hours", -1);
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
      log("DataNode upgrade with federation layout version in current and ctime change", numDirs);
      UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
      conf.set(
          FSConstants.DFS_FEDERATION_NAMESERVICES,
          baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
      try {
        cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
        baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
        for (int i = 0; i < 2; i++) {
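          // Write a VERSION file whose cTime is one behind the NameNode's
          // (note the -1 below), forcing the DataNode through the upgrade path.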
          UpgradeUtilities.createVersionFile(
              DATA_NODE,
              baseDirs,
              new StorageInfo(
                  FSConstants.FEDERATION_VERSION,
                  cluster.getNameNode(i).getNamespaceID(),
                  cluster.getNameNode(i).versionRequest().getCTime() - 1),
              cluster.getNameNode(i).getNamespaceID());
        }
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);

        for (int i = 0; i < 2; i++) {
          checkResult(DATA_NODE, dataNodeDirs, i, false);
        }
      } finally {
        if (cluster != null) cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      }
    }
  }
Example #5
  /**
   * This test attempts to upgrade the NameNode and DataNode under a number of valid and invalid
   * conditions.
   */
  public void testUpgrade() throws Exception {
    File[] baseDirs;
    UpgradeUtilities.initialize();
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      conf = new Configuration();
      conf.setInt("dfs.datanode.scan.period.hours", -1);
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");

      log("Normal NameNode upgrade", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      checkResult(NAME_NODE, nameNodeDirs);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);

      log("Normal DataNode upgrade", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkResult(DATA_NODE, dataNodeDirs);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("NameNode upgrade with existing previous dir", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);

      log("DataNode upgrade with existing previous dir", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
      startDataNodeShouldFail(StartupOption.REGULAR);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode upgrade with future stored layout version in current", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createVersionFile(
          DATA_NODE,
          baseDirs,
          new StorageInfo(
              Integer.MIN_VALUE,
              UpgradeUtilities.getCurrentNamespaceID(cluster),
              UpgradeUtilities.getCurrentFsscTime(cluster)),
          cluster.getNameNode().getNamespaceID());
      startDataNodeShouldFail(StartupOption.REGULAR);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode upgrade with newer fsscTime in current", numDirs);
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
      UpgradeUtilities.createVersionFile(
          DATA_NODE,
          baseDirs,
          new StorageInfo(
              UpgradeUtilities.getCurrentLayoutVersion(),
              UpgradeUtilities.getCurrentNamespaceID(cluster),
              Long.MAX_VALUE),
          cluster.getNameNode().getNamespaceID());
      startDataNodeShouldFail(StartupOption.REGULAR);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("NameNode upgrade with no edits file", numDirs);
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      for (File f : baseDirs) {
        FileUtil.fullyDelete(new File(f, "edits"));
      }
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);

      log("NameNode upgrade with no image file", numDirs);
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      for (File f : baseDirs) {
        FileUtil.fullyDelete(new File(f, "fsimage"));
      }
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);

      log("NameNode upgrade with corrupt version file", numDirs);
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      for (File f : baseDirs) {
        UpgradeUtilities.corruptFile(new File(f, "VERSION"));
      }
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);

      log("NameNode upgrade with old layout version in current", numDirs);
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createVersionFile(
          NAME_NODE,
          baseDirs,
          new StorageInfo(
              Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
              UpgradeUtilities.getCurrentNamespaceID(null),
              UpgradeUtilities.getCurrentFsscTime(null)),
          0);
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);

      log("NameNode upgrade with future layout version in current", numDirs);
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createVersionFile(
          NAME_NODE,
          baseDirs,
          new StorageInfo(
              Integer.MIN_VALUE,
              UpgradeUtilities.getCurrentNamespaceID(null),
              UpgradeUtilities.getCurrentFsscTime(null)),
          0);
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);

      log("Normal Datanode upgrade after datanode format", numDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createVersionFile(
          NAME_NODE,
          baseDirs,
          new StorageInfo(
              FSConstants.LAYOUT_VERSION, UpgradeUtilities.getCurrentNamespaceID(null), 0),
          0);
      cluster =
          new MiniDFSCluster(
              0,
              conf,
              1,
              false,
              false,
              false,
              StartupOption.UPGRADE,
              null,
              null,
              null,
              false,
              false,
              1,
              false);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    } // end numDir loop
  }
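
The upgrade tests above rely on a small log helper that is not shown in this listing. A plausible minimal sketch (the exact banner text is an assumption):

  void log(String label, int numDirs) {
    System.out.println("============================================================");
    System.out.println("***TEST*** " + label + " (numDirs=" + numDirs + ")");
  }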
Example #6
  /*
   * This test attempts to upgrade the datanode from federation version -35 in
   * a federation cluster with 2 namenodes, where the VERSION file carries only
   * a top-level layout version and no namespace-level layout version.
   */
  public void testFederationClusterUpgradeAfterFederationVersionWithTopLevelLayout()
      throws Exception {
    File[] baseDirs;
    Configuration baseConf = new Configuration();
    UpgradeUtilities.initialize(2, baseConf, true);
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      conf = new Configuration();
      conf.setInt("dfs.datanode.scan.period.hours", -1);
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
      log(
          "DataNode upgrade with federation layout version in current and no ns level layout version",
          numDirs);
      UpgradeUtilities.createFederatedNameNodeStorageDirs(nameNodeDirs);
      conf.set(
          FSConstants.DFS_FEDERATION_NAMESERVICES,
          baseConf.get(FSConstants.DFS_FEDERATION_NAMESERVICES));
      try {
        cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE, false, 2);
        baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
        for (int i = 0; i < 2; i++) {
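          // Compared to the earlier examples, createVersionFile is passed an
          // extra 'false' here, which presumably omits the namespace-level
          // layout version so that only the top-level one is written.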
          UpgradeUtilities.createVersionFile(
              DATA_NODE,
              baseDirs,
              new StorageInfo(
                  FSConstants.FEDERATION_VERSION,
                  cluster.getNameNode(i).getNamespaceID(),
                  cluster.getNameNode(i).versionRequest().getCTime()),
              cluster.getNameNode(i).getNamespaceID(),
              false);
        }
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);

        for (int i = 0; i < 2; i++) {
          checkResult(DATA_NODE, dataNodeDirs, i, false);
        }

        // Finalize upgrade.
        for (int i = 0; i < 2; i++) {
          cluster.getNameNode(i).finalizeUpgrade();
        }
        cluster.restartDataNodes();

        // Wait for datanodes to finalize.
        Thread.sleep(10000);

        for (int nnIndex = 0; nnIndex < 2; nnIndex++) {
          for (int i = 0; i < dataNodeDirs.length; i++) {
            File nsBaseDir =
                NameSpaceSliceStorage.getNsRoot(
                    cluster.getNameNode(nnIndex).getNamespaceID(),
                    new File(dataNodeDirs[i], "current"));
            assertFalse(new File(nsBaseDir, "previous").exists());
          }
        }
      } finally {
        if (cluster != null) cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
      }
    }
  }
Example #7
  public static void main(String[] args) {
    int numDataNodes = 0;
    int numRacks = 0;
    boolean inject = false;
    long startingBlockId = 1;
    int numBlocksPerDNtoInject = 0;
    int replication = 1;

    Configuration conf = new HdfsConfiguration();

    for (int i = 0; i < args.length; i++) { // parse command line
      if (args[i].equals("-n")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("missing number of nodes");
        }
        numDataNodes = Integer.parseInt(args[i]);
      } else if (args[i].equals("-racks")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing number of racks");
        }
        numRacks = Integer.parseInt(args[i]);
      } else if (args[i].equals("-r")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing replicaiton factor");
        }
        replication = Integer.parseInt(args[i]);
      } else if (args[i].equals("-d")) {
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing datanode dirs parameter");
        }
        dataNodeDirs = args[i];
      } else if (args[i].equals("-simulated")) {
        conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
      } else if (args[i].equals("-inject")) {
        if (!conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false)) {
          System.out.print("-inject is valid only for simulated");
          printUsageExit();
        }
        inject = true;
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing starting block and number of blocks per DN to inject");
        }
        startingBlockId = Integer.parseInt(args[i]);
        if (++i >= args.length || args[i].startsWith("-")) {
          printUsageExit("Missing number of blocks to inject");
        }
        numBlocksPerDNtoInject = Integer.parseInt(args[i]);
      } else {
        printUsageExit();
      }
    }
    if (numDataNodes <= 0 || replication <= 0) {
      printUsageExit("numDataNodes and replication have to be greater than zero");
    }
    if (replication > numDataNodes) {
      printUsageExit("Replication must be less than or equal to numDataNodes");
    }
    String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority();
    if (nameNodeAdr == null) {
      System.out.println("No name node address and port in config");
      System.exit(-1);
    }
    boolean simulated = conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false);
    System.out.println(
        "Starting "
            + numDataNodes
            + (simulated ? " Simulated" : "")
            + " Data Nodes that will connect to Name Node at "
            + nameNodeAdr);

    System.setProperty("test.build.data", dataNodeDirs);

    MiniDFSCluster mc = new MiniDFSCluster();
    try {
      mc.formatDataNodeDirs();
    } catch (IOException e) {
      System.out.println("Error formating data node dirs:" + e);
    }

    String[] rack4DataNode = null;
    if (numRacks > 0) {
      System.out.println("Using " + numRacks + " racks: ");
      String rackPrefix = getUniqueRackPrefix();

      rack4DataNode = new String[numDataNodes];
      for (int i = 0; i < numDataNodes; ++i) {
        rack4DataNode[i] = rackPrefix + "-" + i % numRacks;
        System.out.println("Data Node " + i + " using " + rack4DataNode[i]);
      }
    }
    try {
      mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR, rack4DataNode);
      if (inject) {
        long blockSize = 10;
        System.out.println(
            "Injecting "
                + numBlocksPerDNtoInject
                + " blocks in each DN starting at blockId "
                + startingBlockId
                + " with blocksize of "
                + blockSize);
        Block[] blocks = new Block[numBlocksPerDNtoInject];
        long blkid = startingBlockId;
        for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
          for (int i = 0; i < blocks.length; ++i) {
            blocks[i] = new Block(blkid++, blockSize, CreateEditsLog.BLOCK_GENERATION_STAMP);
          }
          for (int i = 1; i <= replication; ++i) {
            // inject blocks for dn_i into dn_i and replica in dn_i's neighbors
            mc.injectBlocks((i_dn + i - 1) % numDataNodes, Arrays.asList(blocks));
            System.out.println(
                "Injecting blocks of dn " + i_dn + " into dn" + ((i_dn + i - 1) % numDataNodes));
          }
        }
        System.out.println("Created blocks from Bids " + startingBlockId + " to " + (blkid - 1));
      }

    } catch (IOException e) {
      System.out.println("Error creating data node:" + e);
    }
  }
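
Reconstructed from the argument parser above, the command line for this tool has the following shape (a synopsis inferred from the code, not the tool's actual printUsage output):

   -n <numDataNodes> [-racks <numRacks>] [-r <replication>] [-d <dataNodeDirs>]
       [-simulated] [-inject <startingBlockId> <numBlocksPerDNtoInject>]

Note that -inject is rejected unless -simulated appears earlier on the command line, since the parser checks the simulated flag before accepting it.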
Example #8
  /** Test replace datanode on failure. */
  @Test
  public void testReplaceDatanodeOnFailure() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // do not consider load factor when selecting a data node
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
    // always replace a datanode
    ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);

    final String[] racks = new String[REPLICATION];
    Arrays.fill(racks, RACK0);
    final MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).racks(racks).numDataNodes(REPLICATION).build();

    try {
      cluster.waitActive();
      final DistributedFileSystem fs = cluster.getFileSystem();
      final Path dir = new Path(DIR);
      final int NUM_WRITERS = 10;
      final int FIRST_BATCH = 5;
      final SlowWriter[] slowwriters = new SlowWriter[NUM_WRITERS];
      for (int i = 1; i <= slowwriters.length; i++) {
        // create slow writers at different speeds
        slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i), i * 200L);
      }

      for (int i = 0; i < FIRST_BATCH; i++) {
        slowwriters[i].start();
      }

      // Let the slow writers write something.
      // Some of them are too slow and will not have started writing yet.
      sleepSeconds(3);

      // start new datanodes
      cluster.startDataNodes(conf, 2, true, null, new String[] {RACK1, RACK1});
      cluster.waitActive();
      // wait for first block reports for up to 10 seconds
      cluster.waitFirstBRCompleted(0, 10000);

      // stop an old datanode
      MiniDFSCluster.DataNodeProperties dnprop =
          cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));

      for (int i = FIRST_BATCH; i < slowwriters.length; i++) {
        slowwriters[i].start();
      }

      waitForBlockReplication(slowwriters);

      // check replication and interrupt.
      for (SlowWriter s : slowwriters) {
        s.checkReplication();
        s.interruptRunning();
      }

      // close files
      for (SlowWriter s : slowwriters) {
        s.joinAndClose();
      }

      // Verify the file
      LOG.info("Verify the file");
      for (int i = 0; i < slowwriters.length; i++) {
        LOG.info(
            slowwriters[i].filepath
                + ": length="
                + fs.getFileStatus(slowwriters[i].filepath).getLen());
        FSDataInputStream in = null;
        try {
          in = fs.open(slowwriters[i].filepath);
          for (int j = 0, x; (x = in.read()) != -1; j++) {
            Assert.assertEquals(j, x);
          }
        } finally {
          IOUtils.closeStream(in);
        }
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
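
Example #8 depends on a SlowWriter helper whose definition is not shown. Below is a minimal sketch of its shape, inferred from the call sites above (constructor, start, checkReplication, interruptRunning, joinAndClose, and the public filepath field). The real class in the Hadoop test suite differs in detail; in particular, the cast to HdfsDataOutputStream and the use of getCurrentBlockReplication() are assumptions, and REPLICATION is the constant from the surrounding test class:

  static class SlowWriter extends Thread {
    final Path filepath;
    private final HdfsDataOutputStream out;
    private final long sleepms;
    private volatile boolean running = true;

    SlowWriter(DistributedFileSystem fs, Path filepath, long sleepms) throws IOException {
      super(SlowWriter.class.getSimpleName() + ":" + filepath);
      this.filepath = filepath;
      this.out = (HdfsDataOutputStream) fs.create(filepath, REPLICATION);
      this.sleepms = sleepms;
    }

    @Override
    public void run() {
      try {
        sleep(sleepms);
        for (int i = 0; running; i++) {
          out.write(i); // write the byte index so the verification loop can check it
          out.hflush();
          sleep(sleepms);
        }
      } catch (InterruptedException e) {
        // interruptRunning() was called; stop writing
      } catch (IOException e) {
        throw new RuntimeException(filepath.toString(), e);
      }
    }

    void interruptRunning() {
      running = false;
      interrupt();
    }

    void joinAndClose() throws InterruptedException, IOException {
      join();
      out.close();
    }

    void checkReplication() throws IOException {
      // assumption: the current write pipeline should span REPLICATION datanodes
      Assert.assertEquals(REPLICATION, out.getCurrentBlockReplication());
    }
  }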
Example #9
  /**
   * Test the case where a replica is reported corrupt while it is not in the blocksMap. Make sure
   * that ArrayIndexOutOfBoundsException is not thrown. See HADOOP-4351.
   *
   * <p>TODO HOPS This test fails as it tries to remove a non-existing replica. Calling
   * findAndMarkBlockAsCorrupt from a DataNode that does not store any replica for this specific
   * block will lead to a 'tuple did not exist' exception. The reason for this is that
   * BlockManager.removeStoredBlock is called with a node that does not store a replica and hence
   * the delete will not be able to succeed during commit.
   */
  @Test
  public void testArrayOutOfBoundsException() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      final Path FILE_PATH = new Path("/tmp.txt");
      final long FILE_LEN = 1L;
      DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);

      // get the block
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      File storageDir = cluster.getInstanceStorageDir(0, 0);
      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      assertTrue("Data directory does not exist", dataDir.exists());
      ExtendedBlock blk = getBlock(bpid, dataDir);
      if (blk == null) {
        storageDir = cluster.getInstanceStorageDir(0, 1);
        dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        blk = getBlock(bpid, dataDir);
      }
      assertFalse(
          "Data directory does not contain any blocks or there was an " + "IO error", blk == null);

      // start a third datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);

      // report corrupted block by the third datanode
      DatanodeRegistration dnR =
          DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());

      // Get the storage id of one of the storages on the datanode
      String storageId =
          cluster
              .getNamesystem()
              .getBlockManager()
              .getDatanodeManager()
              .getDatanode(dataNode.getDatanodeId())
              .getStorageInfos()[0]
              .getStorageID();

      cluster
          .getNamesystem()
          .getBlockManager()
          .findAndMarkBlockAsCorrupt(blk, new DatanodeInfo(dnR), storageId, "some test reason");

      // open the file
      fs.open(FILE_PATH);

      // clean up
      fs.delete(FILE_PATH, false);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }