private void corruptFiles(
      Path dirPath,
      long[] crcs,
      Integer[] corruptBlockIdxs,
      DistributedFileSystem dfs,
      Path[] files,
      int[] numCorruptBlocksInFiles)
      throws IOException {
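    // Record how many files are already corrupt so the assertion below
    // counts only newly corrupted files.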
    int totalCorruptFiles = DFSUtil.getCorruptFiles(dfs).length;
    TestDirectoryRaidDfs.corruptBlocksInDirectory(
        conf, dirPath, crcs, corruptBlockIdxs, fileSys, dfsCluster, false, true);

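    // Every file that gained at least one corrupt block should now appear
    // in the corrupt-files report.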
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    for (int i = 0; i < numCorruptBlocksInFiles.length; i++) {
      if (numCorruptBlocksInFiles[i] > 0) {
        totalCorruptFiles++;
      }
    }
    assertEquals("files not corrupted", totalCorruptFiles, corruptFiles.length);
    for (int i = 0; i < fileSizes.length; i++) {
      assertEquals(
          "wrong number of corrupt blocks for file " + files[i],
          numCorruptBlocksInFiles[i],
          RaidDFSUtil.corruptBlocksInFile(dfs, files[i].toUri().getPath(), 0, fileSizes[i]).size());
    }
  }
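
  /**
   * Tests that newly corrupted blocks trigger additional reconstruction jobs: after an initial
   * job has been submitted for two corrupt blocks, corrupting two more blocks should cause at
   * least one more job to be submitted.
   */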
  public void testMultiplePriorities() throws Exception {
    long[] crcs = new long[3];
    int[] seeds = new int[3];
    Path dirPath = new Path("/home/test");
    int stripeLength = 3;
    short repl = 1;
    mySetup(stripeLength);
    Codec codec = Codec.getCodec("rs");
    LOG.info("Starting testMultiplePriorities");
    try {
      // Create test files in the directory and raid it.
      Path[] files =
          TestRaidDfs.createTestFiles(
              dirPath, fileSizes, blockSizes, crcs, seeds, fileSys, (short) 1);
      FileStatus stat = fileSys.getFileStatus(dirPath);
      RaidNode.doRaid(
          conf,
          stat,
          new Path(codec.parityDirectory),
          codec,
          new RaidNode.Statistics(),
          RaidUtils.NULL_PROGRESSABLE,
          false,
          repl,
          repl);

      Integer[] corruptBlockIdxs = new Integer[] {0, 2};
      LOG.info("Corrupt block " + corruptBlockIdxs + " of directory " + dirPath);
      TestDirectoryRaidDfs.corruptBlocksInDirectory(
          conf, dirPath, crcs, corruptBlockIdxs, fileSys, dfsCluster, false, true);

      // Create Block Fixer and fix.
      FakeDistBlockIntegrityMonitor distBlockFixer = new FakeDistBlockIntegrityMonitor(conf);
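      // The fake monitor records the reconstruction jobs it submits,
      // so the test can count submissions.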
      assertEquals(0, distBlockFixer.submittedJobs.size());
      // Wait up to two minutes for at least one job to be submitted.
      long startTime = System.currentTimeMillis();
      while (System.currentTimeMillis() - startTime < 120000
          && distBlockFixer.submittedJobs.size() == 0) {
        distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
        LOG.info("Waiting for jobs to submit");
        Thread.sleep(10000);
      }
      int submittedJob = distBlockFixer.submittedJobs.size();
      LOG.info("Already submitted " + submittedJob + " jobs");
      assertTrue("Should submit at least one job", submittedJob >= 1);

      // Corrupt two more blocks
      corruptBlockIdxs = new Integer[] {4, 5};
      LOG.info("Corrupt block " + corruptBlockIdxs + " of directory " + dirPath);
      TestDirectoryRaidDfs.corruptBlocksInDirectory(
          conf, dirPath, crcs, corruptBlockIdxs, fileSys, dfsCluster, false, true);

      // A new job should be submitted since two more blocks are now corrupt.
      startTime = System.currentTimeMillis();
      while (System.currentTimeMillis() - startTime < 120000
          && distBlockFixer.submittedJobs.size() == submittedJob) {
        distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
        LOG.info("Waiting for more jobs to submit");
        Thread.sleep(10000);
      }
      LOG.info("Already Submitted " + distBlockFixer.submittedJobs.size() + " jobs");
      assertTrue(
          "should submit more than 1 jobs",
          distBlockFixer.submittedJobs.size() - submittedJob >= 1);
    } finally {
      myTearDown();
    }
  }

  /**
   * Tests the integrity of a regenerated block. Creates test files and deletes one block
   * entirely, then waits for the block to be regenerated. Next stops the RaidNode and corrupts
   * the regenerated block, verifying that clients detect the corruption.
   */
  private void generatedBlockTestCommon(String testName, int blockToCorrupt, boolean local)
      throws Exception {
    LOG.info("Test " + testName + " started.");
    int stripeLength = 3;
    mySetup(stripeLength);
    long[] crcs = new long[3];
    int[] seeds = new int[3];
    Path dirPath = new Path("/user/dhruba/raidtest");
    Path[] files =
        TestRaidDfs.createTestFiles(
            dirPath, fileSizes, blockSizes, crcs, seeds, fileSys, (short) 1);
    Path destPath = new Path("/destraid/user/dhruba");
    LOG.info("Test " + testName + " created test files");
    Configuration localConf = this.getRaidNodeConfig(conf, local);

    try {
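      // Start a RaidNode and wait for the directory to be raided, then stop it
      // so the corruption introduced below is not repaired immediately.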
      cnode = RaidNode.createRaidNode(null, localConf);
      TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
      cnode.stop();
      cnode.join();

      DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
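      // Sanity check: nothing is corrupt yet and no files have been fixed.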
      String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals("no corrupt files expected", 0, corruptFiles.length);
      assertEquals(
          "filesFixed() should return 0 before fixing files",
          0,
          cnode.blockIntegrityMonitor.getNumFilesFixed());

      Integer[] corruptBlockIdxs = new Integer[] {blockToCorrupt};
      TestDirectoryRaidDfs.corruptBlocksInDirectory(
          conf, dirPath, crcs, corruptBlockIdxs, fileSys, dfsCluster, false, true);

      corruptFiles = DFSUtil.getCorruptFiles(dfs);
      assertEquals("files not corrupted", corruptBlockIdxs.length, corruptFiles.length);
      int corruptFileIdx = -1;
      for (int i = 0; i < files.length; i++) {
        if (files[i].toUri().getPath().equals(corruptFiles[0])) {
          corruptFileIdx = i;
          break;
        }
      }
      assertTrue("Wrong corrupt file", corruptFileIdx != -1);
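      // Restart the RaidNode; its BlockIntegrityMonitor should detect and fix the corrupt file.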
      cnode = RaidNode.createRaidNode(null, localConf);
      long start = System.currentTimeMillis();
      while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1
          && System.currentTimeMillis() - start < 120000) {
        LOG.info("Test testDirBlockFix waiting for files to be fixed.");
        Thread.sleep(1000);
      }
      TestBlockFixer.verifyMetrics(fileSys, cnode, local, 1L, corruptBlockIdxs.length);
      // Stop RaidNode
      cnode.stop();
      cnode.join();
      cnode = null;

      // The block has successfully been reconstructed.
      dfs = getDFS(conf, dfs);
      assertTrue(
          "file not fixed",
          TestRaidDfs.validateFile(
              dfs, files[corruptFileIdx], fileSizes[corruptFileIdx], crcs[corruptFileIdx]));

      // Now corrupt the generated block.
      TestDirectoryRaidDfs.corruptBlocksInDirectory(
          conf, dirPath, crcs, corruptBlockIdxs, dfs, dfsCluster, false, false);
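      // Reading the file should now fail, since the regenerated block is corrupt again.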
      try {
        TestRaidDfs.validateFile(
            dfs, files[corruptFileIdx], fileSizes[corruptFileIdx], crcs[corruptFileIdx]);
        fail("Expected exception not thrown");
      } catch (org.apache.hadoop.fs.ChecksumException ce) {
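        // Expected: the client detected a checksum mismatch in the corrupted block.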
      } catch (org.apache.hadoop.fs.BlockMissingException bme) {
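        // Expected: the corrupted block was reported as missing.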
      }
    } catch (Exception e) {
      LOG.info("Test " + testName + " Exception " + e, e);
      throw e;
    } finally {
      myTearDown();
    }
    LOG.info("Test " + testName + " completed.");
  }