  /**
   * Check whether all replicas of the given block of the given file are reported corrupt.
   */
  public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster, Path file, int blockNo)
      throws IOException {
    DFSClient client =
        new DFSClient(
            new InetSocketAddress("localhost", cluster.getNameNodePort()),
            cluster.getConfiguration(0));
    LocatedBlocks blocks;
    try {
      blocks = client.getNamenode().getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
    } finally {
      client.close();
    }
    return blocks.get(blockNo).isCorrupt();
  }
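  /**
   * A minimal polling sketch, not part of the original suite: wait until all replicas of the
   * given block are reported corrupt, or give up after the timeout. It only reuses
   * allBlockReplicasCorrupt() above; the once-a-second retry cadence is an assumption borrowed
   * from the polling loops elsewhere in this class.
   */
  public static boolean waitForAllReplicasCorrupt(
      MiniDFSCluster cluster, Path file, int blockNo, long timeoutMillis)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      if (allBlockReplicasCorrupt(cluster, file, blockNo)) {
        return true; // every replica of this block is now marked corrupt
      }
      Thread.sleep(1000); // re-check once a second
    }
    return false; // timed out before all replicas were reported corrupt
  }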
  /**
   * Regression test for HDFS-894: ensures that, when a datanode is restarted, its new IPC port
   * is registered with the namenode.
   */
  @Test
  public void testChangeIpcPort() throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    DFSClient client = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
      client = new DFSClient(addr, conf);

      // Restart datanodes
      cluster.restartDataNodes();

      // Wait until we get a heartbeat from the restarted datanode
      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
      long firstUpdateAfterRestart = report[0].getLastUpdate();

      boolean gotHeartbeat = false;
      for (int i = 0; i < 10 && !gotHeartbeat; i++) {
        try {
          Thread.sleep(i * 1000);
        } catch (InterruptedException ie) {
          // ignore and poll again
        }

        report = client.datanodeReport(DatanodeReportType.ALL);
        gotHeartbeat = (report[0].getLastUpdate() > firstUpdateAfterRestart);
      }
      if (!gotHeartbeat) {
        fail("Never got a heartbeat from restarted datanode.");
      }

      int realIpcPort = cluster.getDataNodes().get(0).getIpcPort();
      // Now make sure the reported IPC port is the correct one.
      assertEquals(realIpcPort, report[0].getIpcPort());
    } finally {
      if (client != null) {
        client.close();
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
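  /**
   * A hedged sketch, not in the original test: the heartbeat-polling loop above factored into
   * a reusable helper. Waits until the first datanode's lastUpdate advances past the given
   * timestamp, retrying up to ten times with a linear backoff; the bound and backoff mirror
   * the loop in testChangeIpcPort.
   */
  private static boolean waitForFreshHeartbeat(DFSClient client, long after) throws IOException {
    for (int i = 0; i < 10; i++) {
      try {
        Thread.sleep(i * 1000); // first pass checks immediately; later passes back off
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt(); // preserve interrupt status and stop waiting
        return false;
      }
      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
      if (report[0].getLastUpdate() > after) {
        return true; // got a heartbeat newer than the recorded timestamp
      }
    }
    return false;
  }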
  /**
   * Test that file leases are persisted across namenode restarts. This test is currently
   * disabled (note the xxx prefix) because more HDFS work is needed to handle persistent
   * leases.
   */
  public void xxxtestFileCreationNamenodeRestart() throws IOException {
    Configuration conf = new Configuration();
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = null;
    try {
      cluster.waitActive();
      fs = cluster.getFileSystem();
      final int nnport = cluster.getNameNodePort();

      // create a new file.
      Path file1 = new Path("/filestatus.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);
      System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);

      // write all blocks except the last half block, so the sync below lands mid-block.
      int remainingPiece = blockSize / 2;
      int blocksMinusPiece = numBlocks * blockSize - remainingPiece;
      writeFile(stm, blocksMinusPiece);
      stm.sync();
      int actualRepl =
          ((DFSClient.DFSOutputStream) (stm.getWrappedStream())).getNumCurrentReplicas();
      // if we sync on a block boundary, actualRepl will be 0
      assertTrue(
          file1 + " should be replicated to 1 datanode, not " + actualRepl, actualRepl == 1);
      writeFile(stm, remainingPiece);
      stm.sync();

      // rename the file while keeping it open.
      Path fileRenamed = new Path("/filestatusRenamed.dat");
      fs.rename(file1, fileRenamed);
      System.out.println(
          "testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to " + fileRenamed);
      file1 = fileRenamed;

      // create another new file.
      //
      Path file2 = new Path("/filestatus2.dat");
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
      System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);

      // create yet another new file with full path name.
      // rename it while open
      //
      Path file3 = new Path("/user/home/fullpath.dat");
      FSDataOutputStream stm3 = createFile(fs, file3, 1);
      System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
      Path file4 = new Path("/user/home/fullpath4.dat");
      FSDataOutputStream stm4 = createFile(fs, file4, 1);
      System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);

      fs.mkdirs(new Path("/bin"));
      fs.rename(new Path("/user/home"), new Path("/bin"));
      Path file3new = new Path("/bin/home/fullpath.dat");
      System.out.println(
          "testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to " + file3new);
      Path file4new = new Path("/bin/home/fullpath4.dat");
      System.out.println(
          "testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to " + file4new);

      // restart cluster with the same namenode port as before.
      // This ensures that leases are persisted in fsimage. Sleep past
      // ipc.client.connection.maxidletime so cached client connections to the old
      // namenode expire before the port is reused.
      cluster.shutdown();
      try {
        Thread.sleep(2 * MAX_IDLE_TIME);
      } catch (InterruptedException e) {
        // ignore; proceed with the restart
      }
      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, null, null, null);
      cluster.waitActive();

      // restart cluster yet again. This triggers the code to read in
      // persistent leases from fsimage.
      cluster.shutdown();
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
        // ignore; proceed with the restart
      }
      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, null, null, null);
      cluster.waitActive();
      fs = cluster.getFileSystem();

      // instruct the dfsclient to use a new filename when it requests
      // new blocks for files that were renamed.
      DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream) (stm.getWrappedStream());
      dfstream.setTestFilename(file1.toString());
      dfstream = (DFSClient.DFSOutputStream) (stm3.getWrappedStream());
      dfstream.setTestFilename(file3new.toString());
      dfstream = (DFSClient.DFSOutputStream) (stm4.getWrappedStream());
      dfstream.setTestFilename(file4new.toString());

      // write 1 byte to file.  This should succeed because the
      // namenode should have persisted leases.
      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
      stm.write(buffer);
      stm.close();
      stm2.write(buffer);
      stm2.close();
      stm3.close();
      stm4.close();

      // verify that the new block is associated with this file
      DFSClient client = ((DistributedFileSystem) fs).dfs;
      LocatedBlocks locations =
          client.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertEquals(
          "Blocks were not cleaned up for file " + file1, 3, locations.locatedBlockCount());

      // verify filestatus2.dat
      locations = client.namenode.getBlockLocations(file2.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertEquals(
          "Blocks were not cleaned up for file " + file2, 1, locations.locatedBlockCount());
    } finally {
      IOUtils.closeStream(fs);
      cluster.shutdown();
    }
  }
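  /**
   * Sketch of a small utility, hypothetical and not part of the original suite, for the
   * sleep-and-swallow-InterruptedException pattern repeated throughout these tests. Unlike the
   * inline catches above, it restores the thread's interrupt flag instead of dropping it.
   */
  private static void sleepQuietly(long millis) {
    try {
      Thread.sleep(millis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve interrupt status for callers
    }
  }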
  /** Test that file data does not become corrupted even in the face of errors. */
  public void testFileCreationError1() throws IOException {
    Configuration conf = new Configuration();
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);

    try {

      // create a new file.
      //
      Path file1 = new Path("/filestatus.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);

      // verify that file exists in FS namespace
      assertTrue(file1 + " should be a file", !fs.getFileStatus(file1).isDir());
      System.out.println("Path : \"" + file1 + "\"");

      // kill the datanode
      cluster.shutdownDataNodes();

      // wait for the datanode to be declared dead
      while (true) {
        DatanodeInfo[] info = client.datanodeReport(FSConstants.DatanodeReportType.LIVE);
        if (info.length == 0) {
          break;
        }
        System.out.println("testFileCreationError1: waiting for datanode " + " to die.");
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          // ignore and poll again
        }
      }

      // write 1 byte to file.
      // This should fail because all datanodes are dead.
      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
      try {
        stm.write(buffer);
        stm.close();
      } catch (Exception e) {
        System.out.println("Encountered expected exception");
      }

      // verify that no blocks are associated with this file;
      // bad block allocations were cleaned up earlier.
      LocatedBlocks locations =
          client.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertEquals("Bad blocks were not cleaned up", 0, locations.locatedBlockCount());
    } finally {
      client.close();
      cluster.shutdown();
    }
  }
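  /**
   * Hypothetical helper, not in the original suite, sketching the block-count checks used in
   * these tests: fetch a file's block locations from the namenode and assert the expected
   * count. It assumes DFSClient#getNamenode(), which is already used elsewhere in this class.
   */
  private static void assertBlockCount(DFSClient client, Path file, int expected)
      throws IOException {
    LocatedBlocks locations =
        client.getNamenode().getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
    assertEquals("unexpected block count for " + file, expected, locations.locatedBlockCount());
  }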
  /**
   * Test that copy-on-write for blocks works correctly.
   *
   * @throws IOException if an I/O error occurs
   */
  @Test
  public void testCopyOnWrite() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try {

      // create a new file, write to it and close it.
      //
      Path file1 = new Path("/filestatus.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      writeFile(stm);
      stm.close();

      // Get a handle to the datanode
      DataNode[] dn = cluster.listDataNodes();
      assertTrue("There should be only one datanode but found " + dn.length, dn.length == 1);

      LocatedBlocks locations =
          client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i = i + 2) {
        ExtendedBlock b = blocks.get(i).getBlock();
        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f + " to " + link);
        HardLink.createHardLink(f, link);
      }
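
      // With a hard link in place, each linked block file now has more than one directory
      // entry, so an in-place write could corrupt the linked copy; unlinkBlock() below is
      // expected to break the link by copying the block into a fresh file, which is the
      // copy-on-write behavior this test verifies.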

      //
      // Detach all blocks. This should remove hardlinks (if any)
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue(
            "Detaching block " + b + " should have returned true", dataset.unlinkBlock(b, 1));
      }

      // Since the blocks were already detached earlier, these calls should
      // return false
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue(
            "Detaching block " + b + " should have returned false", !dataset.unlinkBlock(b, 1));
      }

    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
  /** Get a DFSClient connected to this cluster's namenode. The caller must close it. */
  public DFSClient getDFSClient() throws IOException {
    InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    return new DFSClient(nnAddr, conf);
  }
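  // A minimal usage sketch for getDFSClient(), mirroring the try/finally pattern in the tests
  // above (the caller owns the client):
  //
  //   DFSClient client = getDFSClient();
  //   try {
  //     DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
  //     // ... inspect the report ...
  //   } finally {
  //     client.close();
  //   }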
  /** Tests mod & access time in DFS. */
  public void testTimes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    final int nnport = cluster.getNameNodePort();
    InetSocketAddress addr = new InetSocketAddress("localhost", nnport);
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = 1;
    assertTrue(fileSys instanceof DistributedFileSystem);

    try {
      //
      // create file and record atime/mtime
      //
      System.out.println("Creating testdir1 and testdir1/test1.dat.");
      Path dir1 = new Path("testdir1");
      Path file1 = new Path(dir1, "test1.dat");
      FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
      FileStatus stat = fileSys.getFileStatus(file1);
      long atimeBeforeClose = stat.getAccessTime();
      String adate = dateForm.format(new Date(atimeBeforeClose));
      System.out.println(
          "atime on " + file1 + " before close is " + adate + " (" + atimeBeforeClose + ")");
      assertTrue(atimeBeforeClose != 0);
      stm.close();

      stat = fileSys.getFileStatus(file1);
      long atime1 = stat.getAccessTime();
      long mtime1 = stat.getModificationTime();
      adate = dateForm.format(new Date(atime1));
      String mdate = dateForm.format(new Date(mtime1));
      System.out.println("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
      System.out.println("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
      assertTrue(atime1 != 0);

      //
      // record dir times; a directory's access time is not maintained, so it should be zero
      //
      stat = fileSys.getFileStatus(dir1);
      long adir1 = stat.getAccessTime();
      assertTrue(adir1 == 0);

      // set the access time to be one day in the past
      long atime2 = atime1 - (24L * 3600L * 1000L);
      fileSys.setTimes(file1, -1, atime2);

      // check new access time on file
      stat = fileSys.getFileStatus(file1);
      long atime3 = stat.getAccessTime();
      String adate3 = dateForm.format(new Date(atime3));
      System.out.println("new atime on " + file1 + " is " + adate3 + " (" + atime3 + ")");
      assertTrue(atime2 == atime3);
      assertTrue(mtime1 == stat.getModificationTime());

      // set the modification time to be 1 hour in the past
      long mtime2 = mtime1 - (3600L * 1000L);
      fileSys.setTimes(file1, mtime2, -1);

      // check new modification time on file
      stat = fileSys.getFileStatus(file1);
      long mtime3 = stat.getModificationTime();
      String mdate3 = dateForm.format(new Date(mtime3));
      System.out.println("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3 + ")");
      assertTrue(atime2 == stat.getAccessTime());
      assertTrue(mtime2 == mtime3);

      long mtime4 = System.currentTimeMillis() - (3600L * 1000L);
      long atime4 = System.currentTimeMillis();
      fileSys.setTimes(dir1, mtime4, atime4);
      // check new modification and access times on the directory
      stat = fileSys.getFileStatus(dir1);
      assertTrue("Not matching the modification times", mtime4 == stat.getModificationTime());
      assertTrue("Not matching the access times", atime4 == stat.getAccessTime());

      Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
      try {
        fileSys.setTimes(nonExistingDir, mtime4, atime4);
        fail("Expecting FileNotFoundException");
      } catch (FileNotFoundException e) {
        assertTrue(
            e.getMessage()
                .contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
      }
      // shutdown cluster and restart
      cluster.shutdown();
      try {
        Thread.sleep(2 * MAX_IDLE_TIME);
      } catch (InterruptedException e) {
        // ignore; proceed with the restart
      }
      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
      cluster.waitActive();
      fileSys = cluster.getFileSystem();

      // verify that access times and modification times persist after a
      // cluster restart.
      System.out.println("Verifying times after cluster restart");
      stat = fileSys.getFileStatus(file1);
      assertTrue(atime2 == stat.getAccessTime());
      assertTrue(mtime3 == stat.getModificationTime());

      cleanupFile(fileSys, file1);
      cleanupFile(fileSys, dir1);
    } catch (IOException e) {
      info = client.datanodeReport(DatanodeReportType.ALL);
      printDatanodeReport(info);
      throw e;
    } finally {
      client.close();
      fileSys.close();
      cluster.shutdown();
    }
  }
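  /**
   * Sketch of a logging helper, hypothetical and not part of the original tests, for the
   * repeated "formatted date plus raw millis" printouts in testTimes and testTimesAtClose.
   * It assumes the dateForm formatter declared on this class.
   */
  private void logTime(String label, long millis) {
    System.out.println(label + " is " + dateForm.format(new Date(millis)) + " (" + millis + ")");
  }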
  /** Tests that a file's modification time is updated when it is closed in DFS. */
  public void testTimesAtClose() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000; // 2s
    int replicas = 1;

    // parameter initialization
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    assertTrue(fileSys instanceof DistributedFileSystem);

    try {
      // create a new file and write to it
      Path file1 = new Path("/simple.dat");
      FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
      System.out.println("Created and wrote file simple.dat");
      FileStatus statBeforeClose = fileSys.getFileStatus(file1);
      long mtimeBeforeClose = statBeforeClose.getModificationTime();
      String mdateBeforeClose = dateForm.format(new Date(mtimeBeforeClose));
      System.out.println(
          "mtime on " + file1 + " before close is " + mdateBeforeClose
              + " (" + mtimeBeforeClose + ")");
      assertTrue(mtimeBeforeClose != 0);

      // close file after writing
      stm.close();
      System.out.println("Closed file.");
      FileStatus statAfterClose = fileSys.getFileStatus(file1);
      long mtimeAfterClose = statAfterClose.getModificationTime();
      String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
      System.out.println(
          "mtime on " + file1 + " after close is " + mdateAfterClose
              + " (" + mtimeAfterClose + ")");
      assertTrue(mtimeAfterClose != 0);
      assertTrue(mtimeBeforeClose != mtimeAfterClose);

      cleanupFile(fileSys, file1);
    } catch (IOException e) {
      info = client.datanodeReport(DatanodeReportType.ALL);
      printDatanodeReport(info);
      throw e;
    } finally {
      client.close();
      fileSys.close();
      cluster.shutdown();
    }
  }
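  /**
   * Hedged sketch, not in the original suite, of the restart idiom in testTimes: after
   * cluster.shutdown(), wait for cached client IPC connections to go idle, then rebuild on the
   * same namenode port without reformatting so on-disk state (times, leases) persists. The
   * settleMillis parameter is an assumption standing in for 2 * MAX_IDLE_TIME.
   */
  private static MiniDFSCluster restartOnSamePort(
      Configuration conf, int nnport, long settleMillis) throws IOException {
    try {
      Thread.sleep(settleMillis); // let cached client connections to the old namenode expire
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve interrupt status and continue
    }
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    return cluster;
  }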