/**
   * Test that an append with no locations fails with an exception showing insufficient locations.
   */
  @Test(timeout = 60000)
  public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();

    // Lower the heartbeat interval so dead datanodes are recognized quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
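    // Shorten the client socket timeout, presumably so a failing append gives
    // up quickly rather than stalling the test.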
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
      // create a file with replication 2
      fileSystem = cluster.getFileSystem();
      Path f = new Path("/testAppend");
      FSDataOutputStream create = fileSystem.create(f, (short) 2);
      create.write("/testAppend".getBytes());
      create.close();

      // Wait until the file reaches its target replication of 2
      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

      // Shut down all DNs that have the last block location for the file
      LocatedBlocks lbs =
          fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
      List<DataNode> dnsOfCluster = cluster.getDataNodes();
      DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
      for (DataNode dn : dnsOfCluster) {
        for (DatanodeInfo loc : dnsWithLocations) {
          if (dn.getDatanodeId().equals(loc)) {
            dn.shutdown();
            DFSTestUtil.waitForDatanodeDeath(dn);
          }
        }
      }

      // Wait till 0 replication is recognized
      DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

      // Append to the file. At this point there are 2 live DNs, but neither of
      // them has the block.
      try {
        fileSystem.append(f);
        fail("Append should fail because insufficient locations");
      } catch (IOException e) {
        LOG.info("Expected exception: ", e);
      }
      FSDirectory dir = cluster.getNamesystem().getFSDirectory();
      final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
      assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }
      cluster.shutdown();
    }
  }
  /**
   * Returns the index of the first datanode which has a copy of the given block, or -1 if no such
   * datanode exists.
   */
  public static int firstDnWithBlock(MiniDFSCluster cluster, ExtendedBlock b) throws IOException {
    int numDatanodes = cluster.getDataNodes().size();
    for (int i = 0; i < numDatanodes; i++) {
      String blockContent = cluster.readBlockOnDataNode(i, b);
      if (blockContent != null) {
        return i;
      }
    }
    return -1;
  }
  /**
   * Regression test for HDFS-894: ensures that, when datanodes are restarted, the new IPC port is
   * registered with the namenode.
   */
  @Test
  public void testChangeIpcPort() throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    DFSClient client = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
      client = new DFSClient(addr, conf);

      // Restart datanodes
      cluster.restartDataNodes();

      // Wait until we get a heartbeat from the new datanode
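      // The cluster was built with the default single datanode, so report[0]
      // refers to that (restarted) datanode.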
      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
      long firstUpdateAfterRestart = report[0].getLastUpdate();

      boolean gotHeartbeat = false;
      for (int i = 0; i < 10 && !gotHeartbeat; i++) {
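        // Linear backoff: sleep 0s, 1s, 2s, ... between report polls.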
        try {
          Thread.sleep(i * 1000);
        } catch (InterruptedException ie) {
          // Ignore the interrupt and poll again.
        }

        report = client.datanodeReport(DatanodeReportType.ALL);
        gotHeartbeat = (report[0].getLastUpdate() > firstUpdateAfterRestart);
      }
      if (!gotHeartbeat) {
        fail("Never got a heartbeat from restarted datanode.");
      }

      int realIpcPort = cluster.getDataNodes().get(0).getIpcPort();
      // Now make sure the reported IPC port is the correct one.
      assertEquals(realIpcPort, report[0].getIpcPort());
    } finally {
      if (client != null) {
        client.close();
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  private void testDataNodeRedirect(Path path) throws IOException {
    // Create the file
    if (hdfs.exists(path)) {
      hdfs.delete(path, true);
    }
    FSDataOutputStream out = hdfs.create(path, (short) 1);
    out.writeBytes("0123456789");
    out.close();

    // Get the path's block location so we can determine
    // if we were redirected to the right DN.
    FileStatus status = hdfs.getFileStatus(path);
    BlockLocation[] locations = hdfs.getFileBlockLocations(status, 0, 10);
    String locationName = locations[0].getNames()[0];

    // Connect to the NN to get redirected
    URL u =
        hftpFs.getNamenodeURL(
            "/data" + ServletUtil.encodePath(path.toUri().getPath()), "ugi=userx,groupy");
    HttpURLConnection conn = (HttpURLConnection) u.openConnection();
    conn.setInstanceFollowRedirects(true);
    conn.connect();
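    // Reading the response forces the redirect to be followed, so
    // conn.getURL() below reflects the final (datanode) URL.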
    conn.getInputStream();

    boolean checked = false;
    // Find the datanode that has the block according to locations
    // and check that the URL was redirected to this DN's info port
    for (DataNode node : cluster.getDataNodes()) {
      DatanodeRegistration dnR = node.dnRegistration;
      if (dnR.getName().equals(locationName)) {
        checked = true;
        assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
      }
    }
    assertTrue(
        "The test never verified that the block location and the hftp destination are the same",
        checked);
  }
  /** Make sure that the quota is decremented correctly when a block is abandoned. */
  public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
    FileSystem fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    try {
      // Set the diskspace quota to 3MB; QUOTA_DONT_SET leaves the namespace
      // quota unchanged.
      dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

      // Start writing a file with 2 replicas to ensure each datanode has one.
      // Block Size is 1MB.
      String src = FILE_NAME_PREFIX + "test_quota1";
      FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
      for (int i = 0; i < 1024; i++) {
        fout.writeByte(123);
      }
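      // Only 1KB of the 1MB block has been written at this point.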

      // Shutdown one datanode, causing the block abandonment.
      cluster.getDataNodes().get(0).shutdown();

      // Close the file; a new block will be allocated with 2MB of pending size.
      try {
        fout.close();
      } catch (QuotaExceededException e) {
        fail("Unexpected quota exception when closing fout");
      }
    } finally {
      try {
        fs.close();
      } catch (Exception e) {
        // Ignore; best-effort cleanup.
      }
      try {
        cluster.shutdown();
      } catch (Exception e) {
        // Ignore; best-effort cleanup.
      }
    }
  }
  /** Test that file data becomes available before file is closed. */
  public void testFileCreation() throws IOException {
    Configuration conf = new Configuration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    try {

      //
      // check that / exists
      //
      Path path = new Path("/");
      System.out.println("Path : \"" + path.toString() + "\"");
      System.out.println(fs.getFileStatus(path).isDir());
      assertTrue("/ should be a directory", fs.getFileStatus(path).isDir() == true);

      //
      // Create a directory inside /, then try to overwrite it
      //
      Path dir1 = new Path("/test_dir");
      fs.mkdirs(dir1);
      System.out.println(
          "createFile: Creating " + dir1.getName() + " for overwrite of existing directory.");
      try {
        fs.create(dir1, true); // Create path, overwrite=true
        fail("Did not prevent directory from being overwritten.");
      } catch (IOException ie) {
        if (!ie.getMessage().contains("already exists as a directory.")) {
          throw ie;
        }
      }

      //
      // create a new file in the home directory. Do not close it.
      //
      Path file1 = new Path("filestatus.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);

      // verify that file exists in FS namespace
      assertTrue(file1 + " should be a file", fs.getFileStatus(file1).isDir() == false);
      System.out.println("Path : \"" + file1 + "\"");

      // write to file
      writeFile(stm);

      // Make sure a client can read it before it is closed.
      checkFile(fs, file1, 1);

      // verify that file size has changed
      long len = fs.getFileStatus(file1).getLen();
      assertEquals(file1 + " has wrong length", numBlocks * blockSize, len);

      stm.close();

      // verify that file size has changed to the full size
      len = fs.getFileStatus(file1).getLen();
      assertEquals(file1 + " has wrong length after close", fileSize, len);

      // Check storage usage
      // can't check capacities for real storage since the OS file system may be changing under us.
      if (simulatedStorage) {
        DataNode dn = cluster.getDataNodes().get(0);
        assertEquals(fileSize, dn.getFSDataset().getDfsUsed());
        assertEquals(
            SimulatedFSDataset.DEFAULT_CAPACITY - fileSize, dn.getFSDataset().getRemaining());
      }
    } finally {
      cluster.shutdown();
    }
  }
  /**
   * Test the case where a replica is reported corrupt while it is not in the blocksMap. Make sure
   * that an ArrayIndexOutOfBoundsException is not thrown. See HADOOP-4351.
   *
   * <p>TODO HOPS: This test fails because it tries to remove a non-existing replica. Calling
   * findAndMarkBlockAsCorrupt from a DataNode that does not store any replica for this specific
   * block leads to a "tuple did not exist" exception: BlockManager.removeStoredBlock is called
   * with a node that does not store a replica, so the delete cannot succeed during commit.
   */
  @Test
  public void testArrayOutOfBoundsException() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      final Path FILE_PATH = new Path("/tmp.txt");
      final long FILE_LEN = 1L;
      DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);

      // get the block
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      File storageDir = cluster.getInstanceStorageDir(0, 0);
      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      assertTrue("Data directory does not exist", dataDir.exists());
      ExtendedBlock blk = getBlock(bpid, dataDir);
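      // Each MiniDFSCluster datanode has two storage directories by default;
      // fall back to the second one if the first holds no block files.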
      if (blk == null) {
        storageDir = cluster.getInstanceStorageDir(0, 1);
        dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        blk = getBlock(bpid, dataDir);
      }
      assertNotNull("Data directory does not contain any blocks or there was an IO error", blk);

      // start a third datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);

      // Report the block as corrupt from the third datanode, which stores no
      // replica of it
      DatanodeRegistration dnR =
          DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());

      // Get the storage id of one of the storages on the datanode
      String storageId =
          cluster
              .getNamesystem()
              .getBlockManager()
              .getDatanodeManager()
              .getDatanode(dataNode.getDatanodeId())
              .getStorageInfos()[0]
              .getStorageID();

      cluster
          .getNamesystem()
          .getBlockManager()
          .findAndMarkBlockAsCorrupt(blk, new DatanodeInfo(dnR), storageId, "some test reason");

      // Open the file to make sure no ArrayIndexOutOfBoundsException is thrown (HADOOP-4351)
      fs.open(FILE_PATH);

      // clean up
      fs.delete(FILE_PATH, false);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }