/**
   * Test a simple flush on a simple HDFS file.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testSimpleFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {

      // create a new file.
      Path file1 = new Path("/simpleFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleFlush.dat");

      // write the first half of the file and flush it
      int mid = AppendTestUtil.FILE_SIZE / 2;
      stm.write(fileContents, 0, mid);
      stm.hflush();
      System.out.println("Wrote and flushed first part of file.");

      // write the remainder of the file
      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
      System.out.println("Wrote second part of file.");
      // flush twice; the second hflush has no new data to push
      stm.hflush();
      stm.hflush();
      System.out.println("Flushed second part of file.");

      // verify that full blocks are sane
      checkFile(fs, file1, 1);

      stm.close();
      System.out.println("Closed file.");

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");

    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
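  /**
   * checkFile(fs, file1, 1) above refers to a private helper that is not
   * included in these snippets. The sketch below is hypothetical, not the
   * actual helper: it re-reads the bytes visible so far and compares them
   * against the expected buffer (the repl argument is kept only to mirror
   * the checkFile signature and is unused here).
   */
  private void checkFileSketch(FileSystem fs, Path name, int repl) throws IOException {
    int len = (int) fs.getFileStatus(name).getLen();
    byte[] actual = new byte[len];
    FSDataInputStream in = fs.open(name);
    try {
      in.readFully(0, actual);
    } finally {
      in.close();
    }
    for (int i = 0; i < len; i++) {
      assertEquals("byte mismatch at offset " + i, fileContents[i], actual[i]);
    }
  }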
 /**
  * Appending to a non-existing file should throw a FileNotFoundException.
  *
  * @throws FileNotFoundException expected from the append call
  */
 @Test(expected = FileNotFoundException.class)
 public void testFileNotFound() throws IOException {
   Configuration conf = new HdfsConfiguration();
   if (simulatedStorage) {
     conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
   }
   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   FileSystem fs = cluster.getFileSystem();
   try {
     Path file1 = new Path("/nonexistingfile.dat");
     fs.append(file1);
   } finally {
     fs.close();
     cluster.shutdown();
   }
 }
  /**
   * Test that file data can be flushed repeatedly while a file is being written.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testComplexFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {

      // create a new file.
      Path file1 = new Path("/complexFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file complexFlush.dat");

      int start;
      for (start = 0; start + 29 < AppendTestUtil.FILE_SIZE; start += 29) {
        stm.write(fileContents, start, 29);
        stm.hflush();
      }
      stm.write(fileContents, start, AppendTestUtil.FILE_SIZE - start);

      // verify that full blocks are sane
      checkFile(fs, file1, 1);
      stm.close();

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
  /**
   * Create a file of the given size filled with random data.
   *
   * @param filepath path of the file to create
   * @param sizeKB size of the file in kilobytes
   * @return the random data written to the file
   */
  public byte[] writeFile(Path filepath, int sizeKB) throws IOException {
    FileSystem fs = cluster.getFileSystem();

    // Write a file with the specified amount of data
    DataOutputStream os = fs.create(filepath);
    byte data[] = new byte[1024 * sizeKB];
    new Random().nextBytes(data);
    os.write(data);
    os.close();
    return data;
  }
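  /**
   * Test that a client that does not hold the lease cannot abandon a block of
   * a file under construction.
   *
   * @throws IOException an exception might be thrown
   */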
  public void testAbandonBlock() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
    FileSystem fs = cluster.getFileSystem();

    String src = FILE_NAME_PREFIX + "foo";
    FSDataOutputStream fout = null;
    try {
      // start writing a file but do not close it
      fout = fs.create(new Path(src), true, 4096, (short) 1, 512L);
      for (int i = 0; i < 1024; i++) {
        fout.write(123);
      }
      fout.sync();

      // have another client look up the block and try to abandon it
      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
      LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
      LocatedBlock b = blocks.get(0);
      try {
        dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
        // the previous line should have thrown an exception
        fail("abandonBlock() by a client that does not hold the lease should have failed");
      } catch (IOException ioe) {
        LOG.info("GREAT! " + StringUtils.stringifyException(ioe));
      }
    } finally {
      try {
        fout.close();
      } catch (Exception e) {
      }
      try {
        fs.close();
      } catch (Exception e) {
      }
      try {
        cluster.shutdown();
      } catch (Exception e) {
      }
    }
  }
  /** Make sure that the quota is decremented correctly when a block is abandoned */
  public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
    FileSystem fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    try {
      // Setting diskspace quota to 3MB
      dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

      // Start writing a file with 2 replicas to ensure each datanode has one.
      // Block Size is 1MB.
      String src = FILE_NAME_PREFIX + "test_quota1";
      FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
      for (int i = 0; i < 1024; i++) {
        fout.writeByte(123);
      }

      // Shutdown one datanode, causing the block abandonment.
      cluster.getDataNodes().get(0).shutdown();

      // Close the file; a new block will be allocated with a 2MB pending size.
      try {
        fout.close();
      } catch (QuotaExceededException e) {
        fail("Unexpected quota exception when closing fout");
      }
    } finally {
      try {
        fs.close();
      } catch (Exception e) {
      }
      try {
        cluster.shutdown();
      } catch (Exception e) {
      }
    }
  }
  @BeforeClass
  public static void setUp() throws IOException {
    ((Log4JLogger) HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);

    final long seed = RAN.nextLong();
    System.out.println("seed=" + seed);
    RAN.setSeed(seed);

    config = new Configuration();
    config.set("slave.host.name", "localhost");

    cluster = new MiniDFSCluster(config, 2, true, null);
    hdfs = cluster.getFileSystem();
    final String hftpuri = "hftp://" + config.get("dfs.http.address");
    hftpFs = (HftpFileSystem) new Path(hftpuri).getFileSystem(config);
  }
  private void testDataNodeRedirect(Path path) throws IOException {
    // Create the file
    if (hdfs.exists(path)) {
      hdfs.delete(path, true);
    }
    FSDataOutputStream out = hdfs.create(path, (short) 1);
    out.writeBytes("0123456789");
    out.close();

    // Get the path's block location so we can determine
    // if we were redirected to the right DN.
    FileStatus status = hdfs.getFileStatus(path);
    BlockLocation[] locations = hdfs.getFileBlockLocations(status, 0, 10);
    String locationName = locations[0].getNames()[0];

    // Connect to the NN to get redirected
    URL u =
        hftpFs.getNamenodeURL(
            "/data" + ServletUtil.encodePath(path.toUri().getPath()), "ugi=userx,groupy");
    HttpURLConnection conn = (HttpURLConnection) u.openConnection();
    HttpURLConnection.setFollowRedirects(true);
    conn.connect();
    conn.getInputStream();

    boolean checked = false;
    // Find the datanode that has the block according to locations
    // and check that the URL was redirected to this DN's info port
    for (DataNode node : cluster.getDataNodes()) {
      DatanodeRegistration dnR = node.dnRegistration;
      if (dnR.getName().equals(locationName)) {
        checked = true;
        assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
      }
    }
    assertTrue(
        "The test never verified that the block location and the hftp destination are the same",
        checked);
  }
 @AfterClass
 public static void tearDown() throws IOException {
   hdfs.close();
   hftpFs.close();
   cluster.shutdown();
 }
  /**
   * Test that copy on write for blocks works correctly
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testCopyOnWrite() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try {

      // create a new file, write to it and close it.
      //
      Path file1 = new Path("/filestatus.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      writeFile(stm);
      stm.close();

      // Get a handle to the datanode
      DataNode[] dn = cluster.listDataNodes();
      assertTrue("There should be only one datanode but found " + dn.length, dn.length == 1);

      LocatedBlocks locations =
          client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i = i + 2) {
        ExtendedBlock b = blocks.get(i).getBlock();
        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f + " to " + link);
        HardLink.createHardLink(f, link);
      }

      //
      // Detach all blocks. This should remove hardlinks (if any)
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue(
            "Detaching block " + b + " should have returned true", dataset.unlinkBlock(b, 1));
      }

      // Since the blocks were already detached earlier, these calls should
      // return false
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue(
            "Detaching block " + b + " should have returned false", !dataset.unlinkBlock(b, 1));
      }

    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
 /** Get the DFSClient. */
 public DFSClient getDFSClient() throws IOException {
   InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
   return new DFSClient(nnAddr, conf);
 }
 /** Shut down the cluster. */
 public void shutdown() {
   if (cluster != null) {
     cluster.shutdown();
   }
 }
 /** Set up the cluster. */
 public BlockReaderTestUtil(int replicationFactor) throws Exception {
   conf = new HdfsConfiguration();
   conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replicationFactor);
   cluster = new MiniDFSCluster.Builder(conf).format(true).build();
   cluster.waitActive();
 }
 /** Get a DataNode that serves our testBlock. */
 public DataNode getDataNode(LocatedBlock testBlock) {
   DatanodeInfo[] nodes = testBlock.getLocations();
   int ipcport = nodes[0].ipcPort;
   return cluster.getDataNode(ipcport);
 }
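 /**
  * A minimal usage sketch for the helpers above. Assumptions: the writeFile
  * helper shown earlier belongs to this same utility, and a 1 KB file fits
  * in a single block; the method and path names here are hypothetical.
  */
 public void exampleUsageSketch() throws Exception {
   BlockReaderTestUtil util = new BlockReaderTestUtil(1);
   try {
     Path path = new Path("/blockReaderTest.dat"); // hypothetical test path
     util.writeFile(path, 1); // 1 KB of random data
     DFSClient client = util.getDFSClient();
     LocatedBlock testBlock =
         client.getNamenode().getBlockLocations(path.toString(), 0, Long.MAX_VALUE).get(0);
     // find the DataNode serving the block's first replica
     DataNode dn = util.getDataNode(testBlock);
     System.out.println("Block " + testBlock.getBlock() + " is served by " + dn);
   } finally {
     util.shutdown();
   }
 }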