/** Test deleteOnExit */ public void testDeleteOnExit() throws IOException { Configuration conf = new Configuration(); if (simulatedStorage) { conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); } MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null); FileSystem fs = cluster.getFileSystem(); FileSystem localfs = FileSystem.getLocal(conf); try { // Creates files in HDFS and local file system. // Path file1 = new Path("filestatus.dat"); Path file2 = new Path("filestatus2.dat"); Path file3 = new Path("filestatus3.dat"); FSDataOutputStream stm1 = createFile(fs, file1, 1); FSDataOutputStream stm2 = createFile(fs, file2, 1); FSDataOutputStream stm3 = createFile(localfs, file3, 1); System.out.println("DeleteOnExit: Created files."); // write to files and close. Purposely, do not close file2. writeFile(stm1); writeFile(stm3); stm1.close(); stm2.close(); stm3.close(); // set delete on exit flag on files. fs.deleteOnExit(file1); fs.deleteOnExit(file2); localfs.deleteOnExit(file3); // close the file system. This should make the above files // disappear. fs.close(); localfs.close(); fs = null; localfs = null; // reopen file system and verify that file does not exist. fs = cluster.getFileSystem(); localfs = FileSystem.getLocal(conf); assertTrue(file1 + " still exists inspite of deletOnExit set.", !fs.exists(file1)); assertTrue(file2 + " still exists inspite of deletOnExit set.", !fs.exists(file2)); assertTrue(file3 + " still exists inspite of deletOnExit set.", !localfs.exists(file3)); System.out.println("DeleteOnExit successful."); } finally { IOUtils.closeStream(fs); IOUtils.closeStream(localfs); cluster.shutdown(); } }
/** Test that all open files are closed when client dies abnormally. */ public void testDFSClientDeath() throws IOException { Configuration conf = new Configuration(); System.out.println("Testing adbornal client death."); if (simulatedStorage) { conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); } MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null); FileSystem fs = cluster.getFileSystem(); DistributedFileSystem dfs = (DistributedFileSystem) fs; DFSClient dfsclient = dfs.dfs; try { // create a new file in home directory. Do not close it. // Path file1 = new Path("/clienttest.dat"); FSDataOutputStream stm = createFile(fs, file1, 1); System.out.println("Created file clienttest.dat"); // write to file writeFile(stm); // close the dfsclient before closing the output stream. // This should close all existing file. dfsclient.close(); // reopen file system and verify that file exists. assertTrue( file1 + " does not exist.", AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1)); } finally { cluster.shutdown(); } }
// // writes to file but does not close it // static void writeFile(FSDataOutputStream stm) throws IOException { writeFile(stm, fileSize); }
/**
 * Test that file leases are persisted across namenode restarts. This test is currently not
 * triggered (note the "xxx" prefix) because more HDFS work is needed to handle persistent
 * leases.
 */
public void xxxtestFileCreationNamenodeRestart() throws IOException {
  Configuration conf = new Configuration();
  final int MAX_IDLE_TIME = 2000; // 2s
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt("heartbeat.recheck.interval", 1000);
  conf.setInt("dfs.heartbeat.interval", 1);
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // Remember the port so the restarted namenode is reachable at the same address.
    final int nnport = cluster.getNameNodePort();

    // create a new file.
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);

    // Write two full blocks minus half a block, then sync, so the sync lands
    // mid-block (syncing exactly on a block boundary would report 0 replicas).
    int remainingPiece = blockSize / 2;
    int blocksMinusPiece = numBlocks * blockSize - remainingPiece;
    writeFile(stm, blocksMinusPiece);
    stm.sync();
    int actualRepl = ((DFSClient.DFSOutputStream) (stm.getWrappedStream())).getNumCurrentReplicas();
    // if we sync on a block boundary, actualRepl will be 0
    assertTrue(
        file1 + " should be replicated to 1 datanodes, not " + actualRepl, actualRepl == 1);
    writeFile(stm, remainingPiece);
    stm.sync();

    // rename file while keeping it open.
    Path fileRenamed = new Path("/filestatusRenamed.dat");
    fs.rename(file1, fileRenamed);
    System.out.println(
        "testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to " + fileRenamed);
    file1 = fileRenamed;

    // create another new file.
    //
    Path file2 = new Path("/filestatus2.dat");
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);

    // create yet another new file with full path name.
    // rename it while open
    //
    Path file3 = new Path("/user/home/fullpath.dat");
    FSDataOutputStream stm3 = createFile(fs, file3, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
    Path file4 = new Path("/user/home/fullpath4.dat");
    FSDataOutputStream stm4 = createFile(fs, file4, 1);
    System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);

    // Rename the parent directory of file3/file4 while both are still open.
    fs.mkdirs(new Path("/bin"));
    fs.rename(new Path("/user/home"), new Path("/bin"));
    Path file3new = new Path("/bin/home/fullpath.dat");
    System.out.println(
        "testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to " + file3new);
    Path file4new = new Path("/bin/home/fullpath4.dat");
    System.out.println(
        "testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to " + file4new);

    // restart cluster with the same namenode port as before.
    // This ensures that leases are persisted in fsimage.
    cluster.shutdown();
    try {
      // Best-effort wait for client connections to go idle; interruption ignored.
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster(nnport, conf, 1, false, true, null, null, null);
    cluster.waitActive();

    // restart cluster yet again. This triggers the code to read in
    // persistent leases from fsimage.
    cluster.shutdown();
    try {
      // Best-effort wait; interruption intentionally ignored in this disabled test.
      Thread.sleep(5000);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster(nnport, conf, 1, false, true, null, null, null);
    cluster.waitActive();
    fs = cluster.getFileSystem();

    // instruct the dfsclient to use a new filename when it requests
    // new blocks for files that were renamed.
    DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream) (stm.getWrappedStream());
    dfstream.setTestFilename(file1.toString());
    dfstream = (DFSClient.DFSOutputStream) (stm3.getWrappedStream());
    dfstream.setTestFilename(file3new.toString());
    dfstream = (DFSClient.DFSOutputStream) (stm4.getWrappedStream());
    dfstream.setTestFilename(file4new.toString());

    // write 1 byte to file. This should succeed because the
    // namenode should have persisted leases.
    byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
    stm.write(buffer);
    stm.close();
    stm2.write(buffer);
    stm2.close();
    stm3.close();
    stm4.close();

    // verify that new block is associated with this file
    // (expects 3 blocks total for file1 -- TODO confirm the expected count
    // derives from numBlocks plus the extra 1-byte write above)
    DFSClient client = ((DistributedFileSystem) fs).dfs;
    LocatedBlocks locations =
        client.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertTrue(
        "Error blocks were not cleaned up for file " + file1,
        locations.locatedBlockCount() == 3);

    // verify filestatus2.dat (only the single 1-byte write above, hence one block)
    locations = client.namenode.getBlockLocations(file2.toString(), 0, Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertTrue(
        "Error blocks were not cleaned up for file " + file2,
        locations.locatedBlockCount() == 1);
  } finally {
    IOUtils.closeStream(fs);
    cluster.shutdown();
  }
}
/** Test that file data becomes available before file is closed. */ public void testFileCreation() throws IOException { Configuration conf = new Configuration(); if (simulatedStorage) { conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true); } MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null); FileSystem fs = cluster.getFileSystem(); try { // // check that / exists // Path path = new Path("/"); System.out.println("Path : \"" + path.toString() + "\""); System.out.println(fs.getFileStatus(path).isDir()); assertTrue("/ should be a directory", fs.getFileStatus(path).isDir() == true); // // Create a directory inside /, then try to overwrite it // Path dir1 = new Path("/test_dir"); fs.mkdirs(dir1); System.out.println( "createFile: Creating " + dir1.getName() + " for overwrite of existing directory."); try { fs.create(dir1, true); // Create path, overwrite=true fs.close(); assertTrue("Did not prevent directory from being overwritten.", false); } catch (IOException ie) { if (!ie.getMessage().contains("already exists as a directory.")) throw ie; } // create a new file in home directory. Do not close it. // Path file1 = new Path("filestatus.dat"); FSDataOutputStream stm = createFile(fs, file1, 1); // verify that file exists in FS namespace assertTrue(file1 + " should be a file", fs.getFileStatus(file1).isDir() == false); System.out.println("Path : \"" + file1 + "\""); // write to file writeFile(stm); // Make sure a client can read it before it is closed. 
checkFile(fs, file1, 1); // verify that file size has changed long len = fs.getFileStatus(file1).getLen(); assertTrue( file1 + " should be of size " + (numBlocks * blockSize) + " but found to be of size " + len, len == numBlocks * blockSize); stm.close(); // verify that file size has changed to the full size len = fs.getFileStatus(file1).getLen(); assertTrue( file1 + " should be of size " + fileSize + " but found to be of size " + len, len == fileSize); // Check storage usage // can't check capacities for real storage since the OS file system may be changing under us. if (simulatedStorage) { DataNode dn = cluster.getDataNodes().get(0); assertEquals(fileSize, dn.getFSDataset().getDfsUsed()); assertEquals( SimulatedFSDataset.DEFAULT_CAPACITY - fileSize, dn.getFSDataset().getRemaining()); } } finally { cluster.shutdown(); } }