  //
  // verify that the whole file was written correctly and
  // report where its blocks are located.
  //
  static void checkFullFile(FileSystem fs, Path name) throws IOException {
    FileStatus stat = fs.getFileStatus(name);
    BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, fileSize);
    for (int idx = 0; idx < locations.length; idx++) {
      String[] hosts = locations[idx].getNames();
      for (int i = 0; i < hosts.length; i++) {
        System.out.print(hosts[i] + " ");
      }
      System.out.println(" off " + locations[idx].getOffset()
          + " len " + locations[idx].getLength());
    }

    byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
    FSDataInputStream stm = fs.open(name);
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkData(actual, 0, expected, "Read 2");
    stm.close();
  }
  //
  // verify that the data written to the full blocks are sane
  //
  private void checkFile(FileSystem fileSys, Path name, int repl)
      throws IOException {
    boolean done = false;

    // wait till all full blocks are confirmed by the datanodes.
    while (!done) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
      }
      done = true;
      BlockLocation[] locations = fileSys.getFileBlockLocations(
          fileSys.getFileStatus(name), 0, fileSize);
      if (locations.length < numBlocks) {
        done = false;
        continue;
      }
      for (int idx = 0; idx < locations.length; idx++) {
        if (locations[idx].getHosts().length < repl) {
          done = false;
          break;
        }
      }
    }

    FSDataInputStream stm = fileSys.open(name);
    final byte[] expected;
    if (simulatedStorage) {
      expected = new byte[numBlocks * blockSize];
      for (int i = 0; i < expected.length; i++) {
        expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
      }
    } else {
      expected = AppendTestUtil.randomBytes(seed, numBlocks * blockSize);
    }

    // do a sanity check. Read the file
    byte[] actual = new byte[numBlocks * blockSize];
    stm.readFully(0, actual);
    stm.close();
    checkData(actual, 0, expected, "Read 1");
  }
  //
  // writes 'size' bytes of random data to the file.
  //
  static void writeFile(FSDataOutputStream stm, int size) throws IOException {
    byte[] buffer = AppendTestUtil.randomBytes(seed, size);
    stm.write(buffer, 0, size);
  }
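  // A minimal usage sketch of the helpers above (illustrative only, not part
  // of the original suite). It creates a file, writes 'fileSize' random bytes
  // with writeFile, and verifies placement and contents with checkFullFile.
  // The path name is hypothetical; 'createFile' and 'fileSize' are assumed to
  // come from this test class, as in the tests below.
  void exampleWriteAndVerify(FileSystem fs) throws IOException {
    Path p = new Path("/exampleWriteAndVerify.dat"); // hypothetical test path
    FSDataOutputStream out = createFile(fs, p, 1);   // replication factor 1
    writeFile(out, fileSize); // same seed as checkFullFile's expected bytes
    out.close();
    checkFullFile(fs, p);     // prints block locations, then verifies data
  }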
  /**
   * Test that file leases are persisted across namenode restarts.
   * This test is currently not triggered because more HDFS work is
   * needed to handle persistent leases.
   */
  public void xxxtestFileCreationNamenodeRestart() throws IOException {
    Configuration conf = new Configuration();
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = null;
    try {
      cluster.waitActive();
      fs = cluster.getFileSystem();
      final int nnport = cluster.getNameNodePort();

      // create a new file.
      Path file1 = new Path("/filestatus.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);
      System.out.println("testFileCreationNamenodeRestart: "
          + "Created file " + file1);

      // write two full blocks, in two pieces so that the first sync
      // does not land exactly on a block boundary.
      int remainingPiece = blockSize / 2;
      int blocksMinusPiece = numBlocks * blockSize - remainingPiece;
      writeFile(stm, blocksMinusPiece);
      stm.sync();
      int actualRepl = ((DFSClient.DFSOutputStream) (stm.getWrappedStream()))
          .getNumCurrentReplicas();
      // if we sync on a block boundary, actualRepl will be 0
      assertTrue(file1 + " should be replicated to 1 datanode, not "
          + actualRepl, actualRepl == 1);
      writeFile(stm, remainingPiece);
      stm.sync();

      // rename file while keeping it open.
      Path fileRenamed = new Path("/filestatusRenamed.dat");
      fs.rename(file1, fileRenamed);
      System.out.println("testFileCreationNamenodeRestart: "
          + "Renamed file " + file1 + " to " + fileRenamed);
      file1 = fileRenamed;

      // create another new file.
      //
      Path file2 = new Path("/filestatus2.dat");
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
      System.out.println("testFileCreationNamenodeRestart: "
          + "Created file " + file2);

      // create yet another new file with full path name.
      // rename it while open
      //
      Path file3 = new Path("/user/home/fullpath.dat");
      FSDataOutputStream stm3 = createFile(fs, file3, 1);
      System.out.println("testFileCreationNamenodeRestart: "
          + "Created file " + file3);
      Path file4 = new Path("/user/home/fullpath4.dat");
      FSDataOutputStream stm4 = createFile(fs, file4, 1);
      System.out.println("testFileCreationNamenodeRestart: "
          + "Created file " + file4);

      fs.mkdirs(new Path("/bin"));
      fs.rename(new Path("/user/home"), new Path("/bin"));
      Path file3new = new Path("/bin/home/fullpath.dat");
      System.out.println("testFileCreationNamenodeRestart: "
          + "Renamed file " + file3 + " to " + file3new);
      Path file4new = new Path("/bin/home/fullpath4.dat");
      System.out.println("testFileCreationNamenodeRestart: "
          + "Renamed file " + file4 + " to " + file4new);

      // restart cluster with the same namenode port as before.
      // This ensures that leases are persisted in fsimage.
      cluster.shutdown();
      try {
        Thread.sleep(2 * MAX_IDLE_TIME);
      } catch (InterruptedException e) {
      }
      cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
          null, null, null);
      cluster.waitActive();

      // restart cluster yet again. This triggers the code to read in
      // persistent leases from fsimage.
      cluster.shutdown();
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
      }
      cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
          null, null, null);
      cluster.waitActive();
      fs = cluster.getFileSystem();

      // instruct the dfsclient to use a new filename when it requests
      // new blocks for files that were renamed.
      DFSClient.DFSOutputStream dfstream =
          (DFSClient.DFSOutputStream) (stm.getWrappedStream());
      dfstream.setTestFilename(file1.toString());
      dfstream = (DFSClient.DFSOutputStream) (stm3.getWrappedStream());
      dfstream.setTestFilename(file3new.toString());
      dfstream = (DFSClient.DFSOutputStream) (stm4.getWrappedStream());
      dfstream.setTestFilename(file4new.toString());

      // write 1 byte to file. This should succeed because the
      // namenode should have persisted leases.
      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
      stm.write(buffer);
      stm.close();
      stm2.write(buffer);
      stm2.close();
      stm3.close();
      stm4.close();

      // verify that new block is associated with this file
      DFSClient client = ((DistributedFileSystem) fs).dfs;
      LocatedBlocks locations = client.namenode.getBlockLocations(
          file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file1,
          locations.locatedBlockCount() == 3);

      // verify filestatus2.dat
      locations = client.namenode.getBlockLocations(
          file2.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file2,
          locations.locatedBlockCount() == 1);
    } finally {
      IOUtils.closeStream(fs);
      cluster.shutdown();
    }
  }
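  // A sketch (not part of the original test) of the restart pattern used in
  // xxxtestFileCreationNamenodeRestart above: shut the cluster down, sleep
  // long enough for client connections to go idle, then bring the namenode
  // back on the same port so that leases persisted in the fsimage are
  // reloaded. The constructor arguments mirror those in the test body; the
  // helper name is hypothetical.
  private static MiniDFSCluster restartOnSamePort(
      MiniDFSCluster cluster, Configuration conf, int nnport, long waitMillis)
      throws IOException {
    cluster.shutdown();
    try {
      Thread.sleep(waitMillis);
    } catch (InterruptedException e) {
      // ignore, as the loops in the test do, and proceed with the restart
    }
    MiniDFSCluster restarted =
        new MiniDFSCluster(nnport, conf, 1, false, true, null, null, null);
    restarted.waitActive();
    return restarted;
  }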
  /**
   * Test that file data does not become corrupted even in the face of errors.
   */
  public void testFileCreationError1() throws IOException {
    Configuration conf = new Configuration();
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost",
        cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try {
      // create a new file.
      //
      Path file1 = new Path("/filestatus.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);

      // verify that file exists in FS namespace
      assertTrue(file1 + " should be a file",
          !fs.getFileStatus(file1).isDir());
      System.out.println("Path : \"" + file1 + "\"");

      // kill the datanode
      cluster.shutdownDataNodes();

      // wait for the datanode to be declared dead
      while (true) {
        DatanodeInfo[] info = client.datanodeReport(
            FSConstants.DatanodeReportType.LIVE);
        if (info.length == 0) {
          break;
        }
        System.out.println("testFileCreationError1: waiting for datanode "
            + "to die.");
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
        }
      }

      // write 1 byte to file.
      // This should fail because all datanodes are dead.
      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
      try {
        stm.write(buffer);
        stm.close();
      } catch (Exception e) {
        System.out.println("Encountered expected exception");
      }

      // verify that no blocks are associated with this file;
      // bad block allocations were cleaned up earlier.
      LocatedBlocks locations = client.namenode.getBlockLocations(
          file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up",
          locations.locatedBlockCount() == 0);
    } finally {
      cluster.shutdown();
      client.close();
    }
  }
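  // A sketch (not part of the original) that factors out the polling loop
  // from testFileCreationError1: block until the namenode reports no live
  // datanodes. It uses only calls already present in the test above; the
  // helper name is hypothetical.
  private static void waitForNoLiveDatanodes(DFSClient client)
      throws IOException {
    while (client.datanodeReport(FSConstants.DatanodeReportType.LIVE).length
        > 0) {
      System.out.println("waiting for all datanodes to be declared dead");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // ignore and keep polling, matching the original loop's behavior
      }
    }
  }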