  /**
   * Test a simple flush on a simple HDFS file.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testSimpleFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {
      // create a new file.
      Path file1 = new Path("/simpleFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleFlush.dat");

      // write to file
      int mid = AppendTestUtil.FILE_SIZE / 2;
      stm.write(fileContents, 0, mid);
      stm.hflush();
      System.out.println("Wrote and Flushed first part of file.");

      // write the remainder of the file
      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
      System.out.println("Written second part of file");
      stm.hflush();
      stm.hflush();
      System.out.println("Wrote and Flushed second part of file.");

      // verify that full blocks are sane
      checkFile(fs, file1, 1);

      stm.close();
      System.out.println("Closed file.");

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
          fileContents, "Read 2");
    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }

  /**
   * Test that file data can be flushed.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testComplexFlush() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {
      // create a new file.
      Path file1 = new Path("/complexFlush.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file complexFlush.dat");

      int start = 0;
      for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
        stm.write(fileContents, start, 29);
        stm.hflush();
        start += 29;
      }
      stm.write(fileContents, start, AppendTestUtil.FILE_SIZE - start);

      // verify that full blocks are sane
      checkFile(fs, file1, 1);
      stm.close();

      // verify that entire file is good
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
          fileContents, "Read 2");
    } catch (IOException e) {
      System.out.println("Exception :" + e);
      throw e;
    } catch (Throwable e) {
      System.out.println("Throwable :" + e);
      e.printStackTrace();
      throw new IOException("Throwable : " + e);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }

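  /*
   * Both flush tests above call checkFile(fs, file1, 1), a verification helper
   * defined elsewhere in this class and not shown in this section. Purely as a
   * hedged sketch of what such a read-back check could look like (the name,
   * parameters, and exact assertions below are illustrative assumptions, not
   * the actual helper; FSDataInputStream and a static org.junit.Assert.assertEquals
   * import are assumed to be available), one might open a fresh reader and
   * compare the flushed prefix with the expected buffer:
   */
  private void verifyFlushedPrefixSketch(FileSystem fs, Path name,
      byte[] expected, int flushedLen) throws IOException {
    byte[] actual = new byte[flushedLen];
    // Open a new reader: data written before hflush() should be visible to it
    // even while the writer's output stream is still open.
    FSDataInputStream in = fs.open(name);
    try {
      in.readFully(0, actual);
    } finally {
      in.close();
    }
    // Compare the flushed prefix byte-for-byte with the expected contents.
    for (int i = 0; i < flushedLen; i++) {
      assertEquals("Byte mismatch at offset " + i, expected[i], actual[i]);
    }
  }
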
  /**
   * Test that copy on write for blocks works correctly.
   *
   * @throws IOException an exception might be thrown
   */
  @Test
  public void testCopyOnWrite() throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    InetSocketAddress addr = new InetSocketAddress("localhost",
        cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try {
      // create a new file, write to it and close it.
      //
      Path file1 = new Path("/filestatus.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      writeFile(stm);
      stm.close();

      // Get a handle to the datanode
      DataNode[] dn = cluster.listDataNodes();
      assertTrue("There should be only one datanode but found " + dn.length,
          dn.length == 1);

      LocatedBlocks locations = client.getNamenode().getBlockLocations(
          file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i = i + 2) {
        ExtendedBlock b = blocks.get(i).getBlock();
        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f + " to " + link);
        HardLink.createHardLink(f, link);
      }

      //
      // Detach all blocks. This should remove hardlinks (if any)
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue("Detaching block " + b + " should have returned true",
            dataset.unlinkBlock(b, 1));
      }

      // Since the blocks were already detached earlier, these calls should
      // return false
      //
      for (int i = 0; i < blocks.size(); i++) {
        ExtendedBlock b = blocks.get(i).getBlock();
        System.out.println("testCopyOnWrite detaching block " + b);
        assertTrue("Detaching block " + b + " should have returned false",
            !dataset.unlinkBlock(b, 1));
      }
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }

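  /*
   * Hedged follow-up sketch (not part of the original test): a successful
   * unlinkBlock() is expected to leave the datanode's block file no longer
   * sharing an inode with the ".link" file created above, so one could also
   * assert on the block file's hard-link count. HardLink.getLinkCount(File)
   * is assumed to be available here; the helper name and the exact assertion
   * are illustrative only.
   */
  private void assertBlockWasUnlinkedSketch(File blockFile) throws IOException {
    // After copy-on-write detaches the block, the file the dataset serves
    // should have a single hard link again (the ".link" copy keeps the old inode).
    int linkCount = HardLink.getLinkCount(blockFile);
    assertTrue("Block file " + blockFile + " still has " + linkCount
        + " hard links after unlinking", linkCount == 1);
  }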