/** Tests getPos() functionality. */ @Test public void testGetPos() throws IOException { final Path testFile = new Path("/testfile+1"); // Write a test file. FSDataOutputStream out = hdfs.create(testFile, true); out.writeBytes("0123456789"); out.close(); FSDataInputStream in = hftpFs.open(testFile); // Test read(). for (int i = 0; i < 5; ++i) { assertEquals(i, in.getPos()); in.read(); } // Test read(b, off, len). assertEquals(5, in.getPos()); byte[] buffer = new byte[10]; assertEquals(2, in.read(buffer, 0, 2)); assertEquals(7, in.getPos()); // Test read(b). int bytesRead = in.read(buffer); assertEquals(7 + bytesRead, in.getPos()); // Test EOF. for (int i = 0; i < 100; ++i) { in.read(); } assertEquals(10, in.getPos()); in.close(); }
private boolean checkConnectTimeout(HftpFileSystem fs, boolean ignoreReadTimeout) throws IOException { boolean timedout = false; List<HttpURLConnection> conns = new LinkedList<HttpURLConnection>(); try { // with a listen backlog of 1, should only have to make one connection // to trigger a connection timeout. however... linux doesn't honor the // socket's listen backlog so we have to try a bunch of times for (int n = 32; !timedout && n > 0; n--) { try { conns.add(fs.openConnection("/", "")); } catch (SocketTimeoutException ste) { String message = ste.getMessage(); // https will get a read timeout due to SSL negotiation, but // a normal http will not, so need to ignore SSL read timeouts // until a connect timeout occurs if (!(ignoreReadTimeout && message.equals("Read timed out"))) { timedout = true; assertEquals("connect timed out", message); } } } } finally { for (HttpURLConnection conn : conns) { conn.disconnect(); } } return timedout; }
/** Tests seek(): a read after {@code seek(7)} must return the byte at offset 7. */
@Test
public void testSeek() throws IOException {
  final Path testFile = new Path("/testfile+1");

  // Write a 10-byte test file; close in finally so a failed write does not
  // leak the stream.
  FSDataOutputStream out = hdfs.create(testFile, true);
  try {
    out.writeBytes("0123456789");
  } finally {
    out.close();
  }

  FSDataInputStream in = hftpFs.open(testFile);
  try {
    in.seek(7);
    assertEquals('7', in.read());
  } finally {
    // Bug fix: the input stream was previously never closed.
    in.close();
  }
}
/** Test file creation and access with file names that need encoding. */ @Test public void testFileNameEncoding() throws IOException, URISyntaxException { for (Path p : TEST_PATHS) { // Create and access the path (data and streamFile servlets) FSDataOutputStream out = hdfs.create(p, true); out.writeBytes("0123456789"); out.close(); FSDataInputStream in = hftpFs.open(p); assertEquals('0', in.read()); // Check the file status matches the path. Hftp returns a FileStatus // with the entire URI, extract the path part. assertEquals(p, new Path(hftpFs.getFileStatus(p).getPath().toUri().getPath())); // Test list status (listPath servlet) assertEquals(1, hftpFs.listStatus(p).length); // Test content summary (contentSummary servlet) assertNotNull("No content summary", hftpFs.getContentSummary(p)); // Test checksums (fileChecksum and getFileChecksum servlets) assertNotNull("No file checksum", hftpFs.getFileChecksum(p)); } }
@Test public void testHftpSocketTimeout() throws Exception { Configuration conf = new Configuration(); ServerSocket socket = new ServerSocket(0, 1); URI uri = new URI( "hftp", null, InetAddress.getByName(null).getHostAddress(), socket.getLocalPort(), null, null, null); boolean timedout = false; HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf); try { HttpURLConnection conn = fs.openConnection("/", ""); timedout = false; try { // this will consume the only slot in the backlog conn.getInputStream(); } catch (SocketTimeoutException ste) { timedout = true; assertEquals("Read timed out", ste.getMessage()); } finally { if (conn != null) { conn.disconnect(); } } assertTrue("read timedout", timedout); assertTrue("connect timedout", checkConnectTimeout(fs, false)); } finally { fs.close(); } }
private void testDataNodeRedirect(Path path) throws IOException { // Create the file if (hdfs.exists(path)) { hdfs.delete(path, true); } FSDataOutputStream out = hdfs.create(path, (short) 1); out.writeBytes("0123456789"); out.close(); // Get the path's block location so we can determine // if we were redirected to the right DN. FileStatus status = hdfs.getFileStatus(path); BlockLocation[] locations = hdfs.getFileBlockLocations(status, 0, 10); String locationName = locations[0].getNames()[0]; // Connect to the NN to get redirected URL u = hftpFs.getNamenodeURL( "/data" + ServletUtil.encodePath(path.toUri().getPath()), "ugi=userx,groupy"); HttpURLConnection conn = (HttpURLConnection) u.openConnection(); HttpURLConnection.setFollowRedirects(true); conn.connect(); conn.getInputStream(); boolean checked = false; // Find the datanode that has the block according to locations // and check that the URL was redirected to this DN's info port for (DataNode node : cluster.getDataNodes()) { DatanodeRegistration dnR = node.dnRegistration; if (dnR.getName().equals(locationName)) { checked = true; assertEquals(dnR.getInfoPort(), conn.getURL().getPort()); } } assertTrue( "The test never checked that location of " + "the block and hftp desitnation are the same", checked); }
/**
 * Shuts down the shared test fixtures. The closes are chained with
 * try/finally so that a failure in an earlier close cannot prevent the
 * later ones from running — previously an exception from {@code hdfs.close()}
 * would leave the cluster running.
 */
@AfterClass
public static void tearDown() throws IOException {
  try {
    hdfs.close();
  } finally {
    try {
      hftpFs.close();
    } finally {
      cluster.shutdown();
    }
  }
}
/**
 * Initializes this filesystem: delegates to the superclass, configures SSL,
 * then reads the certificate-expiration warning window (in days) from the
 * configuration, defaulting to 30.
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  setupSsl(conf);
  final int warnDays = conf.getInt("ssl.expiration.warn.days", 30);
  ExpWarnDays = warnDays;
}