@Test public void testHDFS() { Path file = new Path(hdfsURI + hdPath); org.apache.hadoop.fs.Path result = new org.apache.hadoop.fs.Path(hdfsURI + "/result"); try { FileSystem fs = file.getFileSystem(); Assert.assertTrue("Must be HadoopFileSystem", fs instanceof HadoopFileSystem); DopOneTestEnvironment.setAsContext(); try { WordCount.main(new String[] {file.toString(), result.toString()}); } catch (Throwable t) { t.printStackTrace(); Assert.fail("Test failed with " + t.getMessage()); } finally { DopOneTestEnvironment.unsetAsContext(); } Assert.assertTrue("No result file present", hdfs.exists(result)); // validate output: org.apache.hadoop.fs.FSDataInputStream inStream = hdfs.open(result); StringWriter writer = new StringWriter(); IOUtils.copy(inStream, writer); String resultString = writer.toString(); Assert.assertEquals("hdfs 10\n" + "hello 10\n", resultString); inStream.close(); } catch (IOException e) { e.printStackTrace(); Assert.fail("Error in test: " + e.getMessage()); } }
@Override
public FSDataInputStream open(final Path f, final int bufferSize) throws IOException {
    // Translate the Flink path into a Hadoop path and delegate the buffered open.
    // NOTE(review): this wraps in DistributedDataInputStream while open(Path)
    // wraps in HadoopDataInputStream — confirm which adapter is intended.
    final org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toString());
    return new DistributedDataInputStream(this.fs.open(hadoopPath, bufferSize));
}
@Override public FileStatus[] listStatus(final Path f) throws IOException { final org.apache.hadoop.fs.FileStatus[] hadoopFiles = this.fs.listStatus(new org.apache.hadoop.fs.Path(f.toString())); final FileStatus[] files = new FileStatus[hadoopFiles.length]; // Convert types for (int i = 0; i < files.length; i++) { files[i] = new DistributedFileStatus(hadoopFiles[i]); } return files; }
@Override
public FSDataOutputStream create(
        final Path f,
        final boolean overwrite,
        final int bufferSize,
        final short replication,
        final long blockSize) throws IOException {
    // Delegate creation with all tuning knobs (buffer size, replication, block size)
    // to the wrapped Hadoop file system, adapting path and stream types.
    final org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toString());
    return new DistributedDataOutputStream(
            this.fs.create(hadoopPath, overwrite, bufferSize, replication, blockSize));
}
@Override
public boolean rename(final Path src, final Path dst) throws IOException {
    // Convert both endpoints to Hadoop paths and delegate the rename.
    final org.apache.hadoop.fs.Path hadoopSrc = new org.apache.hadoop.fs.Path(src.toString());
    final org.apache.hadoop.fs.Path hadoopDst = new org.apache.hadoop.fs.Path(dst.toString());
    return this.fs.rename(hadoopSrc, hadoopDst);
}
@Override
public boolean mkdirs(final Path f) throws IOException {
    // Straight delegation; only the path type needs converting.
    final org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toString());
    return this.fs.mkdirs(hadoopPath);
}
@Override
public boolean delete(final Path f, final boolean recursive) throws IOException {
    // Straight delegation; the recursive flag is passed through unchanged.
    final org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toString());
    return this.fs.delete(hadoopPath, recursive);
}
@Override
public FSDataOutputStream create(final Path f, final boolean overwrite) throws IOException {
    // Two-argument create: delegate with default buffer/replication/block settings
    // and adapt the resulting Hadoop stream to the Flink stream type.
    final org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toString());
    return new DistributedDataOutputStream(this.fs.create(hadoopPath, overwrite));
}
@Override
public FileStatus getFileStatus(final Path f) throws IOException {
    // Fetch the Hadoop status and wrap it in the Flink-side adapter.
    final org.apache.hadoop.fs.Path hadoopPath = new org.apache.hadoop.fs.Path(f.toString());
    return new DistributedFileStatus(this.fs.getFileStatus(hadoopPath));
}
@Override
public FSDataInputStream open(final Path f) throws IOException {
    // Delegate the open call after converting the path type.
    // NOTE(review): wraps in HadoopDataInputStream while open(Path, int) wraps
    // in DistributedDataInputStream — confirm the two overloads should agree.
    return new HadoopDataInputStream(fs.open(new org.apache.hadoop.fs.Path(f.toString())));
}