// Tests that files are evicted when there is not enough space in the worker.
  @Test
  public void evictionTest() throws Exception {
    final int blockSize = (int) WORKER_CAPACITY_BYTES / 2;
    AlluxioURI file1 = new AlluxioURI("/file1");
    FileSystemTestUtils.createByteFile(mFileSystem, file1, WriteType.MUST_CACHE, blockSize);

    // File should be in memory after it is written with MUST_CACHE
    URIStatus fileInfo1 = mFileSystem.getStatus(file1);
    Assert.assertEquals(100, fileInfo1.getInMemoryPercentage());

    AlluxioURI file2 = new AlluxioURI("/file2");
    FileSystemTestUtils.createByteFile(mFileSystem, file2, WriteType.MUST_CACHE, blockSize);

    // Both file1 and file2 should be in memory since their combined size does not exceed the worker capacity
    fileInfo1 = mFileSystem.getStatus(file1);
    URIStatus fileInfo2 = mFileSystem.getStatus(file2);
    Assert.assertEquals(100, fileInfo1.getInMemoryPercentage());
    Assert.assertEquals(100, fileInfo2.getInMemoryPercentage());

    AlluxioURI file3 = new AlluxioURI("/file3");
    FileSystemTestUtils.createByteFile(mFileSystem, file3, WriteType.MUST_CACHE, blockSize);

    waitForHeartbeat();

    fileInfo1 = mFileSystem.getStatus(file1);
    fileInfo2 = mFileSystem.getStatus(file2);
    URIStatus fileInfo3 = mFileSystem.getStatus(file3);

    // File 3 should be fully in memory, and exactly one of file 1 and file 2 should remain in memory
    Assert.assertEquals(100, fileInfo3.getInMemoryPercentage());
    Assert.assertTrue(
        "Exactly one of file1 and file2 should be 100% in memory",
        fileInfo1.getInMemoryPercentage() == 100 ^ fileInfo2.getInMemoryPercentage() == 100);
  }
  // Tests that locking a block returns the correct path
  @Test
  public void lockBlockTest() throws Exception {
    final int blockSize = (int) WORKER_CAPACITY_BYTES / 2;

    CreateFileOptions options =
        CreateFileOptions.defaults()
            .setBlockSizeBytes(blockSize)
            .setWriteType(WriteType.MUST_CACHE);
    FileOutStream out = mFileSystem.createFile(new AlluxioURI("/testFile"), options);
    URIStatus file = mFileSystem.getStatus(new AlluxioURI("/testFile"));

    final long blockId = BlockId.createBlockId(BlockId.getContainerId(file.getFileId()), 0);

    out.write(BufferUtils.getIncreasingByteArray(blockSize));
    out.close();

    String localPath = mBlockWorkerServiceHandler.lockBlock(blockId, SESSION_ID).getBlockPath();

    // The local path should exist
    Assert.assertNotNull(localPath);

    UnderFileSystem ufs = UnderFileSystem.get(localPath, mMasterConfiguration);
    byte[] data = new byte[blockSize];
    int bytesRead = ufs.open(localPath).read(data);

    // The data in the local file should equal the data we wrote earlier
    Assert.assertEquals(blockSize, bytesRead);
    Assert.assertTrue(BufferUtils.equalIncreasingByteArray(bytesRead, data));

    mBlockWorkerServiceHandler.unlockBlock(blockId, SESSION_ID);
  }
  // Tests that caching a block successfully persists the block if the block exists
  @Test
  public void cacheBlockTest() throws Exception {
    mFileSystem.createFile(new AlluxioURI("/testFile"));
    URIStatus file = mFileSystem.getStatus(new AlluxioURI("/testFile"));

    final int blockSize = (int) WORKER_CAPACITY_BYTES / 10;
    // Construct the block ids for the file.
    final long blockId0 = BlockId.createBlockId(BlockId.getContainerId(file.getFileId()), 0);
    final long blockId1 = BlockId.createBlockId(BlockId.getContainerId(file.getFileId()), 1);

    String filename =
        mBlockWorkerServiceHandler.requestBlockLocation(SESSION_ID, blockId0, blockSize);
    createBlockFile(filename, blockSize);
    mBlockWorkerServiceHandler.cacheBlock(SESSION_ID, blockId0);

    // The master should be immediately updated with the persisted block
    Assert.assertEquals(blockSize, mBlockMasterClient.getUsedBytes());

    // Attempting to cache a non-existent block should throw an exception
    Exception exception = null;
    try {
      mBlockWorkerServiceHandler.cacheBlock(SESSION_ID, blockId1);
    } catch (TException e) {
      exception = e;
    }
    Assert.assertNotNull(exception);
  }
Example #4
  @Override
  public boolean rename(Path src, Path dst) throws IOException {
    LOG.info("rename({}, {})", src, dst);
    if (mStatistics != null) {
      mStatistics.incrementWriteOps(1);
    }

    AlluxioURI srcPath = new AlluxioURI(HadoopUtils.getPathWithoutScheme(src));
    AlluxioURI dstPath = new AlluxioURI(HadoopUtils.getPathWithoutScheme(dst));
    ensureExists(srcPath);
    URIStatus dstStatus;
    try {
      dstStatus = mFileSystem.getStatus(dstPath);
    } catch (IOException | AlluxioException e) {
      dstStatus = null;
    }
    // If the destination is an existing folder, try to move the src into the folder
    if (dstStatus != null && dstStatus.isFolder()) {
      dstPath = dstPath.join(srcPath.getName());
    }
    try {
      mFileSystem.rename(srcPath, dstPath);
      return true;
    } catch (IOException | AlluxioException e) {
      LOG.error("Failed to rename {} to {}", src, dst, e);
      return false;
    }
  }
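
  // Usage sketch, added for illustration only: exercising the rename() above through a
  // Hadoop FileSystem handle assumed to be backed by this Alluxio client. The paths
  // "/data/part-0" and "/data/archive" are made-up examples.
  private static void renameIntoFolderExample(org.apache.hadoop.fs.FileSystem hadoopFs)
      throws IOException {
    // If "/data/archive" already exists as a folder, rename() moves the source into it,
    // so the file ends up at "/data/archive/part-0"; a false return means the rename failed.
    boolean renamed = hadoopFs.rename(new Path("/data/part-0"), new Path("/data/archive"));
    if (!renamed) {
      throw new IOException("rename of /data/part-0 failed; see the client log for the cause");
    }
  }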
Example #5
 /**
  * Constructs a new stream for reading a file from HDFS.
  *
  * @param uri the Alluxio file URI
  * @param conf Hadoop configuration
  * @param bufferSize the buffer size
  * @param stats filesystem statistics
  * @throws IOException if the underlying file does not exist or its stream cannot be created
  */
 public HdfsFileInputStream(
     AlluxioURI uri,
     org.apache.hadoop.conf.Configuration conf,
     int bufferSize,
     org.apache.hadoop.fs.FileSystem.Statistics stats)
     throws IOException {
   LOG.debug("HdfsFileInputStream({}, {}, {}, {}, {})", uri, conf, bufferSize, stats);
   long bufferBytes = Configuration.getBytes(Constants.USER_FILE_BUFFER_BYTES);
   mBuffer = new byte[Ints.checkedCast(bufferBytes) * 4];
   mCurrentPosition = 0;
   FileSystem fs = FileSystem.Factory.get();
   mHadoopConf = conf;
   mHadoopBufferSize = bufferSize;
   mStatistics = stats;
   try {
     mFileInfo = fs.getStatus(uri);
     mHdfsPath = new Path(mFileInfo.getUfsPath());
     mAlluxioFileInputStream = fs.openFile(uri, OpenFileOptions.defaults());
   } catch (FileDoesNotExistException e) {
     throw new FileNotFoundException(
         ExceptionMessage.HDFS_FILE_NOT_FOUND.getMessage(mHdfsPath, uri));
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
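
 // Usage sketch, added for illustration only: applications normally obtain this stream
 // indirectly via org.apache.hadoop.fs.FileSystem#open on an alluxio:// path rather than by
 // calling the constructor directly. The "/logs/app.log" path is an assumption.
 private static long countBytesExample(org.apache.hadoop.fs.FileSystem hadoopFs)
     throws IOException {
   long total = 0;
   try (org.apache.hadoop.fs.FSDataInputStream in = hadoopFs.open(new Path("/logs/app.log"))) {
     byte[] chunk = new byte[8192];
     int read;
     while ((read = in.read(chunk)) != -1) {
       total += read;
     }
   }
   return total;
 }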
Example #6
  /**
   * Attempts to create a file. Overwrite will not succeed if the path exists and is a folder.
   *
   * @param path path to create
   * @param permission permissions of the created file/folder
   * @param overwrite overwrite if file exists
   * @param bufferSize the size in bytes of the buffer to be used
   * @param replication under filesystem replication factor
   * @param blockSize block size in bytes
   * @param progress queryable progress
   * @return an {@link FSDataOutputStream} created at the indicated path of a file
   * @throws IOException if overwrite is not specified and the path already exists or if the path is
   *     a folder
   */
  @Override
  public FSDataOutputStream create(
      Path path,
      FsPermission permission,
      boolean overwrite,
      int bufferSize,
      short replication,
      long blockSize,
      Progressable progress)
      throws IOException {
    LOG.info(
        "create({}, {}, {}, {}, {}, {}, {})",
        path,
        permission,
        overwrite,
        bufferSize,
        replication,
        blockSize,
        progress);
    if (mStatistics != null) {
      mStatistics.incrementWriteOps(1);
    }

    // Check whether the file already exists, and delete it if overwrite is true
    AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
    try {
      if (mFileSystem.exists(uri)) {
        if (!overwrite) {
          throw new IOException(ExceptionMessage.FILE_ALREADY_EXISTS.getMessage(uri));
        }
        if (mFileSystem.getStatus(uri).isFolder()) {
          throw new IOException(ExceptionMessage.FILE_CREATE_IS_DIRECTORY.getMessage(uri));
        }
        mFileSystem.delete(uri);
      }
    } catch (AlluxioException e) {
      throw new IOException(e);
    }

    // The file no longer exists at this point, so we can create it
    CreateFileOptions options =
        CreateFileOptions.defaults()
            .setBlockSizeBytes(blockSize)
            .setMode(new Mode(permission.toShort()));
    try {
      FileOutStream outStream = mFileSystem.createFile(uri, options);
      return new FSDataOutputStream(outStream, mStatistics);
    } catch (AlluxioException e) {
      throw new IOException(e);
    }
  }
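
  // Usage sketch, added for illustration only: writing through the FSDataOutputStream that
  // create() returns. The path, permission, buffer size and block size below are assumptions
  // chosen just for this example.
  private static void createAndWriteExample(org.apache.hadoop.fs.FileSystem hadoopFs)
      throws IOException {
    try (FSDataOutputStream out = hadoopFs.create(
        new Path("/data/hello.txt"),
        new FsPermission((short) 0644),
        true /* overwrite */,
        4096 /* bufferSize */,
        (short) 1 /* replication */,
        64L * 1024 * 1024 /* blockSize */,
        null /* progress */)) {
      out.write("hello alluxio".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    }
  }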
Example #7
  /**
   * {@inheritDoc}
   *
   * <p>If the file does not exist in Alluxio, query it from HDFS.
   */
  @Override
  public FileStatus getFileStatus(Path path) throws IOException {
    LOG.info("getFileStatus({})", path);

    if (mStatistics != null) {
      mStatistics.incrementReadOps(1);
    }
    AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
    URIStatus fileStatus;
    try {
      fileStatus = mFileSystem.getStatus(uri);
    } catch (FileDoesNotExistException e) {
      throw new FileNotFoundException(e.getMessage());
    } catch (AlluxioException e) {
      throw new IOException(e);
    }

    return new FileStatus(
        fileStatus.getLength(),
        fileStatus.isFolder(),
        BLOCK_REPLICATION_CONSTANT,
        fileStatus.getBlockSizeBytes(),
        fileStatus.getLastModificationTimeMs(),
        fileStatus.getCreationTimeMs(),
        new FsPermission((short) fileStatus.getMode()),
        fileStatus.getOwner(),
        fileStatus.getGroup(),
        new Path(mAlluxioHeader + uri));
  }
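
  // Usage sketch, added for illustration only: reading a few fields off the Hadoop FileStatus
  // produced by getFileStatus(). The "/data/hello.txt" path is an assumption.
  private static void printStatusExample(org.apache.hadoop.fs.FileSystem hadoopFs)
      throws IOException {
    FileStatus status = hadoopFs.getFileStatus(new Path("/data/hello.txt"));
    System.out.println("length=" + status.getLen()
        + " isDirectory=" + status.isDirectory()
        + " owner=" + status.getOwner()
        + " permission=" + status.getPermission());
  }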
Example #8
 /**
  * Convenience method which ensures the given path exists, wrapping any {@link AlluxioException}
  * in {@link IOException}.
  *
  * @param path the path to look up
  * @throws IOException if an Alluxio exception occurs
  */
 private void ensureExists(AlluxioURI path) throws IOException {
   try {
     mFileSystem.getStatus(path);
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
Example #9
 private List<FileBlockInfo> getFileBlocks(AlluxioURI path) throws IOException {
   try {
     return mFileSystem.getStatus(path).getFileBlockInfos();
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
  // Tests that locking a block returns an error on failure
  @Test
  public void lockBlockFailureTest() throws Exception {
    mFileSystem.createFile(new AlluxioURI("/testFile"));
    URIStatus file = mFileSystem.getStatus(new AlluxioURI("/testFile"));
    final long blockId = BlockId.createBlockId(BlockId.getContainerId(file.getFileId()), 0);

    Exception exception = null;
    try {
      mBlockWorkerServiceHandler.lockBlock(blockId, SESSION_ID);
    } catch (AlluxioTException e) {
      exception = e;
    }

    // An exception should have been thrown since the block does not exist on the worker
    Assert.assertNotNull(exception);
  }
Example #11
 /**
  * Sets pin state for the input path.
  *
  * @param fs The {@link FileSystem} client
  * @param path The {@link AlluxioURI} path as the input of the command
  * @param pinned the state to be set
  * @throws IOException if a non-Alluxio related exception occurs
  */
 public static void setPinned(FileSystem fs, AlluxioURI path, boolean pinned) throws IOException {
   try {
     SetAttributeOptions options = SetAttributeOptions.defaults().setPinned(pinned);
     fs.setAttribute(path, options);
   } catch (AlluxioException e) {
     throw new IOException(e.getMessage());
   }
 }
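
 // Usage sketch, added for illustration only: pinning a path with the helper above, using the
 // FileSystem.Factory.get() client seen elsewhere in these snippets. The "/important/dataset"
 // path is an assumption.
 private static void pinExample() throws IOException {
   FileSystem fs = FileSystem.Factory.get();
   setPinned(fs, new AlluxioURI("/important/dataset"), true);
 }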
Example #12
 /**
  * Sets a new TTL value or unsets an existing TTL value for file at path.
  *
  * @param fs the file system for Alluxio
  * @param path the file path
  * @param ttlMs the TTL (time to live) value to use; it identifies duration (in milliseconds) the
  *     created file should be kept around before it is automatically deleted, irrespective of
  *     whether the file is pinned; {@link Constants#NO_TTL} means to unset the TTL value
  * @throws IOException when failing to set/unset the TTL
  */
 public static void setTtl(FileSystem fs, AlluxioURI path, long ttlMs) throws IOException {
   try {
     SetAttributeOptions options = SetAttributeOptions.defaults().setTtl(ttlMs);
     fs.setAttribute(path, options);
   } catch (AlluxioException e) {
     throw new IOException(e.getMessage());
   }
 }
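
 // Usage sketch, added for illustration only: giving a file a one-hour TTL and then removing
 // it again with Constants.NO_TTL, as documented above. The path is an assumption.
 private static void ttlExample() throws IOException {
   FileSystem fs = FileSystem.Factory.get();
   AlluxioURI path = new AlluxioURI("/tmp/scratch.dat");
   setTtl(fs, path, 60L * 60L * 1000L); // keep the file around for one hour
   setTtl(fs, path, Constants.NO_TTL);  // unset the TTL again
 }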
Example #13
 @Override
 public FSDataOutputStream append(Path path, int bufferSize, Progressable progress)
     throws IOException {
   LOG.info("append({}, {}, {})", path, bufferSize, progress);
   if (mStatistics != null) {
     mStatistics.incrementWriteOps(1);
   }
   AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
   try {
     if (mFileSystem.exists(uri)) {
       throw new IOException(ExceptionMessage.FILE_ALREADY_EXISTS.getMessage(uri));
     }
     return new FSDataOutputStream(mFileSystem.createFile(uri), mStatistics);
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
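
 // Usage sketch, added for illustration only: note that this append() only succeeds when the
 // path does not exist yet, in which case it behaves like a plain create; appending to an
 // existing Alluxio file is rejected. The path below is an assumption.
 private static void appendExample(org.apache.hadoop.fs.FileSystem hadoopFs) throws IOException {
   try (FSDataOutputStream out = hadoopFs.append(new Path("/data/new-file.txt"), 4096, null)) {
     out.write("first and only write".getBytes(java.nio.charset.StandardCharsets.UTF_8));
   }
 }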
  /** Tests load metadata on list. */
  @Test
  public void loadMetadata() throws Exception {
    String dirName = "loadMetaDataRoot";

    String rootDir = PathUtils.concatPath(mUnderfsAddress, dirName);
    mUfs.mkdirs(rootDir, true);

    String rootFile1 = PathUtils.concatPath(rootDir, "file1");
    createEmptyFile(rootFile1);

    String rootFile2 = PathUtils.concatPath(rootDir, "file2");
    createEmptyFile(rootFile2);

    AlluxioURI rootAlluxioURI = new AlluxioURI("/" + dirName);
    FileSystem client = mLocalAlluxioClusterResource.get().getClient();
    client.listStatus(
        rootAlluxioURI, ListStatusOptions.defaults().setLoadMetadataType(LoadMetadataType.Always));

    try {
      client.createDirectory(rootAlluxioURI, CreateDirectoryOptions.defaults());
      Assert.fail("create is expected to fail with FileAlreadyExistsException");
    } catch (FileAlreadyExistsException e) {
      Assert.assertEquals(
          ExceptionMessage.FILE_ALREADY_EXISTS.getMessage(rootAlluxioURI), e.getMessage());
    }

    AlluxioURI file1URI = rootAlluxioURI.join("file1");
    try {
      client.createFile(file1URI, CreateFileOptions.defaults()).close();
      Assert.fail("create is expected to fail with FileAlreadyExistsException");
    } catch (FileAlreadyExistsException e) {
      Assert.assertEquals(
          ExceptionMessage.FILE_ALREADY_EXISTS.getMessage(file1URI), e.getMessage());
    }

    AlluxioURI file2URI = rootAlluxioURI.join("file2");
    try {
      client.createFile(file2URI, CreateFileOptions.defaults()).close();
      Assert.fail("create is expected to fail with FileAlreadyExistsException");
    } catch (FileAlreadyExistsException e) {
      Assert.assertEquals(
          ExceptionMessage.FILE_ALREADY_EXISTS.getMessage(file2URI), e.getMessage());
    }
  }
  // Tests that cancelling a block will remove the temporary file
  @Test
  public void cancelBlockTest() throws Exception {
    mFileSystem.createFile(new AlluxioURI("/testFile"));
    URIStatus file = mFileSystem.getStatus(new AlluxioURI("/testFile"));

    final int blockSize = (int) WORKER_CAPACITY_BYTES / 2;
    final long blockId = BlockId.createBlockId(BlockId.getContainerId(file.getFileId()), 0);

    String filename =
        mBlockWorkerServiceHandler.requestBlockLocation(SESSION_ID, blockId, blockSize);
    createBlockFile(filename, blockSize);
    mBlockWorkerServiceHandler.cancelBlock(SESSION_ID, blockId);

    // The block should not exist after being cancelled
    Assert.assertFalse(new File(filename).exists());

    // The master should not have recorded any used space after the block is cancelled
    waitForHeartbeat();
    Assert.assertEquals(0, mBlockMasterClient.getUsedBytes());
  }
Example #16
 /**
  * Changes permission of a path.
  *
  * @param path path to set permission
  * @param permission permission set to path
  * @throws IOException if the permission of the path cannot be changed
  */
 @Override
 public void setPermission(Path path, FsPermission permission) throws IOException {
   LOG.info("setMode({},{})", path, permission.toString());
   AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
   SetAttributeOptions options =
       SetAttributeOptions.defaults().setMode(new Mode(permission.toShort())).setRecursive(false);
   try {
     mFileSystem.setAttribute(uri, options);
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
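
 // Usage sketch, added for illustration only: changing a path's mode through the Hadoop API
 // entry point implemented above. The path and mode are assumptions.
 private static void chmodExample(org.apache.hadoop.fs.FileSystem hadoopFs) throws IOException {
   hadoopFs.setPermission(new Path("/data/hello.txt"), new FsPermission((short) 0600));
 }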
Example #17
 /**
  * Attempts to delete the file or directory with the specified path.
  *
  * @param path path to delete
  * @param recursive if true, will attempt to delete all children of the path
  * @return true if one or more files/directories were deleted; false otherwise
  * @throws IOException if the path failed to be deleted due to some constraint (i.e. a non-empty
  *     directory with the recursive flag disabled)
  */
 @Override
 public boolean delete(Path path, boolean recursive) throws IOException {
   LOG.info("delete({}, {})", path, recursive);
   if (mStatistics != null) {
     mStatistics.incrementWriteOps(1);
   }
   AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
   DeleteOptions options = DeleteOptions.defaults().setRecursive(recursive);
   try {
     mFileSystem.delete(uri, options);
     return true;
   } catch (InvalidPathException | FileDoesNotExistException e) {
     LOG.error("delete failed: {}", e.getMessage());
     return false;
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
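
 // Usage sketch, added for illustration only: recursively deleting a directory. As with
 // delete() above, a false return value indicates a missing or invalid path rather than an
 // I/O failure. The "/data/archive" path is an assumption.
 private static void deleteExample(org.apache.hadoop.fs.FileSystem hadoopFs) throws IOException {
   boolean deleted = hadoopFs.delete(new Path("/data/archive"), true /* recursive */);
   if (!deleted) {
     LOG.warn("nothing was deleted at /data/archive");
   }
 }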
Example #18
 /**
  * Attempts to create a folder with the specified path. Parent directories will be created.
  *
  * @param path path to create
  * @param permission permissions to grant the created folder
  * @return true if the indicated folder is created successfully or already exists
  * @throws IOException if the folder cannot be created
  */
 @Override
 public boolean mkdirs(Path path, FsPermission permission) throws IOException {
   LOG.info("mkdirs({}, {})", path, permission);
   if (mStatistics != null) {
     mStatistics.incrementWriteOps(1);
   }
   AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
   CreateDirectoryOptions options =
       CreateDirectoryOptions.defaults()
           .setRecursive(true)
           .setAllowExists(true)
           .setMode(new Mode(permission.toShort()));
   try {
     mFileSystem.createDirectory(uri, options);
     return true;
   } catch (AlluxioException e) {
     throw new IOException(e);
   }
 }
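
 // Usage sketch, added for illustration only: creating a nested directory tree in one call.
 // Because mkdirs() above sets setRecursive(true) and setAllowExists(true), repeating the call
 // is harmless. The path and mode are assumptions.
 private static void mkdirsExample(org.apache.hadoop.fs.FileSystem hadoopFs) throws IOException {
   // mkdirs() above either returns true or throws, so no further checking is needed here.
   hadoopFs.mkdirs(new Path("/data/2016/07/01"), new FsPermission((short) 0755));
 }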
Example #19
 /**
  * Changes the owner or group of a path (i.e. a file or a directory). If username is null, the
  * original username remains unchanged; the same applies to groupname. If username and groupname
  * are both non-null, both of them will be changed.
  *
  * @param path path to set owner or group
  * @param username username to be set
  * @param groupname groupname to be set
  * @throws IOException if changing owner or group of the path failed
  */
 @Override
 public void setOwner(Path path, final String username, final String groupname)
     throws IOException {
   LOG.info("setOwner({},{},{})", path, username, groupname);
   AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
   SetAttributeOptions options = SetAttributeOptions.defaults();
   boolean ownerOrGroupChanged = false;
   if (username != null && !username.isEmpty()) {
     options.setOwner(username).setRecursive(false);
     ownerOrGroupChanged = true;
   }
   if (groupname != null && !groupname.isEmpty()) {
     options.setGroup(groupname).setRecursive(false);
     ownerOrGroupChanged = true;
   }
   if (ownerOrGroupChanged) {
     try {
       mFileSystem.setAttribute(uri, options);
     } catch (AlluxioException e) {
       throw new IOException(e);
     }
   }
 }
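
 // Usage sketch, added for illustration only: changing only the group of a path by passing a
 // null username, which setOwner() above treats as "leave the owner unchanged". The path and
 // group name are assumptions.
 private static void chgrpExample(org.apache.hadoop.fs.FileSystem hadoopFs) throws IOException {
   hadoopFs.setOwner(new Path("/data/hello.txt"), null, "analysts");
 }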
Example #20
  @Override
  public FileStatus[] listStatus(Path path) throws IOException {
    LOG.info("listStatus({})", path);

    if (mStatistics != null) {
      mStatistics.incrementReadOps(1);
    }

    AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
    List<URIStatus> statuses;
    try {
      statuses = mFileSystem.listStatus(uri);
    } catch (FileDoesNotExistException e) {
      throw new FileNotFoundException(HadoopUtils.getPathWithoutScheme(path));
    } catch (AlluxioException e) {
      throw new IOException(e);
    }

    FileStatus[] ret = new FileStatus[statuses.size()];
    for (int k = 0; k < statuses.size(); k++) {
      URIStatus status = statuses.get(k);

      ret[k] =
          new FileStatus(
              status.getLength(),
              status.isFolder(),
              BLOCK_REPLICATION_CONSTANT,
              status.getBlockSizeBytes(),
              status.getLastModificationTimeMs(),
              status.getCreationTimeMs(),
              new FsPermission((short) status.getMode()),
              status.getOwner(),
              status.getGroup(),
              new Path(mAlluxioHeader + status.getPath()));
    }
    return ret;
  }
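
  // Usage sketch, added for illustration only: listing a directory and printing each child,
  // marking folders with a trailing slash. The "/data" path is an assumption.
  private static void listStatusExample(org.apache.hadoop.fs.FileSystem hadoopFs)
      throws IOException {
    for (FileStatus status : hadoopFs.listStatus(new Path("/data"))) {
      System.out.println(status.getPath() + (status.isDirectory() ? "/" : ""));
    }
  }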
Example #21
  /**
   * Entry point for the {@link MultiMount} program.
   *
   * @param args command-line arguments
   */
  public static void main(String[] args) {
    if (args.length != 1) {
      System.err.println("Usage: ./bin/alluxio runClass alluxio.examples.MultiMount <HDFS_URL>");
      System.exit(-1);
    }

    AlluxioURI mntPath = new AlluxioURI("/mnt");
    AlluxioURI s3Mount = new AlluxioURI("/mnt/s3");
    AlluxioURI inputPath = new AlluxioURI("/mnt/s3/hello.txt");
    AlluxioURI s3Path = new AlluxioURI("s3n://alluxio-demo/");
    AlluxioURI hdfsMount = new AlluxioURI("/mnt/hdfs");
    AlluxioURI outputPath = new AlluxioURI("/mnt/hdfs/hello.txt");
    AlluxioURI hdfsPath = new AlluxioURI(args[0]);
    FileSystem fileSystem = FileSystem.Factory.get();

    try {
      // Make sure mount directory exists.
      if (!fileSystem.exists(mntPath)) {
        System.out.print("creating " + mntPath + " ... ");
        fileSystem.createDirectory(mntPath);
        System.out.println("done");
      }

      // Make sure the S3 mount point does not exist.
      if (fileSystem.exists(s3Mount)) {
        System.out.print("unmounting " + s3Mount + " ... ");
        fileSystem.unmount(s3Mount);
        System.out.println("done");
      }

      // Make sure the HDFS mount point does not exist.
      if (fileSystem.exists(hdfsMount)) {
        System.out.print("unmounting " + hdfsMount + " ... ");
        fileSystem.unmount(hdfsMount);
        System.out.println("done");
      }

      // Mount S3.
      System.out.print("mounting " + s3Path + " to " + s3Mount + " ... ");
      fileSystem.mount(s3Mount, s3Path);
      System.out.println("done");

      // Mount HDFS.
      System.out.print("mounting " + hdfsPath + " to " + hdfsMount + " ... ");
      fileSystem.mount(hdfsMount, hdfsPath);
      System.out.println("done");

      // Make sure output file does not exist.
      if (fileSystem.exists(outputPath)) {
        System.out.print("deleting " + outputPath + " ... ");
        fileSystem.delete(outputPath);
        System.out.println("done");
      }

      // Open the input stream.
      System.out.print("opening " + inputPath + " ... ");
      FileInStream is = fileSystem.openFile(inputPath);
      System.out.println("done");

      // Open the output stream, setting the write type to make sure result is persisted.
      System.out.print("opening " + outputPath + " ... ");
      CreateFileOptions options =
          CreateFileOptions.defaults().setWriteType(WriteType.CACHE_THROUGH);
      FileOutStream os = fileSystem.createFile(outputPath, options);
      System.out.println("done");

      // Copy the data
      System.out.print("transferring data from " + inputPath + " to " + outputPath + " ... ");
      IOUtils.copy(is, os);
      System.out.println("done");

      // Close the input stream.
      System.out.print("closing " + inputPath + " ... ");
      is.close();
      System.out.println("done");

      // Close the output stream.
      System.out.print("closing " + outputPath + " ... ");
      os.close();
      System.out.println("done");
    } catch (Exception e) {
      System.out.println("fail");
      e.printStackTrace();
    } finally {
      // Make sure the S3 mount point is removed.
      try {
        if (fileSystem.exists(s3Mount)) {
          System.out.print("unmounting " + s3Mount + " ... ");
          fileSystem.unmount(s3Mount);
          System.out.println("done");
        }
      } catch (Exception e) {
        System.out.println("fail");
        e.printStackTrace();
      }

      // Make sure the HDFS mount point is removed.
      try {
        if (fileSystem.exists(hdfsMount)) {
          System.out.print("unmounting " + hdfsMount + " ... ");
          fileSystem.unmount(hdfsMount);
          System.out.println("done");
        }
      } catch (Exception e) {
        System.out.println("fail");
        e.printStackTrace();
      }
    }
  }