Code example #1
  /**
   * Write the filesystem out
   *
   * @param stream the OutputStream to which the filesystem will be written
   * @exception IOException thrown on errors writing to the stream
   */
  public void writeFilesystem(final OutputStream stream) throws IOException {
    // Have the datasource updated
    syncWithDataSource();

    // Now copy the contents to the stream
    _data.copyTo(stream);
  }
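As a minimal usage sketch (the method name, file name and document contents are placeholders, assuming the surrounding class is org.apache.poi.poifs.filesystem.POIFSFileSystem and the usual java.io imports), a freshly built filesystem can be flushed to disk like this:

  public static void writeExample() throws IOException {
    try (POIFSFileSystem fs = new POIFSFileSystem();
         OutputStream out = new FileOutputStream("example.ole2")) {
      // Add a single document entry, then write the whole filesystem to the stream
      fs.createDocument(new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8)), "Doc");
      fs.writeFilesystem(out);
    }
  }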
Code example #2
 private BATBlock createBAT(int offset, boolean isBAT) throws IOException {
   // Create a new BATBlock
   BATBlock newBAT = BATBlock.createEmptyBATBlock(bigBlockSize, !isBAT);
   newBAT.setOurBlockIndex(offset);
   // Ensure there's a spot in the file for it
   ByteBuffer buffer = ByteBuffer.allocate(bigBlockSize.getBigBlockSize());
   long writeTo = (1L + offset) * bigBlockSize.getBigBlockSize(); // Header isn't in BATs
   _data.write(buffer, writeTo);
   // All done
   return newBAT;
 }
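The `(1 + offset)` term encodes the layout rule used throughout these examples: byte 0 of the backing data holds the header block, so logical block N starts at (N + 1) * bigBlockSize. A hypothetical helper (not part of the POI API) that makes the arithmetic explicit:

  // Hypothetical helper, not in POI: maps a logical block index to its byte
  // position in the backing data, skipping the header block at position 0.
  private static long blockToFileOffset(int blockIndex, int bigBlockSize) {
    return (blockIndex + 1L) * bigBlockSize; // block 0 of a 512-byte-block file starts at byte 512
  }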
Code example #3
 /** Load the block at the given offset, extending the file if needed */
 @Override
 protected ByteBuffer createBlockIfNeeded(final int offset) throws IOException {
   try {
     return getBlockAt(offset);
   } catch (IndexOutOfBoundsException e) {
     // The header block doesn't count, so add one
      long startAt = (offset + 1L) * bigBlockSize.getBigBlockSize();
     // Allocate and write
     ByteBuffer buffer = ByteBuffer.allocate(getBigBlockSize());
     _data.write(buffer, startAt);
     // Retrieve the properly backed block
     return getBlockAt(offset);
   }
 }
Code example #4
 /**
  * Closes the FileSystem, freeing any underlying files, streams and buffers. After this, you will
  * be unable to read or write from the FileSystem.
  */
 public void close() throws IOException {
   _data.close();
 }
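Because the filesystem implements java.io.Closeable, the usual calling pattern is try-with-resources, so close() releases the underlying resources even when an exception is thrown (the file name here is a placeholder):

  try (POIFSFileSystem fs = new POIFSFileSystem(new File("workbook.xls"))) {
    // read entries here; the backing file, streams and buffers are freed when the block exits
  }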
Code example #5
 /** Creates a loop detector sized from the current data, used to spot cyclic block chains when following the BAT. */
 @Override
 protected ChainLoopDetector getChainLoopDetector() throws IOException {
   return new ChainLoopDetector(_data.size());
 }
Code example #6
 /** Load the block at the given offset. */
 @Override
 protected ByteBuffer getBlockAt(final int offset) throws IOException {
   // The header block doesn't count, so add one
    long startAt = (offset + 1L) * bigBlockSize.getBigBlockSize();
   return _data.read(bigBlockSize.getBigBlockSize(), startAt);
 }