// Example 1
  /**
   * Persists {@code node}'s metadata to its backing block via the buffer cache.
   *
   * <p>Performs a read-modify-write of the whole block so the other inodes that
   * share the same block are preserved.
   *
   * @param node the inode whose metadata should be flushed to disk
   */
  protected static void writeToINode(INode node) {
    // Several inodes are packed into one block; locate this inode's slot.
    int inodesPerBlock = Constants.BLOCK_SIZE / Constants.INODE_SIZE;
    int blockID = node.getIndex() / inodesPerBlock;
    int inodeOffset = node.getIndex() % inodesPerBlock;
    byte[] dataToWrite = new byte[Constants.BLOCK_SIZE];
    byte[] fileMetadata = node.getMetadata();

    DBuffer buffer = _cache.getBlock(blockID);
    try {
      buffer.read(dataToWrite, 0, Constants.BLOCK_SIZE);
      // Splice this inode's metadata into its slot within the block.
      System.arraycopy(
          fileMetadata, 0, dataToWrite, inodeOffset * Constants.INODE_SIZE, fileMetadata.length);
      buffer.write(dataToWrite, 0, Constants.BLOCK_SIZE);
    } finally {
      // Release even if read/write throws, so the cache slot is not leaked.
      _cache.releaseBlock(buffer);
    }
  }
// Example 2
  /**
   * Loads every inode from the volume at mount time, rebuilding the used-block
   * bitmap and discarding inodes whose metadata is inconsistent: either the
   * block list length does not match the file size, or a listed block id falls
   * outside the valid range.
   */
  private void init() {
    int inodesPerBlock = Constants.BLOCK_SIZE / Constants.INODE_SIZE;
    for (int i = 0; i < _inodes.length; i++) {
      int blockID = i / inodesPerBlock;
      int offset = i % inodesPerBlock;
      byte[] buffer = new byte[Constants.BLOCK_SIZE];

      DBuffer dbuf = _cache.getBlock(blockID);
      dbuf.read(buffer, 0, Constants.BLOCK_SIZE);
      _cache.releaseBlock(dbuf);
      _inodes[i].initialize(buffer, offset * Constants.INODE_SIZE, Constants.INODE_SIZE);

      if (_inodes[i].isUsed()) {
        int actualNumBlocks = _inodes[i].getBlockList().size();
        // Expected block count implied by the recorded file size.
        int predictedNumBlocks =
            (int) Math.ceil((double) _inodes[i].getSize() / Constants.BLOCK_SIZE);

        // ensure that the # blocks in your list is correct
        if (actualNumBlocks == predictedNumBlocks) {
          for (int dfid : _inodes[i].getBlockList()) {
            // Validate the range BEFORE indexing _usedBlocks: an out-of-range
            // id (corrupted block list) would otherwise throw
            // ArrayIndexOutOfBoundsException instead of being handled here.
            if (dfid < 0 || dfid >= Constants.NUM_OF_BLOCKS) {
              discardInode(i, "INode " + i + " has a # blocks outside of the acceptable range");
              break;
            }
            if (_usedBlocks[dfid])
              System.err.println("Block " + dfid + " is in use by multiple INodes");
            // Block id is valid; mark it as in use.
            _usedBlocks[dfid] = true;
          }
        } else {
          // File size doesn't compute, given operations
          discardInode(i, "File " + i + " doesn't have the correct size");
        }
      }
    }
  }

  /**
   * Clears the content of inode {@code i} (deleting its block data) and reports
   * {@code reason}; failures during clearing are logged rather than propagated.
   */
  private void discardInode(int i, String reason) {
    try {
      _inodes[i].clearContent(); // delete the block data
      System.err.println(reason);
    } catch (Exception e) {
      System.err.println("Error in clearing dfiles");
    }
  }
// Example 3
  /**
   * Builds a DFS instance backed by the named volume. With {@code format} set,
   * the volume is formatted from scratch; otherwise the existing inode table is
   * loaded and validated.
   *
   * @param volName name of the backing volume
   * @param format whether to format the volume instead of loading it
   */
  public AbstractDFS(String volName, boolean format) {
    _volName = volName;
    _format = format;
    _cache = DBufferCache.getInstance(_volName, format, Constants.NUM_OF_CACHE_BLOCKS);
    _usedBlocks = new boolean[Constants.NUM_OF_BLOCKS];

    // Pre-create one INode object per possible file slot.
    _inodes = new INode[Constants.MAX_DFILES];
    for (int idx = 0; idx < _inodes.length; idx++) {
      _inodes[idx] = new INode(idx);
    }

    if (format) {
      format();
    } else {
      init();
    }
  }