Example #1
 public StorageRootFile(String dbPath, String options, int pageSize, FreeSpaceManager fsm) {
   this.fsm = fsm;
   PAGE_SIZE = pageSize;
   File file = new File(dbPath);
   if (!file.exists()) {
     throw DBLogger.newUser("DB file does not exist: " + dbPath);
   }
   try {
     raf = new RandomAccessFile(file, options);
     fc = raf.getChannel();
     try {
       // tryLock() returns null if another process holds the lock, but it throws
       // OverlappingFileLockException if the lock is already held within this JVM.
       fileLock = fc.tryLock();
       if (fileLock == null) {
         fc.close();
         raf.close();
         throw DBLogger.newUser("This file is in use by another process: " + dbPath);
       }
     } catch (OverlappingFileLockException e) {
       fc.close();
       raf.close();
       throw DBLogger.newUser("This file is in use by another PersistenceManager: " + dbPath);
     }
     if (ZooDebug.isTesting()) {
       ZooDebug.registerFile(fc);
     }
   } catch (IOException e) {
     throw DBLogger.newFatal("Error opening database: " + dbPath, e);
   }
 }
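
The constructor above has to handle two distinct failure modes of FileChannel.tryLock(): it returns null when another process already holds the lock, and it throws OverlappingFileLockException when the lock is already held within the same JVM (e.g. by a second PersistenceManager). The following stand-alone sketch illustrates both cases; it is not ZooDB code, and the file name demo.db is hypothetical.

import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

public class TryLockSketch {
  public static void main(String[] args) throws Exception {
    try (RandomAccessFile raf = new RandomAccessFile("demo.db", "rw");
        FileChannel fc = raf.getChannel()) {
      // Case 1: tryLock() returns null if ANOTHER process holds the lock.
      FileLock lock = fc.tryLock();
      if (lock == null) {
        System.out.println("File is locked by another process");
        return;
      }
      try {
        // Case 2: a second tryLock() in the SAME JVM does not return null,
        // it throws OverlappingFileLockException instead.
        fc.tryLock();
      } catch (OverlappingFileLockException e) {
        System.out.println("File is already locked by this JVM");
      } finally {
        lock.release();
      }
    }
  }
}
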
Example #2
  /**
   * In case this is an existing index, read() should be called afterwards. The key and value
   * lengths are used to calculate the maximum number of entries on a page.
   *
   * @param file The read/write byte stream.
   * @param isNew Whether this is a new index or existing (i.e. read from disk).
   * @param keyLen The number of bytes required for the key.
   * @param valLen The number of bytes required for the value.
   */
  public AbstractPagedIndex(
      StorageChannel file,
      boolean isNew,
      int keyLen,
      int valLen,
      boolean isUnique,
      PAGE_TYPE dataType) {
    super(file, isNew, isUnique);

    in = file.getReader(false);
    out = file.getWriter(false);
    int pageSize = file.getPageSize();
    this.dataType = dataType;

    keySize = keyLen;
    valSize = valLen;

    // how many entries fit on one page?
    //
    // all pages:
    // - 0 bytes for flags (isRoot=(root==null), isLeaf=(leaves==0), isDirty=(is transient))
    // - // NO! root page: 4 byte  -> Don't store the root! We would have to update it!!!
    // - # leaves: 2 byte
    // - # entries: 2 byte
    // - TODO index-ID (oid)?
    // - TODO index-type (oid for the schema of an index)????
    //
    // middle pages:
    // - keyLen = keyLen ; refLen = 4byte (pageID)
    // - n values; n+1 references
    // ---> n = (PAGE_SIZE - 4 - refLen) / (keyLen + refLen)
    //
    // leaf pages: keyLen = keyLen ; valLen = valLen
    // - n values
    // ---> n = (PAGE_SIZE - 4) / (keyLen + valLen)

    final int pageHeader = 4 + DiskIO.PAGE_HEADER_SIZE; // 2 + 2 + general_header
    final int refLen = 4; // one int for pageID
    // integer division truncates, so this rounds down automatically
    maxLeafN = (pageSize - pageHeader) / (keyLen + valLen);
    if (maxLeafN * (keyLen + valLen) + pageHeader > pageSize) {
      throw DBLogger.newFatalInternal("Illegal Index size: " + maxLeafN);
    }
    minLeafN = maxLeafN >> 1;

    int innerEntrySize = keyLen + refLen;
    if (!isUnique) {
      innerEntrySize += valLen;
    }
    // -2 for short nKeys
    maxInnerN = (pageSize - pageHeader - refLen - 2) / innerEntrySize;
    if (maxInnerN * innerEntrySize + pageHeader + refLen > pageSize) {
      throw DBLogger.newFatalInternal("Illegal Index size: " + maxInnerN);
    }
    minInnerN = maxInnerN >> 1;

    DBLogger.debugPrintln(1, "OidIndex entries per page: " + maxLeafN + " / inner: " + maxInnerN);
  }
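
To make the capacity formulas above concrete, here is a back-of-the-envelope sketch for a unique index (so inner entries carry no value). The page size, key/value lengths, and the stand-in for DiskIO.PAGE_HEADER_SIZE are assumptions for illustration only; the real constants may differ.

public class PageCapacitySketch {
  public static void main(String[] args) {
    int pageSize = 4096;                 // hypothetical page size
    int keyLen = 8, valLen = 8;          // e.g. a long key and a long value
    int generalHeader = 10;              // assumed stand-in for DiskIO.PAGE_HEADER_SIZE
    int pageHeader = 4 + generalHeader;  // 2 bytes #leaves + 2 bytes #entries + general header
    int refLen = 4;                      // one int per child page ID

    // Leaf pages: n entries of (key + value).
    int maxLeafN = (pageSize - pageHeader) / (keyLen + valLen);
    // Inner pages: n keys plus n+1 child references, minus 2 bytes for the short nKeys.
    int maxInnerN = (pageSize - pageHeader - refLen - 2) / (keyLen + refLen);

    System.out.println("maxLeafN  = " + maxLeafN);   // (4096 - 14) / 16 = 255
    System.out.println("maxInnerN = " + maxInnerN);  // (4096 - 20) / 12 = 339
  }
}
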
Example #3
 public void checkValidity(int modCount, long txId) {
   if (this.file.getTxId() != txId) {
     throw DBLogger.newUser("This iterator has been invalidated by commit() or rollback().");
   }
   if (this.modCount != modCount) {
     throw new ConcurrentModificationException();
   }
 }
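
checkValidity() implements the usual fail-fast pattern: an iterator captures modCount and the transaction id when it is created and re-checks both on every access. The following is a hypothetical, stand-alone sketch of the same idea, not ZooDB code.

import java.util.ConcurrentModificationException;

class FailFastIteratorSketch {
  private int modCount = 0; // bumped on every structural change of the index
  private long txId = 1;    // bumped on commit()/rollback()

  class Iter {
    private final int expectedModCount = modCount;
    private final long expectedTxId = txId;

    void checkValidity() {
      if (txId != expectedTxId) {
        throw new IllegalStateException("Iterator invalidated by commit()/rollback().");
      }
      if (modCount != expectedModCount) {
        throw new ConcurrentModificationException();
      }
    }
  }

  public static void main(String[] args) {
    FailFastIteratorSketch index = new FailFastIteratorSketch();
    Iter it = index.new Iter();
    it.checkValidity();   // passes, nothing has changed yet
    index.modCount++;     // simulate a concurrent structural modification
    try {
      it.checkValidity(); // now fails fast
    } catch (ConcurrentModificationException e) {
      System.out.println("Iterator detected a concurrent modification");
    }
  }
}
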
Example #4
 @Override
 public int statsGetPageCount() {
   try {
     return (int) (raf.length() / PAGE_SIZE);
   } catch (IOException e) {
     throw DBLogger.newFatal("Error getting page count.", e);
   }
 }
Example #5
 @Override
 public final void readPage(ByteBuffer buf, long pageId) {
   try {
     fc.read(buf, pageId * PAGE_SIZE);
     if (DBStatistics.isEnabled()) {
       statNRead++;
       statNReadUnique.put(pageId, null);
     }
   } catch (IOException e) {
     throw DBLogger.newFatal("Error loading Page: " + pageId, e);
   }
 }
Example #6
 @Override
 public final void close() {
   flush();
   try {
     fc.force(true);
     fileLock.release();
     fc.close();
     raf.close();
   } catch (IOException e) {
     throw DBLogger.newFatal("Error closing database file.", e);
   }
 }
Example #7
 @Override
 public final void write(ByteBuffer buf, long pageId) {
   try {
     if (pageId < 0) {
       return;
     }
     if (DBStatistics.isEnabled()) {
       statNWrite++;
     }
     fc.write(buf, pageId * PAGE_SIZE);
   } catch (IOException e) {
     throw DBLogger.newFatal("Error writing page: " + pageId, e);
   }
 }
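
Both readPage() and write() use positional channel I/O: the byte offset pageId * PAGE_SIZE is passed explicitly, so the channel's own position is never consulted or moved. A minimal stand-alone sketch of this pattern follows; the file name and page size are hypothetical, not ZooDB code.

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class PageIoSketch {
  static final int PAGE_SIZE = 4096; // hypothetical page size

  public static void main(String[] args) throws Exception {
    try (RandomAccessFile raf = new RandomAccessFile("page-demo.db", "rw");
        FileChannel fc = raf.getChannel()) {
      long pageId = 3;

      // Write one page at offset pageId * PAGE_SIZE.
      ByteBuffer out = ByteBuffer.allocateDirect(PAGE_SIZE);
      out.putLong(0xCAFEBABEL);
      out.flip();
      fc.write(out, pageId * PAGE_SIZE);

      // Read it back from the same offset; the channel position is untouched.
      ByteBuffer in = ByteBuffer.allocateDirect(PAGE_SIZE);
      fc.read(in, pageId * PAGE_SIZE);
      in.flip();
      System.out.println(Long.toHexString(in.getLong())); // prints cafebabe
    }
  }
}
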
Example #8
 /** Not a true flush: writes out buffered data and forces file content (but not metadata) to disk. */
 @Override
 public final void flush() {
   // Flush the associated views (splits).
   for (StorageChannelOutput paf : viewsOut) {
     // Only writers need an actual flush(); readers below are merely reset.
     paf.flush();
   }
   for (StorageChannelInput paf : viewsIn) {
     paf.reset();
   }
   try {
     fc.force(false);
   } catch (IOException e) {
     throw DBLogger.newFatal("Error writing database file.", e);
   }
 }
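
Note the asymmetry with close(): flush() calls fc.force(false), which forces only the file content to disk, while close() calls fc.force(true), which additionally forces file metadata. A minimal sketch of the two variants; the file name is hypothetical, not ZooDB code.

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class ForceSketch {
  public static void main(String[] args) throws Exception {
    try (RandomAccessFile raf = new RandomAccessFile("force-demo.db", "rw");
        FileChannel fc = raf.getChannel()) {
      fc.write(ByteBuffer.wrap(new byte[] {1, 2, 3}), 0);
      // force(false): flush file content only; metadata (e.g. timestamps)
      // may be written lazily. Cheaper, used on every flush().
      fc.force(false);
      // force(true): flush content AND metadata; used once on close().
      fc.force(true);
    }
  }
}
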