Example #1
  private void write(FileStorage fileStorage, long pos, ByteBuffer buffer) {
    try {
      fileStorage.writeFully(pos, buffer);
    } catch (IllegalStateException e) {
      panic(e);
      throw e;
    }
  }
Example #2
  /** Commit and save all changes, if there are any, and compact the storage if needed. */
  public void writeInBackground(int autoCommitDelay) {
    if (closed) {
      return;
    }

    // could also commit when there are many unsaved pages,
    // but according to a test it doesn't really help
    long time = getTimeSinceCreation();
    if (time <= lastCommitTime + autoCommitDelay) {
      return;
    }
    if (hasUnsavedChanges()) {
      try {
        commitAndSave();
      } catch (Exception e) {
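        // report background failures to the configured handler, if any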
        if (backgroundExceptionHandler != null) {
          backgroundExceptionHandler.uncaughtException(null, e);
          return;
        }
      }
    }
    if (autoCompactFillRate > 0 && lastChunk != null && lastChunk.fileStorage != null) {
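      // attempt auto-compaction against the file of the most recently written chunk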
      FileStorage fileStorage = lastChunk.fileStorage;
      try {
        // check whether there were any file read or write operations since
        // the last compaction attempt
        long fileOpCount = fileStorage.getWriteCount() + fileStorage.getReadCount();
        boolean fileOps = autoCompactLastFileOpCount != fileOpCount;
        // use a lower fill rate if there were any file operations
        int fillRate = fileOps ? autoCompactFillRate / 3 : autoCompactFillRate;
        // TODO how to avoid endless compaction if there is a bug
        // in the bookkeeping?
        compact(fillRate, autoCommitMemory);
        autoCompactLastFileOpCount = fileStorage.getWriteCount() + fileStorage.getReadCount();
      } catch (Exception e) {
        if (backgroundExceptionHandler != null) {
          backgroundExceptionHandler.uncaughtException(null, e);
        }
      }
    }
  }
Example #3
  private synchronized BTreeChunk readChunkHeader(int chunkId) {
    FileStorage fileStorage = getFileStorage(chunkId);

    BTreeChunk chunk = null;
    ByteBuffer chunkHeaderBlocks = fileStorage.readFully(0, CHUNK_HEADER_SIZE);
    byte[] buff = new byte[BLOCK_SIZE];
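    // try each of the two header blocks in turn until one of them
    // parses successfully and its checksum matches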
    for (int i = 0; i <= BLOCK_SIZE; i += BLOCK_SIZE) {
      chunkHeaderBlocks.get(buff);
      try {
        String s = new String(buff, 0, BLOCK_SIZE, DataUtils.LATIN).trim();
        HashMap<String, String> m = DataUtils.parseMap(s);
        int blockSize = DataUtils.readHexInt(m, "blockSize", BLOCK_SIZE);
        if (blockSize != BLOCK_SIZE) {
          throw DataUtils.newIllegalStateException(
              DataUtils.ERROR_UNSUPPORTED_FORMAT,
              "Block size {0} is currently not supported",
              blockSize);
        }
        int check = DataUtils.readHexInt(m, "fletcher", 0);
        m.remove("fletcher");
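        // the stored checksum covers the header text up to, but not including,
        // the fletcher field itself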
        s = s.substring(0, s.lastIndexOf("fletcher") - 1);
        byte[] bytes = s.getBytes(DataUtils.LATIN);
        int checksum = DataUtils.getFletcher32(bytes, bytes.length);
        if (check != checksum) {
          continue;
        }
        chunk = BTreeChunk.fromString(s);
        break;
      } catch (Exception e) {
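        // this block could not be parsed as a valid header; try the next copy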
        continue;
      }
    }
    if (chunk == null) {
      throw DataUtils.newIllegalStateException(
          DataUtils.ERROR_FILE_CORRUPT, "Storage header is corrupt: {0}", fileStorage);
    }

    chunk.fileStorage = fileStorage;
    chunks.put(chunk.id, chunk);
    return chunk;
  }
Example #4
  private FileStorage getFileStorage(int chunkId) {
    // each chunk is stored in its own file under the storage directory,
    // named after the chunk id with the AO file suffix
    String chunkFileName = btreeStorageName + File.separator + chunkId + AOStorage.SUFFIX_AO_FILE;
    FileStorage fileStorage = new FileStorage();
    fileStorage.open(chunkFileName, map.config);
    return fileStorage;
  }