Example #1
0
  /**
   * Deletes every file in the database directory that is no longer needed:
   * old log files, superseded manifests, and table files that are neither
   * referenced by a live version nor pending as compaction output.
   *
   * <p>Must be called with {@code mutex} held.
   */
  private void deleteObsoleteFiles() {
    Preconditions.checkState(mutex.isHeldByCurrentThread());

    // Make a set of all of the live file numbers: tables currently being
    // written (pendingOutputs) plus every table referenced by a live version.
    // A hash set makes the per-file membership test below O(1) instead of the
    // O(n) list scan the previous implementation performed for every file.
    java.util.Set<Long> live = new java.util.HashSet<Long>(this.pendingOutputs);
    for (FileMetaData fileMetaData : versions.getLiveFiles()) {
      live.add(fileMetaData.getNumber());
    }

    for (File file : Filename.listFiles(databaseDir)) {
      FileInfo fileInfo = Filename.parseFileName(file);
      if (fileInfo == null) {
        // Not a leveldb-managed file; leave it alone.
        continue;
      }
      long number = fileInfo.getFileNumber();
      boolean keep = true;
      switch (fileInfo.getFileType()) {
        case LOG:
          // Keep the current log and, for backward compatibility, the
          // previous ("prev") log that may still need recovery.
          keep = ((number >= versions.getLogNumber()) || (number == versions.getPrevLogNumber()));
          break;
        case DESCRIPTOR:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= versions.getManifestFileNumber());
          break;
        case TABLE:
          keep = live.contains(number);
          break;
        case TEMP:
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = live.contains(number);
          break;
        case CURRENT:
        case DB_LOCK:
        case INFO_LOG:
          keep = true;
          break;
      }

      if (!keep) {
        if (fileInfo.getFileType() == FileType.TABLE) {
          // Drop any cached handle before removing the file itself.
          tableCache.evict(number);
        }
        // todo info logging system needed
        file.delete();
      }
    }
  }
Example #2
0
  /**
   * Records the results of a finished compaction in the version set: the
   * compaction's input files are marked deleted and its output tables are
   * added to the next level.  If logging the edit fails, the freshly written
   * output files are discarded so the compaction can simply be retried later.
   *
   * <p>Must be called with {@code mutex} held.
   *
   * @param compact state of the completed compaction
   */
  private void installCompactionResults(CompactionState compact) throws IOException {
    Preconditions.checkState(mutex.isHeldByCurrentThread());

    // Register input deletions and output additions in the version edit.
    compact.compaction.addInputDeletions(compact.compaction.getEdit());
    int outputLevel = compact.compaction.getLevel() + 1;
    for (FileMetaData newTable : compact.outputs) {
      compact.compaction.getEdit().addFile(outputLevel, newTable);
      pendingOutputs.remove(newTable.getNumber());
    }

    try {
      versions.logAndApply(compact.compaction.getEdit());
      deleteObsoleteFiles();
    } catch (IOException e) {
      // Compaction failed for some reason.  Simply discard the work and try again later.

      // Discard any files we may have created during this failed compaction
      for (FileMetaData newTable : compact.outputs) {
        File discarded = new File(databaseDir, Filename.tableFileName(newTable.getNumber()));
        discarded.delete();
      }
      compact.outputs.clear();
    }
  }
Example #3
0
  /**
   * Replays one write-ahead log file: each record is decoded into a write
   * batch and applied to an in-memory table, which is flushed to a level-0
   * table (recorded in {@code edit}) whenever it exceeds the write buffer
   * size.
   *
   * <p>Must be called with {@code mutex} held.
   *
   * @param fileNumber number of the log file to replay
   * @param edit version edit that accumulates the level-0 files produced here
   * @return the largest sequence number observed in the log, or 0 if empty
   * @throws IOException if the log cannot be read or a table flush fails
   */
  private long recoverLogFile(long fileNumber, VersionEdit edit) throws IOException {
    Preconditions.checkState(mutex.isHeldByCurrentThread());
    File file = new File(databaseDir, Filename.logFileName(fileNumber));
    // Closing the channel in the finally block also closes this stream.
    FileChannel channel = new FileInputStream(file).getChannel();
    try {
      LogMonitor logMonitor = LogMonitors.logMonitor();
      LogReader logReader = new LogReader(channel, logMonitor, true, 0);

      // Log(options_.info_log, "Recovering log #%llu", (unsigned long long) log_number);

      // Read all the records and add to a memtable
      long maxSequence = 0;
      MemTable memTable = null;
      for (Slice record = logReader.readRecord(); record != null; record = logReader.readRecord()) {
        SliceInput sliceInput = record.input();
        // read header: 8-byte starting sequence number + 4-byte update count;
        // anything shorter is reported as corruption and skipped
        if (sliceInput.available() < 12) {
          logMonitor.corruption(sliceInput.available(), "log record too small");
          continue;
        }
        long sequenceBegin = sliceInput.readLong();
        int updateSize = sliceInput.readInt();

        // read entries
        WriteBatchImpl writeBatch = readWriteBatch(sliceInput, updateSize);

        // apply entries to memTable, creating it lazily on the first record
        if (memTable == null) {
          memTable = new MemTable(internalKeyComparator);
        }
        writeBatch.forEach(new InsertIntoHandler(memTable, sequenceBegin));

        // update the maxSequence; the batch occupies updateSize consecutive
        // sequence numbers starting at sequenceBegin
        long lastSequence = sequenceBegin + updateSize - 1;
        if (lastSequence > maxSequence) {
          maxSequence = lastSequence;
        }

        // flush mem table if necessary, then start a fresh one lazily
        if (memTable.approximateMemoryUsage() > options.writeBufferSize()) {
          writeLevel0Table(memTable, edit, null);
          memTable = null;
        }
      }

      // flush any remaining entries that did not fill a whole buffer
      if (memTable != null && !memTable.isEmpty()) {
        writeLevel0Table(memTable, edit, null);
      }

      return maxSequence;
    } finally {
      channel.close();
    }
  }
Example #4
0
  /**
   * Writes the given entries to a new table file and returns its metadata.
   * The partially written file is deleted again if an I/O error occurs.
   *
   * @param data entries to write; the first key seen is recorded as the
   *     smallest and the last as the largest (assumes {@code data} iterates
   *     in sorted order — TODO confirm with callers)
   * @param fileNumber number used to name the new table file
   * @return metadata for the new table, or {@code null} when {@code data} was
   *     empty.  NOTE(review): on the empty path the created zero-entry file
   *     is left on disk and {@code fileNumber} stays in pendingOutputs —
   *     confirm the caller cleans these up.
   * @throws IOException if writing, syncing, or verifying the table fails
   */
  private FileMetaData buildTable(SeekingIterable<InternalKey, Slice> data, long fileNumber)
      throws IOException {
    File file = new File(databaseDir, Filename.tableFileName(fileNumber));
    try {
      InternalKey smallest = null;
      InternalKey largest = null;
      FileChannel channel = new FileOutputStream(file).getChannel();
      try {
        TableBuilder tableBuilder =
            new TableBuilder(options, channel, new InternalUserComparator(internalKeyComparator));

        for (Entry<InternalKey, Slice> entry : data) {
          // update keys
          InternalKey key = entry.getKey();
          if (smallest == null) {
            smallest = key;
          }
          largest = key;

          tableBuilder.add(key.encode(), entry.getValue());
        }

        tableBuilder.finish();
      } finally {
        // Force data to disk before closing; close even if the sync fails.
        try {
          channel.force(true);
        } finally {
          channel.close();
        }
      }

      if (smallest == null) {
        // No entries were written; there is no table to register.
        return null;
      }
      FileMetaData fileMetaData = new FileMetaData(fileNumber, file.length(), smallest, largest);

      // verify table can be opened
      tableCache.newIterator(fileMetaData);

      pendingOutputs.remove(fileNumber);

      return fileMetaData;

    } catch (IOException e) {
      // Remove the partially written file before propagating the failure.
      file.delete();
      throw e;
    }
  }
Example #5
0
  /**
   * Allocates a fresh table file number for the given compaction and opens a
   * table builder that will write the compaction's next output file.
   *
   * @param compactionState in-progress compaction; must not already have an
   *     open builder
   * @throws FileNotFoundException if the new table file cannot be created
   */
  private void openCompactionOutputFile(CompactionState compactionState)
      throws FileNotFoundException {
    Preconditions.checkNotNull(compactionState, "compactionState is null");
    Preconditions.checkArgument(
        compactionState.builder == null, "compactionState builder is not null");

    mutex.lock();
    try {
      // Reserve a new file number and record it as pending so the obsolete
      // file cleanup does not delete the half-written table.
      long outputFileNumber = versions.getNextFileNumber();
      pendingOutputs.add(outputFileNumber);

      // Reset the per-output bookkeeping for the new file.
      compactionState.currentFileNumber = outputFileNumber;
      compactionState.currentFileSize = 0;
      compactionState.currentSmallest = null;
      compactionState.currentLargest = null;

      File outputFile = new File(databaseDir, Filename.tableFileName(outputFileNumber));
      compactionState.outfile = new FileOutputStream(outputFile).getChannel();
      compactionState.builder =
          new TableBuilder(
              options, compactionState.outfile, new InternalUserComparator(internalKeyComparator));
    } finally {
      mutex.unlock();
    }
  }
Example #6
0
  /**
   * Blocks (or briefly delays) the calling writer until there is room for a
   * new write, switching to a fresh memtable and log file when the active
   * memtable is full.
   *
   * <p>Must be called with {@code mutex} held; the mutex is temporarily
   * released while sleeping and while waiting on {@code backgroundCondition}.
   *
   * @param force when true, always switch to a new memtable even if the
   *     current one still has room
   */
  private void makeRoomForWrite(boolean force) {
    Preconditions.checkState(mutex.isHeldByCurrentThread());

    boolean allowDelay = !force;

    while (true) {
      // todo background processing system need work
      //            if (!bg_error_.ok()) {
      //              // Yield previous error
      //              s = bg_error_;
      //              break;
      //            } else
      if (allowDelay && versions.numberOfFilesInLevel(0) > L0_SLOWDOWN_WRITES_TRIGGER) {
        // We are getting close to hitting a hard limit on the number of
        // L0 files.  Rather than delaying a single write by several
        // seconds when we hit the hard limit, start delaying each
        // individual write by 1ms to reduce latency variance.  Also,
        // this delay hands over some CPU to the compaction thread in
        // case it is sharing the same core as the writer.
        try {
          // Release the mutex for the duration of the sleep so the
          // compaction thread can make progress; the finally block
          // re-acquires it before the loop continues.
          mutex.unlock();
          Thread.sleep(1);
        } catch (InterruptedException e) {
          // Preserve the interrupt status for callers further up the stack.
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        } finally {
          mutex.lock();
        }

        // Do not delay a single write more than once
        allowDelay = false;
      } else if (!force && memTable.approximateMemoryUsage() <= options.writeBufferSize()) {
        // There is room in current memtable
        break;
      } else if (immutableMemTable != null) {
        // We have filled up the current memtable, but the previous
        // one is still being compacted, so we wait.
        backgroundCondition.awaitUninterruptibly();
      } else if (versions.numberOfFilesInLevel(0) >= L0_STOP_WRITES_TRIGGER) {
        // There are too many level-0 files.
        //                Log(options_.info_log, "waiting...\n");
        backgroundCondition.awaitUninterruptibly();
      } else {
        // Attempt to switch to a new memtable and trigger compaction of old
        Preconditions.checkState(versions.getPrevLogNumber() == 0);

        // close the existing log
        try {
          log.close();
        } catch (IOException e) {
          throw new RuntimeException("Unable to close log file " + log.getFile(), e);
        }

        // open a new log
        long logNumber = versions.getNextFileNumber();
        try {
          this.log =
              Logs.createLogWriter(
                  new File(databaseDir, Filename.logFileName(logNumber)), logNumber);
        } catch (IOException e) {
          throw new RuntimeException(
              "Unable to open new log file "
                  + new File(databaseDir, Filename.logFileName(logNumber)).getAbsoluteFile(),
              e);
        }

        // create a new mem table; the old one becomes immutable and will be
        // flushed to level 0 by the background compaction
        immutableMemTable = memTable;
        memTable = new MemTable(internalKeyComparator);

        // Do not force another compaction there is space available
        force = false;

        maybeScheduleCompaction();
      }
    }
  }
Example #7
0
  /**
   * Opens the database rooted at {@code databaseDir}, creating and/or
   * recovering it according to {@code options}: the directory is locked,
   * the current version is recovered from the manifest, any newer log files
   * are replayed, a fresh transaction log is opened, obsolete files are
   * removed, and an initial compaction is scheduled if needed.
   *
   * @param options database configuration.  NOTE(review): the compression
   *     type is downgraded to NONE in place when the requested codec is
   *     unavailable, which mutates the caller's options instance — confirm
   *     this is acceptable to callers.
   * @param databaseDir directory that holds (or will hold) the database files
   * @throws IOException if recovery fails or the transaction log cannot be
   *     opened
   */
  public DbImpl(Options options, File databaseDir) throws IOException {
    Preconditions.checkNotNull(options, "options is null");
    Preconditions.checkNotNull(databaseDir, "databaseDir is null");
    this.options = options;

    // Fall back to no compression when the requested codec is unavailable.
    if (this.options.compressionType() == CompressionType.ZLIB && !Zlib.available()) {
      // There's little hope to continue.
      this.options.compressionType(CompressionType.NONE);
    }
    if (this.options.compressionType() == CompressionType.SNAPPY && !Snappy.available()) {
      // Disable snappy if it's not available.
      this.options.compressionType(CompressionType.NONE);
    }

    this.databaseDir = databaseDir;

    // use custom comparator if set
    DBComparator comparator = options.comparator();
    UserComparator userComparator;
    if (comparator != null) {
      userComparator = new CustomUserComparator(comparator);
    } else {
      userComparator = new BytewiseComparator();
    }
    internalKeyComparator = new InternalKeyComparator(userComparator);
    memTable = new MemTable(internalKeyComparator);
    immutableMemTable = null;

    // Single background thread that runs compactions.
    ThreadFactory compactionThreadFactory =
        new ThreadFactoryBuilder()
            .setNameFormat("leveldb-compaction-%s")
            .setUncaughtExceptionHandler(
                new UncaughtExceptionHandler() {
                  @Override
                  public void uncaughtException(Thread t, Throwable e) {
                    // todo need a real UncaughtExceptionHandler
                    System.out.printf("%s%n", t);
                    e.printStackTrace();
                  }
                })
            .build();
    compactionExecutor = Executors.newSingleThreadExecutor(compactionThreadFactory);

    // Reserve ten files or so for other uses and give the rest to TableCache.
    int tableCacheSize = options.maxOpenFiles() - 10;
    tableCache =
        new TableCache(
            databaseDir,
            tableCacheSize,
            new InternalUserComparator(internalKeyComparator),
            options.verifyChecksums());

    // create the version set

    // create the database dir if it does not already exist
    databaseDir.mkdirs();
    Preconditions.checkArgument(
        databaseDir.exists(),
        "Database directory '%s' does not exist and could not be created",
        databaseDir);
    Preconditions.checkArgument(
        databaseDir.isDirectory(), "Database directory '%s' is not a directory", databaseDir);

    mutex.lock();
    try {
      // lock the database dir
      dbLock = new DbLock(new File(databaseDir, Filename.lockFileName()));

      // verify the "current" file
      File currentFile = new File(databaseDir, Filename.currentFileName());
      if (!currentFile.canRead()) {
        Preconditions.checkArgument(
            options.createIfMissing(),
            "Database '%s' does not exist and the create if missing option is disabled",
            databaseDir);
      } else {
        Preconditions.checkArgument(
            !options.errorIfExists(),
            "Database '%s' exists and the error if exists option is enabled",
            databaseDir);
      }

      versions = new VersionSet(databaseDir, tableCache, internalKeyComparator);

      // load  (and recover) current version
      versions.recover();

      // Recover from all newer log files than the ones named in the
      // descriptor (new log files may have been added by the previous
      // incarnation without registering them in the descriptor).
      //
      // Note that PrevLogNumber() is no longer used, but we pay
      // attention to it in case we are recovering a database
      // produced by an older version of leveldb.
      long minLogNumber = versions.getLogNumber();
      long previousLogNumber = versions.getPrevLogNumber();
      List<File> filenames = Filename.listFiles(databaseDir);

      List<Long> logs = Lists.newArrayList();
      for (File filename : filenames) {
        FileInfo fileInfo = Filename.parseFileName(filename);

        if (fileInfo != null
            && fileInfo.getFileType() == FileType.LOG
            && ((fileInfo.getFileNumber() >= minLogNumber)
                || (fileInfo.getFileNumber() == previousLogNumber))) {
          logs.add(fileInfo.getFileNumber());
        }
      }

      // Recover in the order in which the logs were generated
      VersionEdit edit = new VersionEdit();
      Collections.sort(logs);
      for (Long fileNumber : logs) {
        long maxSequence = recoverLogFile(fileNumber, edit);
        if (versions.getLastSequence() < maxSequence) {
          versions.setLastSequence(maxSequence);
        }
      }

      // open transaction log
      long logFileNumber = versions.getNextFileNumber();
      this.log =
          Logs.createLogWriter(
              new File(databaseDir, Filename.logFileName(logFileNumber)), logFileNumber);
      edit.setLogNumber(log.getFileNumber());

      // apply recovered edits
      versions.logAndApply(edit);

      // cleanup unused files
      deleteObsoleteFiles();

      // schedule compactions
      maybeScheduleCompaction();
    } finally {
      mutex.unlock();
    }
  }
Example #8
0
 /**
  * Returns the prefix of this document's filename, as computed by
  * {@code Filename.getPrefix}.  NOTE(review): assumes {@code filename} is
  * non-null here — confirm against callers.
  *
  * @return the filename prefix
  */
 public String getBlock() {
   return Filename.getPrefix(filename);
 }
Example #9
0
 /**
  * Returns the display name derived from the {@code filename} field via
  * {@code Filename.getFilename}, or an empty string when no filename is set.
  *
  * @return the derived filename, never {@code null}
  */
 public String getFilename() {
   if (filename == null) {
     return "";
   }
   return Filename.getFilename(filename);
 }
Example #10
0
 /**
  * Resolves an image path against the supplied base directory via
  * {@code Filename.findImagePath}.  NOTE(review): the meaning of the trailing
  * {@code false} flag is not visible here — confirm against
  * {@code Filename.findImagePath}.
  *
  * @param value the path to resolve
  * @param baseDir the directory to resolve against
  * @return the resolved image path
  */
 public String fixPath(String value, String baseDir) {
   return Filename.findImagePath(value, baseDir, false);
 }
Example #11
0
 // public static boolean isPathField(int label) {
 // return Arrays.binarySearch(PATHS_FIELDS, label) >= 0;
 // }
 /**
  * Resolves an image path against this document's own base directory.
  * Unlike {@code fixPath(String, String)}, this overload looks up the base
  * directory itself and passes {@code true} as the final flag.
  *
  * @param value the path to resolve
  * @return the resolved image path
  */
 public String fixPath(String value) {
   String base = getBaseDir();
   return Filename.findImagePath(value, base, true);
 }
 /**
  * Returns the original filename recorded for this document.
  *
  * @return the filename value, or {@code null} when the document carries no
  *     {@code Filename} extension
  */
 public String getFilename() {
   Filename extension = getExtension(Filename.class);
   if (extension == null) {
     return null;
   }
   return extension.getValue();
 }