Example #1
 /** Returns, for each active commit log segment, the ratio of on-disk size to logical content size, keyed by segment name. */
 @Override
 public Map<String, Double> getActiveSegmentCompressionRatios() {
   Map<String, Double> segmentRatios = new TreeMap<>();
   for (CommitLogSegment segment : allocator.getActiveSegments())
     segmentRatios.put(segment.getName(), 1.0 * segment.onDiskSize() / segment.contentSize());
   return segmentRatios;
 }
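
The ratio reported for each segment is simply its on-disk size divided by its logical content size, so a value below 1.0 means the segment shrank on disk. A minimal standalone sketch of that arithmetic with hypothetical sizes (the CompressionRatioSketch class and its numbers are not part of the original code):

public class CompressionRatioSketch {
  public static void main(String[] args) {
    long onDiskSize = 7_340_032L;   // hypothetical compressed bytes on disk
    long contentSize = 33_554_432L; // hypothetical logical bytes written to the segment
    // Same formula as getActiveSegmentCompressionRatios(): the 1.0 factor forces
    // floating-point division instead of integer division.
    double ratio = 1.0 * onDiskSize / contentSize;
    System.out.printf("compression ratio = %.3f (below 1.0 means the segment shrank on disk)%n", ratio);
  }
}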
Example #2
  /**
   * Modifies the per-CF dirty cursors of any commit log segments for the column family according to
   * the position given. Discards any commit log segments that are no longer used.
   *
   * @param cfId the column family ID that was flushed
   * @param context the replay position of the flush
   */
  public void discardCompletedSegments(final UUID cfId, final ReplayPosition context) {
    logger.trace("discard completed log segments for {}, table {}", context, cfId);

    // Go thru the active segment files, which are ordered oldest to newest, marking the
    // flushed CF as clean, until we reach the segment file containing the ReplayPosition passed
    // in the arguments. Any segments that become unused after they are marked clean will be
    // recycled or discarded.
    for (Iterator<CommitLogSegment> iter = allocator.getActiveSegments().iterator();
        iter.hasNext(); ) {
      CommitLogSegment segment = iter.next();
      segment.markClean(cfId, context);

      if (segment.isUnused()) {
        logger.trace("Commit log segment {} is unused", segment);
        allocator.recycleSegment(segment);
      } else {
        logger.trace(
            "Not safe to delete{} commit log segment {}; dirty is {}",
            (iter.hasNext() ? "" : " active"),
            segment,
            segment.dirtyString());
      }

      // Don't mark or try to delete any newer segments once we've reached the one containing the
      // position of the flush.
      if (segment.contains(context)) break;
    }
  }
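
The loop above walks the active segments from oldest to newest, marks the flushed column family clean in each, recycles any segment left with no dirty data, and stops once it reaches the segment that contains the flush position, leaving newer segments untouched. A minimal standalone sketch of that walk over a simplified segment model (the Segment and DiscardSketch classes are hypothetical; a plain long stands in for ReplayPosition):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.UUID;

class DiscardSketch {
  static class Segment {
    final long id;
    final Set<UUID> dirtyCfs = new HashSet<>(); // CFs with unflushed writes in this segment
    Segment(long id) { this.id = id; }
    void markClean(UUID cfId) { dirtyCfs.remove(cfId); }
    boolean isUnused() { return dirtyCfs.isEmpty(); }
    boolean contains(long flushSegmentId) { return id == flushSegmentId; }
  }

  public static void main(String[] args) {
    UUID cfId = UUID.randomUUID();
    Deque<Segment> active = new ArrayDeque<>(); // ordered oldest to newest
    for (long id = 1; id <= 4; id++) {
      Segment s = new Segment(id);
      s.dirtyCfs.add(cfId);
      active.add(s);
    }
    long flushSegmentId = 3; // the flush position falls inside segment 3

    for (Iterator<Segment> iter = active.iterator(); iter.hasNext(); ) {
      Segment segment = iter.next();
      segment.markClean(cfId);
      if (segment.isUnused()) {
        iter.remove(); // stands in for allocator.recycleSegment(segment)
        System.out.println("recycled segment " + segment.id);
      }
      // Stop once we reach the segment containing the flush position;
      // segment 4 is newer and is never touched.
      if (segment.contains(flushSegmentId)) break;
    }
    System.out.println("segments still active: " + active.size());
  }
}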
Example #3
 /** Forces a disk flush on the commit log files that need it. Blocking. */
 public void sync(boolean syncAllSegments) {
   CommitLogSegment current = allocator.allocatingFrom();
   for (CommitLogSegment segment : allocator.getActiveSegments()) {
     if (!syncAllSegments && segment.id > current.id) return;
     segment.sync();
   }
 }
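
When syncAllSegments is false, the loop relies on segment ids increasing monotonically: it flushes everything up to and including the segment currently being allocated from and returns as soon as it sees a newer id. A small standalone sketch of that cutoff (the SyncCutoffSketch and Segment names are hypothetical):

import java.util.List;

class SyncCutoffSketch {
  record Segment(long id) {
    void sync() { System.out.println("synced segment " + id); }
  }

  static void sync(List<Segment> active, Segment current, boolean syncAllSegments) {
    for (Segment segment : active) {
      if (!syncAllSegments && segment.id() > current.id()) return; // stop at segments newer than "current"
      segment.sync();
    }
  }

  public static void main(String[] args) {
    List<Segment> active = List.of(new Segment(1), new Segment(2), new Segment(3));
    sync(active, active.get(1), false); // syncs segments 1 and 2 only
    sync(active, active.get(1), true);  // syncs all three
  }
}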
Example #4
  /**
   * Delete log segments whose contents have been turned into SSTables. NOT threadsafe.
   *
   * @param context the commitLog context
   * @param id id of the columnFamily being flushed to disk
   */
  private void discardCompletedSegmentsInternal(
      CommitLogSegment.CommitLogContext context, Integer id) throws IOException {
    if (logger.isDebugEnabled())
      logger.debug("discard completed log segments for " + context + ", column family " + id + ".");

    /*
     * log replay assumes that we only have to look at entries past the last
     * flush position, so verify that this flush happens after the last. See CASSANDRA-936
     */
    assert context.position >= context.getSegment().getHeader().getPosition(id)
        : "discard at "
            + context
            + " is not after last flush at "
            + context.getSegment().getHeader().getPosition(id);
    /*
     * Loop through all the commit log files in the history. Now process
     * all files that are older than the one in the context. For each of
     * these files the header needs to be modified by resetting the dirty
     * bit corresponding to the flushed CF.
     */
    Iterator<CommitLogSegment> iter = segments.iterator();
    while (iter.hasNext()) {
      CommitLogSegment segment = iter.next();
      CommitLogHeader header = segment.getHeader();
      if (segment.equals(context.getSegment())) {
        // we can't just mark the segment where the flush happened clean,
        // since there may have been writes to it between when the flush
        // started and when it finished. so mark the flush position as
        // the replay point for this CF, instead.
        if (logger.isDebugEnabled())
          logger.debug("Marking replay position " + context.position + " on commit log " + segment);
        header.turnOn(id, context.position);
        segment.writeHeader();
        break;
      }

      header.turnOff(id);
      if (header.isSafeToDelete()) {
        logger.info("Discarding obsolete commit log:" + segment);
        segment.close();
        DeletionService.submitDelete(segment.getHeaderPath());
        DeletionService.submitDelete(segment.getPath());
        // usually this will be the first (remaining) segment, but not always, if segment A contains
        // writes to a CF that is unflushed but is followed by segment B whose CFs are all flushed.
        iter.remove();
      } else {
        if (logger.isDebugEnabled())
          logger.debug(
              "Not safe to delete commit log " + segment + "; dirty is " + header.dirtyString());
        segment.writeHeader();
      }
    }
  }
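
In this older variant, each segment carries a header with one dirty marker per column family: turnOff clears the marker in segments older than the flush, turnOn records the replay position in the segment where the flush landed, and a segment whose header has no markers left is safe to delete. A minimal sketch of that bookkeeping with a map standing in for CommitLogHeader (only turnOn, turnOff, and isSafeToDelete mirror names used above; everything else is hypothetical):

import java.util.HashMap;
import java.util.Map;

class HeaderSketch {
  // CF id -> replay position to resume from (0 meaning "replay this segment from its start")
  private final Map<Integer, Integer> dirty = new HashMap<>();

  void turnOn(int cfId, int position) { dirty.put(cfId, position); }
  void turnOff(int cfId) { dirty.remove(cfId); }
  boolean isSafeToDelete() { return dirty.isEmpty(); }

  public static void main(String[] args) {
    HeaderSketch olderSegment = new HeaderSketch();
    HeaderSketch flushSegment = new HeaderSketch();
    olderSegment.turnOn(42, 0); // CF 42 has unflushed writes in an older segment
    flushSegment.turnOn(42, 0); // ...and in the segment where the flush position lands

    // Flush of CF 42 completes at position 1024 inside flushSegment:
    olderSegment.turnOff(42);      // older segment is no longer dirty for CF 42
    flushSegment.turnOn(42, 1024); // replay for CF 42 starts at the flush position

    System.out.println("older segment deletable: " + olderSegment.isSafeToDelete()); // true
    System.out.println("flush segment deletable: " + flushSegment.isSafeToDelete()); // false
  }
}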
Example #5
 /** Returns true if any managed segment's path ends with the given file name. */
 private boolean manages(String name) {
   for (CommitLogSegment segment : segments) {
     if (segment.getPath().endsWith(name)) return true;
   }
   return false;
 }
Example #6
 /** Returns the total logical content size, in bytes, of all active commit log segments. */
 @Override
 public long getActiveContentSize() {
   long size = 0;
   for (CommitLogSegment segment : allocator.getActiveSegments()) size += segment.contentSize();
   return size;
 }
Example #7
 /** Returns the file names of all active commit log segments. */
 public List<String> getActiveSegmentNames() {
   List<String> segmentNames = new ArrayList<>();
   for (CommitLogSegment segment : allocator.getActiveSegments())
     segmentNames.add(segment.getName());
   return segmentNames;
 }