Example No. 1
 public static long getMinRepairedAt(Set<SSTableReader> actuallyCompact) {
   long minRepairedAt = Long.MAX_VALUE;
   for (SSTableReader sstable : actuallyCompact)
     minRepairedAt = Math.min(minRepairedAt, sstable.getSSTableMetadata().repairedAt);
   if (minRepairedAt == Long.MAX_VALUE) return ActiveRepairService.UNREPAIRED_SSTABLE;
   return minRepairedAt;
 }
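This fold treats Long.MAX_VALUE as the sentinel for "no sstables seen" and maps it back to the unrepaired marker on the way out. A minimal standalone sketch of the same pattern (plain Java, no Cassandra types; UNREPAIRED stands in for ActiveRepairService.UNREPAIRED_SSTABLE and is assumed to be 0 here):

import java.util.List;

public class MinRepairedAtDemo {
  // Stand-in for ActiveRepairService.UNREPAIRED_SSTABLE (assumed to be 0 for illustration)
  static final long UNREPAIRED = 0L;

  // Same fold as above: take the minimum repairedAt, falling back to the
  // "unrepaired" sentinel when no timestamps were seen
  static long minRepairedAt(List<Long> repairedAts) {
    long min = Long.MAX_VALUE;
    for (long t : repairedAts) min = Math.min(min, t);
    return min == Long.MAX_VALUE ? UNREPAIRED : min;
  }

  public static void main(String[] args) {
    System.out.println(minRepairedAt(List.of(1500L, 1200L, 1700L))); // 1200
    System.out.println(minRepairedAt(List.of())); // 0 (unrepaired)
  }
}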
  @Test
  public void testGetTemporaryFilesThrowsIfCompletingAfterObsoletion() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstable = sstable(dataFolder, cfs, 0, 128);

    LogTransaction logs = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(logs);

    LogTransaction.SSTableTidier tidier = logs.obsoleted(sstable);

    sstable.markObsolete(tidier);
    sstable.selfRef().release();

    LogTransaction.waitForDeletions();

    try {
      // This should race with the asynchronous deletion of txn log files.
      // It should throw because we are violating the requirement that a transaction must
      // finish before deleting files (i.e. releasing sstables)
      getTemporaryFiles(dataFolder);
      fail("Expected runtime exception");
    } catch (RuntimeException e) {
      // pass as long as the cause is not an assertion
      assertFalse(e.getCause() instanceof AssertionError);
    }

    logs.finish();
  }
 void assertInProgress() throws Exception {
   assertFiles(
       dataFolder.getPath(),
       Sets.newHashSet(
           Iterables.concat(
               sstableNew.getAllFilePaths(),
               sstableOld.getAllFilePaths(),
               txnLogs.logFilePaths())));
 }
      protected Throwable doAbort(Throwable accumulate) {
        tidier.abort();
        LogTransaction.waitForDeletions();

        Throwable ret = txnLogs.abort(accumulate);

        sstableNew.selfRef().release();
        sstableOld.selfRef().release();
        return ret;
      }
      protected Throwable doCommit(Throwable accumulate) {
        sstableOld.markObsolete(tidier);
        sstableOld.selfRef().release();
        LogTransaction.waitForDeletions();

        Throwable ret = txnLogs.commit(accumulate);

        sstableNew.selfRef().release();
        return ret;
      }
  @Test
  public void testCommitSameDesc() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstableOld1 = sstable(dataFolder, cfs, 0, 128);
    SSTableReader sstableOld2 = sstable(dataFolder, cfs, 0, 256);
    SSTableReader sstableNew = sstable(dataFolder, cfs, 1, 128);

    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);

    log.trackNew(sstableNew);

    sstableOld1.setReplaced();

    LogTransaction.SSTableTidier tidier = log.obsoleted(sstableOld2);
    assertNotNull(tidier);

    log.finish();

    sstableOld2.markObsolete(tidier);

    sstableOld1.selfRef().release();
    sstableOld2.selfRef().release();

    assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));

    sstableNew.selfRef().release();
  }
Example No. 7
 public static SSTableReader sstable(
     int generation, int size, boolean keepRef, ColumnFamilyStore cfs) {
   Descriptor descriptor =
       new Descriptor(
           cfs.getDirectories().getDirectoryForNewSSTables(),
           cfs.keyspace.getName(),
           cfs.getColumnFamilyName(),
           generation);
   Set<Component> components =
       ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
   for (Component component : components) {
     File file = new File(descriptor.filenameFor(component));
     try {
       file.createNewFile();
    } catch (IOException e) {
      // ignored: mock component files are created on a best-effort basis
    }
   }
   if (size > 0) {
     try {
       File file = new File(descriptor.filenameFor(Component.DATA));
       try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
         raf.setLength(size);
       }
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
   }
   SerializationHeader header = SerializationHeader.make(cfs.metadata, Collections.emptyList());
   StatsMetadata metadata =
       (StatsMetadata)
           new MetadataCollector(cfs.metadata.comparator)
               .finalizeMetadata(
                   cfs.metadata.partitioner.getClass().getCanonicalName(), 0.01f, -1, header)
               .get(MetadataType.STATS);
   SSTableReader reader =
       SSTableReader.internalOpen(
           descriptor,
           components,
           cfs.metadata,
           segmentedFile.sharedCopy(),
           segmentedFile.sharedCopy(),
           indexSummary.sharedCopy(),
           new AlwaysPresentFilter(),
           1L,
           metadata,
           SSTableReader.OpenReason.NORMAL,
           header);
   reader.first = reader.last = readerBounds(generation);
   if (!keepRef) reader.selfRef().release();
   return reader;
 }
 /** Gets the estimated total amount of data to write during compaction */
 private static long getTotalWriteSize(
     Iterable<SSTableReader> nonExpiredSSTables,
     long estimatedTotalKeys,
     ColumnFamilyStore cfs,
     OperationType compactionType) {
   long estimatedKeysBeforeCompaction = 0;
   for (SSTableReader sstable : nonExpiredSSTables)
     estimatedKeysBeforeCompaction += sstable.estimatedKeys();
   estimatedKeysBeforeCompaction = Math.max(1, estimatedKeysBeforeCompaction);
   double estimatedCompactionRatio = (double) estimatedTotalKeys / estimatedKeysBeforeCompaction;
   return Math.round(
       estimatedCompactionRatio
           * cfs.getExpectedCompactedFileSize(nonExpiredSSTables, compactionType));
 }
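To make the ratio concrete: if the non-expired inputs are estimated to hold 1,000 keys but merging is expected to leave 600 distinct keys, the ratio is 0.6, so the estimated write size is 60% of the expected compacted file size. A standalone sketch of the same arithmetic (plain longs, no Cassandra types):

public class WriteSizeEstimateDemo {
  // Mirrors getTotalWriteSize: scale the expected compacted size by the ratio of
  // keys expected after merging to keys counted before compaction
  static long totalWriteSize(long keysBefore, long keysAfter, long expectedCompactedBytes) {
    keysBefore = Math.max(1, keysBefore); // avoid division by zero, as above
    double ratio = (double) keysAfter / keysBefore;
    return Math.round(ratio * expectedCompactedBytes);
  }

  public static void main(String[] args) {
    // 1,000 keys before, 600 distinct keys after merging, 10 MiB expected on disk
    System.out.println(totalWriteSize(1000, 600, 10L << 20)); // 6291456 (~6 MiB)
  }
}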
  private static SSTableReader sstable(
      File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException {
    Descriptor descriptor =
        new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation);
    Set<Component> components =
        ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    for (Component component : components) {
      File file = new File(descriptor.filenameFor(component));
      if (!file.exists()) assertTrue(file.createNewFile());
      try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
        raf.setLength(size);
      }
    }

    SegmentedFile dFile =
        new BufferedSegmentedFile(
            new ChannelProxy(new File(descriptor.filenameFor(Component.DATA))),
            RandomAccessReader.DEFAULT_BUFFER_SIZE,
            0);
    SegmentedFile iFile =
        new BufferedSegmentedFile(
            new ChannelProxy(new File(descriptor.filenameFor(Component.PRIMARY_INDEX))),
            RandomAccessReader.DEFAULT_BUFFER_SIZE,
            0);

    SerializationHeader header = SerializationHeader.make(cfs.metadata, Collections.emptyList());
    StatsMetadata metadata =
        (StatsMetadata)
            new MetadataCollector(cfs.metadata.comparator)
                .finalizeMetadata(
                    cfs.metadata.partitioner.getClass().getCanonicalName(), 0.01f, -1, header)
                .get(MetadataType.STATS);
    SSTableReader reader =
        SSTableReader.internalOpen(
            descriptor,
            components,
            cfs.metadata,
            dFile,
            iFile,
            MockSchema.indexSummary.sharedCopy(),
            new AlwaysPresentFilter(),
            1L,
            metadata,
            SSTableReader.OpenReason.NORMAL,
            header);
    reader.first = reader.last = MockSchema.readerBounds(generation);
    return reader;
  }
  @Test
  public void testAbortOnlyNew() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstable = sstable(dataFolder, cfs, 0, 128);

    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);

    log.trackNew(sstable);
    log.abort();

    sstable.selfRef().release();

    assertFiles(dataFolder.getPath(), new HashSet<>());
  }
  @Test
  public void testCommitOnlyOld() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstable = sstable(dataFolder, cfs, 0, 128);

    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);

    LogTransaction.SSTableTidier tidier = log.obsoleted(sstable);
    assertNotNull(tidier);

    log.finish();
    sstable.markObsolete(tidier);
    sstable.selfRef().release();

    assertFiles(dataFolder.getPath(), new HashSet<>());
  }
  @Test
  public void testGetTemporaryFilesSafeAfterObsoletion() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstable = sstable(dataFolder, cfs, 0, 128);

    LogTransaction logs = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(logs);

    LogTransaction.SSTableTidier tidier = logs.obsoleted(sstable);

    logs.finish();

    sstable.markObsolete(tidier);
    sstable.selfRef().release();

    // This should race with the asynchronous deletion of txn log files
    // It doesn't matter what it returns but it should not throw because the txn
    // was completed before deleting files (i.e. releasing sstables)
    for (int i = 0; i < 200; i++) getTemporaryFiles(dataFolder);
  }
  @Test
  public void testUntrack() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstableNew = sstable(dataFolder, cfs, 1, 128);

    // complete a transaction without keeping the new files, since they were untracked
    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);

    log.trackNew(sstableNew);
    log.untrackNew(sstableNew);

    log.finish();

    sstableNew.selfRef().release();
    Thread.sleep(1);
    LogTransaction.waitForDeletions();

    assertFiles(dataFolder.getPath(), Collections.<String>emptySet());
  }
Example No. 14
  public boolean shouldInclude(SSTableReader sstable) {
    List<ByteBuffer> minColumnNames = sstable.getSSTableMetadata().minColumnNames;
    List<ByteBuffer> maxColumnNames = sstable.getSSTableMetadata().maxColumnNames;
    CellNameType comparator = sstable.metadata.comparator;

    if (minColumnNames.isEmpty() || maxColumnNames.isEmpty()) return true;

    for (ColumnSlice slice : slices)
      if (slice.intersects(minColumnNames, maxColumnNames, comparator, reversed)) return true;

    return false;
  }
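An sstable is kept when its column-name statistics are missing (nothing to rule it out) or when at least one requested slice overlaps the [min, max] range recorded in its metadata. The overlap check is ordinary interval intersection; here is a standalone sketch with plain comparables (the Range type is a hypothetical stand-in, not a Cassandra API):

import java.util.List;

public class SliceFilterDemo {
  // Hypothetical stand-in for a column slice with inclusive bounds
  record Range(int start, int end) {}

  // An sstable whose column names span [min, max] may contain data for a slice
  // iff the intervals intersect: start <= max && end >= min
  static boolean intersects(Range slice, int min, int max) {
    return slice.start() <= max && slice.end() >= min;
  }

  static boolean shouldInclude(List<Range> slices, Integer min, Integer max) {
    if (min == null || max == null) return true; // no stats recorded: cannot exclude
    for (Range s : slices) if (intersects(s, min, max)) return true;
    return false;
  }

  public static void main(String[] args) {
    List<Range> slices = List.of(new Range(5, 10), new Range(40, 50));
    System.out.println(shouldInclude(slices, 12, 30)); // false: no slice overlaps [12, 30]
    System.out.println(shouldInclude(slices, 8, 30)); // true: [5, 10] overlaps [8, 30]
  }
}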
Example No. 15
 /**
  * update a reader: if !original, this is a reader that is being introduced by this transaction;
  * otherwise it must be in the originals() set, i.e. a reader guarded by this transaction
  */
 public void update(SSTableReader reader, boolean original) {
   assert !staged.update.contains(reader)
       : "each reader may only be updated once per checkpoint: " + reader;
   assert !identities.contains(reader.instanceId)
       : "each reader instance may only be provided as an update once: " + reader;
   // check it isn't obsolete, and that it matches the original flag
   assert !(logged.obsolete.contains(reader) || staged.obsolete.contains(reader))
       : "may not update a reader that has been obsoleted";
   assert original == originals.contains(reader)
       : String.format(
           "the 'original' indicator was incorrect (%s provided): %s", original, reader);
   staged.update.add(reader);
   identities.add(reader.instanceId);
   if (!isOffline()) reader.setupKeyCache();
 }
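The assertions encode the per-checkpoint contract: a reader may be staged for update only once, a given reader instance may only ever be offered once, obsoleted readers may not be updated, and the original flag must agree with membership in originals(). A standalone sketch of that bookkeeping with plain sets (Tracker and Reader are hypothetical stand-ins, not the Cassandra API; run with -ea to enable assertions):

import java.util.HashSet;
import java.util.Set;

public class UpdateInvariantsDemo {
  // Hypothetical stand-in for SSTableReader, identified by an instance id
  record Reader(String name, long instanceId) {}

  static class Tracker {
    final Set<Reader> originals = new HashSet<>();
    final Set<Reader> stagedUpdate = new HashSet<>();
    final Set<Reader> obsolete = new HashSet<>();
    final Set<Long> identities = new HashSet<>();

    void update(Reader reader, boolean original) {
      assert !stagedUpdate.contains(reader) : "only one update per checkpoint: " + reader;
      assert !identities.contains(reader.instanceId()) : "instance offered twice: " + reader;
      assert !obsolete.contains(reader) : "may not update an obsoleted reader";
      assert original == originals.contains(reader) : "wrong 'original' flag for " + reader;
      stagedUpdate.add(reader);
      identities.add(reader.instanceId());
    }
  }

  public static void main(String[] args) {
    Tracker txn = new Tracker();
    Reader reader = new Reader("sstable-1", 42L);
    txn.originals.add(reader);
    txn.update(reader, true); // ok: reader is an original
    // txn.update(reader, true); // would trip the "one update per checkpoint" assertion
  }
}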
  private static void testCorruptRecord(
      BiConsumer<LogTransaction, SSTableReader> modifier, boolean isRecoverable)
      throws IOException {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstableOld = sstable(dataFolder, cfs, 0, 128);
    SSTableReader sstableNew = sstable(dataFolder, cfs, 1, 128);

    // simulate tracking sstables with a committed transaction, except that the checksum will be
    // wrong
    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);

    log.trackNew(sstableNew);
    log.obsoleted(sstableOld);

    // Modify the transaction log or disk state for sstableOld
    modifier.accept(log, sstableOld);

    assertNull(log.complete(null));

    sstableOld.selfRef().release();
    sstableNew.selfRef().release();

    // Collect the files on disk; for the old sstable, exclude the files that were deleted by the
    // modifier
    Set<String> newFiles = sstableNew.getAllFilePaths().stream().collect(Collectors.toSet());
    Set<String> oldFiles =
        sstableOld
            .getAllFilePaths()
            .stream()
            .filter(p -> new File(p).exists())
            .collect(Collectors.toSet());

    // The transaction should be classified as in progress since the last record is corrupt
    assertFiles(newFiles, getTemporaryFiles(dataFolder));
    assertFiles(oldFiles, getFinalFiles(dataFolder));

    if (isRecoverable) {
      // The corruption is recoverable, but the commit record is unreadable, so the
      // transaction is still in progress

      // This should remove new files
      LogTransaction.removeUnfinishedLeftovers(cfs.metadata);

      // make sure to exclude the old files that were deleted by the modifier
      assertFiles(dataFolder.getPath(), oldFiles);
    } else {
      // If an intermediate line was also modified, the txn log file should be ignored

      // This should not remove any files
      LogTransaction.removeUnfinishedLeftovers(cfs.metadata);

      assertFiles(
          dataFolder.getPath(),
          Sets.newHashSet(Iterables.concat(newFiles, oldFiles, log.logFilePaths())));
    }
  }
  private static void testObsoletedFilesChanged(Consumer<SSTableReader> modifier)
      throws IOException {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstableOld = sstable(dataFolder, cfs, 0, 128);
    SSTableReader sstableNew = sstable(dataFolder, cfs, 1, 128);

    // simulate tracking sstables with a committed transaction, except that the obsoleted
    // sstable files will have been modified on disk
    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);

    log.trackNew(sstableNew);
    /*TransactionLog.SSTableTidier tidier =*/ log.obsoleted(sstableOld);

    // modify the old sstable files
    modifier.accept(sstableOld);

    // Fake a commit
    log.txnFile().commit();

    // This should not remove the old files
    LogTransaction.removeUnfinishedLeftovers(cfs.metadata);

    assertFiles(
        dataFolder.getPath(),
        Sets.newHashSet(
            Iterables.concat(
                sstableNew.getAllFilePaths(), sstableOld.getAllFilePaths(), log.logFilePaths())));

    sstableOld.selfRef().release();
    sstableNew.selfRef().release();

    // complete the transaction to avoid LEAK errors
    assertNull(log.complete(null));

    assertFiles(
        dataFolder.getPath(),
        Sets.newHashSet(
            Iterables.concat(
                sstableNew.getAllFilePaths(), sstableOld.getAllFilePaths(), log.logFilePaths())));
  }
  @Test
  public void testRemoveUnfinishedLeftovers_commit() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstableOld = sstable(dataFolder, cfs, 0, 128);
    SSTableReader sstableNew = sstable(dataFolder, cfs, 1, 128);

    // simulate tracking sstables with a committed transaction (new log file deleted)
    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);

    log.trackNew(sstableNew);
    LogTransaction.SSTableTidier tidier = log.obsoleted(sstableOld);

    // Fake a commit
    log.txnFile().commit();

    Set<File> tmpFiles =
        sstableOld.getAllFilePaths().stream().map(File::new).collect(Collectors.toSet());

    sstableNew.selfRef().release();
    sstableOld.selfRef().release();

    Assert.assertEquals(tmpFiles, getTemporaryFiles(sstableOld.descriptor.directory));

    // normally called at startup
    LogTransaction.removeUnfinishedLeftovers(cfs.metadata);

    // sstableNew should be the only sstable left
    Directories directories = new Directories(cfs.metadata);
    Map<Descriptor, Set<Component>> sstables =
        directories.sstableLister(Directories.OnTxnErr.THROW).list();
    assertEquals(1, sstables.size());

    assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));

    // complete the transaction to avoid LEAK errors
    tidier.run();
    assertNull(log.complete(null));
  }
Example No. 19
  /**
   * Given arguments specifying an SSTable, and optionally an output file, export the contents of
   * the SSTable to JSON.
   *
   * @param args command line arguments
   * @throws ConfigurationException on configuration failure (wrong params given)
   */
  public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
      cmd = parser.parse(options, args);
    } catch (ParseException e1) {
      System.err.println(e1.getMessage());
      printUsage();
      System.exit(1);
    }

    if (cmd.getArgs().length != 1) {
      System.err.println("You must supply exactly one sstable");
      printUsage();
      System.exit(1);
    }

    String[] keys = cmd.getOptionValues(KEY_OPTION);
    HashSet<String> excludes =
        new HashSet<>(
            Arrays.asList(
                cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null
                    ? new String[0]
                    : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
    String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();

    if (Descriptor.isLegacyFile(new File(ssTableFileName))) {
      System.err.println("Unsupported legacy sstable");
      System.exit(1);
    }
    if (!new File(ssTableFileName).exists()) {
      System.err.println("Cannot find file " + ssTableFileName);
      System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
      CFMetaData metadata = metadataFromSSTable(desc);
      if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
        JsonTransformer.keysToJson(
            null,
            iterToStream(new KeyIterator(desc, metadata)),
            cmd.hasOption(RAW_TIMESTAMPS),
            metadata,
            System.out);
      } else {
        SSTableReader sstable = SSTableReader.openNoValidation(desc, metadata);
        IPartitioner partitioner = sstable.getPartitioner();
        final ISSTableScanner currentScanner;
        if ((keys != null) && (keys.length > 0)) {
          List<AbstractBounds<PartitionPosition>> bounds =
              Arrays.stream(keys)
                  .filter(key -> !excludes.contains(key))
                  .map(metadata.getKeyValidator()::fromString)
                  .map(partitioner::decorateKey)
                  .sorted()
                  .map(DecoratedKey::getToken)
                  .map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound()))
                  .collect(Collectors.toList());
          currentScanner = sstable.getScanner(bounds.iterator());
        } else {
          currentScanner = sstable.getScanner();
        }
        Stream<UnfilteredRowIterator> partitions =
            iterToStream(currentScanner)
                .filter(
                    i ->
                        excludes.isEmpty()
                            || !excludes.contains(
                                metadata.getKeyValidator().getString(i.partitionKey().getKey())));
        if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
          AtomicLong position = new AtomicLong();
          partitions.forEach(
              partition -> {
                position.set(currentScanner.getCurrentPosition());

                if (!partition.partitionLevelDeletion().isLive()) {
                  System.out.println(
                      "["
                          + metadata.getKeyValidator().getString(partition.partitionKey().getKey())
                          + "]@"
                          + position.get()
                          + " "
                          + partition.partitionLevelDeletion());
                }
                if (!partition.staticRow().isEmpty()) {
                  System.out.println(
                      "["
                          + metadata.getKeyValidator().getString(partition.partitionKey().getKey())
                          + "]@"
                          + position.get()
                          + " "
                          + partition.staticRow().toString(metadata, true));
                }
                partition.forEachRemaining(
                    row -> {
                      System.out.println(
                          "["
                              + metadata
                                  .getKeyValidator()
                                  .getString(partition.partitionKey().getKey())
                              + "]@"
                              + position.get()
                              + " "
                              + row.toString(metadata, false, true));
                      position.set(currentScanner.getCurrentPosition());
                    });
              });
        } else {
          JsonTransformer.toJson(
              currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
        }
      }
    } catch (IOException e) {
      // Throwing an exception out of main with a broken pipe causes the Windows cmd shell to hang
      e.printStackTrace(System.err);
    }

    System.exit(0);
  }
Example No. 20
 public OnDiskAtomIterator getSSTableColumnIterator(
     SSTableReader sstable, FileDataInput file, DecoratedKey key, RowIndexEntry indexEntry) {
   return sstable.iterator(file, key, slices, reversed, indexEntry);
 }
Example No. 21
 public OnDiskAtomIterator getSSTableColumnIterator(SSTableReader sstable, DecoratedKey key) {
   return sstable.iterator(key, slices, reversed);
 }
  @Test
  public void testGetTemporaryFiles() throws IOException {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata).getDirectoryForNewSSTables();
    SSTableReader sstable1 = sstable(dataFolder, cfs, 0, 128);

    Set<File> tmpFiles = getTemporaryFiles(dataFolder);
    assertNotNull(tmpFiles);
    assertEquals(0, tmpFiles.size());

    try (LogTransaction log = new LogTransaction(OperationType.WRITE)) {
      Directories directories = new Directories(cfs.metadata);

      File[] beforeSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory());

      SSTableReader sstable2 = sstable(dataFolder, cfs, 1, 128);
      log.trackNew(sstable2);

      Map<Descriptor, Set<Component>> sstables =
          directories.sstableLister(Directories.OnTxnErr.THROW).list();
      assertEquals(2, sstables.size());

      // this should contain sstable1, sstable2 and the transaction log file
      File[] afterSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory());

      int numNewFiles = afterSecondSSTable.length - beforeSecondSSTable.length;
      assertEquals(
          numNewFiles - 1,
          sstable2.getAllFilePaths().size()); // new files except for transaction log file

      tmpFiles = getTemporaryFiles(dataFolder);
      assertNotNull(tmpFiles);
      assertEquals(numNewFiles - 1, tmpFiles.size());

      File ssTable2DataFile = new File(sstable2.descriptor.filenameFor(Component.DATA));
      File ssTable2IndexFile = new File(sstable2.descriptor.filenameFor(Component.PRIMARY_INDEX));

      assertTrue(tmpFiles.contains(ssTable2DataFile));
      assertTrue(tmpFiles.contains(ssTable2IndexFile));

      List<File> files = directories.sstableLister(Directories.OnTxnErr.THROW).listFiles();
      List<File> filesNoTmp =
          directories.sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true).listFiles();
      assertNotNull(files);
      assertNotNull(filesNoTmp);

      assertTrue(files.contains(ssTable2DataFile));
      assertTrue(files.contains(ssTable2IndexFile));

      assertFalse(filesNoTmp.contains(ssTable2DataFile));
      assertFalse(filesNoTmp.contains(ssTable2IndexFile));

      log.finish();

      // Now it should be empty since the transaction has finished
      tmpFiles = getTemporaryFiles(dataFolder);
      assertNotNull(tmpFiles);
      assertEquals(0, tmpFiles.size());

      filesNoTmp =
          directories.sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true).listFiles();
      assertNotNull(filesNoTmp);
      assertTrue(filesNoTmp.contains(ssTable2DataFile));
      assertTrue(filesNoTmp.contains(ssTable2IndexFile));

      sstable1.selfRef().release();
      sstable2.selfRef().release();
    }
  }
 protected AbstractTransactionalTest.TestableTransaction newTest() throws Exception {
   LogTransaction.waitForDeletions();
   SSTableReader.resetTidying();
   return new TxnTest();
 }
 void assertAborted() throws Exception {
   assertFiles(dataFolder.getPath(), new HashSet<>(sstableOld.getAllFilePaths()));
 }
 void assertCommitted() throws Exception {
   assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths()));
 }
Example No. 26
  /**
   * For internal use and testing only. The rest of the system should go through the submit*
   * methods, which are properly serialized. Caller is in charge of marking/unmarking the sstables
   * as compacting.
   */
  protected void runMayThrow() throws Exception {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    assert transaction != null;

    if (transaction.originals().isEmpty()) return;

    // Note that the current compaction strategy is not necessarily the one this task was created
    // under. This should be harmless; see the comments on CFS.maybeReloadCompactionStrategy.
    AbstractCompactionStrategy strategy = cfs.getCompactionStrategy();

    if (DatabaseDescriptor.isSnapshotBeforeCompaction())
      cfs.snapshotWithoutFlush(System.currentTimeMillis() + "-compact-" + cfs.name);

    // Note that we need an early, rough estimate of whether the compaction will fit on disk.
    // This is pessimistic, but since we might remove sstables from the compaction in
    // checkAvailableDiskSpace, it needs to be done here.
    long expectedWriteSize =
        cfs.getExpectedCompactedFileSize(transaction.originals(), compactionType);
    long earlySSTableEstimate = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
    checkAvailableDiskSpace(earlySSTableEstimate, expectedWriteSize);

    // sanity check: all sstables must belong to the same cfs
    assert !Iterables.any(
        transaction.originals(),
        new Predicate<SSTableReader>() {
          @Override
          public boolean apply(SSTableReader sstable) {
            return !sstable.descriptor.cfname.equals(cfs.name);
          }
        });

    UUID taskId = SystemKeyspace.startCompaction(cfs, transaction.originals());

    // New sstables from flush can be added during a compaction, but only the compaction can
    // remove them, so in our single-threaded compaction world this is a valid way of determining
    // whether we're compacting all the sstables that existed when we started
    StringBuilder ssTableLoggerMsg = new StringBuilder("[");
    for (SSTableReader sstr : transaction.originals()) {
      ssTableLoggerMsg.append(
          String.format("%s:level=%d, ", sstr.getFilename(), sstr.getSSTableLevel()));
    }
    ssTableLoggerMsg.append("]");
    String taskIdLoggerMsg = taskId == null ? UUIDGen.getTimeUUID().toString() : taskId.toString();
    logger.info("Compacting ({}) {}", taskIdLoggerMsg, ssTableLoggerMsg);

    long start = System.nanoTime();

    long totalKeysWritten = 0;

    long estimatedKeys = 0;
    try (CompactionController controller = getCompactionController(transaction.originals())) {
      Set<SSTableReader> actuallyCompact =
          Sets.difference(transaction.originals(), controller.getFullyExpiredSSTables());

      SSTableFormat.Type sstableFormat = getFormatType(transaction.originals());

      List<SSTableReader> newSStables;
      AbstractCompactionIterable ci;

      // SSTableScanners need to be closed before the markCompactedSSTablesReplaced call, as
      // scanners hold references to both ifile and dfile, and SSTR will throw deletion errors on
      // Windows if it tries to delete before the scanner is closed.
      // See CASSANDRA-8019 and CASSANDRA-8399
      try (Refs<SSTableReader> refs = Refs.ref(actuallyCompact);
          AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(actuallyCompact)) {
        ci =
            new CompactionIterable(
                compactionType, scanners.scanners, controller, sstableFormat, taskId);
        try (CloseableIterator<AbstractCompactedRow> iter = ci.iterator()) {
          if (collector != null) collector.beginCompaction(ci);
          long lastCheckObsoletion = start;

          if (!controller.cfs.getCompactionStrategy().isActive)
            throw new CompactionInterruptedException(ci.getCompactionInfo());

          try (CompactionAwareWriter writer =
              getCompactionAwareWriter(cfs, transaction, actuallyCompact)) {
            estimatedKeys = writer.estimatedKeys();
            while (iter.hasNext()) {
              if (ci.isStopRequested())
                throw new CompactionInterruptedException(ci.getCompactionInfo());

              try (AbstractCompactedRow row = iter.next()) {
                if (writer.append(row)) totalKeysWritten++;

                if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                  controller.maybeRefreshOverlaps();
                  lastCheckObsoletion = System.nanoTime();
                }
              }
            }

            // don't replace old sstables yet, as we need to mark the compaction finished in the
            // system table
            newSStables = writer.finish();
          } finally {
            // Point of no return -- the new sstables are live on disk; next we'll start
            // deleting the old ones (in replaceCompactedSSTables)
            if (taskId != null) SystemKeyspace.finishCompaction(taskId);

            if (collector != null) collector.finishCompaction(ci);
          }
        }
      }

      // log a bunch of statistics about the result and save to system table compaction_history
      long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
      long startsize = SSTableReader.getTotalBytes(transaction.originals());
      long endsize = SSTableReader.getTotalBytes(newSStables);
      double ratio = (double) endsize / (double) startsize;

      StringBuilder newSSTableNames = new StringBuilder();
      for (SSTableReader reader : newSStables)
        newSSTableNames.append(reader.descriptor.baseFilename()).append(",");

      double mbps = dTime > 0 ? (double) endsize / (1024 * 1024) / ((double) dTime / 1000) : 0;
      long totalSourceRows = 0;
      String mergeSummary =
          updateCompactionHistory(
              cfs.keyspace.getName(), cfs.getColumnFamilyName(), ci, startsize, endsize);
      logger.info(
          String.format(
              "Compacted (%s) %d sstables to [%s] to level=%d.  %,d bytes to %,d (~%d%% of original) in %,dms = %fMB/s.  %,d total partitions merged to %,d.  Partition merge counts were {%s}",
              taskIdLoggerMsg,
              transaction.originals().size(),
              newSSTableNames.toString(),
              getLevel(),
              startsize,
              endsize,
              (int) (ratio * 100),
              dTime,
              mbps,
              totalSourceRows,
              totalKeysWritten,
              mergeSummary));
      logger.debug(
          String.format(
              "CF Total Bytes Compacted: %,d", CompactionTask.addToTotalBytesCompacted(endsize)));
      logger.debug(
          "Actual #keys: {}, Estimated #keys:{}, Err%: {}",
          totalKeysWritten,
          estimatedKeys,
          ((double) (totalKeysWritten - estimatedKeys) / totalKeysWritten));

      if (offline) Refs.release(Refs.selfRefs(newSStables));
    }
  }
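The MB/s figure logged above is simply output bytes over wall-clock time: for example, 100 MiB of output produced in 20,000 ms logs as 5 MB/s. A standalone sketch of the same formula:

public class CompactionRateDemo {
  // Same formula as the log line above: output megabytes per elapsed second
  static double mbps(long endSizeBytes, long elapsedMillis) {
    return elapsedMillis > 0
        ? (double) endSizeBytes / (1024 * 1024) / ((double) elapsedMillis / 1000)
        : 0;
  }

  public static void main(String[] args) {
    System.out.println(mbps(100L << 20, 20_000)); // 5.0
  }
}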