/**
 * Perform recovery on commit logs located in the directory specified by the config file.
 *
 * @return the number of mutations replayed
 */
public int recover() throws IOException {
    // If createReserveSegments is already flipped, the CLSM is running and recovery has
    // already taken place.
    if (allocator.createReserveSegments)
        return 0;

    // Allocator could be in the process of initial startup with 0 active and available
    // segments. We need to wait for the allocation manager to finish allocation and add it
    // to available segments so we don't get an invalid response on allocator.manages(...)
    // below by grabbing a file off the filesystem before it's added to the CLQ.
    allocator.allocatingFrom();

    FilenameFilter unmanagedFilesFilter = new FilenameFilter() {
        public boolean accept(File dir, String name) {
            // We used to try to avoid instantiating commitlog (thus creating an empty
            // segment ready for writes) until after recover was finished. This turns out
            // to be fragile; it is less error-prone to go ahead and allow writes before
            // recover(), and just skip active segments when we do.
            return CommitLogDescriptor.isValid(name) && !allocator.manages(name);
        }
    };

    // Submit all existing files in the commit log dir for archiving prior to recovery - CASSANDRA-6904
    for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles(unmanagedFilesFilter)) {
        archiver.maybeArchive(file.getPath(), file.getName());
        archiver.maybeWaitForArchiving(file.getName());
    }

    assert archiver.archivePending.isEmpty()
        : "Not all commit log archive tasks were completed before restore";
    archiver.maybeRestoreArchive();

    File[] files = new File(DatabaseDescriptor.getCommitLogLocation()).listFiles(unmanagedFilesFilter);
    int replayed = 0;
    if (files.length == 0) {
        logger.info("No commitlog files found; skipping replay");
    } else {
        Arrays.sort(files, new CommitLogSegmentFileComparator());
        logger.info("Replaying {}", StringUtils.join(files, ", "));
        replayed = recover(files);
        logger.info("Log replay complete, {} replayed mutations", replayed);

        for (File f : files)
            allocator.recycleSegment(f);
    }

    allocator.enableReserveSegmentCreation();
    return replayed;
}
public static void recover() throws IOException {
    String directory = DatabaseDescriptor.getCommitLogLocation();

    File[] files = new File(directory).listFiles(new FilenameFilter() {
        public boolean accept(File dir, String name) {
            return CommitLogSegment.possibleCommitLogFile(name);
        }
    });
    if (files.length == 0)
        return;

    Arrays.sort(files, new FileUtils.FileComparator());
    logger.info("Replaying " + StringUtils.join(files, ", "));
    recover(files);
    for (File f : files) {
        // The header file may not actually exist.
        FileUtils.delete(CommitLogHeader.getHeaderPathFromSegmentPath(f.getAbsolutePath()));
        if (!f.delete())
            logger.error("Unable to remove " + f
                         + "; you should remove it manually or next restart will replay it again"
                         + " (harmless, but time-consuming)");
    }
    logger.info("Log replay complete");
}
private static void cleanup() throws IOException {
    // Clean up the commitlog directory.
    String[] directoryNames = { DatabaseDescriptor.getCommitLogLocation(), };
    for (String dirName : directoryNames) {
        File dir = new File(dirName);
        if (!dir.exists()) {
            log.error("No such directory: " + dir.getAbsolutePath());
            throw new RuntimeException("No such directory: " + dir.getAbsolutePath());
        }
        FileUtils.deleteRecursive(dir);
    }

    // Clean up the data directories, whose contents are laid out as
    // <data directory>/<table>/<data files>.
    for (String dirName : DatabaseDescriptor.getAllDataFileLocations()) {
        File dir = new File(dirName);
        if (!dir.exists()) {
            log.error("No such directory: " + dir.getAbsolutePath());
            throw new RuntimeException("No such directory: " + dir.getAbsolutePath());
        }
        FileUtils.deleteRecursive(dir);
    }
}
private static CommitLog construct() {
    CommitLog log = new CommitLog(DatabaseDescriptor.getCommitLogLocation(), CommitLogArchiver.construct());

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try {
        mbs.registerMBean(log, new ObjectName("org.apache.cassandra.db:type=Commitlog"));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return log.start();
}
public void makeLog() throws IOException, InterruptedException {
    CommitLog commitLog = CommitLog.instance;
    System.out.format("\nUsing commit log size %dmb, compressor %s, sync %s%s\n",
                      mb(DatabaseDescriptor.getCommitLogSegmentSize()),
                      commitLog.configuration.getCompressorName(),
                      commitLog.executor.getClass().getSimpleName(),
                      randomSize ? " random size" : "");

    final List<CommitlogExecutor> threads = new ArrayList<>();
    ScheduledExecutorService scheduled = startThreads(commitLog, threads);

    Thread.sleep(runTimeMs);
    stop = true;
    scheduled.shutdown();
    scheduled.awaitTermination(2, TimeUnit.SECONDS);

    int hash = 0;
    int cells = 0;
    for (CommitlogExecutor t : threads) {
        t.join();
        hash += t.hash;
        cells += t.cells;
    }
    commitLog.shutdownBlocking();

    File dataDir = new File(CommitLogUpgradeTest.DATA_DIR + FBUtilities.getReleaseVersionString());
    System.out.format("Data will be stored in %s\n", dataDir);
    if (dataDir.exists())
        FileUtils.deleteRecursive(dataDir);

    dataDir.mkdirs();
    for (File f : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
        FileUtils.createHardLink(f, new File(dataDir, f.getName()));

    Properties prop = new Properties();
    prop.setProperty(CFID_PROPERTY, Schema.instance.getId(KEYSPACE, TABLE).toString());
    prop.setProperty(CELLS_PROPERTY, Integer.toString(cells));
    prop.setProperty(HASH_PROPERTY, Integer.toString(hash));
    prop.store(new FileOutputStream(new File(dataDir, PROPERTIES_FILE)),
               "CommitLog upgrade test, version " + FBUtilities.getReleaseVersionString());

    System.out.println("Done");
}
public void maybeRestoreArchive() {
    if (Strings.isNullOrEmpty(restoreDirectories))
        return;

    for (String dir : restoreDirectories.split(",")) {
        File[] files = new File(dir).listFiles();
        if (files == null) {
            throw new RuntimeException("Unable to list directory " + dir);
        }
        for (File fromFile : files) {
            File toFile = new File(DatabaseDescriptor.getCommitLogLocation(),
                                   new CommitLogDescriptor(CommitLogSegment.getNextId()).fileName());
            String command = restoreCommand.replace("%from", fromFile.getPath());
            command = command.replace("%to", toFile.getPath());
            try {
                exec(command);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }
}
public static void recover() throws IOException {
    String directory = DatabaseDescriptor.getCommitLogLocation();

    File[] files = new File(directory).listFiles(new FilenameFilter() {
        public boolean accept(File dir, String name) {
            // We used to try to avoid instantiating commitlog (thus creating an empty
            // segment ready for writes) until after recover was finished. This turns out
            // to be fragile; it is less error-prone to go ahead and allow writes before
            // recover(), and just skip active segments when we do.
            return CommitLogSegment.possibleCommitLogFile(name) && !instance.manages(name);
        }
    });

    if (files.length == 0) {
        logger.info("No commitlog files found; skipping replay");
        return;
    }

    Arrays.sort(files, new FileUtils.FileComparator());
    logger.info("Replaying " + StringUtils.join(files, ", "));
    recover(files);
    for (File f : files) {
        // The header file may not actually exist.
        FileUtils.delete(CommitLogHeader.getHeaderPathFromSegmentPath(f.getAbsolutePath()));
        if (!f.delete())
            logger.error("Unable to remove " + f
                         + "; you should remove it manually or next restart will replay it again"
                         + " (harmless, but time-consuming)");
    }
    logger.info("Log replay complete");
}