/**
 * Returns the sum of the per-sstable estimated key counts over every
 * SSTableReader tracked by this collection (iterated via {@code this}).
 * The result is an estimate, not an exact row count.
 */
public long estimatedKeys()
{
    long total = 0L;
    for (SSTableReader reader : this)
    {
        total += reader.estimatedKeys();
    }
    return total;
}
/**
 * Builds an iterator over a slice of columns [startColumn, finishColumn] of one row
 * in the given sstable.
 *
 * Reads the row header directly from the data file in this exact order: row key
 * (UTF), row data size (int), row format marker — the field offsets recorded here
 * (dataStart / realDataStart) depend on that order, so do not reorder the reads.
 *
 * @param ssTable      sstable to read the row from
 * @param key          raw (undecorated) row key
 * @param startColumn  first column of the slice (empty = no lower bound — presumed
 *                     from usual Cassandra slice semantics; confirm against callers)
 * @param finishColumn last column of the slice
 * @param reversed     iterate columns in reverse comparator order
 * @throws IOException on any failure reading the sstable data file
 */
public SSTableSliceIterator( SSTableReader ssTable, String key, byte[] startColumn, byte[] finishColumn, boolean reversed) throws IOException {
    this.reversed = reversed;
    /* Morph key into actual key based on the partition type. */
    DecoratedKey decoratedKey = ssTable.getPartitioner().decorateKey(key);
    // Position a buffered reader at this row's offset; null means the key is not in this sstable.
    FileDataInput fdi = ssTable.getFileDataInput( decoratedKey, DatabaseDescriptor.getSlicedReadBufferSizeInKB() * 1024);
    this.comparator = ssTable.getColumnComparator();
    this.startColumn = startColumn;
    this.finishColumn = finishColumn;
    if (fdi != null) {
        // BIGDATA: header parsing moved up here (shared by both row formats below).
        // Sanity check: the key stored on disk must match the key we looked up.
        DecoratedKey keyInDisk = ssTable.getPartitioner().convertFromDiskFormat(fdi.readUTF());
        assert keyInDisk.equals(decoratedKey) : String.format("%s != %s in %s", keyInDisk, decoratedKey, fdi.getPath());
        dataSize = fdi.readInt(); // row data size
        dataStart = fdi.getAbsolutePosition(); // offset just after the size field
        // BIGDATA fork extension: per-row format byte selects old vs. new (compressed) layout.
        rowFormat = ColumnFamily.serializer().deserializeRowFormat(fdi);
        realDataStart = fdi.getAbsolutePosition(); // offset just after the format marker
        // !BIGDATA: branch out to the different code process for new rowformat/compression.
        if (ColumnFamily.serializer().isNewRowFormat(rowFormat)) {
            // new row format
            reader = new BigdataColumnGroupReader(ssTable, decoratedKey, fdi);
        } else {
            // old row format
            reader = new ColumnGroupReader(ssTable, decoratedKey, fdi);
        }
    }
}
/**
 * Atomically (w.r.t. this tracker's lock) swaps a set of compacted-away sstables
 * for their replacements, keeping the live/total size counters — both local and
 * the BIGDATA global counters on StorageService — in sync.
 *
 * Order matters: replacements are added (and their sizes counted) before old
 * sstables are removed, so concurrent readers via {@code sstables} never see a
 * window with neither generation present.
 *
 * @param oldSSTables  sstables consumed by the compaction; normally still tracked,
 *                     but may already have been dropped by a column-family reset
 * @param replacements newly written sstables to start tracking
 * @throws IOException declared for interface compatibility (no direct I/O is
 *                     visible in this method body)
 */
public synchronized void replace( Collection<SSTableReader> oldSSTables, Iterable<SSTableReader> replacements) throws IOException {
    // Work on a copy; the published set is replaced wholesale at the end.
    Set<SSTableReader> sstablesNew = new HashSet<SSTableReader>(sstables);
    for (SSTableReader sstable : replacements) {
        assert sstable.getIndexPositions() != null;
        sstablesNew.add(sstable);
        long size = sstable.bytesOnDisk();
        // New data counts as both live and total.
        liveSize.addAndGet(size);
        totalSize.addAndGet(size);
        StorageService.instance.storageLiveSize.addAndGet(size); // BIGDATA
        StorageService.instance.storageTotalSize.addAndGet(size); // BIGDATA
        sstable.setTrackedBy(this);
    }
    for (SSTableReader sstable : oldSSTables) {
        boolean removed = sstablesNew.remove(sstable);
        // BIGDATA:
        // assert removed; // the old sstables in compacting may already be marked compacted by reset CF.
        // Deliberately tolerant: a CF reset can race with compaction and mark these
        // compacted first, in which case removal is a no-op and we only log.
        if (removed) {
            sstable.markCompacted();
            long bytesOnDisk = sstable.bytesOnDisk(); // BIGDATA
            // Only live size shrinks; total size is decremented elsewhere when the
            // file is actually deleted — TODO confirm against the deletion path.
            liveSize.addAndGet(-bytesOnDisk); // BIGDATA
            StorageService.instance.storageLiveSize.addAndGet(-bytesOnDisk); // BIGDATA
        } else {
            logger.info( "The old sstable in compacting may already be marked compacted: " + sstable.getFilename());
        }
    }
    // Publish an immutable view so readers never observe partial mutation.
    sstables = Collections.unmodifiableSet(sstablesNew);
    updateCacheSizes();
}
/**
 * Reader for the OLD (uncompressed) row format. Expects {@code input} to be
 * positioned exactly at {@code realDataStart}, i.e. just past the row header
 * already consumed by the enclosing iterator's constructor (BIGDATA: that
 * header parsing was moved up out of this class).
 *
 * On-disk layout consumed here, in order: bloom filter, column index,
 * CF deletion metadata, column count, then the column blocks themselves.
 *
 * @param ssTable sstable being read
 * @param key     row key (not read here; kept for context/diagnostics — TODO confirm)
 * @param input   data-file input positioned at realDataStart
 * @throws IOException on any failure reading the row metadata
 */
public ColumnGroupReader(SSTableReader ssTable, DecoratedKey key, FileDataInput input) throws IOException {
    this.file = input;
    this.ssTable = ssTable;
    // Caller must have consumed the row header already.
    assert file.getAbsolutePosition() == realDataStart; // BIGDATA: some code move up.
    // Per-row bloom filter is useless for a slice read; skip it.
    IndexHelper.skipBloomFilter(file);
    indexes = IndexHelper.deserializeIndex(file);
    // Row-level deletion info, without materializing any columns.
    emptyColumnFamily = ColumnFamily.serializer() .deserializeFromSSTableNoColumns(ssTable.makeColumnFamily(), file);
    file.readInt(); // column count
    // Mark the start of column data so index offsets can be applied relative to it.
    file.mark();
    curRangeIndex = IndexHelper.indexFor(startColumn, indexes, comparator, reversed);
    // In reverse iteration an out-of-range start maps past the last index block; clamp it.
    if (reversed && curRangeIndex == indexes.size()) curRangeIndex--;
}
/**
 * Entry point for the standalone sstablesplit tool: splits each given sstable
 * into pieces of at most {@code options.sizeInMB}, optionally snapshotting the
 * originals first, then deletes the originals.
 *
 * All input files must belong to the same keyspace and column family. Exits
 * with status 1 on fatal errors, 0 on completion (System.exit is needed to
 * stop non-daemon threads started by the storage layer).
 *
 * @param args command-line arguments, parsed by {@link Options#parseArgs}
 * @throws IOException declared for signature compatibility; fatal errors are
 *                     caught and converted to exit codes below
 */
public static void main(String args[]) throws IOException {
    Options options = Options.parseArgs(args);
    try {
        // load keyspace descriptions.
        DatabaseDescriptor.loadSchemas();
        String ksName = null;
        String cfName = null;
        Map<Descriptor, Set<Component>> parsedFilenames = new HashMap<Descriptor, Set<Component>>();
        // Validate each filename and collect the sstable components that actually exist on disk.
        for (String filename : options.filenames) {
            File file = new File(filename);
            if (!file.exists()) {
                System.out.println("Skipping inexisting file " + file);
                continue;
            }
            Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(), file.getName());
            if (pair == null) {
                System.out.println("Skipping non sstable file " + file);
                continue;
            }
            Descriptor desc = pair.left;
            // Enforce: every input comes from one keyspace...
            if (ksName == null) ksName = desc.ksname;
            else if (!ksName.equals(desc.ksname))
                throw new IllegalArgumentException("All sstables must be part of the same keyspace");
            // ...and one column family.
            if (cfName == null) cfName = desc.cfname;
            else if (!cfName.equals(desc.cfname))
                throw new IllegalArgumentException("All sstables must be part of the same column family");
            // Start from the full candidate component set, then drop the ones missing on disk.
            Set<Component> components = new HashSet<Component>( Arrays.asList( new Component[] { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.COMPRESSION_INFO, Component.STATS }));
            Iterator<Component> iter = components.iterator();
            while (iter.hasNext()) {
                Component component = iter.next();
                if (!(new File(desc.filenameFor(component)).exists())) iter.remove();
            }
            parsedFilenames.put(desc, components);
        }
        if (ksName == null || cfName == null) {
            System.err.println("No valid sstables to split");
            System.exit(1);
        }
        // Do not load sstables since they might be broken
        Table table = Table.openWithoutSSTables(ksName);
        ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
        String snapshotName = "pre-split-" + System.currentTimeMillis();
        List<SSTableReader> sstables = new ArrayList<SSTableReader>();
        // Open each sstable without validation; optionally hard-link it into a snapshot first.
        for (Map.Entry<Descriptor, Set<Component>> fn : parsedFilenames.entrySet()) {
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(fn.getKey(), fn.getValue(), cfs.metadata);
                sstables.add(sstable);
                if (options.snapshot) {
                    File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
                    sstable.createLinks(snapshotDirectory.getPath());
                }
            } catch (Exception e) {
                // Per-file failure is non-fatal: report and continue with the rest.
                System.err.println(String.format("Error Loading %s: %s", fn.getKey(), e.getMessage()));
                if (options.debug) e.printStackTrace(System.err);
            }
        }
        if (options.snapshot)
            System.out.println( String.format("Pre-split sstables snapshotted into snapshot %s", snapshotName));
        // Mark everything compacting up front so nothing else touches these files.
        cfs.getDataTracker().markCompacting(sstables);
        for (SSTableReader sstable : sstables) {
            try {
                new SSTableSplitter(cfs, sstable, options.sizeInMB).split();
                // Remove the sstable
                sstable.markCompacted();
                sstable.releaseReference();
            } catch (Exception e) {
                System.err.println(String.format("Error splitting %s: %s", sstable, e.getMessage()));
                if (options.debug) e.printStackTrace(System.err);
            }
        }
        // Background deletion tasks must finish before the JVM exits.
        SSTableDeletingTask.waitForDeletions();
        System.exit(0); // We need that to stop non daemonized threads
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug) e.printStackTrace(System.err);
        System.exit(1);
    }
}
/**
 * Entry point for the standalone sstableupgrade tool: rewrites every
 * non-current-version sstable of one column family (optionally from a named
 * snapshot or backups) to the current sstable format, deleting the originals
 * unless {@code --keep-source} was given.
 *
 * Exits with status 1 on fatal errors, 0 on completion.
 *
 * @param args command-line arguments, parsed by {@link Options#parseArgs}
 * @throws IOException declared for signature compatibility; fatal errors are
 *                     caught and converted to exit codes below
 */
public static void main(String args[]) throws IOException {
    Options options = Options.parseArgs(args);
    try {
        // load keyspace descriptions.
        DatabaseDescriptor.loadSchemas(false);
        if (Schema.instance.getCFMetaData(options.keyspace, options.cf) == null)
            throw new IllegalArgumentException( String.format("Unknown keyspace/columnFamily %s.%s", options.keyspace, options.cf));
        // Open without loading sstables; we enumerate and open them manually below.
        Keyspace keyspace = Keyspace.openWithoutSSTables(options.keyspace);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(options.cf);
        OutputHandler handler = new OutputHandler.SystemOutput(false, options.debug);
        Directories.SSTableLister lister = cfs.directories.sstableLister();
        // Either scan one snapshot's backups, or the live data dirs (excluding backups).
        if (options.snapshot != null) lister.onlyBackups(true).snapshots(options.snapshot);
        else lister.includeBackups(false);
        Collection<SSTableReader> readers = new ArrayList<SSTableReader>();
        // Upgrade sstables
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            Set<Component> components = entry.getValue();
            // Data + primary index are the minimum needed to open an sstable at all.
            if (!components.contains(Component.DATA) || !components.contains(Component.PRIMARY_INDEX)) continue;
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(entry.getKey(), components, cfs.metadata);
                // Already current format: nothing to do for this one.
                if (sstable.descriptor.version.equals(Descriptor.Version.CURRENT)) continue;
                readers.add(sstable);
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                // Per-file failure is non-fatal: report and keep scanning.
                System.err.println(String.format("Error Loading %s: %s", entry.getKey(), e.getMessage()));
                if (options.debug) e.printStackTrace(System.err);
                continue;
            }
        }
        int numSSTables = readers.size();
        handler.output("Found " + numSSTables + " sstables that need upgrading.");
        for (SSTableReader sstable : readers) {
            try {
                Upgrader upgrader = new Upgrader(cfs, sstable, handler);
                upgrader.upgrade();
                if (!options.keepSource) {
                    // Remove the sstable (it's been copied by upgrade)
                    System.out.format("Deleting table %s.%n", sstable.descriptor.baseFilename());
                    sstable.markObsolete();
                    sstable.selfRef().release();
                }
            } catch (Exception e) {
                System.err.println(String.format("Error upgrading %s: %s", sstable, e.getMessage()));
                if (options.debug) e.printStackTrace(System.err);
            }
        }
        // Let in-flight compactions and deletions finish before the JVM exits.
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        SSTableDeletingTask.waitForDeletions();
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug) e.printStackTrace(System.err);
        System.exit(1);
    }
}
/**
 * Reader for the NEW (BIGDATA, compressed) row format. Expects {@code input}
 * to be positioned exactly at {@code realDataStart}, i.e. just past the row
 * header consumed by the enclosing iterator's constructor.
 *
 * Two on-disk variants are handled:
 * <ul>
 * <li>index-at-end: header (bloom filter, deletion info, column count), then the
 *     column blocks, then the index, then a trailing int holding the index size —
 *     requires two seeks (trailer, then index) before iteration can start;</li>
 * <li>index-at-front: same layout as the old format (bloom filter, index,
 *     deletion info, column count) read sequentially with no seeks.</li>
 * </ul>
 *
 * @param ssTable sstable being read
 * @param key     row key (not read here; kept for context/diagnostics — TODO confirm)
 * @param input   data-file input positioned at realDataStart
 * @throws IOException on read failure, or if the row's compression algorithm id
 *                     is unknown (wrapped IllegalArgumentException)
 */
public BigdataColumnGroupReader(SSTableReader ssTable, DecoratedKey key, FileDataInput input) throws IOException {
    this.file = input;
    this.ssTable = ssTable;
    // Caller must have consumed the row header already.
    assert file.getAbsolutePosition() == realDataStart;
    if (ColumnFamily.serializer().isNewRowFormatIndexAtEnd(rowFormat)) {
        ////// HEADER //////
        // skip bloom filter
        IndexHelper.skipBloomFilter(file);
        // read deletion meta info
        emptyColumnFamily = ColumnFamily.serializer() .deserializeFromSSTableNoColumns(ssTable.makeColumnFamily(), file);
        file.readInt(); // column count
        // the position of the first block
        firstBlockPos = file.getAbsolutePosition();
        ////// TRAILER //////
        // seek to the trailer: the last int of the row stores the index size.
        // THE FIRST SEEK!!!
        file.seek(dataStart + dataSize - (Integer.SIZE / Byte.SIZE));
        // index size (with column index size's int)
        int indexSize = file.readInt();
        ////// INDEX //////
        // seek backwards from the trailer by indexSize to the index start.
        // THE SECOND SEEK!!!
        file.seek(dataStart + dataSize - (Integer.SIZE / Byte.SIZE) - indexSize);
        // read index into memory
        indexes = IndexHelper.deserializeIndex(file);
    } else {
        // Index-at-front variant: plain sequential reads, no seeking.
        // skip bloom filter
        IndexHelper.skipBloomFilter(file);
        // read in index
        indexes = IndexHelper.deserializeIndex(file);
        // read deletion meta info
        emptyColumnFamily = ColumnFamily.serializer() .deserializeFromSSTableNoColumns(ssTable.makeColumnFamily(), file);
        file.readInt(); // column count
        // the position of the first block
        firstBlockPos = file.getAbsolutePosition();
    }
    curRangeIndex = IndexHelper.indexFor(startColumn, indexes, comparator, reversed);
    // In reverse iteration an out-of-range start maps past the last index block; clamp it.
    if (reversed && curRangeIndex == indexes.size()) curRangeIndex--;
    // compression algorithm used when writing; the id is encoded in the row format byte.
    Compression.Algorithm compressAlgo;
    try {
        compressAlgo = Compression.getCompressionAlgorithmById( ColumnFamily.serializer().getNewRowFormatCompressAlgo(rowFormat));
    } catch (IllegalArgumentException e) {
        // Unknown algorithm id: surface as IOException, preserving the cause.
        logger.error(e.toString());
        throw new IOException(e);
    }
    compressContext = ColumnFamilySerializer.CompressionContext.getInstance(compressAlgo);
}