public SuperColumn get_super_column(String table, String key, SuperColumnPath super_column_path, int consistency_level)
throws InvalidRequestException, NotFoundException
{
    if (logger.isDebugEnabled())
        logger.debug("get_super_column");

    ThriftValidation.validateSuperColumnPath(table, super_column_path);

    ColumnFamily cfamily = readColumnFamily(new SliceByNamesReadCommand(table,
                                                                        key,
                                                                        new QueryPath(super_column_path.column_family),
                                                                        Arrays.asList(super_column_path.super_column)),
                                            consistency_level);
    if (cfamily == null)
    {
        throw new NotFoundException();
    }

    Collection<IColumn> columns = cfamily.getSortedColumns();
    if (columns == null || columns.size() == 0)
    {
        throw new NotFoundException();
    }

    assert columns.size() == 1;
    IColumn column = columns.iterator().next();
    if (column.getSubColumns().size() == 0)
    {
        throw new NotFoundException();
    }

    return new SuperColumn(column.name(), thriftifyColumns(column.getSubColumns()));
}
public static KSMetaData testMetadata(String name,
                                      Class<? extends AbstractReplicationStrategy> strategyClass,
                                      Map<String, String> strategyOptions,
                                      CFMetaData... cfDefs)
{
    return new KSMetaData(name, strategyClass, strategyOptions, true, Arrays.asList(cfDefs));
}
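// A hedged usage sketch for the varargs helper above, e.g. from a unit test setup.
// It assumes SimpleStrategy (org.apache.cassandra.locator.SimpleStrategy) is on the
// classpath and that optsWithRF, the KSMetaData helper seen in systemKeyspace() below,
// is accessible from the caller; the keyspace name and the standardCfDef parameter are
// hypothetical.
private static KSMetaData exampleTestKeyspace(CFMetaData standardCfDef)
{
    return KSMetaData.testMetadata("TestKeyspace",
                                   SimpleStrategy.class,
                                   KSMetaData.optsWithRF(1),
                                   standardCfDef);
}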
public Column get_column(String table, String key, ColumnPath column_path, int consistency_level)
throws InvalidRequestException, NotFoundException
{
    if (logger.isDebugEnabled())
        logger.debug("get_column");

    ThriftValidation.validateColumnPath(table, column_path);

    QueryPath path = new QueryPath(column_path.column_family, column_path.super_column);
    ColumnFamily cfamily = readColumnFamily(new SliceByNamesReadCommand(table, key, path, Arrays.asList(column_path.column)),
                                            consistency_level);
    // TODO can we leverage getSlice here and just check that it returns one column?
    if (cfamily == null)
    {
        throw new NotFoundException();
    }

    Collection<IColumn> columns = null;
    if (column_path.super_column != null)
    {
        IColumn column = cfamily.getColumn(column_path.super_column);
        if (column != null)
        {
            columns = column.getSubColumns();
        }
    }
    else
    {
        columns = cfamily.getSortedColumns();
    }

    if (columns == null || columns.size() == 0)
    {
        throw new NotFoundException();
    }

    assert columns.size() == 1;
    IColumn column = columns.iterator().next();
    if (column.isMarkedForDelete())
    {
        throw new NotFoundException();
    }

    return new Column(column.name(), column.value(), column.timestamp());
}
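// Both get_column and get_super_column above follow the same shape: issue a by-name
// slice for a single name, then insist on exactly one live result. A self-contained
// sketch of that validation pattern, with made-up names (this is an illustration of
// the control flow, not part of the codebase):
import java.util.Collection;
import java.util.NoSuchElementException;

final class SingleResult
{
    static <T> T exactlyOne(Collection<T> results)
    {
        if (results == null || results.isEmpty())
            throw new NoSuchElementException("no matching column");
        // A by-name slice for one name should return at most one column.
        assert results.size() == 1;
        return results.iterator().next();
    }
}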
private LocalOrRemoteBlock getRemoteSubBlock(ByteBuffer blockId,
                                             ByteBuffer sblockId,
                                             int offset,
                                             ColumnParent subBlockDataPath)
throws TimedOutException, UnavailableException, InvalidRequestException, NotFoundException
{
    // The column name is the SubBlock id (UUID)
    ReadCommand rc = new SliceByNamesReadCommand(cfsKeyspace, blockId, subBlockDataPath, Arrays.asList(sblockId));
    try
    {
        // CL=ONE as there are not multiple versions of the blocks.
        List<Row> rows = StorageProxy.read(Arrays.asList(rc), ConsistencyLevel.ONE);

        IColumn col;
        try
        {
            col = validateAndGetColumn(rows, sblockId);
        }
        catch (NotFoundException e)
        {
            // This is a best effort to get the value. Sometimes, due to the size of
            // the subblocks, the normal replication may time out, leaving a replica
            // without the piece of data. Hence we retry with a higher CL.
            rows = StorageProxy.read(Arrays.asList(rc), ConsistencyLevel.QUORUM);
            col = validateAndGetColumn(rows, sblockId);
        }

        ByteBuffer value = col.value();

        if (value.remaining() < offset)
            throw new InvalidRequestException("Invalid offset for block of size: " + value.remaining());

        LocalOrRemoteBlock block = new LocalOrRemoteBlock();
        if (offset > 0)
        {
            ByteBuffer offsetBlock = value.duplicate();
            offsetBlock.position(offsetBlock.position() + offset);
            block.setRemote_block(offsetBlock);
        }
        else
        {
            block.setRemote_block(value);
        }

        return block;
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }
}
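// A self-contained sketch of the read-escalation pattern used above: attempt a cheap
// read first, and only on a miss escalate to a stronger (more expensive) read. The
// weakStore/strongStore names are hypothetical stand-ins for reads at CL.ONE and
// CL.QUORUM; this illustrates the control flow only, not Cassandra's API.
import java.util.Map;
import java.util.NoSuchElementException;

class EscalatingReader
{
    private final Map<String, byte[]> weakStore;   // stands in for a CL.ONE read
    private final Map<String, byte[]> strongStore; // stands in for a CL.QUORUM read

    EscalatingReader(Map<String, byte[]> weakStore, Map<String, byte[]> strongStore)
    {
        this.weakStore = weakStore;
        this.strongStore = strongStore;
    }

    byte[] read(String key)
    {
        byte[] value = weakStore.get(key);
        if (value == null)
        {
            // Escalate only on a miss, mirroring the NotFoundException catch above.
            value = strongStore.get(key);
            if (value == null)
                throw new NoSuchElementException(key);
        }
        return value;
    }
}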
/**
 * Perform recovery on commit logs located in the directory specified by the config file.
 *
 * @return the number of mutations replayed
 */
public int recover() throws IOException
{
    // If createReserveSegments is already flipped, the CLSM is running and recovery has
    // already taken place.
    if (allocator.createReserveSegments)
        return 0;

    // The allocator could be in the process of initial startup with 0 active and available
    // segments. We need to wait for the allocation manager to finish allocation and add it
    // to available segments so we don't get an invalid response on allocator.manages(...)
    // below by grabbing a file off the filesystem before it's added to the CLQ.
    allocator.allocatingFrom();

    FilenameFilter unmanagedFilesFilter = new FilenameFilter()
    {
        public boolean accept(File dir, String name)
        {
            // We used to try to avoid instantiating the commitlog (thus creating an empty
            // segment ready for writes) until after recover was finished. This turns out to
            // be fragile; it is less error-prone to go ahead and allow writes before
            // recover(), and just skip active segments when we do.
            return CommitLogDescriptor.isValid(name) && !allocator.manages(name);
        }
    };

    // Submit all existing files in the commit log dir for archiving prior to recovery -
    // CASSANDRA-6904
    for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles(unmanagedFilesFilter))
    {
        archiver.maybeArchive(file.getPath(), file.getName());
        archiver.maybeWaitForArchiving(file.getName());
    }

    assert archiver.archivePending.isEmpty() : "Not all commit log archive tasks were completed before restore";
    archiver.maybeRestoreArchive();

    File[] files = new File(DatabaseDescriptor.getCommitLogLocation()).listFiles(unmanagedFilesFilter);
    int replayed = 0;
    if (files.length == 0)
    {
        logger.info("No commitlog files found; skipping replay");
    }
    else
    {
        Arrays.sort(files, new CommitLogSegmentFileComparator());
        logger.info("Replaying {}", StringUtils.join(files, ", "));
        replayed = recover(files);
        logger.info("Log replay complete, {} replayed mutations", replayed);

        for (File f : files)
            allocator.recycleSegment(f);
    }

    allocator.enableReserveSegmentCreation();
    return replayed;
}
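// A minimal, self-contained illustration of the FilenameFilter idiom used above: list
// only files whose names pass a validity test and are not already managed. The ".log"
// naming rule and the managed set are made up for the example; note the null check,
// since File.listFiles returns null on an I/O error (the method above relies on the
// commit log directory being readable).
import java.io.File;
import java.io.FilenameFilter;
import java.util.Set;

class UnmanagedLogLister
{
    static File[] listUnmanaged(File dir, final Set<String> managed)
    {
        FilenameFilter filter = new FilenameFilter()
        {
            public boolean accept(File d, String name)
            {
                return name.endsWith(".log") && !managed.contains(name);
            }
        };
        File[] files = dir.listFiles(filter);
        return files == null ? new File[0] : files;
    }
}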
@Override
public String toString()
{
    return "SliceQueryFilter [reversed=" + reversed
           + ", slices=" + Arrays.toString(slices)
           + ", count=" + count
           + ", toGroup=" + compositesToGroup
           + "]";
}
public static KSMetaData systemKeyspace()
{
    List<CFMetaData> cfDefs = Arrays.asList(CFMetaData.StatusCf,
                                            CFMetaData.HintsCf,
                                            CFMetaData.MigrationsCf,
                                            CFMetaData.SchemaCf,
                                            CFMetaData.IndexCf,
                                            CFMetaData.NodeIdCf,
                                            CFMetaData.VersionCf,
                                            CFMetaData.SchemaKeyspacesCf,
                                            CFMetaData.SchemaColumnFamiliesCf,
                                            CFMetaData.SchemaColumnsCf);
    return new KSMetaData(Table.SYSTEM_TABLE, LocalStrategy.class, optsWithRF(1), true, cfDefs);
}
public static KSMetaData fromThrift(KsDef ksd, CFMetaData... cfDefs) throws ConfigurationException
{
    Class<? extends AbstractReplicationStrategy> cls = AbstractReplicationStrategy.getClass(ksd.strategy_class);
    if (cls.equals(LocalStrategy.class))
        throw new ConfigurationException("Unable to use given strategy class: LocalStrategy is reserved for internal use.");

    return new KSMetaData(ksd.name,
                          cls,
                          ksd.strategy_options == null ? Collections.<String, String>emptyMap() : ksd.strategy_options,
                          ksd.durable_writes,
                          Arrays.asList(cfDefs));
}
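// A hedged usage sketch for fromThrift: converting a client-supplied Thrift KsDef into
// internal metadata. The KsDef constructor below follows the usual Thrift-generated
// shape (name, strategy_class, cf_defs), but the exact generated signature can differ
// by version; the keyspace name is hypothetical.
private static KSMetaData exampleFromThrift() throws ConfigurationException
{
    KsDef ksd = new KsDef("Keyspace1",
                          "org.apache.cassandra.locator.SimpleStrategy",
                          Collections.<CfDef>emptyList());
    ksd.setDurable_writes(true);
    // Would throw ConfigurationException if strategy_class named LocalStrategy instead.
    return KSMetaData.fromThrift(ksd);
}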