Code Example #1
  public List<Row> fetchPage(int pageSize)
      throws RequestValidationException, RequestExecutionException {
    if (isExhausted()) return Collections.emptyList();

    int currentPageSize = nextPageSize(pageSize);
    List<Row> rows = filterEmpty(queryNextPage(currentPageSize, consistencyLevel, localQuery));

    if (rows.isEmpty()) {
      logger.debug("Got empty set of rows, considering pager exhausted");
      exhausted = true;
      return Collections.emptyList();
    }

    int liveCount = getPageLiveCount(rows);
    logger.debug("Fetched {} live rows", liveCount);

    // Because SP.getRangeSlice doesn't trim the result (see SP.trim()), liveCount may be greater
    // than what was asked for (currentPageSize). This would throw off the paging logic, so we trim
    // the excess. It's not extremely efficient, but most of the time there should be nothing or
    // very little to trim.
    if (liveCount > currentPageSize) {
      rows = discardLast(rows, liveCount - currentPageSize);
      liveCount = currentPageSize;
    }

    remaining -= liveCount;

    // If we've got less than requested, there is no more query to do (but
    // we still need to return the current page)
    if (liveCount < currentPageSize) {
      logger.debug(
          "Got result ({}) smaller than page size ({}), considering pager exhausted",
          liveCount,
          currentPageSize);
      exhausted = true;
    }

    // If it's not the first query and the first column is the last one returned (likely
    // but not certain since paging can race with deletes/expiration), then remove the
    // first column.
    if (containsPreviousLast(rows.get(0))) {
      rows = discardFirst(rows);
      remaining++;
    }
    // Otherwise, if 'lastWasRecorded', we queried for one more than the page size,
    // so if the page is full, trim the last entry
    else if (lastWasRecorded && !exhausted) {
      // We've asked for one more than necessary
      rows = discardLast(rows);
      remaining++;
    }

    logger.debug("Remaining rows to page: {}", remaining);

    if (!isExhausted()) lastWasRecorded = recordLast(rows.get(rows.size() - 1));

    return rows;
  }
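
For context, here is a sketch of the intended call pattern (not from the original source): the pager variable, the page size of 100, and the process callback are illustrative assumptions, while fetchPage and isExhausted come from the method above.

  // Hypothetical driver loop for the pager above; 'pager', the page size of 100,
  // and 'process' are placeholders (checked exceptions elided in this sketch).
  while (!pager.isExhausted()) {
    List<Row> page = pager.fetchPage(100); // may return fewer than 100 live rows
    for (Row row : page) {
      process(row);
    }
  }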
Code Example #2
  @VisibleForTesting
  List<Row> discardLast(List<Row> rows, int toDiscard) {
    if (toDiscard == 0 || rows.isEmpty()) return rows;

    int i = rows.size() - 1;
    DecoratedKey lastKey = null;
    ColumnFamily lastCf = null;
    while (toDiscard > 0 && i >= 0) {
      Row last = rows.get(i--);
      lastKey = last.key;
      lastCf = last.cf.cloneMeShallow(isReversed());
      toDiscard -=
          isReversed()
              ? discardFirst(last.cf, toDiscard, lastCf)
              : discardLast(last.cf, toDiscard, lastCf);
    }

    // If there is less live data than the amount to discard, everything is discarded
    if (toDiscard > 0) return Collections.<Row>emptyList();

    // i is the index of the last row that we are sure to keep. On top of that,
    // we also keep lastCf if it hasn't been fully emptied by the last iteration above.
    int count = lastCf.getColumnCount();
    int newSize = count == 0 ? i + 1 : i + 2;
    List<Row> newRows = new ArrayList<Row>(newSize);
    newRows.addAll(rows.subList(0, i + 1));
    if (count != 0) newRows.add(new Row(lastKey, lastCf));

    return newRows;
  }
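
To make the index arithmetic easier to follow, here is a self-contained plain-Java analogue (not Cassandra code): it trims the last toDiscard elements across a list of groups, keeping a partially trimmed last group exactly as the method above keeps lastCf when it is not fully emptied.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class TrimDemo {
  // Walk backwards, dropping whole trailing groups until fewer than 'toDiscard'
  // elements remain to drop, then keep a trimmed copy of the last visited group.
  static List<List<Integer>> discardLast(List<List<Integer>> groups, int toDiscard) {
    if (toDiscard == 0 || groups.isEmpty()) return groups;

    int i = groups.size() - 1;
    List<Integer> lastGroup = null;
    while (toDiscard > 0 && i >= 0) {
      List<Integer> g = groups.get(i--);
      int drop = Math.min(toDiscard, g.size());
      lastGroup = new ArrayList<Integer>(g.subList(0, g.size() - drop)); // trimmed copy
      toDiscard -= drop;
    }

    // Less data than requested to discard: everything goes.
    if (toDiscard > 0) return Collections.<List<Integer>>emptyList();

    // Keep groups 0..i, plus the trimmed last group if anything survived in it.
    List<List<Integer>> result = new ArrayList<List<Integer>>(groups.subList(0, i + 1));
    if (!lastGroup.isEmpty()) result.add(lastGroup);
    return result;
  }
}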
Code Example #3
File: CompactionsTest.java  Project: placrosse/ACaZoo
  @Test
  public void testEchoedRow() throws IOException, ExecutionException, InterruptedException {
    // This test checks that EchoedRow doesn't skip rows: see CASSANDRA-2653

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    // Insert 4 keys in two sstables. The sstables need at least 2 rows
    // each to trigger what was causing CASSANDRA-2653
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      RowMutation rm = new RowMutation(KEYSPACE1, key.key);
      rm.add(
          "Standard2",
          ByteBufferUtil.bytes(String.valueOf(i)),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          i);
      rm.apply();

      if (i % 2 == 0) cfs.forceBlockingFlush();
    }
    Collection<SSTableReader> toCompact = cfs.getSSTables();
    assert toCompact.size() == 2;

    // Reinsert the same keys. We will compact only the previous sstables, but we need these new
    // ones to make sure EchoedRow is used; otherwise it won't be, because a purge can be done
    // instead.
    for (int i = 1; i < 5; i++) {
      DecoratedKey key = Util.dk(String.valueOf(i));
      RowMutation rm = new RowMutation(KEYSPACE1, key.key);
      rm.add(
          "Standard2",
          ByteBufferUtil.bytes(String.valueOf(i)),
          ByteBufferUtil.EMPTY_BYTE_BUFFER,
          i);
      rm.apply();
    }
    cfs.forceBlockingFlush();
    SSTableReader tmpSSTable = null;
    for (SSTableReader sstable : cfs.getSSTables())
      if (!toCompact.contains(sstable)) tmpSSTable = sstable;
    assert tmpSSTable != null;

    // Force compaction on the first sstables. Since each row is in only one sstable, we will be
    // using EchoedRow.
    Util.compact(cfs, toCompact);
    assertEquals(2, cfs.getSSTables().size());

    // Now, we remove the sstable that was just created to force the use of EchoedRow (so that it
    // doesn't hide the problem)
    cfs.markObsolete(Collections.singleton(tmpSSTable), OperationType.UNKNOWN);
    assertEquals(1, cfs.getSSTables().size());

    // Now assert we do have the 4 keys
    assertEquals(4, Util.getRangeSlice(cfs).size());
  }
Code Example #4
    public SplittingCompactionTask(
        ColumnFamilyStore cfs, SSTableReader sstable, int sstableSizeInMB) {
      super(cfs, Collections.singletonList(sstable), CompactionManager.NO_GC);
      this.sstableSizeInMB = sstableSizeInMB;

      if (sstableSizeInMB <= 0)
        throw new IllegalArgumentException(
            "Invalid target size for SSTables, must be > 0 (got: " + sstableSizeInMB + ")");
    }
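
A hypothetical instantiation, just to show the guard above in action; cfs and sstable stand in for objects from the calling context.

    // Illustrative only: 'cfs' and 'sstable' are assumed to exist in the caller.
    SplittingCompactionTask task = new SplittingCompactionTask(cfs, sstable, 50); // 50 MB target
    // new SplittingCompactionTask(cfs, sstable, 0); // would throw IllegalArgumentException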
Code Example #5
File: KSMetaData.java  Project: rcamus/platform
  // For new user created keyspaces (through CQL)
  public static KSMetaData newKeyspace(
      String name, String strategyName, Map<String, String> options) throws ConfigurationException {
    Class<? extends AbstractReplicationStrategy> cls =
        AbstractReplicationStrategy.getClass(strategyName);
    if (cls.equals(LocalStrategy.class))
      throw new ConfigurationException(
          "Unable to use given strategy class: LocalStrategy is reserved for internal use.");

    return new KSMetaData(name, cls, options, true, Collections.<CFMetaData>emptyList());
  }
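
A minimal sketch of calling this factory; the keyspace name and the SimpleStrategy options map are illustrative values, not from the original source.

  // Hypothetical call: a user keyspace with SimpleStrategy and a replication factor of 3.
  Map<String, String> options = Collections.singletonMap("replication_factor", "3");
  KSMetaData ks = KSMetaData.newKeyspace("demo_ks", "SimpleStrategy", options);
  // KSMetaData.newKeyspace("demo_ks", "LocalStrategy", options); // ConfigurationException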
Code Example #6
File: KSMetaData.java  Project: rcamus/platform
  public KSMetaData reloadAttributes() throws IOException {
    Row ksDefRow = SystemTable.readSchemaRow(name);

    if (ksDefRow.cf == null)
      throw new IOException(
          String.format(
              "%s not found in the schema definitions table (%s).",
              name, SystemTable.SCHEMA_KEYSPACES_CF));

    return fromSchema(ksDefRow, Collections.<CFMetaData>emptyList());
  }
Code Example #7
 public static SSTableReader sstable(
     int generation, int size, boolean keepRef, ColumnFamilyStore cfs) {
   Descriptor descriptor =
       new Descriptor(
           cfs.getDirectories().getDirectoryForNewSSTables(),
           cfs.keyspace.getName(),
           cfs.getColumnFamilyName(),
           generation);
   Set<Component> components =
       ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
   for (Component component : components) {
     File file = new File(descriptor.filenameFor(component));
      try {
        file.createNewFile();
      } catch (IOException e) {
        // best-effort: this only fabricates placeholder component files for the test fixture
      }
   }
   if (size > 0) {
     try {
       File file = new File(descriptor.filenameFor(Component.DATA));
       try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
         raf.setLength(size);
       }
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
   }
   SerializationHeader header = SerializationHeader.make(cfs.metadata, Collections.emptyList());
   StatsMetadata metadata =
       (StatsMetadata)
           new MetadataCollector(cfs.metadata.comparator)
               .finalizeMetadata(
                   cfs.metadata.partitioner.getClass().getCanonicalName(), 0.01f, -1, header)
               .get(MetadataType.STATS);
    // 'segmentedFile' and 'indexSummary' are assumed to be fields of the enclosing helper class
    SSTableReader reader =
       SSTableReader.internalOpen(
           descriptor,
           components,
           cfs.metadata,
           segmentedFile.sharedCopy(),
           segmentedFile.sharedCopy(),
           indexSummary.sharedCopy(),
           new AlwaysPresentFilter(),
           1L,
           metadata,
           SSTableReader.OpenReason.NORMAL,
           header);
   reader.first = reader.last = readerBounds(generation);
   if (!keepRef) reader.selfRef().release();
   return reader;
 }
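
A short sketch of how a test might use this helper; the generation and size are illustrative.

  // Illustrative: fabricate a 1 KiB sstable for generation 1 and keep a reference to it.
  // 'cfs' is assumed to be an open ColumnFamilyStore from the test context.
  SSTableReader reader = sstable(1, 1024, true, cfs);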
Code Example #8
File: Util.java  Project: hobinyoon/mutants-cassandra
  /** Creates initial set of nodes and tokens. Nodes are added to StorageService as 'normal' */
  public static void createInitialRing(
      StorageService ss,
      IPartitioner partitioner,
      List<Token> endpointTokens,
      List<Token> keyTokens,
      List<InetAddress> hosts,
      List<UUID> hostIds,
      int howMany)
      throws UnknownHostException {
    // Expand pool of host IDs as necessary
    for (int i = hostIdPool.size(); i < howMany; i++) hostIdPool.add(UUID.randomUUID());

    for (int i = 0; i < howMany; i++) {
      endpointTokens.add(new BigIntegerToken(String.valueOf(10 * i)));
      keyTokens.add(new BigIntegerToken(String.valueOf(10 * i + 5)));
      hostIds.add(hostIdPool.get(i));
    }

    for (int i = 0; i < endpointTokens.size(); i++) {
      InetAddress ep = InetAddress.getByName("127.0.0." + String.valueOf(i + 1));
      Gossiper.instance.initializeNodeUnsafe(ep, hostIds.get(i), 1);
      Gossiper.instance.injectApplicationState(
          ep,
          ApplicationState.TOKENS,
          new VersionedValue.VersionedValueFactory(partitioner)
              .tokens(Collections.singleton(endpointTokens.get(i))));
      ss.onChange(
          ep,
          ApplicationState.STATUS,
          new VersionedValue.VersionedValueFactory(partitioner)
              .normal(Collections.singleton(endpointTokens.get(i))));
      hosts.add(ep);
    }

    // check that all nodes are in token metadata
    for (int i = 0; i < endpointTokens.size(); ++i)
      assertTrue(ss.getTokenMetadata().isMember(hosts.get(i)));
  }
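
For reference, a hypothetical test setup that drives this helper; the partitioner variable, the ring size of 3, and the use of StorageService.instance are assumptions about the calling context.

  // Hypothetical setup: a three-node ring; the output lists are filled in by the helper.
  // 'partitioner' is assumed to come from the test configuration.
  List<Token> endpointTokens = new ArrayList<Token>();
  List<Token> keyTokens = new ArrayList<Token>();
  List<InetAddress> hosts = new ArrayList<InetAddress>();
  List<UUID> hostIds = new ArrayList<UUID>();
  Util.createInitialRing(
      StorageService.instance, partitioner, endpointTokens, keyTokens, hosts, hostIds, 3);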
Code Example #9
File: KSMetaData.java  Project: rcamus/platform
 KSMetaData(
     String name,
     Class<? extends AbstractReplicationStrategy> strategyClass,
     Map<String, String> strategyOptions,
     boolean durableWrites,
     Iterable<CFMetaData> cfDefs) {
   this.name = name;
   this.strategyClass = strategyClass == null ? NetworkTopologyStrategy.class : strategyClass;
   this.strategyOptions = strategyOptions;
   Map<String, CFMetaData> cfmap = new HashMap<String, CFMetaData>();
   for (CFMetaData cfm : cfDefs) cfmap.put(cfm.cfName, cfm);
   this.cfMetaData = Collections.unmodifiableMap(cfmap);
   this.durableWrites = durableWrites;
 }
Code Example #10
File: KSMetaData.java  Project: rcamus/platform
  public static KSMetaData fromThrift(KsDef ksd, CFMetaData... cfDefs)
      throws ConfigurationException {
    Class<? extends AbstractReplicationStrategy> cls =
        AbstractReplicationStrategy.getClass(ksd.strategy_class);
    if (cls.equals(LocalStrategy.class))
      throw new ConfigurationException(
          "Unable to use given strategy class: LocalStrategy is reserved for internal use.");

    return new KSMetaData(
        ksd.name,
        cls,
        ksd.strategy_options == null
            ? Collections.<String, String>emptyMap()
            : ksd.strategy_options,
        ksd.durable_writes,
        Arrays.asList(cfDefs));
  }
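
A sketch of converting a Thrift KsDef; the direct field assignments assume the public members that Thrift-generated classes normally expose, and the values are illustrative.

  // Hypothetical conversion; the field names match those read by fromThrift above.
  KsDef ksd = new KsDef();
  ksd.name = "demo_ks";
  ksd.strategy_class = "SimpleStrategy";
  ksd.strategy_options = Collections.singletonMap("replication_factor", "1");
  ksd.durable_writes = true;
  KSMetaData ksm = KSMetaData.fromThrift(ksd); // no column families passed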
Code Example #11
  public List<Column> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder) {
    if (columns == null || columns.isEmpty()) {
      return EMPTY_COLUMNS;
    }

    ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
    for (IColumn column : columns) {
      if (column.isMarkedForDelete()) {
        continue;
      }
      Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
      thriftColumns.add(thrift_column);
    }

    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    if (reverseOrder) Collections.reverse(thriftColumns);
    return thriftColumns;
  }
Code Example #12
  private List<SuperColumn> thriftifySuperColumns(
      Collection<IColumn> columns, boolean reverseOrder) {
    if (columns == null || columns.isEmpty()) {
      return EMPTY_SUPERCOLUMNS;
    }

    ArrayList<SuperColumn> thriftSuperColumns = new ArrayList<SuperColumn>(columns.size());
    for (IColumn column : columns) {
      List<Column> subcolumns = thriftifyColumns(column.getSubColumns());
      if (subcolumns.isEmpty()) {
        continue;
      }
      thriftSuperColumns.add(new SuperColumn(column.name(), subcolumns));
    }

    if (reverseOrder) Collections.reverse(thriftSuperColumns);

    return thriftSuperColumns;
  }
Code Example #13
File: KSMetaData.java  Project: rcamus/platform
  /**
   * Deserialize ColumnFamilies from the low-level schema representation; all of them belong to
   * the same keyspace.
   *
   * @param row low-level schema row to deserialize
   * @return map from ColumnFamily name to its metadata, for faster lookup
   */
  public static Map<String, CFMetaData> deserializeColumnFamilies(Row row) {
    if (row.cf == null) return Collections.emptyMap();

    Map<String, CFMetaData> cfms = new HashMap<String, CFMetaData>();
    UntypedResultSet results =
        QueryProcessor.resultify("SELECT * FROM system.schema_columnfamilies", row);
    for (UntypedResultSet.Row result : results) {
      CFMetaData cfm = CFMetaData.fromSchema(result);
      cfms.put(cfm.cfName, cfm);
    }

    for (CFMetaData cfm : cfms.values()) {
      Row columnRow = ColumnDefinition.readSchema(cfm.ksName, cfm.cfName);
      for (ColumnDefinition cd : ColumnDefinition.fromSchema(columnRow, cfm))
        cfm.column_metadata.put(cd.name, cd);
    }

    return cfms;
  }
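
A short usage sketch; schemaRow and the column family name are placeholders.

  // Illustrative lookup: 'schemaRow' is assumed to be a schema Row for one keyspace.
  Map<String, CFMetaData> cfms = deserializeColumnFamilies(schemaRow);
  CFMetaData cfm = cfms.get("my_cf"); // null if that CF is not defined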
Code Example #14
/** Author : Avinash Lakshman ( [email protected]) & Prashant Malik ( [email protected] ) */
public class CassandraServer implements Cassandra.Iface {
  private static Logger logger = Logger.getLogger(CassandraServer.class);

  private static final List<Column> EMPTY_COLUMNS = Collections.emptyList();
  private static final List<SuperColumn> EMPTY_SUPERCOLUMNS = Collections.emptyList();

  /*
   * Handle to the storage service to interact with the other machines in the
   * cluster.
   */
  protected StorageService storageService;

  public CassandraServer() {
    storageService = StorageService.instance();
  }

  /*
   * The start function initializes the server and starts listening on the
   * specified port.
   */
  public void start() throws IOException {
    LogUtil.init();
    // LogUtil.setLogLevel("com.facebook", "DEBUG");
    // Start the storage service
    storageService.start();
  }

  protected ColumnFamily readColumnFamily(ReadCommand command, int consistency_level)
      throws InvalidRequestException {
    String cfName = command.getColumnFamilyName();
    ThriftValidation.validateKey(command.key);

    if (consistency_level == ConsistencyLevel.ZERO) {
      throw new InvalidRequestException(
          "Consistency level zero may not be applied to read operations");
    }
    if (consistency_level == ConsistencyLevel.ALL) {
      throw new InvalidRequestException(
          "Consistency level all is not yet supported on read operations");
    }

    Row row;
    try {
      row = StorageProxy.readProtocol(command, consistency_level);
    } catch (IOException e) {
      throw new RuntimeException(e);
    } catch (TimeoutException e) {
      throw new RuntimeException(e);
    }

    if (row == null) {
      return null;
    }
    return row.getColumnFamily(cfName);
  }

  public List<Column> thriftifyColumns(Collection<IColumn> columns) {
    return thriftifyColumns(columns, false);
  }

  public List<Column> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder) {
    if (columns == null || columns.isEmpty()) {
      return EMPTY_COLUMNS;
    }

    ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
    for (IColumn column : columns) {
      if (column.isMarkedForDelete()) {
        continue;
      }
      Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
      thriftColumns.add(thrift_column);
    }

    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    if (reverseOrder) Collections.reverse(thriftColumns);
    return thriftColumns;
  }

  /** for resultsets of standard columns */
  private List<Column> getSlice(ReadCommand command, int consistency_level)
      throws InvalidRequestException {
    ColumnFamily cfamily = readColumnFamily(command, consistency_level);
    boolean reverseOrder = false;

    if (command instanceof SliceFromReadCommand)
      reverseOrder = !((SliceFromReadCommand) command).isAscending;

    if (cfamily == null || cfamily.getColumnsMap().size() == 0) {
      return EMPTY_COLUMNS;
    }
    if (cfamily.isSuper()) {
      IColumn column = cfamily.getColumnsMap().values().iterator().next();
      return thriftifyColumns(column.getSubColumns(), reverseOrder);
    }
    return thriftifyColumns(cfamily.getSortedColumns(), reverseOrder);
  }

  public List<Column> get_slice_by_names(
      String table,
      String key,
      ColumnParent column_parent,
      List<byte[]> column_names,
      int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_by_names");
    ThriftValidation.validateColumnParent(table, column_parent);
    return getSlice(
        new SliceByNamesReadCommand(table, key, column_parent, column_names), consistency_level);
  }

  public List<Column> get_slice(
      String table,
      String key,
      ColumnParent column_parent,
      byte[] start,
      byte[] finish,
      boolean is_ascending,
      int count,
      int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_from");
    ThriftValidation.validateColumnParent(table, column_parent);
    // TODO support get_slice on super CFs
    if (count <= 0) throw new InvalidRequestException("get_slice requires positive count");

    return getSlice(
        new SliceFromReadCommand(table, key, column_parent, start, finish, is_ascending, count),
        consistency_level);
  }

  public Column get_column(String table, String key, ColumnPath column_path, int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_column");
    ThriftValidation.validateColumnPath(table, column_path);

    QueryPath path = new QueryPath(column_path.column_family, column_path.super_column);
    ColumnFamily cfamily =
        readColumnFamily(
            new SliceByNamesReadCommand(table, key, path, Arrays.asList(column_path.column)),
            consistency_level);
    // TODO can we leverage getSlice here and just check that it returns one column?
    if (cfamily == null) {
      throw new NotFoundException();
    }
    Collection<IColumn> columns = null;
    if (column_path.super_column != null) {
      IColumn column = cfamily.getColumn(column_path.super_column);
      if (column != null) {
        columns = column.getSubColumns();
      }
    } else {
      columns = cfamily.getSortedColumns();
    }
    if (columns == null || columns.size() == 0) {
      throw new NotFoundException();
    }

    assert columns.size() == 1;
    IColumn column = columns.iterator().next();
    if (column.isMarkedForDelete()) {
      throw new NotFoundException();
    }

    return new Column(column.name(), column.value(), column.timestamp());
  }

  public int get_column_count(
      String table, String key, ColumnParent column_parent, int consistency_level)
      throws InvalidRequestException {
    if (logger.isDebugEnabled()) logger.debug("get_column_count");
    // validateColumnParent assumes we require simple columns; get_column_count is the only
    // one of the ColumnParent-taking APIs that can also work at the SuperColumn level,
    // so we roll a one-off validator here.
    String cfType = ThriftValidation.validateColumnFamily(table, column_parent.column_family);
    if (cfType.equals("Standard") && column_parent.super_column != null) {
      throw new InvalidRequestException(
          "columnfamily alone is required for standard CF " + column_parent.column_family);
    }

    ColumnFamily cfamily;
    cfamily =
        readColumnFamily(
            new SliceFromReadCommand(
                table,
                key,
                column_parent,
                ArrayUtils.EMPTY_BYTE_ARRAY,
                ArrayUtils.EMPTY_BYTE_ARRAY,
                true,
                Integer.MAX_VALUE),
            consistency_level);
    if (cfamily == null) {
      return 0;
    }
    Collection<IColumn> columns = null;
    if (column_parent.super_column != null) {
      IColumn column = cfamily.getColumn(column_parent.super_column);
      if (column != null) {
        columns = column.getSubColumns();
      }
    } else {
      columns = cfamily.getSortedColumns();
    }
    if (columns == null || columns.size() == 0) {
      return 0;
    }
    return columns.size();
  }

  public void insert(
      String table,
      String key,
      ColumnPath column_path,
      byte[] value,
      long timestamp,
      int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("insert");
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnPath(table, column_path);

    RowMutation rm = new RowMutation(table, key.trim());
    try {
      rm.add(new QueryPath(column_path), value, timestamp);
    } catch (MarshalException e) {
      throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, rm);
  }

  public void batch_insert(String table, BatchMutation batch_mutation, int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("batch_insert");
    RowMutation rm = RowMutation.getRowMutation(table, batch_mutation);
    Set<String> cfNames = rm.columnFamilyNames();
    ThriftValidation.validateKeyCommand(
        rm.key(), rm.table(), cfNames.toArray(new String[cfNames.size()]));

    doInsert(consistency_level, rm);
  }

  public void remove(
      String table,
      String key,
      ColumnPathOrParent column_path_or_parent,
      long timestamp,
      int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("remove");
    ThriftValidation.validateColumnPathOrParent(table, column_path_or_parent);

    RowMutation rm = new RowMutation(table, key.trim());
    rm.delete(new QueryPath(column_path_or_parent), timestamp);

    doInsert(consistency_level, rm);
  }

  private void doInsert(int consistency_level, RowMutation rm) throws UnavailableException {
    if (consistency_level != ConsistencyLevel.ZERO) {
      StorageProxy.insertBlocking(rm, consistency_level);
    } else {
      StorageProxy.insert(rm);
    }
  }

  public List<SuperColumn> get_slice_super_by_names(
      String table,
      String key,
      String column_family,
      List<byte[]> super_column_names,
      int consistency_level)
      throws InvalidRequestException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_super_by_names");
    ThriftValidation.validateColumnFamily(table, column_family);

    ColumnFamily cfamily =
        readColumnFamily(
            new SliceByNamesReadCommand(
                table, key, new QueryPath(column_family), super_column_names),
            consistency_level);
    if (cfamily == null) {
      return EMPTY_SUPERCOLUMNS;
    }
    return thriftifySuperColumns(cfamily.getSortedColumns());
  }

  private List<SuperColumn> thriftifySuperColumns(Collection<IColumn> columns) {
    return thriftifySuperColumns(columns, false);
  }

  private List<SuperColumn> thriftifySuperColumns(
      Collection<IColumn> columns, boolean reverseOrder) {
    if (columns == null || columns.isEmpty()) {
      return EMPTY_SUPERCOLUMNS;
    }

    ArrayList<SuperColumn> thriftSuperColumns = new ArrayList<SuperColumn>(columns.size());
    for (IColumn column : columns) {
      List<Column> subcolumns = thriftifyColumns(column.getSubColumns());
      if (subcolumns.isEmpty()) {
        continue;
      }
      thriftSuperColumns.add(new SuperColumn(column.name(), subcolumns));
    }

    if (reverseOrder) Collections.reverse(thriftSuperColumns);

    return thriftSuperColumns;
  }

  public List<SuperColumn> get_slice_super(
      String table,
      String key,
      String column_family,
      byte[] start,
      byte[] finish,
      boolean is_ascending,
      int count,
      int consistency_level)
      throws InvalidRequestException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_super");
    if (!DatabaseDescriptor.getColumnFamilyType(table, column_family).equals("Super"))
      throw new InvalidRequestException("get_slice_super requires a super CF name");
    if (count <= 0) throw new InvalidRequestException("get_slice_super requires positive count");

    ColumnFamily cfamily =
        readColumnFamily(
            new SliceFromReadCommand(
                table, key, new QueryPath(column_family), start, finish, is_ascending, count),
            consistency_level);
    if (cfamily == null) {
      return EMPTY_SUPERCOLUMNS;
    }
    Collection<IColumn> columns = cfamily.getSortedColumns();
    return thriftifySuperColumns(columns, !is_ascending);
  }

  public SuperColumn get_super_column(
      String table, String key, SuperColumnPath super_column_path, int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_superColumn");
    ThriftValidation.validateSuperColumnPath(table, super_column_path);

    ColumnFamily cfamily =
        readColumnFamily(
            new SliceByNamesReadCommand(
                table,
                key,
                new QueryPath(super_column_path.column_family),
                Arrays.asList(super_column_path.super_column)),
            consistency_level);
    if (cfamily == null) {
      throw new NotFoundException();
    }
    Collection<IColumn> columns = cfamily.getSortedColumns();
    if (columns == null || columns.size() == 0) {
      throw new NotFoundException();
    }

    assert columns.size() == 1;
    IColumn column = columns.iterator().next();
    if (column.getSubColumns().size() == 0) {
      throw new NotFoundException();
    }

    return new SuperColumn(column.name(), thriftifyColumns(column.getSubColumns()));
  }

  public void batch_insert_super_column(
      String table, BatchMutationSuper batch_mutation_super, int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("batch_insert_SuperColumn");
    RowMutation rm = RowMutation.getRowMutation(table, batch_mutation_super);
    Set<String> cfNames = rm.columnFamilyNames();
    ThriftValidation.validateKeyCommand(
        rm.key(), rm.table(), cfNames.toArray(new String[cfNames.size()]));

    doInsert(consistency_level, rm);
  }

  public String get_string_property(String propertyName) {
    if (propertyName.equals("cluster name")) {
      return DatabaseDescriptor.getClusterName();
    } else if (propertyName.equals("config file")) {
      String filename = DatabaseDescriptor.getConfigFileName();
      // try-with-resources ensures the stream is closed even if a read fails
      try (BufferedInputStream stream = new BufferedInputStream(new FileInputStream(filename))) {
        StringBuilder fileData = new StringBuilder(8192);
        byte[] buf = new byte[1024];
        int numRead;
        while ((numRead = stream.read(buf)) != -1) {
          fileData.append(new String(buf, 0, numRead));
        }
        return fileData.toString();
      } catch (IOException e) {
        return "file not found!";
      }
    } else if (propertyName.equals("version")) {
      return "0.3.0";
    } else {
      return "?";
    }
  }

  public List<String> get_string_list_property(String propertyName) {
    if (propertyName.equals("keyspaces")) {
      return DatabaseDescriptor.getTables();
    } else {
      return new ArrayList<String>();
    }
  }

  public Map<String, Map<String, String>> describe_keyspace(String table) throws NotFoundException {
    Map<String, Map<String, String>> columnFamiliesMap = new HashMap<String, Map<String, String>>();

    Map<String, CFMetaData> tableMetaData = DatabaseDescriptor.getTableMetaData(table);
    // table doesn't exist
    if (tableMetaData == null) {
      throw new NotFoundException();
    }

    for (Map.Entry<String, CFMetaData> pairs : tableMetaData.entrySet()) {
      CFMetaData columnFamilyMetaData = pairs.getValue();

      String desc = "";

      Map<String, String> columnMap = new HashMap<String, String>();
      desc =
          columnFamilyMetaData.n_columnMap
              + "("
              + columnFamilyMetaData.n_columnKey
              + ", "
              + columnFamilyMetaData.n_columnValue
              + ", "
              + columnFamilyMetaData.n_columnTimestamp
              + ")";
      if (columnFamilyMetaData.columnType.equals("Super")) {
        columnMap.put("Type", "Super");
        desc =
            columnFamilyMetaData.n_superColumnMap
                + "("
                + columnFamilyMetaData.n_superColumnKey
                + ", "
                + desc
                + ")";
      } else {
        columnMap.put("Type", "Standard");
      }

      desc =
          columnFamilyMetaData.tableName
              + "."
              + columnFamilyMetaData.cfName
              + "("
              + columnFamilyMetaData.n_rowKey
              + ", "
              + desc
              + ")";

      columnMap.put("Desc", desc);
      columnMap.put("CompareWith", columnFamilyMetaData.comparator.getClass().getName());
      if (columnFamilyMetaData.columnType.equals("Super")) {
        columnMap.put(
            "CompareSubcolumnsWith", columnFamilyMetaData.subcolumnComparator.getClass().getName());
      }
      columnMap.put("FlushPeriodInMinutes", columnFamilyMetaData.flushPeriodInMinutes + "");
      columnFamiliesMap.put(columnFamilyMetaData.cfName, columnMap);
    }
    return columnFamiliesMap;
  }

  public org.apache.cassandra.service.CqlResult execute_query(String query) throws TException {
    org.apache.cassandra.service.CqlResult result = new org.apache.cassandra.service.CqlResult();

    CqlResult cqlResult = CqlDriver.executeQuery(query);

    // convert CQL result type to Thrift specific return type
    if (cqlResult != null) {
      result.error_txt = cqlResult.errorTxt;
      result.result_set = cqlResult.resultSet;
      result.error_code = cqlResult.errorCode;
    }
    return result;
  }

  public List<String> get_key_range(
      String tablename, String columnFamily, String startWith, String stopAt, int maxResults)
      throws InvalidRequestException, TException {
    if (logger.isDebugEnabled()) logger.debug("get_key_range");
    ThriftValidation.validateCommand(tablename, columnFamily);
    if (!(StorageService.getPartitioner() instanceof OrderPreservingPartitioner)) {
      throw new InvalidRequestException(
          "range queries may only be performed against an order-preserving partitioner");
    }
    if (maxResults <= 0) {
      throw new InvalidRequestException("maxResults must be positive");
    }

    try {
      return StorageProxy.getKeyRange(
          new RangeCommand(tablename, columnFamily, startWith, stopAt, maxResults));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  // main method moved to CassandraDaemon
}
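
To show how this 0.3-era Thrift surface was driven, here is a hypothetical client-side call into get_slice; the ColumnParent field assignment assumes the public members of Thrift-generated classes, and ConsistencyLevel.ONE is assumed alongside the ZERO and ALL constants used above.

// Hypothetical use of the server above; 'server' is a CassandraServer instance.
ColumnParent parent = new ColumnParent();
parent.column_family = "Standard1"; // illustrative CF name
List<Column> columns =
    server.get_slice(
        "Keyspace1", "key1", parent,
        new byte[0], new byte[0], // empty start/finish = unbounded slice
        true, 100, ConsistencyLevel.ONE);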
Code Example #15
 /**
  * Flushes all dirty CFs, waiting for them to free and recycle any segments they were retaining
  */
 public void forceRecycleAllSegments() {
   allocator.forceRecycleAll(Collections.<UUID>emptyList());
 }