Code example #1
  public List<Row> fetchPage(int pageSize)
      throws RequestValidationException, RequestExecutionException {
    if (isExhausted()) return Collections.emptyList();

    int currentPageSize = nextPageSize(pageSize);
    List<Row> rows = filterEmpty(queryNextPage(currentPageSize, consistencyLevel, localQuery));

    if (rows.isEmpty()) {
      logger.debug("Got empty set of rows, considering pager exhausted");
      exhausted = true;
      return Collections.emptyList();
    }

    int liveCount = getPageLiveCount(rows);
    logger.debug("Fetched {} live rows", liveCount);

    // Because SP.getRangeSlice doesn't trim the result (see SP.trim()), liveCount may be
    // greater than what was asked for (currentPageSize). This would throw off the paging
    // logic, so we trim the excess. It's not extremely efficient, but most of the time there
    // should be nothing or very little to trim.
    if (liveCount > currentPageSize) {
      rows = discardLast(rows, liveCount - currentPageSize);
      liveCount = currentPageSize;
    }

    remaining -= liveCount;

    // If we've got less than requested, there is no more query to do (but
    // we still need to return the current page)
    if (liveCount < currentPageSize) {
      logger.debug(
          "Got result ({}) smaller than page size ({}), considering pager exhausted",
          liveCount,
          currentPageSize);
      exhausted = true;
    }

    // If it's not the first query and the first column is the last one returned (likely
    // but not certain since paging can race with deletes/expiration), then remove the
    // first column.
    if (containsPreviousLast(rows.get(0))) {
      rows = discardFirst(rows);
      remaining++;
    }
    // Otherwise, if 'lastWasRecorded', we queried for one more than the page size,
    // so if the page is full, trim the last entry
    else if (lastWasRecorded && !exhausted) {
      // We've asked for one more than necessary
      rows = discardLast(rows);
      remaining++;
    }

    logger.debug("Remaining rows to page: {}", remaining);

    if (!isExhausted()) lastWasRecorded = recordLast(rows.get(rows.size() - 1));

    return rows;
  }
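
A minimal sketch of a caller driving this pager; the pager variable and the process() consumer are hypothetical, and only fetchPage(int) and isExhausted() come from the snippet above:

  // Keep fetching pages of at most 100 live rows until the pager reports
  // exhaustion; the final page may be short or empty.
  while (!pager.isExhausted()) {
    List<Row> page = pager.fetchPage(100);
    if (page.isEmpty()) break; // an empty page also signals exhaustion
    process(page);             // hypothetical consumer
  }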
Code example #2
  @VisibleForTesting
  List<Row> discardLast(List<Row> rows, int toDiscard) {
    if (toDiscard == 0 || rows.isEmpty()) return rows;

    int i = rows.size() - 1;
    DecoratedKey lastKey = null;
    ColumnFamily lastCf = null;
    while (toDiscard > 0 && i >= 0) {
      Row last = rows.get(i--);
      lastKey = last.key;
      lastCf = last.cf.cloneMeShallow(isReversed());
      toDiscard -=
          isReversed()
              ? discardFirst(last.cf, toDiscard, lastCf)
              : discardLast(last.cf, toDiscard, lastCf);
    }

    // If there is less live data than to discard, all is discarded
    if (toDiscard > 0) return Collections.<Row>emptyList();

    // i is the index of the last row that we are sure to keep. On top of that,
    // we also keep lastCf if it hasn't been fully emptied by the last iteration above.
    int count = lastCf.getColumnCount();
    int newSize = count == 0 ? i + 1 : i + 2;
    List<Row> newRows = new ArrayList<Row>(newSize);
    newRows.addAll(rows.subList(0, i + 1));
    if (count != 0) newRows.add(new Row(lastKey, lastCf));

    return newRows;
  }
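
The same trimming idea, reduced to whole elements, can be sketched as below. This is an illustration only, not the Cassandra implementation: the per-row live-column accounting above is omitted.

  // Drop the last toDiscard elements; if there is less data than to
  // discard, everything is discarded, mirroring the emptyList() case above.
  static <T> List<T> discardLastSimple(List<T> items, int toDiscard) {
    if (toDiscard == 0 || items.isEmpty()) return items;
    if (toDiscard >= items.size()) return Collections.<T>emptyList();
    return new ArrayList<T>(items.subList(0, items.size() - toDiscard));
  }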
Code example #3
File: KSMetaData.java Project: rcamus/platform
  // For new user created keyspaces (through CQL)
  public static KSMetaData newKeyspace(
      String name, String strategyName, Map<String, String> options) throws ConfigurationException {
    Class<? extends AbstractReplicationStrategy> cls =
        AbstractReplicationStrategy.getClass(strategyName);
    if (cls.equals(LocalStrategy.class))
      throw new ConfigurationException(
          "Unable to use given strategy class: LocalStrategy is reserved for internal use.");

    return new KSMetaData(name, cls, options, true, Collections.<CFMetaData>emptyList());
  }
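
A hypothetical call site for this factory; the strategy-name shorthand and the option key follow common Cassandra conventions but are assumptions here:

  // Create metadata for a user keyspace replicated with SimpleStrategy at RF=1.
  // Passing LocalStrategy instead would trip the ConfigurationException above.
  KSMetaData ksm =
      KSMetaData.newKeyspace(
          "demo", "SimpleStrategy", Collections.singletonMap("replication_factor", "1"));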
Code example #4
File: KSMetaData.java Project: rcamus/platform
  public KSMetaData reloadAttributes() throws IOException {
    Row ksDefRow = SystemTable.readSchemaRow(name);

    if (ksDefRow.cf == null)
      throw new IOException(
          String.format(
              "%s not found in the schema definitions table (%s).",
              name, SystemTable.SCHEMA_KEYSPACES_CF));

    return fromSchema(ksDefRow, Collections.<CFMetaData>emptyList());
  }
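
A hedged usage sketch; the ksm variable and the error handling are hypothetical:

  // Re-read this keyspace's attributes from the schema table; a missing
  // schema row surfaces as the IOException thrown above.
  try {
    ksm = ksm.reloadAttributes();
  } catch (IOException e) {
    throw new RuntimeException("schema row for keyspace is missing or unreadable", e);
  }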
Code example #5
  public static SSTableReader sstable(
      int generation, int size, boolean keepRef, ColumnFamilyStore cfs) {
    Descriptor descriptor =
        new Descriptor(
            cfs.getDirectories().getDirectoryForNewSSTables(),
            cfs.keyspace.getName(),
            cfs.getColumnFamilyName(),
            generation);
    Set<Component> components =
        ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    for (Component component : components) {
      File file = new File(descriptor.filenameFor(component));
      try {
        file.createNewFile();
      } catch (IOException e) {
        // Ignored: this is a test helper, so a component file that cannot be
        // created will surface as a failure when the reader is opened below.
      }
    }
    if (size > 0) {
      try {
        File file = new File(descriptor.filenameFor(Component.DATA));
        try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
          raf.setLength(size);
        }
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
    SerializationHeader header = SerializationHeader.make(cfs.metadata, Collections.emptyList());
    StatsMetadata metadata =
        (StatsMetadata)
            new MetadataCollector(cfs.metadata.comparator)
                .finalizeMetadata(
                    cfs.metadata.partitioner.getClass().getCanonicalName(), 0.01f, -1, header)
                .get(MetadataType.STATS);
    // segmentedFile, indexSummary, and readerBounds are fields/helpers of the
    // enclosing test class, not shown in this snippet.
    SSTableReader reader =
        SSTableReader.internalOpen(
            descriptor,
            components,
            cfs.metadata,
            segmentedFile.sharedCopy(),
            segmentedFile.sharedCopy(),
            indexSummary.sharedCopy(),
            new AlwaysPresentFilter(),
            1L,
            metadata,
            SSTableReader.OpenReason.NORMAL,
            header);
    reader.first = reader.last = readerBounds(generation);
    if (!keepRef) reader.selfRef().release();
    return reader;
  }
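
A sketch of how a test might use this helper, assuming cfs is a ColumnFamilyStore prepared by the test fixture; the generation and size values are arbitrary:

  // Fabricate two 1 KiB sstables: one released immediately, and one whose
  // self-ref the caller keeps (keepRef = true) and must release later.
  SSTableReader unreferenced = sstable(1, 1024, false, cfs);
  SSTableReader referenced = sstable(2, 1024, true, cfs);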
Code example #6
/** Author : Avinash Lakshman ( [email protected]) & Prashant Malik ( [email protected] ) */
public class CassandraServer implements Cassandra.Iface {
  private static final Logger logger = Logger.getLogger(CassandraServer.class);

  private static final List<Column> EMPTY_COLUMNS = Collections.emptyList();
  private static final List<SuperColumn> EMPTY_SUPERCOLUMNS = Collections.emptyList();

  /*
   * Handle to the storage service to interact with the other machines in the
   * cluster.
   */
  protected StorageService storageService;

  public CassandraServer() {
    storageService = StorageService.instance();
  }

  /*
   * The start function initializes the server and starts listening on the
   * specified port.
   */
  public void start() throws IOException {
    LogUtil.init();
    // LogUtil.setLogLevel("com.facebook", "DEBUG");
    // Start the storage service
    storageService.start();
  }

  protected ColumnFamily readColumnFamily(ReadCommand command, int consistency_level)
      throws InvalidRequestException {
    String cfName = command.getColumnFamilyName();
    ThriftValidation.validateKey(command.key);

    if (consistency_level == ConsistencyLevel.ZERO) {
      throw new InvalidRequestException(
          "Consistency level zero may not be applied to read operations");
    }
    if (consistency_level == ConsistencyLevel.ALL) {
      throw new InvalidRequestException(
          "Consistency level all is not yet supported on read operations");
    }

    Row row;
    try {
      row = StorageProxy.readProtocol(command, consistency_level);
    } catch (IOException e) {
      throw new RuntimeException(e);
    } catch (TimeoutException e) {
      throw new RuntimeException(e);
    }

    if (row == null) {
      return null;
    }
    return row.getColumnFamily(cfName);
  }

  public List<Column> thriftifyColumns(Collection<IColumn> columns) {
    return thriftifyColumns(columns, false);
  }

  public List<Column> thriftifyColumns(Collection<IColumn> columns, boolean reverseOrder) {
    if (columns == null || columns.isEmpty()) {
      return EMPTY_COLUMNS;
    }

    ArrayList<Column> thriftColumns = new ArrayList<Column>(columns.size());
    for (IColumn column : columns) {
      if (column.isMarkedForDelete()) {
        continue;
      }
      Column thrift_column = new Column(column.name(), column.value(), column.timestamp());
      thriftColumns.add(thrift_column);
    }

    // we have to do the reversing here, since internally we pass results around in ColumnFamily
    // objects, which always sort their columns in the "natural" order
    if (reverseOrder) Collections.reverse(thriftColumns);
    return thriftColumns;
  }

  /** for resultsets of standard columns */
  private List<Column> getSlice(ReadCommand command, int consistency_level)
      throws InvalidRequestException {
    ColumnFamily cfamily = readColumnFamily(command, consistency_level);
    boolean reverseOrder = false;

    if (command instanceof SliceFromReadCommand)
      reverseOrder = !((SliceFromReadCommand) command).isAscending;

    if (cfamily == null || cfamily.getColumnsMap().size() == 0) {
      return EMPTY_COLUMNS;
    }
    if (cfamily.isSuper()) {
      IColumn column = cfamily.getColumnsMap().values().iterator().next();
      return thriftifyColumns(column.getSubColumns(), reverseOrder);
    }
    return thriftifyColumns(cfamily.getSortedColumns(), reverseOrder);
  }

  public List<Column> get_slice_by_names(
      String table,
      String key,
      ColumnParent column_parent,
      List<byte[]> column_names,
      int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_by_names");
    ThriftValidation.validateColumnParent(table, column_parent);
    return getSlice(
        new SliceByNamesReadCommand(table, key, column_parent, column_names), consistency_level);
  }

  public List<Column> get_slice(
      String table,
      String key,
      ColumnParent column_parent,
      byte[] start,
      byte[] finish,
      boolean is_ascending,
      int count,
      int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_from");
    ThriftValidation.validateColumnParent(table, column_parent);
    // TODO support get_slice on super CFs
    if (count <= 0) throw new InvalidRequestException("get_slice requires positive count");

    return getSlice(
        new SliceFromReadCommand(table, key, column_parent, start, finish, is_ascending, count),
        consistency_level);
  }

  public Column get_column(String table, String key, ColumnPath column_path, int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_column");
    ThriftValidation.validateColumnPath(table, column_path);

    QueryPath path = new QueryPath(column_path.column_family, column_path.super_column);
    ColumnFamily cfamily =
        readColumnFamily(
            new SliceByNamesReadCommand(table, key, path, Arrays.asList(column_path.column)),
            consistency_level);
    // TODO can we leverage getSlice here and just check that it returns one column?
    if (cfamily == null) {
      throw new NotFoundException();
    }
    Collection<IColumn> columns = null;
    if (column_path.super_column != null) {
      IColumn column = cfamily.getColumn(column_path.super_column);
      if (column != null) {
        columns = column.getSubColumns();
      }
    } else {
      columns = cfamily.getSortedColumns();
    }
    if (columns == null || columns.size() == 0) {
      throw new NotFoundException();
    }

    assert columns.size() == 1;
    IColumn column = columns.iterator().next();
    if (column.isMarkedForDelete()) {
      throw new NotFoundException();
    }

    return new Column(column.name(), column.value(), column.timestamp());
  }

  public int get_column_count(
      String table, String key, ColumnParent column_parent, int consistency_level)
      throws InvalidRequestException {
    if (logger.isDebugEnabled()) logger.debug("get_column_count");
    // validateColumnParent assumes we require simple columns; get_column_count is the
    // only one of the ColumnParent-taking APIs that can also work at the SuperColumn
    // level, so we roll a one-off validator here.
    String cfType = ThriftValidation.validateColumnFamily(table, column_parent.column_family);
    if (cfType.equals("Standard") && column_parent.super_column != null) {
      throw new InvalidRequestException(
          "columnfamily alone is required for standard CF " + column_parent.column_family);
    }

    ColumnFamily cfamily =
        readColumnFamily(
            new SliceFromReadCommand(
                table,
                key,
                column_parent,
                ArrayUtils.EMPTY_BYTE_ARRAY,
                ArrayUtils.EMPTY_BYTE_ARRAY,
                true,
                Integer.MAX_VALUE),
            consistency_level);
    if (cfamily == null) {
      return 0;
    }
    Collection<IColumn> columns = null;
    if (column_parent.super_column != null) {
      IColumn column = cfamily.getColumn(column_parent.super_column);
      if (column != null) {
        columns = column.getSubColumns();
      }
    } else {
      columns = cfamily.getSortedColumns();
    }
    if (columns == null || columns.size() == 0) {
      return 0;
    }
    return columns.size();
  }

  public void insert(
      String table,
      String key,
      ColumnPath column_path,
      byte[] value,
      long timestamp,
      int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("insert");
    ThriftValidation.validateKey(key);
    ThriftValidation.validateColumnPath(table, column_path);

    RowMutation rm = new RowMutation(table, key.trim());
    try {
      rm.add(new QueryPath(column_path), value, timestamp);
    } catch (MarshalException e) {
      throw new InvalidRequestException(e.getMessage());
    }
    doInsert(consistency_level, rm);
  }

  public void batch_insert(String table, BatchMutation batch_mutation, int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("batch_insert");
    RowMutation rm = RowMutation.getRowMutation(table, batch_mutation);
    Set<String> cfNames = rm.columnFamilyNames();
    ThriftValidation.validateKeyCommand(
        rm.key(), rm.table(), cfNames.toArray(new String[cfNames.size()]));

    doInsert(consistency_level, rm);
  }

  public void remove(
      String table,
      String key,
      ColumnPathOrParent column_path_or_parent,
      long timestamp,
      int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("remove");
    ThriftValidation.validateColumnPathOrParent(table, column_path_or_parent);

    RowMutation rm = new RowMutation(table, key.trim());
    rm.delete(new QueryPath(column_path_or_parent), timestamp);

    doInsert(consistency_level, rm);
  }

  private void doInsert(int consistency_level, RowMutation rm) throws UnavailableException {
    if (consistency_level != ConsistencyLevel.ZERO) {
      StorageProxy.insertBlocking(rm, consistency_level);
    } else {
      StorageProxy.insert(rm);
    }
  }

  public List<SuperColumn> get_slice_super_by_names(
      String table,
      String key,
      String column_family,
      List<byte[]> super_column_names,
      int consistency_level)
      throws InvalidRequestException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_super_by_names");
    ThriftValidation.validateColumnFamily(table, column_family);

    ColumnFamily cfamily =
        readColumnFamily(
            new SliceByNamesReadCommand(
                table, key, new QueryPath(column_family), super_column_names),
            consistency_level);
    if (cfamily == null) {
      return EMPTY_SUPERCOLUMNS;
    }
    return thriftifySuperColumns(cfamily.getSortedColumns());
  }

  private List<SuperColumn> thriftifySuperColumns(Collection<IColumn> columns) {
    return thriftifySuperColumns(columns, false);
  }

  private List<SuperColumn> thriftifySuperColumns(
      Collection<IColumn> columns, boolean reverseOrder) {
    if (columns == null || columns.isEmpty()) {
      return EMPTY_SUPERCOLUMNS;
    }

    ArrayList<SuperColumn> thriftSuperColumns = new ArrayList<SuperColumn>(columns.size());
    for (IColumn column : columns) {
      List<Column> subcolumns = thriftifyColumns(column.getSubColumns());
      if (subcolumns.isEmpty()) {
        continue;
      }
      thriftSuperColumns.add(new SuperColumn(column.name(), subcolumns));
    }

    if (reverseOrder) Collections.reverse(thriftSuperColumns);

    return thriftSuperColumns;
  }

  public List<SuperColumn> get_slice_super(
      String table,
      String key,
      String column_family,
      byte[] start,
      byte[] finish,
      boolean is_ascending,
      int count,
      int consistency_level)
      throws InvalidRequestException {
    if (logger.isDebugEnabled()) logger.debug("get_slice_super");
    if (!DatabaseDescriptor.getColumnFamilyType(table, column_family).equals("Super"))
      throw new InvalidRequestException("get_slice_super requires a super CF name");
    if (count <= 0) throw new InvalidRequestException("get_slice_super requires positive count");

    ColumnFamily cfamily =
        readColumnFamily(
            new SliceFromReadCommand(
                table, key, new QueryPath(column_family), start, finish, is_ascending, count),
            consistency_level);
    if (cfamily == null) {
      return EMPTY_SUPERCOLUMNS;
    }
    Collection<IColumn> columns = cfamily.getSortedColumns();
    return thriftifySuperColumns(columns, !is_ascending);
  }

  public SuperColumn get_super_column(
      String table, String key, SuperColumnPath super_column_path, int consistency_level)
      throws InvalidRequestException, NotFoundException {
    if (logger.isDebugEnabled()) logger.debug("get_superColumn");
    ThriftValidation.validateSuperColumnPath(table, super_column_path);

    ColumnFamily cfamily =
        readColumnFamily(
            new SliceByNamesReadCommand(
                table,
                key,
                new QueryPath(super_column_path.column_family),
                Arrays.asList(super_column_path.super_column)),
            consistency_level);
    if (cfamily == null) {
      throw new NotFoundException();
    }
    Collection<IColumn> columns = cfamily.getSortedColumns();
    if (columns == null || columns.size() == 0) {
      throw new NotFoundException();
    }

    assert columns.size() == 1;
    IColumn column = columns.iterator().next();
    if (column.getSubColumns().size() == 0) {
      throw new NotFoundException();
    }

    return new SuperColumn(column.name(), thriftifyColumns(column.getSubColumns()));
  }

  public void batch_insert_super_column(
      String table, BatchMutationSuper batch_mutation_super, int consistency_level)
      throws InvalidRequestException, UnavailableException {
    if (logger.isDebugEnabled()) logger.debug("batch_insert_SuperColumn");
    RowMutation rm = RowMutation.getRowMutation(table, batch_mutation_super);
    Set<String> cfNames = rm.columnFamilyNames();
    ThriftValidation.validateKeyCommand(
        rm.key(), rm.table(), cfNames.toArray(new String[cfNames.size()]));

    doInsert(consistency_level, rm);
  }

  public String get_string_property(String propertyName) {
    if (propertyName.equals("cluster name")) {
      return DatabaseDescriptor.getClusterName();
    } else if (propertyName.equals("config file")) {
      String filename = DatabaseDescriptor.getConfigFileName();
      try {
        StringBuilder fileData = new StringBuilder(8192);
        BufferedInputStream stream = new BufferedInputStream(new FileInputStream(filename));
        byte[] buf = new byte[1024];
        int numRead;
        while ((numRead = stream.read(buf)) != -1) {
          String str = new String(buf, 0, numRead);
          fileData.append(str);
        }
        stream.close();
        return fileData.toString();
      } catch (IOException e) {
        return "file not found!";
      }
    } else if (propertyName.equals("version")) {
      return "0.3.0";
    } else {
      return "?";
    }
  }

  public List<String> get_string_list_property(String propertyName) {
    if (propertyName.equals("keyspaces")) {
      return DatabaseDescriptor.getTables();
    } else {
      return new ArrayList<String>();
    }
  }

  public Map<String, Map<String, String>> describe_keyspace(String table) throws NotFoundException {
    Map<String, Map<String, String>> columnFamiliesMap = new HashMap<String, Map<String, String>>();

    Map<String, CFMetaData> tableMetaData = DatabaseDescriptor.getTableMetaData(table);
    // table doesn't exist
    if (tableMetaData == null) {
      throw new NotFoundException();
    }

    for (Map.Entry<String, CFMetaData> pairs : tableMetaData.entrySet()) {
      CFMetaData columnFamilyMetaData = pairs.getValue();

      String desc = "";

      Map<String, String> columnMap = new HashMap<String, String>();
      desc =
          columnFamilyMetaData.n_columnMap
              + "("
              + columnFamilyMetaData.n_columnKey
              + ", "
              + columnFamilyMetaData.n_columnValue
              + ", "
              + columnFamilyMetaData.n_columnTimestamp
              + ")";
      if (columnFamilyMetaData.columnType.equals("Super")) {
        columnMap.put("Type", "Super");
        desc =
            columnFamilyMetaData.n_superColumnMap
                + "("
                + columnFamilyMetaData.n_superColumnKey
                + ", "
                + desc
                + ")";
      } else {
        columnMap.put("Type", "Standard");
      }

      desc =
          columnFamilyMetaData.tableName
              + "."
              + columnFamilyMetaData.cfName
              + "("
              + columnFamilyMetaData.n_rowKey
              + ", "
              + desc
              + ")";

      columnMap.put("Desc", desc);
      columnMap.put("CompareWith", columnFamilyMetaData.comparator.getClass().getName());
      if (columnFamilyMetaData.columnType.equals("Super")) {
        columnMap.put(
            "CompareSubcolumnsWith", columnFamilyMetaData.subcolumnComparator.getClass().getName());
      }
      columnMap.put("FlushPeriodInMinutes", columnFamilyMetaData.flushPeriodInMinutes + "");
      columnFamiliesMap.put(columnFamilyMetaData.cfName, columnMap);
    }
    return columnFamiliesMap;
  }

  public org.apache.cassandra.service.CqlResult execute_query(String query) throws TException {
    org.apache.cassandra.service.CqlResult result = new org.apache.cassandra.service.CqlResult();

    CqlResult cqlResult = CqlDriver.executeQuery(query);

    // convert CQL result type to Thrift specific return type
    if (cqlResult != null) {
      result.error_txt = cqlResult.errorTxt;
      result.result_set = cqlResult.resultSet;
      result.error_code = cqlResult.errorCode;
    }
    return result;
  }

  public List<String> get_key_range(
      String tablename, String columnFamily, String startWith, String stopAt, int maxResults)
      throws InvalidRequestException, TException {
    if (logger.isDebugEnabled()) logger.debug("get_key_range");
    ThriftValidation.validateCommand(tablename, columnFamily);
    if (!(StorageService.getPartitioner() instanceof OrderPreservingPartitioner)) {
      throw new InvalidRequestException(
          "range queries may only be performed against an order-preserving partitioner");
    }
    if (maxResults <= 0) {
      throw new InvalidRequestException("maxResults must be positive");
    }

    try {
      return StorageProxy.getKeyRange(
          new RangeCommand(tablename, columnFamily, startWith, stopAt, maxResults));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  // main method moved to CassandraDaemon
}
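
The thriftify methods above share one pattern: copy live entries into a fresh list and reverse only at the Thrift boundary, because ColumnFamily always hands columns back in their "natural" order. A self-contained sketch of that pattern, with a generic type standing in for the Thrift structs:

  // Copy entries that are not marked deleted, optionally reversing at the
  // end, as thriftifyColumns/thriftifySuperColumns do above.
  static <T> List<T> copyLive(
      Collection<T> source, java.util.function.Predicate<T> isDeleted, boolean reverseOrder) {
    List<T> out = new ArrayList<T>(source.size());
    for (T item : source) {
      if (!isDeleted.test(item)) out.add(item);
    }
    if (reverseOrder) Collections.reverse(out);
    return out;
  }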
Code example #7
  /**
   * Flushes all dirty CFs, waiting for them to free and recycle any segments they were retaining.
   */
  public void forceRecycleAllSegments() {
    allocator.forceRecycleAll(Collections.<UUID>emptyList());
  }