Example #1
  /**
   * Applies a group of mutations for a single column family and forces a blocking flush.
   *
   * @param mutations A group of Mutations for the same keyspace and column family.
   * @return The ColumnFamilyStore that was used.
   */
  public static ColumnFamilyStore writeColumnFamily(List<Mutation> mutations) {
    IMutation first = mutations.get(0);
    String keyspaceName = first.getKeyspaceName();
    UUID cfid = first.getColumnFamilyIds().iterator().next();

    for (Mutation rm : mutations) rm.applyUnsafe();

    ColumnFamilyStore store = Keyspace.open(keyspaceName).getColumnFamilyStore(cfid);
    store.forceBlockingFlush();
    return store;
  }
  @Test
  public void testRowCacheCleanup() throws Exception {
    StorageService.instance.initServer(0);
    CacheService.instance.setRowCacheCapacityInMB(1);
    rowCacheLoad(100, Integer.MAX_VALUE, 1000);

    ColumnFamilyStore store = Keyspace.open(KEYSPACE_CACHED).getColumnFamilyStore(CF_CACHED);
    assertEquals(100, CacheService.instance.rowCache.size());
    // while this node still owns the entire ring, cleanup should not evict anything
    store.cleanupCache();
    assertEquals(100, CacheService.instance.rowCache.size());
    // split the ring between two endpoints so that only half of the cached keys remain local
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1, tk2;
    tk1 = "key1000".getBytes();
    tk2 = "key1050".getBytes();
    tmd.updateNormalToken(new BytesToken(tk1), InetAddress.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddress.getByName("127.0.0.2"));
    // cleanup should now drop the cached rows that no longer fall in a locally owned range
    store.cleanupCache();
    assertEquals(50, CacheService.instance.rowCache.size());
    CacheService.instance.setRowCacheCapacityInMB(0);
  }
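A minimal usage sketch for writeColumnFamily above, assuming the KEYSPACE_CACHED/CF_CACHED constants and the Util helpers from these examples are available (the key and column names are illustrative):

  // build one mutation, write it through writeColumnFamily and check the row is readable
  Mutation rm = new Mutation(KEYSPACE_CACHED, ByteBufferUtil.bytes("key1"));
  rm.add(CF_CACHED, Util.cellname("col1"), ByteBufferUtil.bytes("val1"), System.currentTimeMillis());
  ColumnFamilyStore store = writeColumnFamily(Arrays.asList(rm));
  assert store.getColumnFamily(
      QueryFilter.getIdentityFilter(Util.dk("key1"), CF_CACHED, System.currentTimeMillis())) != null;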
Example #3
  /** Reads up to 10000 rows across the full token range, optionally restricted to one super column. */
  public static List<Row> getRangeSlice(ColumnFamilyStore cfs, ByteBuffer superColumn) {
    IDiskAtomFilter filter =
        superColumn == null
            ? new IdentityQueryFilter()
            : new SliceQueryFilter(
                SuperColumns.startOf(superColumn),
                SuperColumns.endOf(superColumn),
                false,
                Integer.MAX_VALUE);

    Token min = StorageService.getPartitioner().getMinimumToken();
    return cfs.getRangeSlice(Bounds.makeRowBounds(min, min), null, filter, 10000);
  }
  @Test
  public void testRowCache() throws Exception {
    CompactionManager.instance.disableAutoCompaction();

    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(CF_CACHED);

    // empty the row cache
    CacheService.instance.invalidateRowCache();

    // set global row cache size to 1 MB
    CacheService.instance.setRowCacheCapacityInMB(1);

    // insert 100 rows into the cached column family
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHED, 0, 100);

    // now read the rows one by one and check that the row cache grows
    for (int i = 0; i < 100; i++) {
      DecoratedKey key = Util.dk("key" + i);

      cachedStore.getColumnFamily(
          key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
      assert CacheService.instance.rowCache.size() == i + 1;
      assert cachedStore.containsCachedRow(key); // current key should be stored in the cache

      // checking if cell is read correctly after cache
      ColumnFamily cf =
          cachedStore.getColumnFamily(
              key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
      Collection<Cell> cells = cf.getSortedColumns();

      Cell cell = cells.iterator().next();

      assert cells.size() == 1;
      assert cell.name().toByteBuffer().equals(ByteBufferUtil.bytes("col" + i));
      assert cell.value().equals(ByteBufferUtil.bytes("val" + i));
    }

    // insert 10 more keys
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHED, 100, 10);

    for (int i = 100; i < 110; i++) {
      DecoratedKey key = Util.dk("key" + i);

      cachedStore.getColumnFamily(
          key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
      assert cachedStore.containsCachedRow(
          key); // cache should be populated with the latest rows read (old ones should be popped)

      // checking if cell is read correctly after cache
      ColumnFamily cf =
          cachedStore.getColumnFamily(
              key, Composites.EMPTY, Composites.EMPTY, false, 1, System.currentTimeMillis());
      Collection<Cell> cells = cf.getSortedColumns();

      Cell cell = cells.iterator().next();

      assert cells.size() == 1;
      assert cell.name().toByteBuffer().equals(ByteBufferUtil.bytes("col" + i));
      assert cell.value().equals(ByteBufferUtil.bytes("val" + i));
    }

    // clear 100 rows from the cache
    int keysLeft = 109;
    for (int i = 109; i >= 10; i--) {
      cachedStore.invalidateCachedRow(Util.dk("key" + i));
      assert CacheService.instance.rowCache.size() == keysLeft;
      keysLeft--;
    }

    CacheService.instance.setRowCacheCapacityInMB(0);
  }
  @Test
  public void testRowCacheRange() {
    CompactionManager.instance.disableAutoCompaction();

    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    String cf = "CachedIntCF";
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(cf);
    long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
    long startRowCacheOutOfRange = cachedStore.metric.rowCacheHitOutOfRange.getCount();
    // empty the row cache
    CacheService.instance.invalidateRowCache();

    // set global row cache size to 1 MB
    CacheService.instance.setRowCacheCapacityInMB(1);

    ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
    DecoratedKey dk = cachedStore.partitioner.decorateKey(key);
    RowCacheKey rck = new RowCacheKey(cachedStore.metadata.ksAndCFName, dk);
    Mutation mutation = new Mutation(KEYSPACE_CACHED, key);
    for (int i = 0; i < 200; i++)
      mutation.add(
          cf, Util.cellname(i), ByteBufferUtil.bytes("val" + i), System.currentTimeMillis());
    mutation.applyUnsafe();

    // populate row cache, we should not get a row cache hit;
    cachedStore.getColumnFamily(
        QueryFilter.getSliceFilter(
            dk, cf, Composites.EMPTY, Composites.EMPTY, false, 10, System.currentTimeMillis()));
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());

    // do another query, limit is 20, which is < 100 that we cache, we should get a hit and it
    // should be in range
    cachedStore.getColumnFamily(
        QueryFilter.getSliceFilter(
            dk, cf, Composites.EMPTY, Composites.EMPTY, false, 20, System.currentTimeMillis()));
    assertEquals(++startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());

    // get a slice from 95 to 105; only cells 95->99 are in the cache, so we should not get a hit
    // and the read counts as out of range for the row cache
    cachedStore.getColumnFamily(
        QueryFilter.getSliceFilter(
            dk,
            cf,
            CellNames.simpleDense(ByteBufferUtil.bytes(95)),
            CellNames.simpleDense(ByteBufferUtil.bytes(105)),
            false,
            10,
            System.currentTimeMillis()));
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(++startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());

    // get a slice with limit > 100, we should get a hit out of range.
    cachedStore.getColumnFamily(
        QueryFilter.getSliceFilter(
            dk, cf, Composites.EMPTY, Composites.EMPTY, false, 101, System.currentTimeMillis()));
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(++startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());

    CacheService.instance.invalidateRowCache();

    // try to populate row cache with a limit > rows to cache, we should still populate row cache;
    cachedStore.getColumnFamily(
        QueryFilter.getSliceFilter(
            dk, cf, Composites.EMPTY, Composites.EMPTY, false, 105, System.currentTimeMillis()));
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    // validate the contents of the row cache
    ColumnFamily cachedCf = (ColumnFamily) CacheService.instance.rowCache.get(rck);
    assertEquals(cachedCf.getColumnCount(), 100);
    int i = 0;
    for (Cell c : cachedCf) {
      assertEquals(c.name(), Util.cellname(i++));
    }
  }
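For the getRangeSlice helper at the top of this example, a quick sketch of a call that scans every row (passing null selects the identity filter rather than a single super column):

  // read back whatever rows are currently in the cached column family, capped at 10000
  ColumnFamilyStore cfs = Keyspace.open(KEYSPACE_CACHED).getColumnFamilyStore(CF_CACHED);
  List<Row> rows = getRangeSlice(cfs, null);
  for (Row row : rows)
    System.out.println(row.key);  // DecoratedKey of each returned row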
Example #6
 public static NamesQueryFilter namesFilter(ColumnFamilyStore cfs, String... names) {
   SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
   for (String str : names) s.add(cellname(str));
   return new NamesQueryFilter(s);
 }
Example #7
 public static QueryFilter namesQueryFilter(
     ColumnFamilyStore cfs, DecoratedKey key, CellName... names) {
   SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
   for (CellName n : names) s.add(n);
   return QueryFilter.getNamesFilter(key, cfs.name, s, System.currentTimeMillis());
 }
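A sketch of combining namesQueryFilter with ColumnFamilyStore.getColumnFamily to read one named cell (the row key and cell name are illustrative):

 // fetch only the cell named "col1" from row "key1"
 ColumnFamily cf = cfs.getColumnFamily(namesQueryFilter(cfs, Util.dk("key1"), cellname("col1")));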
Example #8
 /** Synchronously runs a user-defined compaction over the given sstables, honouring the table's gc grace period. */
 public static void compact(ColumnFamilyStore cfs, Collection<SSTableReader> sstables) {
   int gcBefore = cfs.gcBefore(System.currentTimeMillis());
   AbstractCompactionTask task =
       cfs.getCompactionStrategy().getUserDefinedTask(sstables, gcBefore);
   task.execute(null);
 }
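A short sketch of driving compact from a test: flush first so the store has sstables on disk, then compact them all (this assumes data has already been written to cfs):

 // flush memtables to sstables, then run a user-defined compaction over all of them
 cfs.forceBlockingFlush();
 compact(cfs, cfs.getSSTables());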
Example #9
 /** Submits a user-defined compaction covering every sstable of the store and returns its future. */
 public static Future<?> compactAll(ColumnFamilyStore cfs, int gcBefore) {
   List<Descriptor> descriptors = new ArrayList<>();
   for (SSTableReader sstable : cfs.getSSTables()) descriptors.add(sstable.descriptor);
   return CompactionManager.instance.submitUserDefined(cfs, descriptors, gcBefore);
 }
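Since compactAll hands back the CompactionManager future, a test can simply block on it; a sketch (Integer.MAX_VALUE as gcBefore makes every tombstone eligible for purging, and the caller has to handle the checked exceptions thrown by Future.get()):

 // wait for the user-defined compaction over every sstable to finish
 compactAll(cfs, Integer.MAX_VALUE).get();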
Example #10
 /** Returns a copy of the column family with deleted cells removed and tombstones older than gcBefore purged. */
 public static ColumnFamily cloneAndRemoveDeleted(ColumnFamily cf, int gcBefore) {
   return ColumnFamilyStore.removeDeleted(cf.cloneMe(), gcBefore);
 }
Example #11
 public static ColumnFamily getColumnFamily(Keyspace keyspace, DecoratedKey key, String cfName) {
   ColumnFamilyStore cfStore = keyspace.getColumnFamilyStore(cfName);
   assert cfStore != null : "Table " + cfName + " has not been defined";
   return cfStore.getColumnFamily(
       QueryFilter.getIdentityFilter(key, cfName, System.currentTimeMillis()));
 }
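A usage sketch for the getColumnFamily helper, reading a full row back and walking its cells in comparator order (the key name is illustrative):

 ColumnFamily cf = getColumnFamily(Keyspace.open(KEYSPACE_CACHED), Util.dk("key1"), CF_CACHED);
 for (Cell cell : cf.getSortedColumns())
   System.out.println(cell.name().toByteBuffer());  // each cell name as a ByteBuffer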