/**
 * Applies a batch of modifications (STORE / CLEAR / REMOVE) in a single
 * {@code batch_mutate} round trip where possible.
 *
 * <p>STORE and REMOVE are accumulated into one mutation map; CLEAR is executed
 * immediately via {@link #clear()} (which manages its own connection).
 *
 * @param mods the modifications to apply, in order
 * @throws CacheLoaderException if any Cassandra operation fails
 */
@Override
protected void applyModifications(List<? extends Modification> mods) throws CacheLoaderException {
  Cassandra.Client cassandraClient = null;
  try {
    cassandraClient = dataSource.getConnection();
    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
        new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    for (Modification m : mods) {
      switch (m.getType()) {
        case STORE:
          store0(((Store) m).getStoredEntry(), mutationMap);
          break;
        case CLEAR:
          clear();
          break;
        case REMOVE:
          remove0(ByteBufferUtil.bytes(hashKey(((Remove) m).getKey())), mutationMap);
          break;
        default:
          throw new AssertionError();
      }
    }
    // Fix: skip the RPC entirely when nothing was accumulated (e.g. the batch
    // contained only CLEAR operations) — the original always called batch_mutate.
    if (!mutationMap.isEmpty()) {
      cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel);
    }
  } catch (CacheLoaderException e) {
    // Fix: propagate as-is instead of wrapping a CacheLoaderException (thrown by
    // clear()) inside another CacheLoaderException.
    throw e;
  } catch (Exception e) {
    throw new CacheLoaderException(e);
  } finally {
    dataSource.releaseConnection(cassandraClient);
  }
}
/** * Purge expired entries. Expiration entries are stored in a single key (expirationKey) within a * specific ColumnFamily (set by configuration). The entries are grouped by expiration timestamp * in SuperColumns within which each entry's key is mapped to a column */ @Override protected void purgeInternal() throws CacheLoaderException { if (trace) log.trace("purgeInternal"); Cassandra.Client cassandraClient = null; try { cassandraClient = dataSource.getConnection(); // We need to get all supercolumns from the beginning of time until // now, in SLICE_SIZE chunks SlicePredicate predicate = new SlicePredicate(); predicate.setSlice_range( new SliceRange( ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.bytes(System.currentTimeMillis()), false, SLICE_SIZE)); Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = new HashMap<ByteBuffer, Map<String, List<Mutation>>>(); for (boolean complete = false; !complete; ) { // Get all columns List<ColumnOrSuperColumn> slice = cassandraClient.get_slice( expirationKey, expirationColumnParent, predicate, readConsistencyLevel); complete = slice.size() < SLICE_SIZE; // Delete all keys returned by the slice for (ColumnOrSuperColumn crumb : slice) { SuperColumn scol = crumb.getSuper_column(); for (Iterator<Column> i = scol.getColumnsIterator(); i.hasNext(); ) { Column col = i.next(); // Remove the entry row remove0(ByteBuffer.wrap(col.getName()), mutationMap); } // Remove the expiration supercolumn addMutation( mutationMap, expirationKey, config.expirationColumnFamily, ByteBuffer.wrap(scol.getName()), null, null); } } cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel); } catch (Exception e) { throw new CacheLoaderException(e); } finally { dataSource.releaseConnection(cassandraClient); } }
/**
 * Stores a single cache entry, applying the resulting column mutations in one
 * {@code batch_mutate} call.
 *
 * @param entry the entry to persist
 * @throws CacheLoaderException if the Cassandra write fails
 */
public void store(InternalCacheEntry entry) throws CacheLoaderException {
  Cassandra.Client client = null;
  try {
    client = dataSource.getConnection();
    // A single entry only touches a couple of rows, hence the small initial capacity.
    Map<ByteBuffer, Map<String, List<Mutation>>> mutations =
        new HashMap<ByteBuffer, Map<String, List<Mutation>>>(2);
    store0(entry, mutations);
    client.batch_mutate(mutations, writeConsistencyLevel);
  } catch (Exception e) {
    throw new CacheLoaderException(e);
  } finally {
    dataSource.releaseConnection(client);
  }
}
/**
 * Removes the entry stored under {@code key}.
 *
 * @param key the cache key to remove
 * @return {@code true} if the delete batch was issued successfully, {@code false}
 *     (after logging) if any error occurred — errors are reported, not propagated
 */
@Override
public boolean remove(Object key) throws CacheLoaderException {
  if (trace) log.tracef("remove(\"%s\") ", key);
  Cassandra.Client client = null;
  try {
    client = dataSource.getConnection();
    Map<ByteBuffer, Map<String, List<Mutation>>> mutations =
        new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    remove0(ByteBufferUtil.bytes(hashKey(key)), mutations);
    client.batch_mutate(mutations, writeConsistencyLevel);
    return true;
  } catch (Exception e) {
    // Contract of remove(): signal failure via the return value; log for diagnosis.
    log.errorRemovingKey(key, e);
    return false;
  } finally {
    dataSource.releaseConnection(client);
  }
}
@Override public Set<Object> loadAllKeys(Set<Object> keysToExclude) throws CacheLoaderException { Cassandra.Client cassandraClient = null; try { cassandraClient = dataSource.getConnection(); Set<Object> s = new HashSet<Object>(); SlicePredicate slicePredicate = new SlicePredicate(); slicePredicate.setSlice_range( new SliceRange( ByteBuffer.wrap(entryColumnPath.getColumn()), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1)); String startKey = ""; boolean complete = false; // Get the keys in SLICE_SIZE blocks while (!complete) { KeyRange keyRange = new KeyRange(SLICE_SIZE); keyRange.setStart_token(startKey); keyRange.setEnd_token(""); List<KeySlice> keySlices = cassandraClient.get_range_slices( entryColumnParent, slicePredicate, keyRange, readConsistencyLevel); if (keySlices.size() < SLICE_SIZE) { complete = true; } else { startKey = new String(keySlices.get(keySlices.size() - 1).getKey(), UTF8Charset); } for (KeySlice keySlice : keySlices) { if (keySlice.getColumnsSize() > 0) { Object key = unhashKey(keySlice.getKey()); if (key != null && (keysToExclude == null || !keysToExclude.contains(key))) s.add(key); } } } return s; } catch (Exception e) { throw new CacheLoaderException(e); } finally { dataSource.releaseConnection(cassandraClient); } }
/**
 * Removes every entry in the store by scanning the whole row range and issuing
 * per-page deletion batches (Thrift exposes no bulk truncate here).
 *
 * @throws CacheLoaderException if any Cassandra operation fails
 */
@Override
public void clear() throws CacheLoaderException {
  Cassandra.Client cassandraClient = null;
  try {
    cassandraClient = dataSource.getConnection();
    // Request at most one column per row — we only need the row keys.
    SlicePredicate slicePredicate = new SlicePredicate();
    slicePredicate.setSlice_range(
        new SliceRange(
            ByteBuffer.wrap(entryColumnPath.getColumn()),
            ByteBufferUtil.EMPTY_BYTE_BUFFER,
            false,
            1));
    String startKey = "";
    boolean complete = false;
    // Get the keys in SLICE_SIZE blocks
    while (!complete) {
      KeyRange keyRange = new KeyRange(SLICE_SIZE);
      // NOTE(review): start_token (not start_key) is set from a raw key string —
      // presumably this relies on an order-preserving partitioner; confirm.
      keyRange.setStart_token(startKey);
      keyRange.setEnd_token("");
      List<KeySlice> keySlices =
          cassandraClient.get_range_slices(
              entryColumnParent, slicePredicate, keyRange, readConsistencyLevel);
      if (keySlices.size() < SLICE_SIZE) {
        // A short page means the scan reached the end of the range.
        complete = true;
      } else {
        // Resume after this page. The last key is re-fetched on the next round,
        // but deleting it twice is harmless.
        startKey = new String(keySlices.get(keySlices.size() - 1).getKey(), UTF8Charset);
      }
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
          new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
      for (KeySlice keySlice : keySlices) {
        remove0(ByteBuffer.wrap(keySlice.getKey()), mutationMap);
      }
      // ConsistencyLevel.ALL (instead of the configured writeConsistencyLevel):
      // a clear must be observed by every replica before returning.
      cassandraClient.batch_mutate(mutationMap, ConsistencyLevel.ALL);
    }
  } catch (Exception e) {
    throw new CacheLoaderException(e);
  } finally {
    dataSource.releaseConnection(cassandraClient);
  }
}
/**
 * Loads a single entry by key.
 *
 * @param key the cache key to look up
 * @return the entry, or {@code null} if it is absent or has expired
 * @throws CacheLoaderException if the read (or expiry-triggered remove) fails
 */
@Override
public InternalCacheEntry load(Object key) throws CacheLoaderException {
  String hashKey = hashKey(key);
  Cassandra.Client client = null;
  try {
    client = dataSource.getConnection();
    ColumnOrSuperColumn column =
        client.get(ByteBufferUtil.bytes(hashKey), entryColumnPath, readConsistencyLevel);
    InternalCacheEntry entry = unmarshall(column.getColumn().getValue(), key);
    if (entry == null || !entry.isExpired()) {
      return entry;
    }
    // Lazily evict an entry that expired while sitting in the store.
    remove(key);
    return null;
  } catch (NotFoundException nfe) {
    log.debugf("Key '%s' not found", hashKey);
    return null;
  } catch (Exception e) {
    throw new CacheLoaderException(e);
  } finally {
    dataSource.releaseConnection(client);
  }
}
/**
 * Loads up to {@code numEntries} entries by paging over the row range, stopping as
 * soon as Cassandra returns a short page or the quota is met.
 *
 * @param numEntries maximum number of entries to load
 * @return the loaded entries (possibly fewer than requested)
 * @throws CacheLoaderException if any Cassandra operation fails
 */
@Override
public Set<InternalCacheEntry> load(int numEntries) throws CacheLoaderException {
  Cassandra.Client cassandraClient = null;
  try {
    cassandraClient = dataSource.getConnection();
    Set<InternalCacheEntry> s = new HashSet<InternalCacheEntry>();
    // One column per row is enough: it carries the marshalled entry value.
    SlicePredicate slicePredicate = new SlicePredicate();
    slicePredicate.setSlice_range(
        new SliceRange(
            ByteBuffer.wrap(entryColumnPath.getColumn()),
            ByteBufferUtil.EMPTY_BYTE_BUFFER,
            false,
            1));
    String startKey = "";
    // Get the keys in SLICE_SIZE blocks, never asking for more than we still need.
    int sliceSize = Math.min(SLICE_SIZE, numEntries);
    for (boolean complete = false; !complete; ) {
      KeyRange keyRange = new KeyRange(sliceSize);
      // NOTE(review): start_token is set from a raw key string — presumably this
      // relies on an order-preserving partitioner; confirm.
      keyRange.setStart_token(startKey);
      keyRange.setEnd_token("");
      List<KeySlice> keySlices =
          cassandraClient.get_range_slices(
              entryColumnParent, slicePredicate, keyRange, readConsistencyLevel);
      // Cycle through all the keys
      for (KeySlice keySlice : keySlices) {
        Object key = unhashKey(keySlice.getKey());
        if (key == null) // Skip invalid keys
          continue;
        List<ColumnOrSuperColumn> columns = keySlice.getColumns();
        if (columns.size() > 0) {
          if (log.isDebugEnabled()) {
            log.debugf("Loading %s", key);
          }
          byte[] value = columns.get(0).getColumn().getValue();
          InternalCacheEntry ice = unmarshall(value, key);
          s.add(ice);
        } else if (log.isDebugEnabled()) {
          // Row key exists but the entry column is absent (e.g. deleted): skip it.
          log.debugf("Skipping empty key %s", key);
        }
      }
      if (keySlices.size() < sliceSize) {
        // Cassandra has returned fewer keys than we asked for.
        // Assume we have finished.
        complete = true;
      } else {
        // Cassandra has returned exactly the amount of keys we asked for. If we
        // haven't reached the required quota yet, cycle again starting from the
        // last returned key. NOTE(review): that key is re-fetched (and
        // re-unmarshalled) on the next page — TODO confirm this duplicate work,
        // and the possible duplicate Set element, are intended.
        sliceSize = Math.min(SLICE_SIZE, numEntries - s.size());
        if (sliceSize == 0) {
          complete = true;
        } else {
          startKey = new String(keySlices.get(keySlices.size() - 1).getKey(), UTF8Charset);
        }
      }
    }
    return s;
  } catch (Exception e) {
    throw new CacheLoaderException(e);
  } finally {
    dataSource.releaseConnection(cassandraClient);
  }
}