Example #1
  /** Reads entries from the stream until a null terminator is encountered, storing each one. */
  public void fromStream(ObjectInput in) throws CacheLoaderException {
    try {
      while (true) {
        InternalCacheEntry entry = (InternalCacheEntry) getMarshaller().objectFromObjectStream(in);
        if (entry == null) break;
        store(entry);
      }
    } catch (IOException e) {
      throw new CacheLoaderException(e);
    } catch (ClassNotFoundException e) {
      throw new CacheLoaderException(e);
    } catch (InterruptedException ie) {
      if (log.isTraceEnabled()) log.trace("Interrupted while reading from stream");
      Thread.currentThread().interrupt();
    }
  }
Example #2
/**
 * A persistent <code>CacheLoader</code> based on the Apache Cassandra project. See
 * http://cassandra.apache.org/
 *
 * @author Tristan Tarrant
 */
@CacheLoaderMetadata(configurationClass = CassandraCacheStoreConfig.class)
public class CassandraCacheStore extends AbstractCacheStore {

  private static final String ENTRY_KEY_PREFIX = "entry_";
  private static final String ENTRY_COLUMN_NAME = "entry";
  private static final String EXPIRATION_KEY = "expiration";
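  // Number of rows/columns fetched per Thrift query when paging through the store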
  private static final int SLICE_SIZE = 100;
  private static final Log log = LogFactory.getLog(CassandraCacheStore.class, Log.class);
  private static final boolean trace = log.isTraceEnabled();

  private CassandraCacheStoreConfig config;

  private CassandraThriftDataSource dataSource;

  private ConsistencyLevel readConsistencyLevel;
  private ConsistencyLevel writeConsistencyLevel;

  private String cacheName;
  private ColumnPath entryColumnPath;
  private ColumnParent entryColumnParent;
  private ColumnParent expirationColumnParent;
  private String entryKeyPrefix;
  private ByteBuffer expirationKey;
  private TwoWayKey2StringMapper keyMapper;

  private static final Charset UTF8Charset = Charset.forName("UTF-8");

  public Class<? extends CacheLoaderConfig> getConfigurationClass() {
    return CassandraCacheStoreConfig.class;
  }

  @Override
  public void init(CacheLoaderConfig clc, Cache<?, ?> cache, StreamingMarshaller m)
      throws CacheLoaderException {
    super.init(clc, cache, m);
    this.cacheName = cache.getName();
    this.config = (CassandraCacheStoreConfig) clc;
  }

  @Override
  public void start() throws CacheLoaderException {

    try {
      dataSource = new DataSource(config.getPoolProperties());
      readConsistencyLevel = ConsistencyLevel.valueOf(config.readConsistencyLevel);
      writeConsistencyLevel = ConsistencyLevel.valueOf(config.writeConsistencyLevel);
      entryColumnPath =
          new ColumnPath(config.entryColumnFamily)
              .setColumn(ENTRY_COLUMN_NAME.getBytes(UTF8Charset));
      entryColumnParent = new ColumnParent(config.entryColumnFamily);
      entryKeyPrefix = ENTRY_KEY_PREFIX + (config.isSharedKeyspace() ? cacheName + "_" : "");
      expirationColumnParent = new ColumnParent(config.expirationColumnFamily);
      expirationKey =
          ByteBufferUtil.bytes(EXPIRATION_KEY + (config.isSharedKeyspace() ? "_" + cacheName : ""));
      keyMapper = (TwoWayKey2StringMapper) Util.getInstance(config.getKeyMapper());
    } catch (Exception e) {
      throw new ConfigurationException(e);
    }

    log.debug("cleaning up expired entries...");
    purgeInternal();

    log.debug("started");
    super.start();
  }

  @Override
  public InternalCacheEntry load(Object key) throws CacheLoaderException {
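    // Fetch the single entry column for this key's row; an expired entry is removed and treated as a miss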
    String hashKey = hashKey(key);
    Cassandra.Client cassandraClient = null;
    try {
      cassandraClient = dataSource.getConnection();
      ColumnOrSuperColumn column =
          cassandraClient.get(ByteBufferUtil.bytes(hashKey), entryColumnPath, readConsistencyLevel);
      InternalCacheEntry ice = unmarshall(column.getColumn().getValue(), key);
      if (ice != null && ice.isExpired()) {
        remove(key);
        return null;
      }
      return ice;
    } catch (NotFoundException nfe) {
      log.debugf("Key '%s' not found", hashKey);
      return null;
    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  @Override
  public Set<InternalCacheEntry> loadAll() throws CacheLoaderException {
    return load(Integer.MAX_VALUE);
  }

  @Override
  public Set<InternalCacheEntry> load(int numEntries) throws CacheLoaderException {
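    // Page through the entry rows in sliceSize blocks until numEntries have been collected
    // or the rows are exhausted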
    Cassandra.Client cassandraClient = null;
    try {
      cassandraClient = dataSource.getConnection();
      Set<InternalCacheEntry> s = new HashSet<InternalCacheEntry>();
      SlicePredicate slicePredicate = new SlicePredicate();
      slicePredicate.setSlice_range(
          new SliceRange(
              ByteBuffer.wrap(entryColumnPath.getColumn()),
              ByteBufferUtil.EMPTY_BYTE_BUFFER,
              false,
              1));
      String startKey = "";

      // Get the keys in SLICE_SIZE blocks
      int sliceSize = Math.min(SLICE_SIZE, numEntries);
      for (boolean complete = false; !complete; ) {
        KeyRange keyRange = new KeyRange(sliceSize);
        keyRange.setStart_token(startKey);
        keyRange.setEnd_token("");
        List<KeySlice> keySlices =
            cassandraClient.get_range_slices(
                entryColumnParent, slicePredicate, keyRange, readConsistencyLevel);

        // Cycle through all the keys
        for (KeySlice keySlice : keySlices) {
          Object key = unhashKey(keySlice.getKey());
          // Skip invalid keys
          if (key == null) continue;
          List<ColumnOrSuperColumn> columns = keySlice.getColumns();
          if (columns.size() > 0) {
            if (log.isDebugEnabled()) {
              log.debugf("Loading %s", key);
            }
            byte[] value = columns.get(0).getColumn().getValue();
            InternalCacheEntry ice = unmarshall(value, key);
            s.add(ice);
          } else if (log.isDebugEnabled()) {
            log.debugf("Skipping empty key %s", key);
          }
        }
        if (keySlices.size() < sliceSize) {
          // Cassandra has returned less keys than what we asked for.
          // Assume we have finished
          complete = true;
        } else {
          // Cassandra has returned exactly the amount of keys we
          // asked for. If we haven't reached the required quota yet,
          // assume we need to cycle again starting from
          // the last returned key (excluded)
          sliceSize = Math.min(SLICE_SIZE, numEntries - s.size());
          if (sliceSize == 0) {
            complete = true;
          } else {
            startKey = new String(keySlices.get(keySlices.size() - 1).getKey(), UTF8Charset);
          }
        }
      }
      return s;
    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  @Override
  public Set<Object> loadAllKeys(Set<Object> keysToExclude) throws CacheLoaderException {
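    // Same slice-based paging as load(int), but only the row keys are collected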
    Cassandra.Client cassandraClient = null;
    try {
      cassandraClient = dataSource.getConnection();
      Set<Object> s = new HashSet<Object>();
      SlicePredicate slicePredicate = new SlicePredicate();
      slicePredicate.setSlice_range(
          new SliceRange(
              ByteBuffer.wrap(entryColumnPath.getColumn()),
              ByteBufferUtil.EMPTY_BYTE_BUFFER,
              false,
              1));
      String startKey = "";
      boolean complete = false;
      // Get the keys in SLICE_SIZE blocks
      while (!complete) {
        KeyRange keyRange = new KeyRange(SLICE_SIZE);
        keyRange.setStart_token(startKey);
        keyRange.setEnd_token("");
        List<KeySlice> keySlices =
            cassandraClient.get_range_slices(
                entryColumnParent, slicePredicate, keyRange, readConsistencyLevel);
        if (keySlices.size() < SLICE_SIZE) {
          complete = true;
        } else {
          startKey = new String(keySlices.get(keySlices.size() - 1).getKey(), UTF8Charset);
        }

        for (KeySlice keySlice : keySlices) {
          if (keySlice.getColumnsSize() > 0) {
            Object key = unhashKey(keySlice.getKey());
            if (key != null && (keysToExclude == null || !keysToExclude.contains(key))) s.add(key);
          }
        }
      }
      return s;
    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  /**
   * No-op: connections are obtained from the data source per operation and released in the
   * corresponding finally blocks, so there is nothing to close here.
   */
  @Override
  public void stop() {}

  @Override
  public void clear() throws CacheLoaderException {
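    // Page through all entry rows and delete them in per-slice batches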
    Cassandra.Client cassandraClient = null;
    try {
      cassandraClient = dataSource.getConnection();
      SlicePredicate slicePredicate = new SlicePredicate();
      slicePredicate.setSlice_range(
          new SliceRange(
              ByteBuffer.wrap(entryColumnPath.getColumn()),
              ByteBufferUtil.EMPTY_BYTE_BUFFER,
              false,
              1));
      String startKey = "";
      boolean complete = false;
      // Get the keys in SLICE_SIZE blocks
      while (!complete) {
        KeyRange keyRange = new KeyRange(SLICE_SIZE);
        keyRange.setStart_token(startKey);
        keyRange.setEnd_token("");
        List<KeySlice> keySlices =
            cassandraClient.get_range_slices(
                entryColumnParent, slicePredicate, keyRange, readConsistencyLevel);
        if (keySlices.size() < SLICE_SIZE) {
          complete = true;
        } else {
          startKey = new String(keySlices.get(keySlices.size() - 1).getKey(), UTF8Charset);
        }
        Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
            new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

        for (KeySlice keySlice : keySlices) {
          remove0(ByteBuffer.wrap(keySlice.getKey()), mutationMap);
        }
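        // clear() issues its deletions at ConsistencyLevel.ALL so every replica must acknowledge the removal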
        cassandraClient.batch_mutate(mutationMap, ConsistencyLevel.ALL);
      }
    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  @Override
  public boolean remove(Object key) throws CacheLoaderException {
    if (trace) log.tracef("remove(\"%s\") ", key);
    Cassandra.Client cassandraClient = null;
    try {
      cassandraClient = dataSource.getConnection();
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
          new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
      remove0(ByteBufferUtil.bytes(hashKey(key)), mutationMap);
      cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel);
      return true;
    } catch (Exception e) {
      log.errorRemovingKey(key, e);
      return false;
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  private void remove0(ByteBuffer key, Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap) {
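    // Passing a null column and value produces a Deletion covering the whole entry row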
    addMutation(mutationMap, key, config.entryColumnFamily, null, null);
  }

  private byte[] marshall(InternalCacheEntry entry) throws IOException, InterruptedException {
    return getMarshaller().objectToByteBuffer(entry.toInternalCacheValue());
  }

  private InternalCacheEntry unmarshall(Object o, Object key)
      throws IOException, ClassNotFoundException {
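    // The stored bytes hold an InternalCacheValue; re-attach the key to rebuild the InternalCacheEntry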
    if (o == null) return null;
    byte b[] = (byte[]) o;
    InternalCacheValue v = (InternalCacheValue) getMarshaller().objectFromByteBuffer(b);
    return v.toInternalCacheEntry(key);
  }

  public void store(InternalCacheEntry entry) throws CacheLoaderException {
    Cassandra.Client cassandraClient = null;

    try {
      cassandraClient = dataSource.getConnection();
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
          new HashMap<ByteBuffer, Map<String, List<Mutation>>>(2);
      store0(entry, mutationMap);

      cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel);
    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  private void store0(
      InternalCacheEntry entry, Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap)
      throws IOException, UnsupportedKeyTypeException {
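    // Queue the entry write and, if the entry can expire, an expiration index mutation consumed by purgeInternal()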
    Object key = entry.getKey();
    if (trace) log.tracef("store(\"%s\") ", key);
    String cassandraKey = hashKey(key);
    try {
      addMutation(
          mutationMap,
          ByteBufferUtil.bytes(cassandraKey),
          config.entryColumnFamily,
          ByteBuffer.wrap(entryColumnPath.getColumn()),
          ByteBuffer.wrap(marshall(entry)));
      if (entry.canExpire()) {
        addExpiryEntry(cassandraKey, entry.getExpiryTime(), mutationMap);
      }
    } catch (InterruptedException ie) {
      if (trace) log.trace("Interrupted while trying to marshall entry");
      Thread.currentThread().interrupt();
    }
  }

  private void addExpiryEntry(
      String cassandraKey,
      long expiryTime,
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap) {
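    // Under the shared expiration row, group entries in a SuperColumn named by the expiry
    // timestamp, with one column per entry row key (see purgeInternal)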
    try {
      addMutation(
          mutationMap,
          expirationKey,
          config.expirationColumnFamily,
          ByteBufferUtil.bytes(expiryTime),
          ByteBufferUtil.bytes(cassandraKey),
          ByteBufferUtil.EMPTY_BYTE_BUFFER);
    } catch (Exception e) {
      // Should not happen
    }
  }

  /** Writes all entries to the stream, followed by a null terminator. */
  public void toStream(ObjectOutput out) throws CacheLoaderException {
    try {
      for (InternalCacheEntry entry : loadAll()) {
        getMarshaller().objectToObjectStream(entry, out);
      }
      getMarshaller().objectToObjectStream(null, out);
    } catch (IOException e) {
      throw new CacheLoaderException(e);
    }
  }

  /** Reads entries from the stream until a null terminator is encountered, storing each one. */
  public void fromStream(ObjectInput in) throws CacheLoaderException {
    try {
      while (true) {
        InternalCacheEntry entry = (InternalCacheEntry) getMarshaller().objectFromObjectStream(in);
        if (entry == null) break;
        store(entry);
      }
    } catch (IOException e) {
      throw new CacheLoaderException(e);
    } catch (ClassNotFoundException e) {
      throw new CacheLoaderException(e);
    } catch (InterruptedException ie) {
      if (log.isTraceEnabled()) log.trace("Interrupted while reading from stream");
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Purges expired entries. Expiration metadata is stored under a single row key (expirationKey)
   * within a dedicated ColumnFamily (set by configuration). Entries are grouped by expiration
   * timestamp into SuperColumns, within which each entry's row key is mapped to a column.
   */
  @Override
  protected void purgeInternal() throws CacheLoaderException {
    if (trace) log.trace("purgeInternal");
    Cassandra.Client cassandraClient = null;
    try {
      cassandraClient = dataSource.getConnection();
      // We need to get all supercolumns from the beginning of time until
      // now, in SLICE_SIZE chunks
      SlicePredicate predicate = new SlicePredicate();
      predicate.setSlice_range(
          new SliceRange(
              ByteBufferUtil.EMPTY_BYTE_BUFFER,
              ByteBufferUtil.bytes(System.currentTimeMillis()),
              false,
              SLICE_SIZE));
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
          new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
      for (boolean complete = false; !complete; ) {
        // Get all columns
        List<ColumnOrSuperColumn> slice =
            cassandraClient.get_slice(
                expirationKey, expirationColumnParent, predicate, readConsistencyLevel);
        complete = slice.size() < SLICE_SIZE;
        // Delete all keys returned by the slice
        for (ColumnOrSuperColumn crumb : slice) {
          SuperColumn scol = crumb.getSuper_column();
          for (Iterator<Column> i = scol.getColumnsIterator(); i.hasNext(); ) {
            Column col = i.next();
            // Remove the entry row
            remove0(ByteBuffer.wrap(col.getName()), mutationMap);
          }
          // Remove the expiration supercolumn
          addMutation(
              mutationMap,
              expirationKey,
              config.expirationColumnFamily,
              ByteBuffer.wrap(scol.getName()),
              null,
              null);
        }
        // Apply the accumulated deletions before fetching the next slice; otherwise the same
        // expired supercolumns would be returned again and the loop would never terminate
        if (!mutationMap.isEmpty()) {
          cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel);
          mutationMap.clear();
        }
      }
    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  @Override
  protected void applyModifications(List<? extends Modification> mods) throws CacheLoaderException {
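    // STORE and REMOVE modifications are accumulated into a single batch_mutate; CLEAR is applied immediately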
    Cassandra.Client cassandraClient = null;

    try {
      cassandraClient = dataSource.getConnection();
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
          new HashMap<ByteBuffer, Map<String, List<Mutation>>>();

      for (Modification m : mods) {
        switch (m.getType()) {
          case STORE:
            store0(((Store) m).getStoredEntry(), mutationMap);
            break;
          case CLEAR:
            clear();
            break;
          case REMOVE:
            remove0(ByteBufferUtil.bytes(hashKey(((Remove) m).getKey())), mutationMap);
            break;
          default:
            throw new AssertionError("Unknown modification type: " + m.getType());
        }
      }

      cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel);

    } catch (Exception e) {
      throw new CacheLoaderException(e);
    } finally {
      dataSource.releaseConnection(cassandraClient);
    }
  }

  @Override
  public String toString() {
    return "CassandraCacheStore";
  }

  private String hashKey(Object key) throws UnsupportedKeyTypeException {
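    // The Cassandra row key is the entry prefix followed by the mapped string form of the cache key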
    if (!keyMapper.isSupportedType(key.getClass())) {
      throw new UnsupportedKeyTypeException(key);
    }

    return entryKeyPrefix + keyMapper.getStringMapping(key);
  }

  private Object unhashKey(byte[] key) {
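    // Reverse of hashKey(): rows that do not carry this cache's prefix yield null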
    String skey = new String(key, UTF8Charset);

    if (skey.startsWith(entryKeyPrefix))
      return keyMapper.getKeyMapping(skey.substring(entryKeyPrefix.length()));
    else return null;
  }

  private static void addMutation(
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap,
      ByteBuffer key,
      String columnFamily,
      ByteBuffer column,
      ByteBuffer value) {
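    // Convenience overload for mutations that do not involve a SuperColumn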
    addMutation(mutationMap, key, columnFamily, null, column, value);
  }

  private static void addMutation(
      Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap,
      ByteBuffer key,
      String columnFamily,
      ByteBuffer superColumn,
      ByteBuffer column,
      ByteBuffer value) {
    Map<String, List<Mutation>> keyMutations = mutationMap.get(key);
    // If the key doesn't exist yet, create the mutation holder
    if (keyMutations == null) {
      keyMutations = new HashMap<String, List<Mutation>>();
      mutationMap.put(key, keyMutations);
    }
    // If the columnfamily doesn't exist yet, create the mutation holder
    List<Mutation> columnFamilyMutations = keyMutations.get(columnFamily);
    if (columnFamilyMutations == null) {
      columnFamilyMutations = new ArrayList<Mutation>();
      keyMutations.put(columnFamily, columnFamilyMutations);
    }

    if (value == null) { // Delete
      Deletion deletion = new Deletion(microTimestamp());
      if (superColumn != null) {
        deletion.setSuper_column(superColumn);
      }
      if (column != null) { // Single column delete
        deletion.setPredicate(
            new SlicePredicate().setColumn_names(Collections.singletonList(column)));
      } // else Delete entire column family or supercolumn
      columnFamilyMutations.add(new Mutation().setDeletion(deletion));
    } else { // Insert/update
      ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
      if (superColumn != null) {
        List<Column> columns = new ArrayList<Column>();
        columns.add(new Column(column, value, microTimestamp()));
        cosc.setSuper_column(new SuperColumn(superColumn, columns));
      } else {
        cosc.setColumn(new Column(column, value, microTimestamp()));
      }
      columnFamilyMutations.add(new Mutation().setColumn_or_supercolumn(cosc));
    }
  }

  private static long microTimestamp() {
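    // Cassandra column timestamps are conventionally microseconds since the epoch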
    return System.currentTimeMillis() * 1000L;
  }
}