Example #1
  public List<RayoNode> getRayoNodesForPlatform(String platformId) {

    try {
      log.debug("Finding rayo nodes for platform: [%s]", platformId);
      Set<String> platforms = new HashSet<String>();
      platforms.add(platformId);

      List<RayoNode> nodes = new ArrayList<RayoNode>();
      Selector selector = Pelops.createSelector(schemaName);
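      // Each SuperColumn in the platform's row of the "nodes" column family represents one node:
      // the SuperColumn name is the node's hostname and its sub-columns are passed to buildNode(...).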
      List<SuperColumn> columns =
          selector.getSuperColumnsFromRow("nodes", platformId, false, ConsistencyLevel.ONE);
      for (SuperColumn column : columns) {
        String id = Bytes.toUTF8(column.getName());
        RayoNode rayoNode = buildNode(column.getColumns());
        rayoNode.setHostname(id);
        rayoNode.setPlatforms(platforms);
        nodes.add(rayoNode);
      }

      return nodes;
    } catch (PelopsException pe) {
      log.error(pe.getMessage(), pe);
      return Collections.emptyList();
    }
  }
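
This method assumes a Pelops connection pool has already been registered under the name held in schemaName; otherwise Pelops.createSelector(schemaName) cannot resolve a pool. A minimal bootstrap sketch, assuming the scale7-pelops Cluster/Pelops API and placeholder host, port, pool, and keyspace names:

import org.scale7.cassandra.pelops.Cluster;
import org.scale7.cassandra.pelops.Pelops;

public class PelopsBootstrap {

  public static void main(String[] args) {
    // Hypothetical contact node and keyspace; the pool name must match the
    // schemaName that is later passed to Pelops.createSelector(...).
    Cluster cluster = new Cluster("localhost", 9160);
    Pelops.addPool("rayo-pool", cluster, "rayo");

    // ... run queries via Pelops.createSelector("rayo-pool"), as in the method above ...

    Pelops.shutdown();
  }
}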
Example #2
  /** Test insertion of a supercolumn using insert */
  @Test
  public void testInsertSuper()
      throws IllegalArgumentException, NoSuchElementException, IllegalStateException,
          HNotFoundException, Exception {

    // insert value
    ColumnParent columnParent = new ColumnParent("Super1");
    columnParent.setSuper_column(StringSerializer.get().toByteBuffer("testInsertSuper_super"));
    Column column =
        new Column(
            StringSerializer.get().toByteBuffer("testInsertSuper_column"),
            StringSerializer.get().toByteBuffer("testInsertSuper_value"),
            connectionManager.createClock());

    keyspace.insert(
        StringSerializer.get().toByteBuffer("testInsertSuper_key"), columnParent, column);
    column.setName(StringSerializer.get().toByteBuffer("testInsertSuper_column2"));
    keyspace.insert(
        StringSerializer.get().toByteBuffer("testInsertSuper_key"), columnParent, column);

    // get value and assert
    ColumnPath cp2 = new ColumnPath("Super1");
    cp2.setSuper_column(bytes("testInsertSuper_super"));
    SuperColumn sc = keyspace.getSuperColumn("testInsertSuper_key", cp2);
    assertNotNull(sc);
    assertEquals("testInsertSuper_super", string(sc.getName()));
    assertEquals(2, sc.getColumns().size());
    assertEquals("testInsertSuper_value", string(sc.getColumns().get(0).getValue()));

    // remove value
    keyspace.remove("testInsertSuper_key", cp2);
  }
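
The bytes(...) and string(...) helpers used above are not part of the snippet. A plausible minimal version, assuming they simply round-trip UTF-8 strings through Hector's StringSerializer (hypothetical, for illustration only):

  // Hypothetical test helpers; the real test class may define them differently.
  private static byte[] bytes(String value) {
    return StringSerializer.get().toBytes(value);
  }

  private static String string(byte[] value) {
    return StringSerializer.get().fromBytes(value);
  }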
Example #3
 /**
  * Purge expired entries. Expiration entries are stored in a single key (expirationKey) within a
  * specific ColumnFamily (set by configuration). The entries are grouped by expiration timestamp
  * into SuperColumns, where each entry's key is mapped to a column.
  */
 @Override
 protected void purgeInternal() throws CacheLoaderException {
   if (trace) log.trace("purgeInternal");
   Cassandra.Client cassandraClient = null;
   try {
     cassandraClient = dataSource.getConnection();
     // We need to get all supercolumns from the beginning of time until
     // now, in SLICE_SIZE chunks
     SlicePredicate predicate = new SlicePredicate();
     predicate.setSlice_range(
         new SliceRange(
             ByteBufferUtil.EMPTY_BYTE_BUFFER,
             ByteBufferUtil.bytes(System.currentTimeMillis()),
             false,
             SLICE_SIZE));
     Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
         new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
     for (boolean complete = false; !complete; ) {
       // Get all columns
       List<ColumnOrSuperColumn> slice =
           cassandraClient.get_slice(
               expirationKey, expirationColumnParent, predicate, readConsistencyLevel);
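        // A slice shorter than SLICE_SIZE means every expiration SuperColumn up to "now" has been read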
       complete = slice.size() < SLICE_SIZE;
       // Delete all keys returned by the slice
       for (ColumnOrSuperColumn crumb : slice) {
         SuperColumn scol = crumb.getSuper_column();
         for (Iterator<Column> i = scol.getColumnsIterator(); i.hasNext(); ) {
           Column col = i.next();
           // Remove the entry row
           remove0(ByteBuffer.wrap(col.getName()), mutationMap);
         }
         // Remove the expiration supercolumn
         addMutation(
             mutationMap,
             expirationKey,
             config.expirationColumnFamily,
             ByteBuffer.wrap(scol.getName()),
             null,
             null);
       }
     }
     cassandraClient.batch_mutate(mutationMap, writeConsistencyLevel);
   } catch (Exception e) {
     throw new CacheLoaderException(e);
   } finally {
     dataSource.releaseConnection(cassandraClient);
   }
 }
 @Override
 public ByteBuffer getRawName() {
   return ByteBuffer.wrap(column.getName());
 }
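
Example #3 only reads and deletes expiration markers; the code that writes them is not shown. A minimal sketch of how a single marker could be recorded under the layout described in the javadoc (an empty column named by the entry key, inside a SuperColumn named by the long-encoded expiration timestamp), assuming the same Thrift-generated classes and placeholder names:

  // Hypothetical sketch, not part of the cache store shown above.
  private void addExpiryMarker(Cassandra.Client cassandraClient, ByteBuffer expirationKey,
      String expirationColumnFamily, ByteBuffer entryKey, long expiryTimestamp) throws Exception {
    // Column named by the entry key; purgeInternal only reads the name, not the value.
    Column marker = new Column();
    marker.setName(entryKey);
    marker.setValue(ByteBufferUtil.EMPTY_BYTE_BUFFER);
    marker.setTimestamp(System.currentTimeMillis());

    // SuperColumn named by the expiration timestamp, so purgeInternal's slice from
    // EMPTY_BYTE_BUFFER to bytes(System.currentTimeMillis()) can find it.
    SuperColumn bucket = new SuperColumn();
    bucket.setName(ByteBufferUtil.bytes(expiryTimestamp));
    bucket.setColumns(Collections.singletonList(marker));

    ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
    cosc.setSuper_column(bucket);

    Mutation mutation = new Mutation();
    mutation.setColumn_or_supercolumn(cosc);

    Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap =
        new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
    mutationMap.put(expirationKey,
        Collections.singletonMap(expirationColumnFamily, Collections.singletonList(mutation)));
    // The consistency level is a placeholder; a real store would use its configured write level.
    cassandraClient.batch_mutate(mutationMap, ConsistencyLevel.QUORUM);
  }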