@Test public void basicLifecycle() throws Exception { final String id = "basicLifecycle"; EntityManager<SampleEntity, String> entityPersister = new DefaultEntityManager.Builder<SampleEntity, String>() .withEntityType(SampleEntity.class) .withKeyspace(keyspace) .withColumnFamily(CF_SAMPLE_ENTITY) .build(); SampleEntity origEntity = createSampleEntity(id); entityPersister.put(origEntity); // use low-level astyanax API to confirm the write { ColumnList<String> cl = keyspace.prepareQuery(CF_SAMPLE_ENTITY).getKey(id).execute().getResult(); // 19 simple columns // 2 one-level-deep nested columns from Bar // 2 two-level-deep nested columns from BarBar Assert.assertEquals(23, cl.size()); // simple columns Assert.assertEquals(origEntity.getString(), cl.getColumnByName("STRING").getStringValue()); Assert.assertArrayEquals( origEntity.getByteArray(), cl.getColumnByName("BYTE_ARRAY").getByteArrayValue()); // nested fields Assert.assertEquals(origEntity.getBar().i, cl.getColumnByName("BAR.i").getIntegerValue()); Assert.assertEquals(origEntity.getBar().s, cl.getColumnByName("BAR.s").getStringValue()); Assert.assertEquals( origEntity.getBar().barbar.i, cl.getColumnByName("BAR.barbar.i").getIntegerValue()); Assert.assertEquals( origEntity.getBar().barbar.s, cl.getColumnByName("BAR.barbar.s").getStringValue()); } SampleEntity getEntity = entityPersister.get(id); Assert.assertEquals(origEntity, getEntity); entityPersister.delete(id); // use low-level astyanax API to confirm the delete { ColumnList<String> cl = keyspace.prepareQuery(CF_SAMPLE_ENTITY).getKey(id).execute().getResult(); Assert.assertEquals(0, cl.size()); } }
/**
 * Fast check to see if a shard has messages to process.
 *
 * @param shardName row key of the shard to inspect
 * @return true if at least one due message column exists in the shard
 * @throws MessageQueueException if the underlying Cassandra read fails
 */
private boolean hasMessages(String shardName) throws MessageQueueException {
  // Upper bound for the range: only entries timestamped up to "now" count.
  UUID currentTime = TimeUUIDUtils.getUniqueTimeUUIDinMicros();

  try {
    ColumnList<MessageQueueEntry> result =
        keyspace
            .prepareQuery(queueColumnFamily)
            .setConsistencyLevel(consistencyLevel)
            .getKey(shardName)
            .withColumnRange(
                new RangeBuilder()
                    .setLimit(1) // Read extra messages because of the lock column
                    // Range start: the lowest possible column of type Message.
                    .setStart(
                        entrySerializer
                            .makeEndpoint(
                                (byte) MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
                            .toBytes())
                    // Range end: Message-type columns whose time-UUID component is
                    // <= currentTime. NOTE(review): the appended (byte) 0 presumably
                    // selects the zero priority bucket — confirm against the
                    // MessageQueueEntry composite layout.
                    .setEnd(
                        entrySerializer
                            .makeEndpoint(
                                (byte) MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
                            .append((byte) 0, Equality.EQUAL)
                            .append(currentTime, Equality.LESS_THAN_EQUALS)
                            .toBytes())
                    .build())
            .execute()
            .getResult();
    return !result.isEmpty();
  } catch (ConnectionException e) {
    throw new MessageQueueException("Error checking shard for messages. " + shardName, e);
  }
}
@Override public Message peekMessage(String messageId) throws MessageQueueException { String[] parts = splitCompositeKey(messageId); String shardKey = parts[0]; MessageQueueEntry entry = new MessageQueueEntry(parts[1]); try { Column<MessageQueueEntry> column = keyspace .prepareQuery(queueColumnFamily) .setConsistencyLevel(consistencyLevel) .getKey(shardKey) .getColumn(entry) .execute() .getResult(); try { ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue()); return mapper.readValue(bais, Message.class); } catch (Exception e) { LOG.warn("Error parsing message", e); // Error parsing the message so we pass it on to the invalid message handler. try { return invalidMessageHandler.apply(column.getStringValue()); } catch (Exception e2) { LOG.warn("Error handling invalid message message", e2); throw new MessageQueueException("Error parsing message " + messageId); } } } catch (NotFoundException e) { return null; } catch (ConnectionException e) { throw new MessageQueueException("Error getting message " + messageId, e); } }
/**
 * Read the data stored with the unique row. This data is normally a 'foreign'
 * key to another column family.
 *
 * @return value of the single committed (TTL == 0) uniqueness column
 * @throws IllegalStateException if more than one committed lock column exists
 * @throws NotFoundException if the row has no committed lock column
 */
public ByteBuffer readData() throws Exception {
  ColumnList<C> result =
      keyspace
          .prepareQuery(columnFamily)
          .setConsistencyLevel(consistencyLevel)
          .getKey(key)
          .execute()
          .getResult();

  boolean hasColumn = false;
  ByteBuffer data = null;
  for (Column<C> column : result) {
    // Only columns without a TTL are committed locks; TTL'd columns are
    // still-pending claims.
    if (column.getTtl() == 0) {
      if (hasColumn) {
        // Fixed message typo: "uniquneness" -> "uniqueness".
        throw new IllegalStateException("Row has multiple uniqueness locks");
      }
      hasColumn = true;
      data = column.getByteBufferValue();
    }
  }

  if (!hasColumn) {
    // Fixed message typo: "uniquness" -> "uniqueness".
    throw new NotFoundException(this.key.toString() + " has no uniqueness lock");
  }
  return data;
}
/**
 * Peeks all messages indexed under the given key without consuming them.
 *
 * @param key client-visible key; combined with the queue name to form the
 *     metadata row key
 * @return all resolvable messages for the key; empty if the row is missing
 * @throws MessageQueueException if the index row cannot be read
 */
@Override
public List<Message> peekMessagesByKey(String key) throws MessageQueueException {
  String groupRowKey = getCompositeKey(getName(), key);
  List<Message> messages = Lists.newArrayList();
  try {
    ColumnList<MessageMetadataEntry> columns =
        keyspace
            .prepareQuery(keyIndexColumnFamily)
            .getRow(groupRowKey)
            .withColumnRange(
                metadataSerializer
                    .buildRange()
                    .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                    .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                    .build())
            .execute()
            .getResult();

    for (Column<MessageMetadataEntry> entry : columns) {
      // Entries with a TTL are tombstoned and awaiting cleanup; skip them.
      if (entry.getTtl() != 0) continue;

      Message message = peekMessage(entry.getName().getName());
      if (message != null) {
        // BUG FIX: reuse the message already fetched above. The original
        // called peekMessage() a second time here, doubling the reads and
        // risking adding a different (or null) result.
        messages.add(message);
      } else {
        LOG.warn("No queue item for " + entry.getName());
      }
    }
  } catch (NotFoundException e) {
    // No index row for this key; fall through and return the empty list.
  } catch (ConnectionException e) {
    throw new MessageQueueException("Error fetching row " + groupRowKey, e);
  }
  return messages;
}
@Override public Message peekMessageByKey(String key) throws MessageQueueException { String groupRowKey = getCompositeKey(getName(), key); try { ColumnList<MessageMetadataEntry> columns = keyspace .prepareQuery(keyIndexColumnFamily) .setConsistencyLevel(consistencyLevel) .getRow(groupRowKey) .withColumnRange( metadataSerializer .buildRange() .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal()) .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal()) .build()) .execute() .getResult(); for (Column<MessageMetadataEntry> entry : columns) { if (entry.getTtl() != 0) continue; // Return the first one we get. Hmmm... maybe we want to do some validation checks here return peekMessage(entry.getName().getName()); } return null; } catch (NotFoundException e) { return null; } catch (ConnectionException e) { throw new MessageQueueException("Error fetching row " + groupRowKey, e); } }
/**
 * Return history for a single key for the specified time range
 *
 * <p>TODO: honor the time range :)
 *
 * <p>NOTE(review): startTime, endTime and count are currently ignored — the
 * entire history row is read and every deserializable entry is returned.
 *
 * @param key row key in the history column family
 * @param startTime currently ignored (see TODO)
 * @param endTime currently ignored (see TODO)
 * @param count currently ignored (see TODO)
 * @return all history entries that deserialize successfully
 * @throws MessageQueueException if the history row cannot be read
 */
@Override
public List<MessageHistory> getKeyHistory(String key, Long startTime, Long endTime, int count)
    throws MessageQueueException {
  List<MessageHistory> list = Lists.newArrayList();
  ColumnList<UUID> columns;
  try {
    columns =
        keyspace
            .prepareQuery(historyColumnFamily)
            .setConsistencyLevel(consistencyLevel)
            .getRow(key)
            .execute()
            .getResult();
  } catch (ConnectionException e) {
    throw new MessageQueueException("Failed to load history for " + key, e);
  }

  for (Column<UUID> column : columns) {
    try {
      list.add(deserializeString(column.getStringValue(), MessageHistory.class));
    } catch (Exception e) {
      // Entries that fail to deserialize are logged and skipped rather than
      // failing the whole call.
      LOG.info("Error deserializing history entry", e);
    }
  }
  return list;
}
@Override public UniqueValueSet load( final ApplicationScope appScope, final ConsistencyLevel consistencyLevel, final String type, final Collection<Field> fields) throws ConnectionException { Preconditions.checkNotNull(fields, "fields are required"); Preconditions.checkArgument(fields.size() > 0, "More than 1 field must be specified"); final List<ScopedRowKey<FieldKey>> keys = new ArrayList<>(fields.size()); final Id applicationId = appScope.getApplication(); for (Field field : fields) { final FieldKey key = createUniqueValueKey(applicationId, type, field); final ScopedRowKey<FieldKey> rowKey = ScopedRowKey.fromKey(applicationId, key); keys.add(rowKey); } final UniqueValueSetImpl uniqueValueSet = new UniqueValueSetImpl(fields.size()); Iterator<Row<ScopedRowKey<FieldKey>, EntityVersion>> results = keyspace .prepareQuery(CF_UNIQUE_VALUES) .setConsistencyLevel(consistencyLevel) .getKeySlice(keys) .withColumnRange(new RangeBuilder().setLimit(1).build()) .execute() .getResult() .iterator(); while (results.hasNext()) { final Row<ScopedRowKey<FieldKey>, EntityVersion> unique = results.next(); final Field field = parseRowKey(unique.getKey()); final Iterator<Column<EntityVersion>> columnList = unique.getColumns().iterator(); // sanity check, nothing to do, skip it if (!columnList.hasNext()) { continue; } final EntityVersion entityVersion = columnList.next().getName(); final UniqueValueImpl uniqueValue = new UniqueValueImpl(field, entityVersion.getEntityId(), entityVersion.getEntityVersion()); uniqueValueSet.addValue(uniqueValue); } return uniqueValueSet; }
@Test public void testTtlOverride() throws Exception { final String id = "testTtlAnnotation"; EntityManager<TtlEntity, String> entityPersister = new DefaultEntityManager.Builder<TtlEntity, String>() .withEntityType(TtlEntity.class) .withKeyspace(keyspace) .withColumnFamily(CF_SAMPLE_ENTITY) .withTTL(5) .build(); TtlEntity origEntity = createTtlEntity(id); entityPersister.put(origEntity); // use low-level astyanax API to confirm the write { ColumnList<String> cl = keyspace.prepareQuery(CF_SAMPLE_ENTITY).getKey(id).execute().getResult(); // test column number Assert.assertEquals(1, cl.size()); // test column value Assert.assertEquals(origEntity.getColumn(), cl.getColumnByName("column").getStringValue()); // custom ttl Assert.assertEquals(5, cl.getColumnByName("column").getTtl()); } TtlEntity getEntity = entityPersister.get(id); Assert.assertEquals(origEntity, getEntity); // entity should still be alive after 3s since TTL is overriden to 5s Thread.sleep(1000 * 3); getEntity = entityPersister.get(id); Assert.assertEquals(origEntity, getEntity); // entity should expire after 3s since 6s have passed with 5s TTL Thread.sleep(1000 * 3); // use low-level astyanax API to confirm the TTL expiration { ColumnList<String> cl = keyspace.prepareQuery(CF_SAMPLE_ENTITY).getKey(id).execute().getResult(); Assert.assertEquals(0, cl.size()); } }
/** Builds the row query for one page of index columns under the current prefix. */
@Override
protected RowQuery<String, IndexColumnName> genQuery() {
  return _keyspace
      .prepareQuery(_field.getIndexCF())
      .getKey(_indexKey.toString())
      .withColumnRange(_field.buildPrefixRange(_prefix, pageCount));
}
/**
 * Writes one data point through the prepared CQL statement.
 *
 * @param keyspace keyspace to execute the statement against
 * @param key cache row key, bound via cacheKeySerializer
 * @param dataOffset offset of the point within the row
 * @param d point whose value is bound as a double
 * @throws ConnectionException if the CQL execution fails
 */
private void doPut(Keyspace keyspace, CacheKey key, Integer dataOffset, Point d)
    throws ConnectionException {
  // Bind values in the positional order CQL_STMT expects: key, offset, value.
  keyspace
      .prepareQuery(columnFamily)
      .withCql(CQL_STMT)
      .asPreparedStatement()
      .withByteBufferValue(key, cacheKeySerializer)
      .withIntegerValue(dataOffset)
      .withDoubleValue(d.getValue())
      .execute();
}
/**
 * Verifies that acquireAndApplyMutation writes the caller's column together
 * with the uniqueness lock, and that release() removes only the lock column.
 */
@Test
public void testAcquireAndMutate() throws Exception {
  final String row = "testAcquireAndMutate";
  final String dataColumn = "data";
  final String value = "test";

  ColumnPrefixUniquenessConstraint<String> unique =
      new ColumnPrefixUniquenessConstraint<String>(keyspace, UNIQUE_CF, row)
          .withConsistencyLevel(ConsistencyLevel.CL_ONE)
          .withUniqueId("def");

  try {
    unique.acquireAndApplyMutation(
        new Function<MutationBatch, Boolean>() {
          @Override
          public Boolean apply(MutationBatch m) {
            m.withRow(UNIQUE_CF, row).putColumn(dataColumn, value, null);
            return true;
          }
        });
    String column = unique.readUniqueColumn();
    Assert.assertNotNull(column);
  } catch (Exception e) {
    // Log once with context and fail with the message, instead of the
    // original printStackTrace + LOG.error + bare Assert.fail(); the empty
    // finally block is also removed.
    LOG.error("acquireAndApplyMutation failed", e);
    Assert.fail(e.getMessage());
  }

  // Row now holds the data column plus the uniqueness lock column.
  ColumnList<String> columns = keyspace.prepareQuery(UNIQUE_CF).getKey(row).execute().getResult();
  Assert.assertEquals(2, columns.size());
  Assert.assertEquals(value, columns.getStringValue(dataColumn, null));

  unique.release();

  // After release only the data column remains.
  columns = keyspace.prepareQuery(UNIQUE_CF).getKey(row).execute().getResult();
  Assert.assertEquals(1, columns.size());
  Assert.assertEquals(value, columns.getStringValue(dataColumn, null));
}
@Test public void doubleIdColumnAnnotation() throws Exception { final String id = "doubleIdColumnAnnotation"; EntityManager<DoubleIdColumnEntity, String> entityPersister = new DefaultEntityManager.Builder<DoubleIdColumnEntity, String>() .withEntityType(DoubleIdColumnEntity.class) .withKeyspace(keyspace) .withColumnFamily(CF_SAMPLE_ENTITY) .build(); DoubleIdColumnEntity origEntity = createDoubleIdColumnEntity(id); entityPersister.put(origEntity); // use low-level astyanax API to confirm the write { ColumnList<String> cl = keyspace.prepareQuery(CF_SAMPLE_ENTITY).getKey(id).execute().getResult(); // test column number Assert.assertEquals(3, cl.size()); // test column value Assert.assertEquals(origEntity.getId(), cl.getColumnByName("id").getStringValue()); Assert.assertEquals(origEntity.getNum(), cl.getColumnByName("num").getIntegerValue()); Assert.assertEquals(origEntity.getStr(), cl.getColumnByName("str").getStringValue()); } DoubleIdColumnEntity getEntity = entityPersister.get(id); Assert.assertEquals(origEntity, getEntity); entityPersister.delete(id); // use low-level astyanax API to confirm the delete { ColumnList<String> cl = keyspace.prepareQuery(CF_SAMPLE_ENTITY).getKey(id).execute().getResult(); Assert.assertEquals(0, cl.size()); } }
/**
 * Inserts a (userId, skuId, priceValue) row into the time-span column family
 * via the prepared CQL INSERT statement.
 *
 * @param userId user id bound as the first statement parameter
 * @param skuId sku id bound as the second statement parameter
 * @param priceValue price bound as the third statement parameter
 * @throws RuntimeException wrapping any Cassandra connection failure
 */
public void insertToTimeSpanColumnFamily(int userId, int skuId, int priceValue) {
  try {
    // Execute for the side effect only; the original bound the
    // OperationResult to an unused local.
    keyspace
        .prepareQuery(TIMESPAN_CF)
        .withCql(INSERT_STATEMENT)
        .asPreparedStatement()
        .withIntegerValue(userId)
        .withIntegerValue(skuId)
        .withIntegerValue(priceValue)
        .execute();
  } catch (ConnectionException e) {
    throw new RuntimeException("Failed to write to column", e);
  }
}
/**
 * Deletes every queue entry indexed under the given key, then drops the
 * key's metadata row itself, all in one mutation batch.
 *
 * @param key client-visible key whose messages should be removed
 * @return true if a delete was issued, false if the index row did not exist
 * @throws MessageQueueException if reading the index row or executing the
 *     batch fails
 */
@Override
public boolean deleteMessageByKey(String key) throws MessageQueueException {
  final MutationBatch batch =
      keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
  final String groupRowKey = getCompositeKey(getName(), key);

  try {
    // Fetch all MessageId metadata columns for this key.
    final ColumnList<MessageMetadataEntry> index =
        keyspace
            .prepareQuery(keyIndexColumnFamily)
            .setConsistencyLevel(consistencyLevel)
            .getRow(groupRowKey)
            .withColumnRange(
                metadataSerializer
                    .buildRange()
                    .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                    .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                    .build())
            .execute()
            .getResult();

    // Queue a delete for each referenced queue column ("shard$entry") ...
    for (final Column<MessageMetadataEntry> metadata : index) {
      final String[] parts = splitCompositeKey(metadata.getName().getName());
      batch.withRow(queueColumnFamily, parts[0]).deleteColumn(new MessageQueueEntry(parts[1]));
    }
    // ... then drop the index row itself.
    batch.withRow(keyIndexColumnFamily, groupRowKey).delete();
  } catch (NotFoundException e) {
    return false;
  } catch (ConnectionException e) {
    throw new MessageQueueException("Error fetching row " + groupRowKey, e);
  }

  try {
    batch.execute();
  } catch (ConnectionException e) {
    throw new MessageQueueException("Error deleting queue item " + groupRowKey, e);
  }
  return true;
}
/**
 * Fetches the value of a single column for the given key.
 *
 * @return the column's value, or null when the column does not exist
 * @throws StorageException wrapping any transient connection failure
 */
@Override
public ByteBuffer get(ByteBuffer key, ByteBuffer column, TransactionHandle txh)
    throws StorageException {
  try {
    Column<ByteBuffer> found =
        keyspace
            .prepareQuery(cf)
            .setConsistencyLevel(readLevel)
            .withRetryPolicy(retryPolicy.duplicate())
            .getKey(key)
            .getColumn(column)
            .execute()
            .getResult();
    return found.getByteBufferValue();
  } catch (NotFoundException e) {
    // A missing column maps to null, not an error.
    return null;
  } catch (ConnectionException e) {
    throw new TemporaryStorageException(e);
  }
}
@Override public boolean containsKey(ByteBuffer key, TransactionHandle txh) throws StorageException { try { // See getSlice() below for a warning suppression justification @SuppressWarnings("rawtypes") RowQuery rq = (RowQuery) keyspace .prepareQuery(cf) .withRetryPolicy(retryPolicy.duplicate()) .setConsistencyLevel(readLevel) .getKey(key); @SuppressWarnings("unchecked") OperationResult<ColumnList<ByteBuffer>> r = rq.withColumnRange(EMPTY, EMPTY, false, 1).execute(); return 0 < r.getResult().size(); } catch (ConnectionException e) { throw new TemporaryStorageException(e); } }
/**
 * Acquires the uniqueness lock and, on success, applies the caller's
 * mutations atomically with the lock commit.
 *
 * <p>Three-phase protocol:
 * <ol>
 *   <li>Write the unique column with a ttl (a tentative claim).
 *   <li>Read the whole row back; exactly one column means we won.
 *   <li>Rewrite the unique column with no ttl (the commit), batched together
 *       with the caller's mutations.
 * </ol>
 *
 * @param callback extra mutations to batch with the commit; may be null.
 *     NOTE(review): its Boolean return value is ignored — confirm intended.
 * @throws NotUniqueException if the row holds more than one claim column
 * @throws Exception any other failure; release() is attempted first
 */
@Override
public void acquireAndApplyMutation(Function<MutationBatch, Boolean> callback)
    throws NotUniqueException, Exception {
  try {
    // Phase 1: Write a unique column (with the instance's ttl — presumably
    // so a crashed client's claim eventually expires; confirm ttl is set).
    MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    if (data == null) {
      m.withRow(columnFamily, key).putEmptyColumn(uniqueColumn, ttl);
    } else {
      m.withRow(columnFamily, key).putColumn(uniqueColumn, data, ttl);
    }
    m.execute();

    // Phase 2: Read back all columns. There should be only 1
    ColumnList<C> result =
        keyspace
            .prepareQuery(columnFamily)
            .setConsistencyLevel(consistencyLevel)
            .getKey(key)
            .execute()
            .getResult();

    if (result.size() != 1) {
      throw new NotUniqueException(key.toString());
    }

    // Phase 3: Persist the unique column without a ttl (the commit) together
    // with the caller's mutations in a single batch.
    m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    if (callback != null) callback.apply(m);

    if (data == null) {
      m.withRow(columnFamily, key).putEmptyColumn(uniqueColumn, null);
    } else {
      m.withRow(columnFamily, key).putColumn(uniqueColumn, data, null);
    }
    m.execute();
  } catch (Exception e) {
    // Best-effort cleanup of the tentative claim before propagating.
    release();
    throw e;
  }
}
/**
 * Loads all unique-field log entries recorded for the given entity.
 *
 * @param collectionScope application scope (required)
 * @param entityId entity whose unique-field log row is read (required)
 * @return iterator over the row's columns parsed into UniqueValue instances
 */
@Override
public Iterator<UniqueValue> getAllUniqueFields(
    final ApplicationScope collectionScope, final Id entityId) {
  Preconditions.checkNotNull(collectionScope, "collectionScope is required");
  Preconditions.checkNotNull(entityId, "entity id is required");

  final Id applicationId = collectionScope.getApplication();

  final EntityKey entityKey = createEntityUniqueLogKey(applicationId, entityId);

  final ScopedRowKey<EntityKey> rowKey = ScopedRowKey.fromKey(applicationId, entityKey);

  RowQuery<ScopedRowKey<EntityKey>, UniqueFieldEntry> query =
      keyspace
          .prepareQuery(CF_ENTITY_UNIQUE_VALUE_LOG)
          .getKey(rowKey)
          .withColumnRange(
              // Null start/end select the full column range; the explicit cast
              // disambiguates the (C, C, boolean, int) overload of
              // withColumnRange from the ByteBuffer one.
              (UniqueFieldEntry) null, null, false, serializationFig.getBufferSize());

  return new ColumnNameIterator(query, new UniqueEntryParser(entityId), false);
}
/**
 * Returns the column count of every shard row belonging to this queue.
 *
 * @return map of shard row key to column count, in sorted key order
 * @throws MessageQueueException if the count query fails
 */
@Override
public Map<String, Integer> getShardCounts() throws MessageQueueException {
  try {
    // Enumerate every shard row key: "<queue>:<partition>:<shard>".
    final List<String> shardKeys = Lists.newArrayList();
    for (int partition = 0; partition < metadata.getPartitionCount(); partition++) {
      for (int shard = 0; shard < metadata.getShardCount(); shard++) {
        shardKeys.add(getName() + ":" + partition + ":" + shard);
      }
    }

    final Map<String, Integer> counts = Maps.newTreeMap();
    counts.putAll(
        keyspace
            .prepareQuery(queueColumnFamily)
            .getKeySlice(shardKeys)
            .getColumnCounts()
            .execute()
            .getResult());
    return counts;
  } catch (ConnectionException e) {
    throw new MessageQueueException("Failed to get counts", e);
  }
}
/**
 * Peek into messages contained in the shard. This call does not take trigger time into account
 * and will return messages that are not yet due to be executed
 *
 * @param shardName row key of the shard to read
 * @param itemsToPeek maximum number of messages to return (javadoc fixed:
 *     this was previously documented as {@code itemsToPop})
 * @return messages extracted from the shard's Message-type columns
 * @throws MessageQueueException if the shard row cannot be read
 */
private Collection<Message> peekMessages(String shardName, int itemsToPeek)
    throws MessageQueueException {
  try {
    ColumnList<MessageQueueEntry> result =
        keyspace
            .prepareQuery(queueColumnFamily)
            .setConsistencyLevel(consistencyLevel)
            .getKey(shardName)
            .withColumnRange(
                // Full range of Message-type columns, regardless of trigger time.
                new RangeBuilder()
                    .setLimit(itemsToPeek)
                    .setStart(
                        entrySerializer
                            .makeEndpoint(
                                (byte) MessageQueueEntryType.Message.ordinal(),
                                Equality.GREATER_THAN_EQUALS)
                            .toBytes())
                    .setEnd(
                        entrySerializer
                            .makeEndpoint(
                                (byte) MessageQueueEntryType.Message.ordinal(),
                                Equality.LESS_THAN_EQUALS)
                            .toBytes())
                    .build())
            .execute()
            .getResult();

    List<Message> messages = Lists.newArrayListWithCapacity(result.size());
    for (Column<MessageQueueEntry> column : result) {
      // extractMessageFromColumn may return null; such entries are skipped.
      Message message = extractMessageFromColumn(column);
      if (message != null) messages.add(message);
    }
    return messages;
  } catch (ConnectionException e) {
    throw new MessageQueueException("Error peeking for messages from shard " + shardName, e);
  }
}
public static void main(String args[]) throws ConnectionException { String[] calles_28001 = {"Alcala", "Preciados", "Gran Via", "Princesa"}; String[] calles_28002 = {"Castellana", "Goya", "Serrano", "Velazquez"}; int index_28001 = 0; int index_28002 = 0; List<User> users = new ArrayList<User>(); for (int i = 0; i < 10; i++) { String id = (i + 1) + ""; String email = "user" + id + "@void.com"; String nombre = "nombre_" + id; String cp; String calle; if (i % 2 == 0) { cp = "28001"; calle = calles_28001[index_28001]; index_28001++; index_28001 = index_28001 % 4; } else { cp = "28002"; calle = calles_28002[index_28002]; index_28002++; index_28002 = index_28002 % 4; } User user = new User(id, email, nombre, cp, calle); users.add(user); } // conectar y crear column family Keyspace ksUsers = Utils.getKeyspace("utad"); String columnFamily = "compositeKeys"; ColumnFamily<String, String> cfUsers = new ColumnFamily<String, String>( columnFamily, StringSerializer.get(), StringSerializer.get()); try { ksUsers.dropColumnFamily(columnFamily); } catch (Exception e) { System.out.println("No existe el column family a borrar: " + columnFamily); } try { ksUsers.createColumnFamily( cfUsers, ImmutableMap.<String, Object>builder() .put("key_validation_class", "BytesType") .put("comparator_type", "BytesType") .build()); } catch (Exception e) { System.out.println("Error creando el column family: " + columnFamily + " " + e.getMessage()); } MutationBatch m = ksUsers.prepareMutationBatch(); String rowKey = "usersByCPAddress"; ColumnListMutation<String> clm = m.withRow(cfUsers, rowKey); System.out.println("\nEscribimos los datos"); for (User user : users) { String id = user.id; String cp = user.cp; String nombre = user.nombre; String email = user.email; String calle = user.calle; // escribir String key = id + ":" + cp + ":" + calle; String value = id + ":" + nombre + ":" + email; clm.putColumn(key, value); ksUsers.prepareColumnMutation(cfUsers, rowKey, key).putValue(value, null).execute(); } 
// leer el resultado System.out.println("\nLeer el resultado"); RowQuery<String, String> query = ksUsers .prepareQuery(cfUsers) .getKey(rowKey) .withColumnRange(new RangeBuilder().build()) .autoPaginate(true); ColumnList<String> columns = query.execute().getResult(); for (Column<String> c : columns) { String key = c.getName(); String value = c.getStringValue(); System.out.println("\nclave"); String[] ksplit = key.split(":"); for (String string : ksplit) { System.out.println("\t" + string); } System.out.println("valor"); String[] kvalue = value.split(":"); for (String string : kvalue) { System.out.println("\t" + string); } } }
/**
 * Reads an entire row from the given column family.
 *
 * @param ks keyspace to query
 * @param cf column family holding the row
 * @param rowKey key of the row to fetch
 * @return all columns of the row
 * @throws ConnectionException if the read fails
 */
public ColumnList<Long> readRow(Keyspace ks, ColumnFamily<String, Long> cf, String rowKey)
    throws ConnectionException {
  return ks.prepareQuery(cf).getKey(rowKey).execute().getResult();
}
/**
 * Returns up to {@code limit} entries in the column range
 * [columnStart, columnEnd) for the given key. One extra column is fetched so
 * the end column (exclusive in this API, inclusive in Cassandra) can be
 * detected and dropped.
 */
@Override
public List<Entry> getSlice(
    ByteBuffer key,
    ByteBuffer columnStart,
    ByteBuffer columnEnd,
    int limit,
    TransactionHandle txh)
    throws StorageException {
  /*
   * The following hideous cast dance avoids a type-erasure error in the
   * RowQuery<K, V> type that emerges when K=V=ByteBuffer. Specifically,
   * these two methods erase to the same signature after generic reduction
   * during compilation:
   *
   * RowQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean
   * reversed, int count) RowQuery<K, C> withColumnRange(ByteBuffer
   * startColumn, ByteBuffer endColumn, boolean reversed, int count)
   *
   *
   * The compiler substitutes ByteBuffer=C for both startColumn and
   * endColumn, compares it to its identical twin with that type
   * hard-coded, and dies.
   *
   * Here's the compiler error I received when attempting to compile this
   * code without the following casts. I used Oracle JDK 6 Linux x86_64.
   *
   * AstyanaxOrderedKeyColumnValueStore.java:[108,4] reference to
   * withColumnRange is ambiguous, both method
   * withColumnRange(C,C,boolean,int) in
   * com.netflix.astyanax.query.RowQuery<java.nio.ByteBuffer,java.nio.ByteBuffer>
   * and method
   * withColumnRange(java.nio.ByteBuffer,java.nio.ByteBuffer,boolean,int)
   * in
   * com.netflix.astyanax.query.RowQuery<java.nio.ByteBuffer,java.nio.ByteBuffer>
   * match
   *
   */
  @SuppressWarnings("rawtypes")
  RowQuery rq =
      (RowQuery)
          keyspace
              .prepareQuery(cf)
              .setConsistencyLevel(readLevel)
              .withRetryPolicy(retryPolicy.duplicate())
              .getKey(key);
  // RowQuery<ByteBuffer, ByteBuffer> rq = keyspace.prepareQuery(cf).getKey(key);
  // Over-fetch by one so the (exclusive) end column can be recognized below.
  rq.withColumnRange(columnStart, columnEnd, false, limit + 1);

  OperationResult<ColumnList<ByteBuffer>> r;
  try {
    @SuppressWarnings("unchecked")
    OperationResult<ColumnList<ByteBuffer>> tmp =
        (OperationResult<ColumnList<ByteBuffer>>) rq.execute();
    r = tmp;
  } catch (ConnectionException e) {
    throw new TemporaryStorageException(e);
  }

  List<Entry> result = new ArrayList<Entry>(r.getResult().size());
  int i = 0;
  for (Column<ByteBuffer> c : r.getResult()) {
    ByteBuffer colName = c.getName();
    // Cassandra's range end is inclusive; drop the end column to keep this
    // method's contract end-exclusive.
    if (colName.equals(columnEnd)) {
      break;
    }
    result.add(new Entry(colName, c.getByteBufferValue()));
    // Stop once the caller's limit is reached (the +1 over-fetch above may
    // have returned one more column than requested).
    if (++i == limit) {
      break;
    }
  }
  return result;
}