@Override
public Message peekMessage(String messageId) throws MessageQueueException {
    String[] parts = splitCompositeKey(messageId);
    String shardKey = parts[0];
    MessageQueueEntry entry = new MessageQueueEntry(parts[1]);

    try {
        Column<MessageQueueEntry> column = keyspace
                .prepareQuery(queueColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getKey(shardKey)
                .getColumn(entry)
                .execute()
                .getResult();
        try {
            ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
            return mapper.readValue(bais, Message.class);
        } catch (Exception e) {
            LOG.warn("Error parsing message", e);
            // Error parsing the message so we pass it on to the invalid message handler.
            try {
                return invalidMessageHandler.apply(column.getStringValue());
            } catch (Exception e2) {
                LOG.warn("Error handling invalid message", e2);
                throw new MessageQueueException("Error parsing message " + messageId);
            }
        }
    } catch (NotFoundException e) {
        return null;
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error getting message " + messageId, e);
    }
}
/**
 * Read the data stored with the unique row. This data is normally a 'foreign' key to
 * another column family.
 *
 * @return the data stored with the unique column
 * @throws Exception
 */
public ByteBuffer readData() throws Exception {
    ColumnList<C> result = keyspace
            .prepareQuery(columnFamily)
            .setConsistencyLevel(consistencyLevel)
            .getKey(key)
            .execute()
            .getResult();

    boolean hasColumn = false;
    ByteBuffer data = null;
    for (Column<C> column : result) {
        if (column.getTtl() == 0) {
            if (hasColumn) {
                throw new IllegalStateException("Row has multiple uniqueness locks");
            }
            hasColumn = true;
            data = column.getByteBufferValue();
        }
    }

    if (!hasColumn) {
        throw new NotFoundException(this.key.toString() + " has no uniqueness lock");
    }
    return data;
}
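A minimal caller sketch for readData(): the bytes stored with the uniqueness column are used as a row key into another column family. The names constraint and CF_USERS below are hypothetical, not part of the original class:

// Sketch: treat the stored bytes as a 'foreign' row key into a hypothetical
// ColumnFamily<ByteBuffer, String> named CF_USERS.
ByteBuffer foreignKey = constraint.readData();
ColumnList<String> userRow = keyspace
        .prepareQuery(CF_USERS)
        .getKey(foreignKey)
        .execute()
        .getResult();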
/**
 * Return history for a single key for the specified time range.
 *
 * <p>TODO: honor the time range :)
 */
@Override
public List<MessageHistory> getKeyHistory(String key, Long startTime, Long endTime, int count)
        throws MessageQueueException {
    List<MessageHistory> list = Lists.newArrayList();
    ColumnList<UUID> columns;
    try {
        columns = keyspace
                .prepareQuery(historyColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getRow(key)
                .execute()
                .getResult();
    } catch (ConnectionException e) {
        throw new MessageQueueException("Failed to load history for " + key, e);
    }

    for (Column<UUID> column : columns) {
        try {
            list.add(deserializeString(column.getStringValue(), MessageHistory.class));
        } catch (Exception e) {
            LOG.info("Error deserializing history entry", e);
        }
    }
    return list;
}
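One hedged sketch of how the TODO above might honor the time range: the history columns are TimeUUIDs, so the slice can be bounded by TimeUUIDs built from startTime and endTime (assumed here to be in microseconds) and capped at count. This is a sketch, not the shipped implementation:

// Sketch only: bound the history slice by the requested time range and count.
columns = keyspace
        .prepareQuery(historyColumnFamily)
        .setConsistencyLevel(consistencyLevel)
        .getRow(key)
        .withColumnRange(
                TimeUUIDUtils.getMicrosTimeUUID(startTime), // assumed to be micros
                TimeUUIDUtils.getMicrosTimeUUID(endTime),   // assumed to be micros
                false,                                      // ascending order
                count)                                      // honor the count limit
        .execute()
        .getResult();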
@Override
public Message peekMessageByKey(String key) throws MessageQueueException {
    String groupRowKey = getCompositeKey(getName(), key);
    try {
        ColumnList<MessageMetadataEntry> columns = keyspace
                .prepareQuery(keyIndexColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getRow(groupRowKey)
                .withColumnRange(
                        metadataSerializer
                                .buildRange()
                                .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                .build())
                .execute()
                .getResult();

        for (Column<MessageMetadataEntry> entry : columns) {
            if (entry.getTtl() != 0) {
                continue;
            }
            // Return the first one we get. Hmm... maybe we want to do some validation checks here
            return peekMessage(entry.getName().getName());
        }
        return null;
    } catch (NotFoundException e) {
        return null;
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }
}
@Override
public List<Message> peekMessagesByKey(String key) throws MessageQueueException {
    String groupRowKey = getCompositeKey(getName(), key);
    List<Message> messages = Lists.newArrayList();
    try {
        ColumnList<MessageMetadataEntry> columns = keyspace
                .prepareQuery(keyIndexColumnFamily)
                .getRow(groupRowKey)
                .withColumnRange(
                        metadataSerializer
                                .buildRange()
                                .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                .build())
                .execute()
                .getResult();

        for (Column<MessageMetadataEntry> entry : columns) {
            if (entry.getTtl() != 0) {
                continue;
            }
            Message message = peekMessage(entry.getName().getName());
            if (message != null) {
                messages.add(message); // reuse the message we already fetched
            } else {
                LOG.warn("No queue item for " + entry.getName());
            }
        }
    } catch (NotFoundException e) {
        // No index row for this key; fall through and return the empty list.
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }
    return messages;
}
public static void main(String[] args) throws ConnectionException {
    BasicConfigurator.configure();
    CassandraRead me = new CassandraRead();
    Keyspace ks = me.init();
    ColumnFamily<String, Long> cf = me.getCf();
    ColumnList<Long> columns = me.readRow(ks, cf, "test_Jakobs-MacBook-Air.local_1355356800000");
    for (Column<Long> c : columns) {
        System.out.println(c.getName());
    }
}
/**
 * Extract a message body from a column.
 *
 * @param column
 * @return the parsed message, or null if the body could not be handled
 */
Message extractMessageFromColumn(Column<MessageQueueEntry> column) {
    // Next, parse the message metadata and add a timeout entry
    Message message = null;
    try {
        ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
        message = mapper.readValue(bais, Message.class);
    } catch (Exception e) {
        LOG.warn("Error processing message", e);
        try {
            message = invalidMessageHandler.apply(column.getStringValue());
        } catch (Exception e2) {
            LOG.warn("Error processing invalid message", e2);
        }
    }
    return message;
}
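For context, a minimal sketch of what an invalidMessageHandler could look like. The wiring shown is hypothetical, but the call above applies it as a Function<String, Message>, and returning null makes extractMessageFromColumn() report an unparsable entry:

// Hypothetical handler: log the raw column payload and drop the message by
// returning null, which callers treat as "no valid message in this column".
Function<String, Message> invalidMessageHandler = new Function<String, Message>() {
    @Override
    public Message apply(String body) {
        LOG.warn("Dropping unparsable message body: " + body);
        return null;
    }
};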
public void run() {
    try {
        ColumnList<String> sessions = baseCassandraDao.readWithKey(
                ColumnFamily.SESSIONS.getColumnFamily(), (gooruUId + SEPERATOR + SESSIONS), 0);
        for (Column<String> session : sessions) {
            if (session.getStringValue() != null
                    && !sessionId.equalsIgnoreCase(session.getName())
                    && session.getStringValue().equalsIgnoreCase(START)) {
                ColumnList<String> sessionInfo = baseCassandraDao.readWithKey(
                        ColumnFamily.SESSION_ACTIVITY.getColumnFamily(), session.getName(), 0);
                if (sessionInfo != null) {
                    logger.info("Closing session : {}", session.getName());
                    long endTime = sessionInfo.getLongValue(_END_TIME, 0L);
                    long totalTimeSpent = endTime - sessionInfo.getLongValue(_START_TIME, 0L);
                    ColumnList<String> eventDetail = baseCassandraDao.readWithKey(
                            ColumnFamily.EVENTDETAIL.getColumnFamily(),
                            sessionInfo.getStringValue(_EVENT_ID, null), 0);
                    if (eventDetail != null) {
                        baseCassandraDao.saveStringValue(
                                ColumnFamily.SESSIONS.getColumnFamily(),
                                (gooruUId + SEPERATOR + SESSIONS), session.getName(), STOP, 1);
                        String eventField = eventDetail.getStringValue(FIELDS, null);
                        JSONObject eventJson = new JSONObject(eventField);
                        Event event = gson.fromJson(eventField, Event.class);
                        event.setEndTime(endTime);
                        JSONObject metrics = new JSONObject(event.getMetrics());
                        metrics.put(TOTALTIMEINMS, totalTimeSpent);
                        metrics.put(VIEWS_COUNT, 1L);
                        JSONObject context = new JSONObject(event.getContext());
                        context.put(TYPE, STOP);
                        context.put(LOGGED_BY, SYSTEM);
                        eventJson.put(METRICS, metrics.toString());
                        eventJson.put(CONTEXT, context.toString());
                        StringEntity eventEntity = new StringEntity("[" + eventJson + "]");
                        HttpPost postRequest = new HttpPost(restPoint + LOGGING_URL + apiKey);
                        postRequest.setEntity(eventEntity);
                        postRequest.setHeader(CONTENT_TYPE, CONTENT_TYPE_VALUES);
                        httpClient = new DefaultHttpClient();
                        HttpResponse response = httpClient.execute(postRequest);
                        logger.info("Status : {} ", response.getStatusLine().getStatusCode());
                        logger.info("System logged Event : {} ", eventJson);
                    }
                }
            }
        }
    } catch (Exception e) {
        logger.error("Error while closing events", e);
    }
}
@Override
public boolean deleteMessageByKey(String key) throws MessageQueueException {
    MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);

    String groupRowKey = getCompositeKey(getName(), key);
    try {
        ColumnList<MessageMetadataEntry> columns = keyspace
                .prepareQuery(keyIndexColumnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getRow(groupRowKey)
                .withColumnRange(
                        metadataSerializer
                                .buildRange()
                                .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                .build())
                .execute()
                .getResult();

        for (Column<MessageMetadataEntry> entry : columns) {
            String[] parts = splitCompositeKey(entry.getName().getName());
            String shardKey = parts[0];
            MessageQueueEntry queueEntry = new MessageQueueEntry(parts[1]);

            mb.withRow(queueColumnFamily, shardKey).deleteColumn(queueEntry);
        }
        mb.withRow(keyIndexColumnFamily, groupRowKey).delete();
    } catch (NotFoundException e) {
        return false;
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }

    try {
        mb.execute();
    } catch (ConnectionException e) {
        throw new MessageQueueException("Error deleting queue item " + groupRowKey, e);
    }
    return true;
}
@Override
boolean removeColumn(
        String recordKey,
        Column<CompositeColumnName> column,
        String className,
        RowMutator mutator,
        Map<String, List<Column<CompositeColumnName>>> fieldColumnMap) {
    String rowKey = getRowKey(column);

    ColumnListMutation<IndexColumnName> indexColList = mutator.getIndexColumnList(indexCF, rowKey);
    UUID uuid = column.getName().getTimeUUID();
    IndexColumnName indexEntry = new IndexColumnName(className, recordKey, column.getStringValue(), uuid);
    indexColList.deleteColumn(indexEntry);
    return true;
}
private List<MessageContext> readMessagesInternal(
        String shardName,
        int itemsToPop,
        int lockColumnCount,
        MessageQueueEntry lockColumn,
        ColumnListMutation<MessageQueueEntry> rowMutation,
        MutationBatch m,
        long curTimeMicros)
        throws BusyLockException, MessageQueueException {
    try {
        List<MessageContext> entries = Lists.newArrayList();

        RangeEndpoint re = ShardedDistributedMessageQueue.entrySerializer
                .makeEndpoint((byte) MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
                .append((byte) 0, Equality.EQUAL);
        if (lockColumn != null) {
            re.append(lockColumn.getTimestamp(), Equality.LESS_THAN_EQUALS);
        } else {
            re.append(TimeUUIDUtils.getMicrosTimeUUID(curTimeMicros), Equality.LESS_THAN_EQUALS);
        }

        ColumnList<MessageQueueEntry> result = queue.keyspace
                .prepareQuery(queue.queueColumnFamily)
                .setConsistencyLevel(queue.consistencyLevel)
                .getKey(shardName)
                .withColumnRange(
                        new RangeBuilder()
                                .setLimit(itemsToPop + (lockColumn == null ? 0 : (lockColumnCount + 1)))
                                .setEnd(re.toBytes())
                                .build())
                .execute()
                .getResult();

        for (Column<MessageQueueEntry> column : result) {
            if (itemsToPop == 0) {
                break;
            }

            MessageQueueEntry entry = column.getName();
            switch (entry.getType()) {
                case Lock:
                    // TODO: Track number of locks read and make sure we don't exceed itemsToPop
                    // We have the lock
                    if (lockColumn != null && entry.getState() == MessageQueueEntryState.Acquired) {
                        if (!entry.getTimestamp().equals(lockColumn.getTimestamp())) {
                            throw new BusyLockException("Someone else snuck in");
                        }
                    }
                    break;

                case Message: {
                    try {
                        itemsToPop--;

                        // First, we always want to remove the old item
                        String messageId = queue.getCompositeKey(shardName, entry.getMessageId());
                        rowMutation.deleteColumn(entry);

                        // Next, parse the message metadata and add a timeout entry
                        final Message message = queue.extractMessageFromColumn(column);

                        // Update the message state
                        if (message != null) {
                            MessageContext context = new MessageContext();
                            context.setMessage(message);

                            // Message has a trigger so we need to figure out if it is an
                            // unfinished repeating trigger and re-add it.
                            if (message.hasTrigger()) {
                                // Read back all messageIds associated with this key and check
                                // to see if we have duplicates.
                                String groupRowKey = queue.getCompositeKey(queue.getName(), message.getKey());
                                try {
                                    // Use consistency level
                                    ColumnList<MessageMetadataEntry> columns = queue.keyspace
                                            .prepareQuery(queue.keyIndexColumnFamily)
                                            .getRow(groupRowKey)
                                            .withColumnRange(
                                                    ShardedDistributedMessageQueue.metadataSerializer
                                                            .buildRange()
                                                            .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                                            .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                                                            .build())
                                            .execute()
                                            .getResult();

                                    MessageMetadataEntry mostRecentMessageMetadata = null;
                                    long mostRecentTriggerTime = 0;
                                    for (Column<MessageMetadataEntry> currMessageEntry : columns) {
                                        MessageQueueEntry pendingMessageEntry =
                                                MessageQueueEntry.fromMetadata(currMessageEntry.getName());
                                        if (currMessageEntry.getTtl() == 0) {
                                            long currMessageTriggerTime =
                                                    pendingMessageEntry.getTimestamp(TimeUnit.MICROSECONDS);

                                            if (mostRecentMessageMetadata == null) {
                                                // First message we found, so treat as the most recent
                                                mostRecentMessageMetadata = currMessageEntry.getName();
                                                mostRecentTriggerTime = currMessageTriggerTime;
                                            } else {
                                                // This message's trigger time is after what we thought
                                                // was the most recent. Discard the previous 'most'
                                                // recent and accept this one instead.
                                                if (currMessageTriggerTime > mostRecentTriggerTime) {
                                                    LOG.warn("Need to discard : " + entry.getMessageId()
                                                            + " => " + mostRecentMessageMetadata.getName());
                                                    m.withRow(queue.keyIndexColumnFamily,
                                                            queue.getCompositeKey(queue.getName(), message.getKey()))
                                                            .putEmptyColumn(mostRecentMessageMetadata, queue.metadataDeleteTTL);
                                                    mostRecentTriggerTime = currMessageTriggerTime;
                                                    mostRecentMessageMetadata = currMessageEntry.getName();
                                                } else {
                                                    LOG.warn("Need to discard : " + entry.getMessageId()
                                                            + " => " + currMessageEntry.getName());
                                                    m.withRow(queue.keyIndexColumnFamily,
                                                            queue.getCompositeKey(queue.getName(), message.getKey()))
                                                            .putEmptyColumn(currMessageEntry.getName(), queue.metadataDeleteTTL);
                                                }
                                            }
                                        }
                                    }

                                    if (mostRecentMessageMetadata != null) {
                                        if (!mostRecentMessageMetadata.getName().endsWith(entry.getMessageId())) {
                                            throw new DuplicateMessageException("Duplicate trigger for " + messageId);
                                        }
                                    }
                                } catch (NotFoundException e) {
                                } catch (ConnectionException e) {
                                    throw new MessageQueueException("Error fetching row " + groupRowKey, e);
                                }

                                // Update the trigger
                                final Message nextMessage;
                                Trigger trigger = message.getTrigger().nextTrigger();
                                if (trigger != null) {
                                    nextMessage = message.clone();
                                    nextMessage.setTrigger(trigger);
                                    context.setNextMessage(nextMessage);
                                    if (message.isAutoCommitTrigger()) {
                                        queue.fillMessageMutation(m, nextMessage);
                                    }
                                }
                            }

                            // Message has a key so we remove this item from the messages-by-key
                            // index. A timeout item will be added later.
                            if (message.hasKey()) {
                                m.withRow(queue.keyIndexColumnFamily,
                                        queue.getCompositeKey(queue.getName(), message.getKey()))
                                        .putEmptyColumn(MessageMetadataEntry.newMessageId(messageId),
                                                queue.metadataDeleteTTL);
                                LOG.debug("Removing from key : "
                                        + queue.getCompositeKey(queue.getName(), message.getKey())
                                        + " : " + messageId);

                                if (message.isKeepHistory()) {
                                    MessageHistory history = context.getHistory();
                                    history.setToken(entry.getTimestamp());
                                    history.setStartTime(curTimeMicros);
                                    history.setTriggerTime(message.getTrigger().getTriggerTime());
                                    history.setStatus(MessageStatus.RUNNING);
                                    try {
                                        m.withRow(queue.historyColumnFamily, message.getKey())
                                                .putColumn(entry.getTimestamp(),
                                                        queue.serializeToString(history),
                                                        queue.metadata.getHistoryTtl());
                                    } catch (Exception e) {
                                        LOG.warn("Error serializing history for key '" + message.getKey() + "'", e);
                                    }
                                }
                            }

                            // Message has a timeout so we add a timeout event.
                            if (message.getTimeout() > 0) {
                                MessageQueueEntry timeoutEntry = MessageQueueEntry.newMessageEntry(
                                        (byte) 0,
                                        TimeUUIDUtils.getMicrosTimeUUID(
                                                curTimeMicros
                                                        + TimeUnit.MICROSECONDS.convert(message.getTimeout(), TimeUnit.SECONDS)
                                                        + (queue.counter.incrementAndGet() % 1000)),
                                        MessageQueueEntryState.Busy);

                                message.setToken(timeoutEntry.getTimestamp());
                                message.setRandom(timeoutEntry.getRandom());

                                m.withRow(queue.queueColumnFamily, queue.getShardKey(message))
                                        .putColumn(timeoutEntry, column.getStringValue(),
                                                queue.metadata.getRetentionTimeout());

                                MessageMetadataEntry messageIdEntry = MessageMetadataEntry.newMessageId(
                                        queue.getCompositeKey(queue.getShardKey(message), timeoutEntry.getMessageId()));

                                // Add the timeout column to the key
                                if (message.hasKey()) {
                                    m.withRow(queue.keyIndexColumnFamily,
                                            queue.getCompositeKey(queue.getName(), message.getKey()))
                                            .putEmptyColumn(messageIdEntry, queue.metadata.getRetentionTimeout());
                                }

                                context.setAckMessageId(messageIdEntry.getName());
                            } else {
                                message.setToken(null);
                            }

                            // Update some stats
                            switch (entry.getState()) {
                                case Waiting:
                                    queue.stats.incProcessCount();
                                    break;
                                case Busy:
                                    queue.stats.incReprocessCount();
                                    break;
                                default:
                                    LOG.warn("Unknown message state: " + entry.getState());
                                    // TODO:
                                    break;
                            }

                            entries.add(context);
                        } else {
                            queue.stats.incInvalidMessageCount();
                            // TODO: Add to poison queue
                        }
                    } catch (DuplicateMessageException e) {
                        // OK to ignore this error. All the proper columns will have been
                        // deleted in the batch.
                    }
                    break;
                }

                default: {
                    // TODO: Error: Unknown type
                    break;
                }
            }
        }
        return entries;
    } catch (BusyLockException e) {
        queue.stats.incLockContentionCount();
        throw e;
    } catch (Exception e) {
        throw new MessageQueueException("Error processing queue shard : " + shardName, e);
    } finally {
        try {
            m.execute();
        } catch (Exception e) {
            throw new MessageQueueException("Error processing queue shard : " + shardName, e);
        }
    }
}
List<MessageContext> readMessagesFromShardUsingDefaultLock(String shardName, int itemsToPop)
        throws MessageQueueException, BusyLockException {
    MutationBatch m = null;
    MessageQueueEntry lockColumn = null;
    ColumnListMutation<MessageQueueEntry> rowMutation = null;

    int lockColumnCount = 0;

    // Try locking first
    try {
        // 1. Write the lock column
        lockColumn = MessageQueueEntry.newLockEntry(MessageQueueEntryState.None);
        long curTimeMicros = TimeUUIDUtils.getTimeFromUUID(lockColumn.getTimestamp());
        m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
        m.withRow(queue.queueColumnFamily, shardName)
                .putColumn(lockColumn, curTimeMicros + queue.lockTimeout, queue.lockTtl);
        m.execute();

        // 2. Read back lock columns and entries
        ColumnList<MessageQueueEntry> result = queue.keyspace
                .prepareQuery(queue.queueColumnFamily)
                .setConsistencyLevel(queue.consistencyLevel)
                .getKey(shardName)
                .withColumnRange(
                        ShardedDistributedMessageQueue.entrySerializer
                                .buildRange()
                                .greaterThanEquals((byte) MessageQueueEntryType.Lock.ordinal())
                                .lessThanEquals((byte) MessageQueueEntryType.Lock.ordinal())
                                .build())
                .execute()
                .getResult();

        m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
        rowMutation = m.withRow(queue.queueColumnFamily, shardName);
        rowMutation.deleteColumn(lockColumn);

        int lockCount = 0;
        boolean lockAcquired = false;
        lockColumnCount = result.size();
        for (Column<MessageQueueEntry> column : result) {
            MessageQueueEntry lock = column.getName();
            if (lock.getType() == MessageQueueEntryType.Lock) {
                lockColumnCount++;
                // Stale lock so we can discard it
                if (column.getLongValue() < curTimeMicros) {
                    queue.stats.incExpiredLockCount();
                    rowMutation.deleteColumn(lock);
                } else if (lock.getState() == MessageQueueEntryState.Acquired) {
                    throw new BusyLockException("Not first lock");
                } else {
                    lockCount++;
                    if (lockCount == 1 && lock.getTimestamp().equals(lockColumn.getTimestamp())) {
                        lockAcquired = true;
                    }
                }

                if (!lockAcquired) {
                    throw new BusyLockException("Not first lock");
                }

                // Write the acquired lock column
                lockColumn = MessageQueueEntry.newLockEntry(
                        lockColumn.getTimestamp(), MessageQueueEntryState.Acquired);
                rowMutation.putColumn(lockColumn, curTimeMicros + queue.lockTimeout, queue.lockTtl);
            }
        }
    } catch (BusyLockException e) {
        queue.stats.incLockContentionCount();
        throw e;
    } catch (ConnectionException e) {
        LOG.error("Error reading shard " + shardName, e);
        throw new MessageQueueException("Error", e);
    } finally {
        try {
            m.execute();
        } catch (Exception e) {
            throw new MessageQueueException("Error committing lock", e);
        }
    }

    long curTimeMicros = TimeUUIDUtils.getMicrosTimeFromUUID(lockColumn.getTimestamp());

    m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);

    // First, release the lock column
    rowMutation = m.withRow(queue.queueColumnFamily, shardName);
    rowMutation.deleteColumn(lockColumn);

    return readMessagesInternal(
            shardName, itemsToPop, lockColumnCount, lockColumn, rowMutation, m, curTimeMicros);
}
@Override
public List<Entry> getSlice(
        ByteBuffer key, ByteBuffer columnStart, ByteBuffer columnEnd, int limit, TransactionHandle txh)
        throws StorageException {
    /*
     * The following hideous cast dance avoids a type-erasure error in the
     * RowQuery<K, V> type that emerges when K=V=ByteBuffer. Specifically,
     * these two methods erase to the same signature after generic reduction
     * during compilation:
     *
     * RowQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count)
     * RowQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count)
     *
     * The compiler substitutes ByteBuffer=C for both startColumn and
     * endColumn, compares it to its identical twin with that type
     * hard-coded, and dies.
     *
     * Here's the compiler error I received when attempting to compile this
     * code without the following casts. I used Oracle JDK 6 Linux x86_64.
     *
     * AstyanaxOrderedKeyColumnValueStore.java:[108,4] reference to
     * withColumnRange is ambiguous, both method
     * withColumnRange(C,C,boolean,int) in
     * com.netflix.astyanax.query.RowQuery<java.nio.ByteBuffer,java.nio.ByteBuffer>
     * and method
     * withColumnRange(java.nio.ByteBuffer,java.nio.ByteBuffer,boolean,int) in
     * com.netflix.astyanax.query.RowQuery<java.nio.ByteBuffer,java.nio.ByteBuffer>
     * match
     */
    @SuppressWarnings("rawtypes")
    RowQuery rq = (RowQuery) keyspace
            .prepareQuery(cf)
            .setConsistencyLevel(readLevel)
            .withRetryPolicy(retryPolicy.duplicate())
            .getKey(key);
    // RowQuery<ByteBuffer, ByteBuffer> rq = keyspace.prepareQuery(cf).getKey(key);
    rq.withColumnRange(columnStart, columnEnd, false, limit + 1);

    OperationResult<ColumnList<ByteBuffer>> r;
    try {
        @SuppressWarnings("unchecked")
        OperationResult<ColumnList<ByteBuffer>> tmp =
                (OperationResult<ColumnList<ByteBuffer>>) rq.execute();
        r = tmp;
    } catch (ConnectionException e) {
        throw new TemporaryStorageException(e);
    }

    List<Entry> result = new ArrayList<Entry>(r.getResult().size());
    int i = 0;
    for (Column<ByteBuffer> c : r.getResult()) {
        ByteBuffer colName = c.getName();
        if (colName.equals(columnEnd)) {
            break;
        }
        result.add(new Entry(colName, c.getByteBufferValue()));
        if (++i == limit) {
            break;
        }
    }
    return result;
}
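The erasure clash the comment above describes can be reproduced in isolation. The toy interface below (not part of Astyanax) declares the same two overloads; any call through a reference where C = ByteBuffer is ambiguous, which is why the raw-typed RowQuery is needed:

// Toy reproduction of the ambiguity described above.
interface RowQueryLike<K, C> {
    RowQueryLike<K, C> withColumnRange(C start, C end, boolean reversed, int count);
    RowQueryLike<K, C> withColumnRange(ByteBuffer start, ByteBuffer end, boolean reversed, int count);
}
// Given RowQueryLike<ByteBuffer, ByteBuffer> q, both overloads apply and neither
// is more specific, so the call below fails to compile:
// q.withColumnRange(buf1, buf2, false, 10); // error: reference to withColumnRange is ambiguous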
@Test
public void testMultiRowUniqueness() {
    DedicatedMultiRowUniquenessConstraint<UUID> constraint =
            new DedicatedMultiRowUniquenessConstraint<UUID>(
                    keyspace, TimeUUIDUtils.getUniqueTimeUUIDinMicros())
                    .withConsistencyLevel(ConsistencyLevel.CL_ONE)
                    .withRow(CF_USER_UNIQUE_UUID, "user1")
                    .withRow(CF_EMAIL_UNIQUE_UUID, "*****@*****.**");

    DedicatedMultiRowUniquenessConstraint<UUID> constraint2 =
            new DedicatedMultiRowUniquenessConstraint<UUID>(
                    keyspace, TimeUUIDUtils.getUniqueTimeUUIDinMicros())
                    .withConsistencyLevel(ConsistencyLevel.CL_ONE)
                    .withRow(CF_USER_UNIQUE_UUID, "user1")
                    .withRow(CF_EMAIL_UNIQUE_UUID, "*****@*****.**");

    // getUniqueColumn() should fail before the constraint has been acquired.
    try {
        Column<UUID> c = constraint.getUniqueColumn();
        Assert.fail();
    } catch (Exception e) {
        LOG.info(e.getMessage());
    }

    try {
        constraint.acquire();
        Column<UUID> c = constraint.getUniqueColumn();
        LOG.info("Unique column is " + c.getName());

        // A second constraint on the same rows must not be acquirable.
        try {
            constraint2.acquire();
            Assert.fail("Should already be acquired");
        } catch (NotUniqueException e) {
            // Expected
        } catch (Exception e) {
            e.printStackTrace();
            Assert.fail();
        } finally {
            try {
                constraint2.release();
            } catch (Exception e) {
                e.printStackTrace();
                Assert.fail();
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail();
    } finally {
        try {
            constraint.release();
        } catch (Exception e) {
            e.printStackTrace();
            Assert.fail();
        }
    }

    // Once the first constraint is released, the second can acquire the same rows.
    try {
        constraint2.acquire();
        Column<UUID> c = constraint.getUniqueColumn();
        LOG.info("Unique column is " + c.getName());
    } catch (NotUniqueException e) {
        Assert.fail("Should already be unique");
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail();
    } finally {
        try {
            constraint2.release();
        } catch (Exception e) {
            e.printStackTrace();
            Assert.fail();
        }
    }
}
String getRowKey(Column<CompositeColumnName> column) {
    return column.getName().getTwo();
}
@Override
protected <T> T createQueryHit(final QueryResult<T> result, Column<IndexColumnName> column) {
    return result.createQueryHit(
            getURI(column), column.getName().getThree(), column.getName().getTimeUUID());
}
@Override
protected URI getURI(Column<IndexColumnName> col) {
    return URI.create(col.getName().getFour());
}
@Override
public UniqueValue parseColumn(final Column<UniqueFieldEntry> column) {
    final UniqueFieldEntry entry = column.getName();
    return new UniqueValueImpl(entry.getField(), entityId, entry.getVersion());
}
public static void main(String[] args) throws ConnectionException {
    String[] calles_28001 = {"Alcala", "Preciados", "Gran Via", "Princesa"};
    String[] calles_28002 = {"Castellana", "Goya", "Serrano", "Velazquez"};
    int index_28001 = 0;
    int index_28002 = 0;

    List<User> users = new ArrayList<User>();
    for (int i = 0; i < 10; i++) {
        String id = (i + 1) + "";
        String email = "user" + id + "@void.com";
        String nombre = "nombre_" + id;
        String cp;
        String calle;
        if (i % 2 == 0) {
            cp = "28001";
            calle = calles_28001[index_28001];
            index_28001++;
            index_28001 = index_28001 % 4;
        } else {
            cp = "28002";
            calle = calles_28002[index_28002];
            index_28002++;
            index_28002 = index_28002 % 4;
        }
        User user = new User(id, email, nombre, cp, calle);
        users.add(user);
    }

    // Connect and create the column family
    Keyspace ksUsers = Utils.getKeyspace("utad");
    String columnFamily = "compositeKeys";
    ColumnFamily<String, String> cfUsers = new ColumnFamily<String, String>(
            columnFamily, StringSerializer.get(), StringSerializer.get());
    try {
        ksUsers.dropColumnFamily(columnFamily);
    } catch (Exception e) {
        System.out.println("Column family to drop does not exist: " + columnFamily);
    }
    try {
        ksUsers.createColumnFamily(
                cfUsers,
                ImmutableMap.<String, Object>builder()
                        .put("key_validation_class", "BytesType")
                        .put("comparator_type", "BytesType")
                        .build());
    } catch (Exception e) {
        System.out.println("Error creating column family: " + columnFamily + " " + e.getMessage());
    }

    MutationBatch m = ksUsers.prepareMutationBatch();
    String rowKey = "usersByCPAddress";
    ColumnListMutation<String> clm = m.withRow(cfUsers, rowKey);

    System.out.println("\nWriting the data");
    for (User user : users) {
        String id = user.id;
        String cp = user.cp;
        String nombre = user.nombre;
        String email = user.email;
        String calle = user.calle;

        // Write one column per user; key and value each pack several fields.
        // Note: every column is also written immediately via prepareColumnMutation(),
        // so the batch 'm' is never executed.
        String key = id + ":" + cp + ":" + calle;
        String value = id + ":" + nombre + ":" + email;
        clm.putColumn(key, value);
        ksUsers.prepareColumnMutation(cfUsers, rowKey, key).putValue(value, null).execute();
    }

    // Read back the result
    System.out.println("\nReading the result");
    RowQuery<String, String> query = ksUsers
            .prepareQuery(cfUsers)
            .getKey(rowKey)
            .withColumnRange(new RangeBuilder().build())
            .autoPaginate(true);
    ColumnList<String> columns = query.execute().getResult();
    for (Column<String> c : columns) {
        String key = c.getName();
        String value = c.getStringValue();

        System.out.println("\nkey");
        String[] ksplit = key.split(":");
        for (String string : ksplit) {
            System.out.println("\t" + string);
        }

        System.out.println("value");
        String[] kvalue = value.split(":");
        for (String string : kvalue) {
            System.out.println("\t" + string);
        }
    }
}