/** * Do the column update or delete for the given column and row key * * @param applicationScope We need to use this when getting the keyspace * @param uniqueValue The unique value to write * @param op The operation to write */ private MutationBatch doWrite( ApplicationScope applicationScope, UniqueValue uniqueValue, RowOp op) { final MutationBatch batch = keyspace.prepareMutationBatch(); final Id applicationId = applicationScope.getApplication(); final FieldKey fieldKey = createUniqueValueKey( applicationId, uniqueValue.getEntityId().getType(), uniqueValue.getField()); op.doLookup(batch.withRow(CF_UNIQUE_VALUES, ScopedRowKey.fromKey(applicationId, fieldKey))); final EntityKey entityKey = createEntityUniqueLogKey(applicationId, uniqueValue.getEntityId()); op.doLog( batch.withRow(CF_ENTITY_UNIQUE_VALUE_LOG, ScopedRowKey.fromKey(applicationId, entityKey))); if (log.isDebugEnabled()) { log.debug( "Writing unique value version={} name={} value={} ", new Object[] { uniqueValue.getEntityVersion(), uniqueValue.getField().getName(), uniqueValue.getField().getValue() }); } return batch; }
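// For context, a minimal sketch of the RowOp strategy used above (method shapes
// inferred from the two call sites; the real interface and its generics may
// differ): one implementation writes the lookup and log columns, another
// deletes them, letting doWrite() back both the save and the delete paths.
private interface RowOp {
  // Write or delete the unique-value lookup column
  void doLookup(ColumnListMutation<?> colMutation);

  // Write or delete the entry in the entity's unique-value log
  void doLog(ColumnListMutation<?> colMutation);
}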
@Override public void mutateMany(Map<ByteBuffer, Mutation> mutations, TransactionHandle txh) throws StorageException { // null txh means a Transaction is calling this method if (null != txh) { // non-null txh -> make sure locks are valid AstyanaxTransaction atxh = (AstyanaxTransaction) txh; if (!atxh.isMutationStarted()) { // This is the first mutate call in the transaction atxh.mutationStarted(); // Verify all blind lock claims now atxh.verifyAllLockClaims(); // throws GSE and unlocks everything on any lock failure } } MutationBatch m = keyspace .prepareMutationBatch() .setConsistencyLevel(writeLevel) .withRetryPolicy(retryPolicy.duplicate()); final long delTS = TimestampProvider.getApproxNSSinceEpoch(false); final long addTS = TimestampProvider.getApproxNSSinceEpoch(true); for (Map.Entry<ByteBuffer, Mutation> ent : mutations.entrySet()) { // The CLMs for additions and deletions are separated because // Astyanax's operation timestamp cannot be set on a per-delete // or per-addition basis. ColumnListMutation<ByteBuffer> dels = m.withRow(cf, ent.getKey()); dels.setTimestamp(delTS); ColumnListMutation<ByteBuffer> adds = m.withRow(cf, ent.getKey()); adds.setTimestamp(addTS); Mutation titanMutation = ent.getValue(); if (titanMutation.hasDeletions()) { for (ByteBuffer b : titanMutation.getDeletions()) { dels.deleteColumn(b); } } if (titanMutation.hasAdditions()) { for (Entry e : titanMutation.getAdditions()) { adds.putColumn(e.getColumn(), e.getValue(), null); } } } try { m.execute(); } catch (ConnectionException e) { throw new TemporaryStorageException(e); } }
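// A self-contained sketch (hypothetical column family and row key, standard
// Astyanax API) of the pattern above: because the operation timestamp is set
// per ColumnListMutation rather than per column, deletions and additions for
// the same row go into two separate mutations, with the delete timestamp
// strictly lower so that a delete-then-add of the same column resolves to the add.
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.StringSerializer;

public class TimestampedMutationExample {
  private static final ColumnFamily<String, String> CF =
      new ColumnFamily<String, String>("Example", StringSerializer.get(), StringSerializer.get());

  public static void replaceColumn(Keyspace keyspace, String rowKey) throws Exception {
    MutationBatch m = keyspace.prepareMutationBatch();
    long now = System.currentTimeMillis() * 1000; // microseconds, Cassandra's convention

    ColumnListMutation<String> dels = m.withRow(CF, rowKey);
    dels.setTimestamp(now - 1); // deletes sort strictly before adds
    dels.deleteColumn("status");

    ColumnListMutation<String> adds = m.withRow(CF, rowKey);
    adds.setTimestamp(now);
    adds.putColumn("status", "active", null);

    m.execute();
  }
}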
@Override public void ackMessage(MessageContext context) throws MessageQueueException { MutationBatch mb = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel); fillAckMutation(context, mb); try { mb.execute(); } catch (ConnectionException e) { throw new MessageQueueException("Failed to ack message", e); } }
/* * (non-Javadoc) * * @see * backtype.storm.contrib.cassandra.client.CassandraClient#writeTuple(backtype * .storm.tuple.Tuple, * backtype.storm.contrib.cassandra.bolt.mapper.TupleMapper) */ @Override public void writeTuple(Tuple input, TupleMapper<T> tupleMapper) throws Exception { String columnFamilyName = tupleMapper.mapToColumnFamily(input); String rowKey = (String) tupleMapper.mapToRowKey(input); MutationBatch mutation = keyspace.prepareMutationBatch(); ColumnFamily<String, T> columnFamily = new ColumnFamily<String, T>( columnFamilyName, StringSerializer.get(), this.getColumnNameSerializer(tupleMapper)); this.addTupleToMutation(input, columnFamily, rowKey, mutation, tupleMapper); mutation.execute(); }
@Override
public void close() throws IOException {
  try {
    if (mb != null && !mb.isEmpty()) {
      mb.execute();
    }
  } catch (ConnectionException e) {
    // Surface the flush failure instead of swallowing it with printStackTrace()
    throw new IOException("Failed to flush pending mutations on close", e);
  } finally {
    context.shutdown();
  }
}
@Override public void mutateMany(Map<String, Map<ByteBuffer, Mutation>> batch, StoreTransaction txh) throws StorageException { MutationBatch m = keyspaceContext .getEntity() .prepareMutationBatch() .setConsistencyLevel(getTx(txh).getWriteConsistencyLevel().getAstyanaxConsistency()) .withRetryPolicy(retryPolicy.duplicate()); final long delTS = TimeUtility.getApproxNSSinceEpoch(false); final long addTS = TimeUtility.getApproxNSSinceEpoch(true); for (Map.Entry<String, Map<ByteBuffer, Mutation>> batchentry : batch.entrySet()) { String storeName = batchentry.getKey(); Preconditions.checkArgument( openStores.containsKey(storeName), "Store cannot be found: " + storeName); ColumnFamily<ByteBuffer, ByteBuffer> columnFamily = openStores.get(storeName).getColumnFamily(); Map<ByteBuffer, Mutation> mutations = batchentry.getValue(); for (Map.Entry<ByteBuffer, Mutation> ent : mutations.entrySet()) { // The CLMs for additions and deletions are separated because // Astyanax's operation timestamp cannot be set on a per-delete // or per-addition basis. ColumnListMutation<ByteBuffer> dels = m.withRow(columnFamily, ent.getKey()); dels.setTimestamp(delTS); ColumnListMutation<ByteBuffer> adds = m.withRow(columnFamily, ent.getKey()); adds.setTimestamp(addTS); Mutation titanMutation = ent.getValue(); if (titanMutation.hasDeletions()) { for (ByteBuffer b : titanMutation.getDeletions()) { dels.deleteColumn(b); } } if (titanMutation.hasAdditions()) { for (Entry e : titanMutation.getAdditions()) { adds.putColumn(e.getColumn(), e.getValue(), null); } } } } try { m.execute(); } catch (ConnectionException e) { throw new TemporaryStorageException(e); } }
String fillMessageMutation(MutationBatch mb, Message message) throws MessageQueueException {
  // Get the execution time from the message, or use the current time so it runs immediately
  long curTimeMicros;
  if (!message.hasTrigger()) {
    curTimeMicros =
        TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
  } else {
    curTimeMicros =
        TimeUnit.MICROSECONDS.convert(message.getTrigger().getTriggerTime(), TimeUnit.MILLISECONDS);
  }
  curTimeMicros += (counter.incrementAndGet() % 1000);

  // Update the message for the new token
  message.setToken(TimeUUIDUtils.getMicrosTimeUUID(curTimeMicros));

  // Set up the queue entry
  MessageQueueEntry entry =
      MessageQueueEntry.newMessageEntry(
          message.getPriority(), message.getToken(), MessageQueueEntryState.Waiting);

  // Convert the message object to JSON
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  try {
    mapper.writeValue(baos, message);
    baos.flush();
  } catch (Exception e) {
    throw new MessageQueueException("Failed to serialize message data: " + message, e);
  }

  // Write the queue entry
  String shardKey = getShardKey(message);
  mb.withRow(queueColumnFamily, shardKey)
      .putColumn(entry, new String(baos.toByteArray()), metadata.getRetentionTimeout());

  // Write the lookup from queue key to queue entry
  if (message.hasKey()) {
    mb.withRow(keyIndexColumnFamily, getCompositeKey(getName(), message.getKey()))
        .putEmptyColumn(
            MessageMetadataEntry.newMessageId(getCompositeKey(shardKey, entry.getMessageId())),
            metadata.getRetentionTimeout());
  }

  // Allow hook processing
  for (MessageQueueHooks hook : hooks) {
    hook.beforeSendMessage(message, mb);
  }

  // Update state and return the token
  stats.incSendMessageCount();
  return getCompositeKey(shardKey, entry.getMessageId());
}
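// A hedged sketch of how fillMessageMutation() is typically driven (the method
// name sendMessage and its surrounding class are assumptions): prepare a batch,
// fill it, execute, and hand the composite message id back to the caller for
// later acks or deletes.
public String sendMessage(Message message) throws MessageQueueException {
  MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
  String messageId = fillMessageMutation(mb, message);
  try {
    mb.execute();
  } catch (ConnectionException e) {
    throw new MessageQueueException("Failed to send message", e);
  }
  return messageId;
}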
@Override public void ackPoisonMessage(MessageContext context) throws MessageQueueException { // TODO: Remove bad message and add to poison queue MutationBatch mb = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel); fillAckMutation(context, mb); try { mb.execute(); } catch (ConnectionException e) { queue.stats.incPersistError(); throw new MessageQueueException("Failed to ack messages", e); } }
@Override
public void map(
    LongWritable key,
    Text value,
    OutputCollector<NullWritable, NullWritable> collector,
    Reporter reporter)
    throws IOException {
  if (value.getLength() == 0) return;

  // Text.getBytes() returns the backing array, which is only valid up to
  // getLength(); parse just that slice instead of the whole array.
  Map<String, Object> msg = mapper.readValue(value.getBytes(), 0, value.getLength(), Map.class);

  String rowId = createRowId(msg);
  if (rowId == null) {
    // TODO ... Error Handler
    return;
  }

  if (mb == null) {
    mb = ks.prepareMutationBatch();
  }

  ColumnListMutation<String> c = mb.withRow(cf, rowId);
  c.putColumn("raw", value.toString(), null);
  if (storeAttirbute) {
    for (String k : msg.keySet()) {
      if (k.startsWith("__")) continue;
      Object v = msg.get(k);
      if (v == null || v.equals("")) continue;
      c.putColumn(k.toLowerCase(), v.toString(), null);
    }
  }

  try {
    // Flush once enough rows have accumulated
    if (mb.getRowCount() > 300) {
      mb.execute();
      mb = null;
    }
  } catch (ConnectionException e) {
    // Drop the failed batch rather than blocking the mapper
    e.printStackTrace();
    mb = null;
  }
}
private void fillAckMutation(MessageContext context, MutationBatch mb) { queue.stats.incAckMessageCount(); Message message = context.getMessage(); // Token refers to the timeout event. If 0 (i.e. no) timeout was specified // then the token will not exist if (message.getToken() != null) { MessageQueueEntry entry = MessageQueueEntry.newBusyEntry(message); // Remove timeout entry from the queue mb.withRow(queue.queueColumnFamily, queue.getShardKey(message)).deleteColumn(entry); // Remove entry lookup from the key, if one exists if (message.hasKey()) { mb.withRow( queue.keyIndexColumnFamily, queue.getCompositeKey(queue.getName(), message.getKey())) .putEmptyColumn( MessageMetadataEntry.newMessageId( queue.getCompositeKey(queue.getShardKey(message), entry.getMessageId())), queue.metadataDeleteTTL); if (message.isKeepHistory()) { MessageHistory history = context.getHistory(); if (history.getStatus() == MessageStatus.RUNNING) { history.setStatus(MessageStatus.DONE); } history.setEndTime( TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS)); try { mb.withRow(queue.historyColumnFamily, message.getKey()) .putColumn( history.getToken(), queue.serializeToString(context.getHistory()), queue.metadata.getHistoryTtl()); // TTL } catch (Exception e) { LOG.warn("Error serializing message history for " + message.getKey(), e); } } } // Run hooks for (MessageQueueHooks hook : queue.hooks) { hook.beforeAckMessage(message, mb); } } if (context.getNextMessage() != null) { try { queue.fillMessageMutation(mb, context.getNextMessage()); } catch (MessageQueueException e) { LOG.warn("Error filling nextMessage for " + message.getKey(), e); } } }
@Override public void clearMessages() throws MessageQueueException { LOG.info("Clearing messages from '" + getName() + "'"); MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel); for (MessageQueueShard partition : shardReaderPolicy.listShards()) { mb.withRow(queueColumnFamily, partition.getName()).delete(); } try { mb.execute(); } catch (ConnectionException e) { throw new MessageQueueException("Failed to clear messages from queue " + getName(), e); } }
/* * (non-Javadoc) * * @see * backtype.storm.contrib.cassandra.client.CassandraClient#writeTuples(java * .util.List, backtype.storm.contrib.cassandra.bolt.mapper.TupleMapper) */ @Override public void writeTuples(List<Tuple> inputs, TupleMapper<T> tupleMapper) throws Exception { MutationBatch mutation = keyspace.prepareMutationBatch(); for (Tuple input : inputs) { String columnFamilyName = tupleMapper.mapToColumnFamily(input); String rowKey = (String) tupleMapper.mapToRowKey(input); ColumnFamily<String, T> columnFamily = new ColumnFamily<String, T>( columnFamilyName, StringSerializer.get(), this.getColumnNameSerializer(tupleMapper)); this.addTupleToMutation(input, columnFamily, rowKey, mutation, tupleMapper); } try { mutation.execute(); } catch (Exception e) { LOG.error("Could not execute mutation.", e); } }
@Override public void deleteMessages(Collection<String> messageIds) throws MessageQueueException { MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel); for (String messageId : messageIds) { String[] parts = splitCompositeKey(messageId); String shardKey = parts[0]; MessageQueueEntry entry = new MessageQueueEntry(parts[1]); mb.withRow(queueColumnFamily, shardKey).deleteColumn(entry); } try { mb.execute(); } catch (ConnectionException e) { throw new MessageQueueException("Error deleting messages " + messageIds, e); } }
@Override public boolean deleteMessageByKey(String key) throws MessageQueueException { MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel); String groupRowKey = getCompositeKey(getName(), key); try { ColumnList<MessageMetadataEntry> columns = keyspace .prepareQuery(keyIndexColumnFamily) .setConsistencyLevel(consistencyLevel) .getRow(groupRowKey) .withColumnRange( metadataSerializer .buildRange() .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal()) .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal()) .build()) .execute() .getResult(); for (Column<MessageMetadataEntry> entry : columns) { String[] parts = splitCompositeKey(entry.getName().getName()); String shardKey = parts[0]; MessageQueueEntry queueEntry = new MessageQueueEntry(parts[1]); mb.withRow(queueColumnFamily, shardKey).deleteColumn(queueEntry); } mb.withRow(keyIndexColumnFamily, groupRowKey).delete(); } catch (NotFoundException e) { return false; } catch (ConnectionException e) { throw new MessageQueueException("Error fetching row " + groupRowKey, e); } try { mb.execute(); } catch (ConnectionException e) { throw new MessageQueueException("Error deleting queue item " + groupRowKey, e); } return true; }
private void addTupleToMutation( Tuple input, ColumnFamily<String, T> columnFamily, String rowKey, MutationBatch mutation, TupleMapper<T> tupleMapper) { Map<T, String> columns = tupleMapper.mapToColumns(input); for (Map.Entry<T, String> entry : columns.entrySet()) { mutation.withRow(columnFamily, rowKey).putColumn(entry.getKey(), entry.getValue(), null); } }
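// For illustration, a minimal TupleMapper implementation. The interface's
// method set is inferred from the calls in the two writeTuple variants and
// addTupleToMutation() above (the real interface may declare more); the column
// family name and key choice here are hypothetical.
public class FieldNameTupleMapper implements TupleMapper<String> {
  @Override
  public String mapToColumnFamily(Tuple tuple) {
    return "TupleLog"; // hypothetical column family name
  }

  @Override
  public Object mapToRowKey(Tuple tuple) {
    return tuple.getString(0); // first field as row key (assumption)
  }

  @Override
  public Map<String, String> mapToColumns(Tuple tuple) {
    // One column per tuple field, named after the field
    Map<String, String> columns = new HashMap<String, String>();
    for (String field : tuple.getFields()) {
      columns.put(field, tuple.getValueByField(field).toString());
    }
    return columns;
  }
}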
List<MessageContext> readMessagesFromShardUsingLockManager(String shardName, int itemsToPop)
    throws MessageQueueException, BusyLockException {
  ShardLock lock = null;
  try {
    lock = queue.lockManager.acquireLock(shardName);

    MutationBatch m =
        queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
    ColumnListMutation<MessageQueueEntry> rowMutation =
        m.withRow(queue.queueColumnFamily, shardName);

    long curTimeMicros =
        TimeUUIDUtils.getMicrosTimeFromUUID(TimeUUIDUtils.getUniqueTimeUUIDinMicros());

    return readMessagesInternal(shardName, itemsToPop, 0, null, rowMutation, m, curTimeMicros);
  } catch (BusyLockException e) {
    queue.stats.incLockContentionCount();
    throw e;
  } catch (Exception e) {
    LOG.error("Error reading shard " + shardName, e);
    throw new MessageQueueException("Error", e);
  } finally {
    queue.lockManager.releaseLock(lock);
  }
}
@Override
public void acquireAndApplyMutation(Function<MutationBatch, Boolean> callback)
    throws NotUniqueException, Exception {
  try {
    // Phase 1: Write a unique column with a TTL so a failed attempt cleans itself up
    MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    if (data == null) {
      m.withRow(columnFamily, key).putEmptyColumn(uniqueColumn, ttl);
    } else {
      m.withRow(columnFamily, key).putColumn(uniqueColumn, data, ttl);
    }
    m.execute();

    // Phase 2: Read back all columns. There should be only 1
    ColumnList<C> result =
        keyspace
            .prepareQuery(columnFamily)
            .setConsistencyLevel(consistencyLevel)
            .getKey(key)
            .execute()
            .getResult();

    if (result.size() != 1) {
      throw new NotUniqueException(key.toString());
    }

    // Phase 3: Re-write the unique column with no TTL, alongside the caller's mutations
    m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
    if (callback != null) callback.apply(m);

    if (data == null) {
      m.withRow(columnFamily, key).putEmptyColumn(uniqueColumn, null);
    } else {
      m.withRow(columnFamily, key).putColumn(uniqueColumn, data, null);
    }
    m.execute();
  } catch (Exception e) {
    release();
    throw e;
  }
}
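// A usage sketch for the two-phase unique-column recipe above. The constraint
// class name UniquenessConstraint and its construction are assumptions; the
// point is the callback, which lets the caller piggyback its own rows onto the
// phase-3 batch so the uniqueness claim and the application write commit in
// one batch.
void claimUniqueEmail(final ColumnFamily<String, String> userCf, UniquenessConstraint constraint)
    throws Exception {
  try {
    constraint.acquireAndApplyMutation(
        new Function<MutationBatch, Boolean>() {
          @Override
          public Boolean apply(MutationBatch mb) {
            // Application write that should only happen if the claim succeeds
            mb.withRow(userCf, "user@example.com").putColumn("status", "created", null);
            return true;
          }
        });
  } catch (NotUniqueException e) {
    // Another writer claimed this key first; surface as a business error
  }
}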
@Test public void twoFieldsPerVersion() throws ConnectionException, InterruptedException { ApplicationScope scope = new ApplicationScopeImpl(new SimpleId("organization")); Id entityId = new SimpleId(UUIDGenerator.newTimeUUID(), "entity"); final UUID version1 = UUIDGenerator.newTimeUUID(); // write V1 of everything IntegerField version1Field1 = new IntegerField("count", 1); StringField version1Field2 = new StringField("field", "v1value"); UniqueValue version1Field1Value = new UniqueValueImpl(version1Field1, entityId, version1); UniqueValue version1Field2Value = new UniqueValueImpl(version1Field2, entityId, version1); final MutationBatch batch = strategy.write(scope, version1Field1Value); batch.mergeShallow(strategy.write(scope, version1Field2Value)); // write V2 of everything final UUID version2 = UUIDGenerator.newTimeUUID(); IntegerField version2Field1 = new IntegerField("count", 2); StringField version2Field2 = new StringField("field", "v2value"); UniqueValue version2Field1Value = new UniqueValueImpl(version2Field1, entityId, version2); UniqueValue version2Field2Value = new UniqueValueImpl(version2Field2, entityId, version2); batch.mergeShallow(strategy.write(scope, version2Field1Value)); batch.mergeShallow(strategy.write(scope, version2Field2Value)); batch.execute(); UniqueValueSet fields = strategy.load( scope, entityId.getType(), Arrays.<Field>asList(version1Field1, version1Field2)); UniqueValue retrieved = fields.getValue(version1Field1.getName()); assertEquals(version1Field1Value, retrieved); retrieved = fields.getValue(version1Field2.getName()); assertEquals(version1Field2Value, retrieved); Iterator<UniqueValue> allFieldsWritten = strategy.getAllUniqueFields(scope, entityId); assertTrue(allFieldsWritten.hasNext()); // test this interface. In most cases, we won't know the field name, so we want them all UniqueValue allFieldsValue = allFieldsWritten.next(); // version 2 fields should come first, ordered by field name assertEquals(version2Field1, allFieldsValue.getField()); assertEquals(version2, allFieldsValue.getEntityVersion()); allFieldsValue = allFieldsWritten.next(); assertEquals(version2Field2, allFieldsValue.getField()); assertEquals(version2, allFieldsValue.getEntityVersion()); // version 1 should come next ordered by field name allFieldsValue = allFieldsWritten.next(); assertEquals(version1Field1, allFieldsValue.getField()); assertEquals(version1, allFieldsValue.getEntityVersion()); allFieldsValue = allFieldsWritten.next(); assertEquals(version1Field2, allFieldsValue.getField()); assertEquals(version1, allFieldsValue.getEntityVersion()); assertFalse(allFieldsWritten.hasNext()); }
public static void createKeyspace() throws Exception {
  keyspaceContext =
      new AstyanaxContext.Builder()
          .forCluster(TEST_CLUSTER_NAME)
          .forKeyspace(TEST_KEYSPACE_NAME)
          .withAstyanaxConfiguration(
              new AstyanaxConfigurationImpl()
                  .setDiscoveryType(NodeDiscoveryType.RING_DESCRIBE)
                  .setConnectionPoolType(ConnectionPoolType.TOKEN_AWARE)
                  .setDiscoveryDelayInSeconds(60000))
          .withConnectionPoolConfiguration(
              new ConnectionPoolConfigurationImpl(TEST_CLUSTER_NAME + "_" + TEST_KEYSPACE_NAME)
                  .setSocketTimeout(30000)
                  .setMaxTimeoutWhenExhausted(2000)
                  .setMaxConnsPerHost(20)
                  .setInitConnsPerHost(10)
                  .setSeeds(SEEDS))
          .withConnectionPoolMonitor(new CountingConnectionPoolMonitor())
          .buildKeyspace(ThriftFamilyFactory.getInstance());

  keyspaceContext.start();
  keyspace = keyspaceContext.getEntity();

  try {
    keyspace.dropKeyspace();
  } catch (Exception e) {
    LOG.info(e.getMessage());
  }

  keyspace.createKeyspace(
      ImmutableMap.<String, Object>builder()
          .put(
              "strategy_options",
              ImmutableMap.<String, Object>builder().put("replication_factor", "1").build())
          .put("strategy_class", "SimpleStrategy")
          .build());

  keyspace.createColumnFamily(CF_USER_UNIQUE_UUID, null);
  keyspace.createColumnFamily(CF_EMAIL_UNIQUE_UUID, null);
  keyspace.createColumnFamily(CF_ALL_ROWS, null);
  keyspace.createColumnFamily(
      LOCK_CF_LONG,
      ImmutableMap.<String, Object>builder()
          .put("default_validation_class", "LongType")
          .put("key_validation_class", "UTF8Type")
          .put("comparator_type", "UTF8Type")
          .build());
  keyspace.createColumnFamily(
      LOCK_CF_STRING,
      ImmutableMap.<String, Object>builder()
          .put("default_validation_class", "UTF8Type")
          .put("key_validation_class", "UTF8Type")
          .put("comparator_type", "UTF8Type")
          .build());
  keyspace.createColumnFamily(
      CF_STANDARD1,
      ImmutableMap.<String, Object>builder()
          .put(
              "column_metadata",
              ImmutableMap.<String, Object>builder()
                  .put(
                      "Index1",
                      ImmutableMap.<String, Object>builder()
                          .put("validation_class", "UTF8Type")
                          .put("index_name", "Index1")
                          .put("index_type", "KEYS")
                          .build())
                  .put(
                      "Index2",
                      ImmutableMap.<String, Object>builder()
                          .put("validation_class", "UTF8Type")
                          .put("index_name", "Index2")
                          .put("index_type", "KEYS")
                          .build())
                  .build())
          .build());
  keyspace.createColumnFamily(UNIQUE_CF, null);

  KeyspaceDefinition ki = keyspaceContext.getEntity().describeKeyspace();
  System.out.println("Describe Keyspace: " + ki.getName());

  try {
    //
    // CF_Super :
    //   'A' :
    //     'a' :
    //       1 : 'Aa1',
    //       2 : 'Aa2',
    //     'b' :
    //       ...
    //     'z' :
    //       ...
    //   'B' :
    //     ...
    //
    // CF_Standard :
    //   'A' :
    //     'a' : 1,
    //     'b' : 2,
    //     ...
    //     'z' : 26,
    //   'B' :
    //     ...
    //
    MutationBatch m;
    OperationResult<Void> result;
    m = keyspace.prepareMutationBatch();

    for (char keyName = 'A'; keyName <= 'Z'; keyName++) {
      String rowKey = Character.toString(keyName);
      ColumnListMutation<String> cfmStandard = m.withRow(CF_STANDARD1, rowKey);
      for (char cName = 'a'; cName <= 'z'; cName++) {
        cfmStandard.putColumn(Character.toString(cName), (int) (cName - 'a') + 1, null);
      }
      cfmStandard.putColumn("Index1", (int) (keyName - 'A') + 1, null);
      cfmStandard.putColumn("Index2", 42, null);
      m.execute();
    }

    m.withRow(CF_STANDARD1, "Prefixes")
        .putColumn("Prefix1_a", 1, null)
        .putColumn("Prefix1_b", 2, null)
        .putColumn("prefix2_a", 3, null);
    result = m.execute();

    m = keyspace.prepareMutationBatch();
    for (int i = 0; i < ALL_ROWS_COUNT; i++) {
      m.withRow(CF_ALL_ROWS, i).putColumn(0, true);
      if (m.getRowCount() == 50) {
        m.execute();
      }
    }
    m.execute();
  } catch (Exception e) {
    System.out.println(e.getMessage());
    Assert.fail();
  }
}
private List<MessageContext> readMessagesInternal( String shardName, int itemsToPop, int lockColumnCount, MessageQueueEntry lockColumn, ColumnListMutation<MessageQueueEntry> rowMutation, MutationBatch m, long curTimeMicros) throws BusyLockException, MessageQueueException { try { List<MessageContext> entries = Lists.newArrayList(); RangeEndpoint re = ShardedDistributedMessageQueue.entrySerializer .makeEndpoint((byte) MessageQueueEntryType.Message.ordinal(), Equality.EQUAL) .append((byte) 0, Equality.EQUAL); if (lockColumn != null) { re.append(lockColumn.getTimestamp(), Equality.LESS_THAN_EQUALS); } else { re.append(TimeUUIDUtils.getMicrosTimeUUID(curTimeMicros), Equality.LESS_THAN_EQUALS); } ColumnList<MessageQueueEntry> result = queue .keyspace .prepareQuery(queue.queueColumnFamily) .setConsistencyLevel(queue.consistencyLevel) .getKey(shardName) .withColumnRange( new RangeBuilder() .setLimit(itemsToPop + (lockColumn == null ? 0 : (lockColumnCount + 1))) .setEnd(re.toBytes()) .build()) .execute() .getResult(); for (Column<MessageQueueEntry> column : result) { if (itemsToPop == 0) { break; } MessageQueueEntry entry = column.getName(); switch (entry.getType()) { case Lock: // TODO: Track number of locks read and make sure we don't exceed itemsToPop // We have the lock if (lockColumn != null && entry.getState() == MessageQueueEntryState.Acquired) { if (!entry.getTimestamp().equals(lockColumn.getTimestamp())) { throw new BusyLockException("Someone else snuck in"); } } break; case Message: { try { itemsToPop--; // First, we always want to remove the old item String messageId = queue.getCompositeKey(shardName, entry.getMessageId()); rowMutation.deleteColumn(entry); // Next, parse the message metadata and add a timeout entry final Message message = queue.extractMessageFromColumn(column); // Update the message state if (message != null) { MessageContext context = new MessageContext(); context.setMessage(message); // Message has a trigger so we need to figure out if it is an // unfinished repeating trigger and re-add it. if (message.hasTrigger()) { // Read back all messageIds associated with this key and check to see if we have // duplicates. String groupRowKey = queue.getCompositeKey(queue.getName(), message.getKey()); try { // Use consistency level ColumnList<MessageMetadataEntry> columns = queue .keyspace .prepareQuery(queue.keyIndexColumnFamily) .getRow(groupRowKey) .withColumnRange( ShardedDistributedMessageQueue.metadataSerializer .buildRange() .greaterThanEquals( (byte) MessageMetadataEntryType.MessageId.ordinal()) .lessThanEquals( (byte) MessageMetadataEntryType.MessageId.ordinal()) .build()) .execute() .getResult(); MessageMetadataEntry mostRecentMessageMetadata = null; long mostRecentTriggerTime = 0; for (Column<MessageMetadataEntry> currMessageEntry : columns) { MessageQueueEntry pendingMessageEntry = MessageQueueEntry.fromMetadata(currMessageEntry.getName()); if (currMessageEntry.getTtl() == 0) { long currMessageTriggerTime = pendingMessageEntry.getTimestamp(TimeUnit.MICROSECONDS); // First message we found, so treat as the most recent if (mostRecentMessageMetadata == null) { mostRecentMessageMetadata = currMessageEntry.getName(); mostRecentTriggerTime = currMessageTriggerTime; } else { // This message's trigger time is after what we thought was the most // recent. 
// Discard the previous 'most' recent and accept this one instead if (currMessageTriggerTime > mostRecentTriggerTime) { LOG.warn( "Need to discard : " + entry.getMessageId() + " => " + mostRecentMessageMetadata.getName()); m.withRow( queue.keyIndexColumnFamily, queue.getCompositeKey(queue.getName(), message.getKey())) .putEmptyColumn( mostRecentMessageMetadata, queue.metadataDeleteTTL); mostRecentTriggerTime = currMessageTriggerTime; mostRecentMessageMetadata = currMessageEntry.getName(); } else { LOG.warn( "Need to discard : " + entry.getMessageId() + " => " + currMessageEntry.getName()); m.withRow( queue.keyIndexColumnFamily, queue.getCompositeKey(queue.getName(), message.getKey())) .putEmptyColumn( currMessageEntry.getName(), queue.metadataDeleteTTL); } } } } if (mostRecentMessageMetadata != null) { if (!mostRecentMessageMetadata.getName().endsWith(entry.getMessageId())) { throw new DuplicateMessageException("Duplicate trigger for " + messageId); } } } catch (NotFoundException e) { } catch (ConnectionException e) { throw new MessageQueueException("Error fetching row " + groupRowKey, e); } // Update the trigger final Message nextMessage; Trigger trigger = message.getTrigger().nextTrigger(); if (trigger != null) { nextMessage = message.clone(); nextMessage.setTrigger(trigger); context.setNextMessage(nextMessage); if (message.isAutoCommitTrigger()) { queue.fillMessageMutation(m, nextMessage); } } } // Message has a key so we remove this item from the messages by key index. // A timeout item will be added later if (message.hasKey()) { m.withRow( queue.keyIndexColumnFamily, queue.getCompositeKey(queue.getName(), message.getKey())) .putEmptyColumn( MessageMetadataEntry.newMessageId(messageId), queue.metadataDeleteTTL); LOG.debug( "Removing from key : " + queue.getCompositeKey(queue.getName(), message.getKey()) + " : " + messageId); if (message.isKeepHistory()) { MessageHistory history = context.getHistory(); history.setToken(entry.getTimestamp()); history.setStartTime(curTimeMicros); history.setTriggerTime(message.getTrigger().getTriggerTime()); history.setStatus(MessageStatus.RUNNING); try { m.withRow(queue.historyColumnFamily, message.getKey()) .putColumn( entry.getTimestamp(), queue.serializeToString(history), queue.metadata.getHistoryTtl()); } catch (Exception e) { LOG.warn("Error serializing history for key '" + message.getKey() + "'", e); } } } // Message has a timeout so we add a timeout event. 
if (message.getTimeout() > 0) { MessageQueueEntry timeoutEntry = MessageQueueEntry.newMessageEntry( (byte) 0, TimeUUIDUtils.getMicrosTimeUUID( curTimeMicros + TimeUnit.MICROSECONDS.convert( message.getTimeout(), TimeUnit.SECONDS) + (queue.counter.incrementAndGet() % 1000)), MessageQueueEntryState.Busy); message.setToken(timeoutEntry.getTimestamp()); message.setRandom(timeoutEntry.getRandom()); m.withRow(queue.queueColumnFamily, queue.getShardKey(message)) .putColumn( timeoutEntry, column.getStringValue(), queue.metadata.getRetentionTimeout()); MessageMetadataEntry messageIdEntry = MessageMetadataEntry.newMessageId( queue.getCompositeKey( queue.getShardKey(message), timeoutEntry.getMessageId())); // Add the timeout column to the key if (message.hasKey()) { m.withRow( queue.keyIndexColumnFamily, queue.getCompositeKey(queue.getName(), message.getKey())) .putEmptyColumn(messageIdEntry, queue.metadata.getRetentionTimeout()); } context.setAckMessageId(messageIdEntry.getName()); } else { message.setToken(null); } // Update some stats switch (entry.getState()) { case Waiting: queue.stats.incProcessCount(); break; case Busy: queue.stats.incReprocessCount(); break; default: LOG.warn("Unknown message state: " + entry.getState()); // TODO: break; } entries.add(context); } else { queue.stats.incInvalidMessageCount(); // TODO: Add to poison queue } } catch (DuplicateMessageException e) { // OK to ignore this error. All the proper columns will have been deleted in the // batch. } break; } default: { // TODO: Error: Unknown type break; } } } return entries; } catch (BusyLockException e) { queue.stats.incLockContentionCount(); throw e; } catch (Exception e) { throw new MessageQueueException("Error processing queue shard : " + shardName, e); } finally { try { m.execute(); } catch (Exception e) { throw new MessageQueueException("Error processing queue shard : " + shardName, e); } } }
@Override public void release() throws Exception { MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel); m.withRow(columnFamily, key).deleteColumn(uniqueColumn); m.execute(); }
@Override public MutationBatch writeEdge( final ApplicationScope scope, final MarkedEdge markedEdge, final UUID timestamp) { ValidationUtils.validateApplicationScope(scope); GraphValidation.validateEdge(markedEdge); ValidationUtils.verifyTimeUuid(timestamp, "timestamp"); final long now = timeService.getCurrentTime(); final Id sourceNode = markedEdge.getSourceNode(); final Id targetNode = markedEdge.getTargetNode(); final String edgeType = markedEdge.getType(); final long edgeTimestamp = markedEdge.getTimestamp(); /** Source write */ final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceNode, edgeType); final Collection<Shard> sourceWriteShards = edgeShardStrategy.getWriteShards(scope, edgeTimestamp, sourceEdgeMeta).getWriteShards(now); final MutationBatch batch = shardedEdgeSerialization.writeEdgeFromSource( edgeColumnFamilies, scope, markedEdge, sourceWriteShards, sourceEdgeMeta, timestamp); /** Source with target type write */ final DirectedEdgeMeta sourceTargetTypeEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType(sourceNode, edgeType, targetNode.getType()); final Collection<Shard> sourceTargetTypeWriteShards = edgeShardStrategy .getWriteShards(scope, edgeTimestamp, sourceTargetTypeEdgeMeta) .getWriteShards(now); batch.mergeShallow( shardedEdgeSerialization.writeEdgeFromSourceWithTargetType( edgeColumnFamilies, scope, markedEdge, sourceTargetTypeWriteShards, sourceTargetTypeEdgeMeta, timestamp)); /** Target write */ final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNode(targetNode, edgeType); final Collection<Shard> targetWriteShards = edgeShardStrategy.getWriteShards(scope, edgeTimestamp, targetEdgeMeta).getWriteShards(now); batch.mergeShallow( shardedEdgeSerialization.writeEdgeToTarget( edgeColumnFamilies, scope, markedEdge, targetWriteShards, targetEdgeMeta, timestamp)); /** Target with source type write */ final DirectedEdgeMeta targetSourceTypeEdgeMeta = DirectedEdgeMeta.fromTargetNodeSourceType(targetNode, edgeType, sourceNode.getType()); final Collection<Shard> targetSourceTypeWriteShards = edgeShardStrategy .getWriteShards(scope, edgeTimestamp, targetSourceTypeEdgeMeta) .getWriteShards(now); batch.mergeShallow( shardedEdgeSerialization.writeEdgeToTargetWithSourceType( edgeColumnFamilies, scope, markedEdge, targetSourceTypeWriteShards, targetSourceTypeEdgeMeta, timestamp)); /** Version write */ final DirectedEdgeMeta edgeVersionsMeta = DirectedEdgeMeta.fromEdge(sourceNode, targetNode, edgeType); final Collection<Shard> edgeVersionsShards = edgeShardStrategy .getWriteShards(scope, edgeTimestamp, edgeVersionsMeta) .getWriteShards(now); batch.mergeShallow( shardedEdgeSerialization.writeEdgeVersions( edgeColumnFamilies, scope, markedEdge, edgeVersionsShards, edgeVersionsMeta, timestamp)); return batch; }
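// The method above leans on MutationBatch.mergeShallow() to fold several
// independently built batches into one execution, so all of an edge's index
// writes land together. A reduced sketch of the same composition idiom
// (writeSourceIndex etc. are hypothetical helpers, each returning a batch):
MutationBatch composeEdgeWrite() {
  MutationBatch combined = writeSourceIndex(); // first batch becomes the accumulator
  combined.mergeShallow(writeTargetIndex());   // rows from the other batches are merged in
  combined.mergeShallow(writeVersionLog());
  return combined; // the caller executes once, committing all writes in a single batch
}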
public static void main(String args[]) throws ConnectionException {
  String[] calles_28001 = {"Alcala", "Preciados", "Gran Via", "Princesa"};
  String[] calles_28002 = {"Castellana", "Goya", "Serrano", "Velazquez"};
  int index_28001 = 0;
  int index_28002 = 0;

  List<User> users = new ArrayList<User>();
  for (int i = 0; i < 10; i++) {
    String id = (i + 1) + "";
    String email = "user" + id + "@void.com";
    String nombre = "nombre_" + id;
    String cp;
    String calle;
    if (i % 2 == 0) {
      cp = "28001";
      calle = calles_28001[index_28001];
      index_28001++;
      index_28001 = index_28001 % 4;
    } else {
      cp = "28002";
      calle = calles_28002[index_28002];
      index_28002++;
      index_28002 = index_28002 % 4;
    }
    User user = new User(id, email, nombre, cp, calle);
    users.add(user);
  }

  // Connect and create the column family
  Keyspace ksUsers = Utils.getKeyspace("utad");
  String columnFamily = "compositeKeys";
  ColumnFamily<String, String> cfUsers =
      new ColumnFamily<String, String>(
          columnFamily, StringSerializer.get(), StringSerializer.get());

  try {
    ksUsers.dropColumnFamily(columnFamily);
  } catch (Exception e) {
    System.out.println("Column family to drop does not exist: " + columnFamily);
  }

  try {
    ksUsers.createColumnFamily(
        cfUsers,
        ImmutableMap.<String, Object>builder()
            .put("key_validation_class", "BytesType")
            .put("comparator_type", "BytesType")
            .build());
  } catch (Exception e) {
    System.out.println("Error creating column family: " + columnFamily + " " + e.getMessage());
  }

  MutationBatch m = ksUsers.prepareMutationBatch();
  String rowKey = "usersByCPAddress";
  ColumnListMutation<String> clm = m.withRow(cfUsers, rowKey);

  System.out.println("\nWriting the data");
  for (User user : users) {
    String id = user.id;
    String cp = user.cp;
    String nombre = user.nombre;
    String email = user.email;
    String calle = user.calle;

    // One column per user; name and value are ':'-delimited composites
    String key = id + ":" + cp + ":" + calle;
    String value = id + ":" + nombre + ":" + email;
    clm.putColumn(key, value);
  }
  // Flush all columns in a single batch
  m.execute();

  // Read back the result
  System.out.println("\nReading the result");
  RowQuery<String, String> query =
      ksUsers
          .prepareQuery(cfUsers)
          .getKey(rowKey)
          .withColumnRange(new RangeBuilder().build())
          .autoPaginate(true);

  ColumnList<String> columns = query.execute().getResult();
  for (Column<String> c : columns) {
    String key = c.getName();
    String value = c.getStringValue();

    System.out.println("\nkey");
    String[] ksplit = key.split(":");
    for (String string : ksplit) {
      System.out.println("\t" + string);
    }

    System.out.println("value");
    String[] kvalue = value.split(":");
    for (String string : kvalue) {
      System.out.println("\t" + string);
    }
  }
}
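// The ':'-joined string keys above work, but they compare as raw strings.
// Astyanax also offers typed composite columns via AnnotatedCompositeSerializer;
// a hedged sketch with the same id/cp/calle parts (class and field names
// invented for illustration):
import com.netflix.astyanax.annotations.Component;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer;

class CpAddressColumn {
  @Component(ordinal = 0) String id;
  @Component(ordinal = 1) String cp;
  @Component(ordinal = 2) String calle;

  CpAddressColumn() {} // no-arg constructor required by the serializer
}

// The typed column family would then replace the String-named one:
// new ColumnFamily<String, CpAddressColumn>("compositeKeys", StringSerializer.get(),
//     new AnnotatedCompositeSerializer<CpAddressColumn>(CpAddressColumn.class));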
List<MessageContext> readMessagesFromShardUsingDefaultLock(String shardName, int itemsToPop)
    throws MessageQueueException, BusyLockException {
  MutationBatch m = null;
  MessageQueueEntry lockColumn = null;
  ColumnListMutation<MessageQueueEntry> rowMutation = null;

  int lockColumnCount = 0;

  // Try locking first
  try {
    // 1. Write the lock column
    lockColumn = MessageQueueEntry.newLockEntry(MessageQueueEntryState.None);
    long curTimeMicros = TimeUUIDUtils.getTimeFromUUID(lockColumn.getTimestamp());
    m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
    m.withRow(queue.queueColumnFamily, shardName)
        .putColumn(lockColumn, curTimeMicros + queue.lockTimeout, queue.lockTtl);
    m.execute();

    // 2. Read back lock columns and entries
    ColumnList<MessageQueueEntry> result =
        queue
            .keyspace
            .prepareQuery(queue.queueColumnFamily)
            .setConsistencyLevel(queue.consistencyLevel)
            .getKey(shardName)
            .withColumnRange(
                ShardedDistributedMessageQueue.entrySerializer
                    .buildRange()
                    .greaterThanEquals((byte) MessageQueueEntryType.Lock.ordinal())
                    .lessThanEquals((byte) MessageQueueEntryType.Lock.ordinal())
                    .build())
            .execute()
            .getResult();

    m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);
    rowMutation = m.withRow(queue.queueColumnFamily, shardName);
    rowMutation.deleteColumn(lockColumn);

    int lockCount = 0;
    boolean lockAcquired = false;
    // The range query returns only Lock-type columns, so the result size is the lock count
    lockColumnCount = result.size();
    for (Column<MessageQueueEntry> column : result) {
      MessageQueueEntry lock = column.getName();

      if (lock.getType() == MessageQueueEntryType.Lock) {
        // Stale lock so we can discard it
        if (column.getLongValue() < curTimeMicros) {
          queue.stats.incExpiredLockCount();
          rowMutation.deleteColumn(lock);
        } else if (lock.getState() == MessageQueueEntryState.Acquired) {
          throw new BusyLockException("Not first lock");
        } else {
          lockCount++;
          if (lockCount == 1 && lock.getTimestamp().equals(lockColumn.getTimestamp())) {
            lockAcquired = true;
          }
        }
      }
    }

    // Only decide after scanning all locks; our pending lock must be the first one
    if (!lockAcquired) {
      throw new BusyLockException("Not first lock");
    }

    // Write the acquired lock column
    lockColumn =
        MessageQueueEntry.newLockEntry(lockColumn.getTimestamp(), MessageQueueEntryState.Acquired);
    rowMutation.putColumn(lockColumn, curTimeMicros + queue.lockTimeout, queue.lockTtl);
  } catch (BusyLockException e) {
    queue.stats.incLockContentionCount();
    throw e;
  } catch (ConnectionException e) {
    LOG.error("Error reading shard " + shardName, e);
    throw new MessageQueueException("Error", e);
  } finally {
    try {
      m.execute();
    } catch (Exception e) {
      throw new MessageQueueException("Error committing lock", e);
    }
  }

  long curTimeMicros = TimeUUIDUtils.getMicrosTimeFromUUID(lockColumn.getTimestamp());

  m = queue.keyspace.prepareMutationBatch().setConsistencyLevel(queue.consistencyLevel);

  // First, release the lock column
  rowMutation = m.withRow(queue.queueColumnFamily, shardName);
  rowMutation.deleteColumn(lockColumn);

  return readMessagesInternal(
      shardName, itemsToPop, lockColumnCount, lockColumn, rowMutation, m, curTimeMicros);
}
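// A hedged sketch (names and the dispatch condition invented) of how a consumer
// might drive the two shard-reading strategies above: prefer the lock-manager
// path when one is configured, fall back to the default busy-lock recipe, and
// treat BusyLockException as contention to back off from rather than an error.
List<MessageContext> pollShard(String shardName, int count) throws MessageQueueException {
  try {
    return queue.lockManager != null
        ? readMessagesFromShardUsingLockManager(shardName, count)
        : readMessagesFromShardUsingDefaultLock(shardName, count);
  } catch (BusyLockException e) {
    // Another consumer holds the shard; skip it this round and retry later
    return Collections.emptyList();
  }
}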