// TM
@Override
protected void batchDeleteRow(String cf, List<Node[]> li, String keyspace) {
  if (cf.equals(CF_C_SPO)) {
    super.batchDeleteRow(cf, li, keyspace);
  } else if (cf.equals(CF_PO_S)) {
    Mutator<byte[]> m = HFactory.createMutator(getExistingKeyspace(keyspace), _bs);
    for (Node[] nx : li) {
      // reorder for the key
      Node[] reordered = Util.reorder(nx, _maps.get(cf));
      ByteBuffer rowKey = createKey(new Node[] {reordered[0], reordered[1]});
      // no column key is needed: the whole row is deleted below
      m.addDeletion(rowKey.array(), cf);
    }
    m.execute();
  } else {
    Mutator<String> m = HFactory.createMutator(getExistingKeyspace(keyspace), _ss);
    for (Node[] nx : li) {
      // reorder for the key
      Node[] reordered = Util.reorder(nx, _maps.get(cf));
      String rowKey = reordered[0].toN3();
      m.addDeletion(rowKey, cf);
      _log.info("Delete full row for " + rowKey + " cf= " + cf);
    }
    m.execute();
  }
}
/**
 * Add a message to a given queue in Cassandra.
 *
 * @param columnFamily ColumnFamily name
 * @param queue queue name
 * @param messageId message id
 * @param message message in bytes
 * @param keyspace Cassandra KeySpace
 * @throws CassandraDataAccessException in case of a database access error
 */
public static void addMessageToQueue(
    String columnFamily, String queue, long messageId, byte[] message, Keyspace keyspace)
    throws CassandraDataAccessException {
  if (keyspace == null) {
    // the original message said "no mutator provided", but the check is on the keyspace
    throw new CassandraDataAccessException("Can't add data, no keyspace provided");
  }
  if (columnFamily == null || queue == null || message == null) {
    throw new CassandraDataAccessException(
        "Can't add data with columnFamily = " + columnFamily + " and queue=" + queue
            + " message id = " + messageId + " message = " + message);
  }
  try {
    Mutator<String> mutator = HFactory.createMutator(keyspace, stringSerializer);
    mutator.addInsertion(
        queue.trim(),
        columnFamily,
        HFactory.createColumn(messageId, message, longSerializer, bytesArraySerializer));
    mutator.execute();
  } catch (Exception e) {
    throw new CassandraDataAccessException("Error while adding message to queue", e);
  }
}
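// Hedged usage sketch for addMessageToQueue above. The cluster name, endpoint,
// keyspace, and column family names are illustrative placeholders, not values
// taken from this codebase.
public static void enqueueExample() throws CassandraDataAccessException {
  Cluster cluster = HFactory.getOrCreateCluster("TestCluster", "localhost:9160");
  Keyspace keyspace = HFactory.createKeyspace("MessageStore", cluster);
  addMessageToQueue(
      "QueueColumnFamily",
      "ordersQueue",
      System.currentTimeMillis(), // message ids here are just timestamps for illustration
      "payload".getBytes(),
      keyspace);
}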
/**
 * Add a new Column <long,byte[]> to a given row in a given column family.
 *
 * @param columnFamily column family name
 * @param row row name
 * @param key long key value
 * @param value value as a byte array
 * @param keyspace Cassandra KeySpace
 * @throws CassandraDataAccessException in case of a database access error
 */
public static void addLongByteArrayColumnToRow(
    String columnFamily, String row, long key, byte[] value, Keyspace keyspace)
    throws CassandraDataAccessException {
  if (keyspace == null) {
    throw new CassandraDataAccessException("Can't add data, no keyspace provided");
  }
  if (columnFamily == null || row == null || value == null) {
    throw new CassandraDataAccessException(
        "Can't add data with columnFamily = " + columnFamily + " and row=" + row
            + " key = " + key + " value = " + value);
  }
  try {
    Mutator<String> messageContentMutator = HFactory.createMutator(keyspace, stringSerializer);
    messageContentMutator.addInsertion(
        row,
        columnFamily,
        HFactory.createColumn(key, value, longSerializer, bytesArraySerializer));
    messageContentMutator.execute();
  } catch (Exception e) {
    // the column is <long,byte[]>; the original message said <int,byte[]>
    throw new CassandraDataAccessException(
        "Error while adding new Column <long,byte[]> to cassandra store", e);
  }
}
public void init() {
  CassandraHostConfigurator hostConfig =
      new CassandraHostConfigurator(darkStarNode + ":" + darkStarPort);
  cluster = HFactory.createCluster(clusterName, hostConfig);
  keySpace = HFactory.createKeyspace(keyspace, cluster, policy);
  mutator = HFactory.createMutator(keySpace, bfs);
}
public static void deleteIntegerColumnFromRow(
    String columnFamily, String row, Integer key, Keyspace keyspace)
    throws CassandraDataAccessException {
  if (keyspace == null) {
    throw new CassandraDataAccessException("Can't delete data, no keyspace provided");
  }
  if (columnFamily == null || row == null) {
    throw new CassandraDataAccessException(
        "Can't delete data in columnFamily = " + columnFamily + " and rowName=" + row
            + " key = " + key);
  }
  try {
    Mutator<String> mutator = HFactory.createMutator(keyspace, stringSerializer);
    mutator.addDeletion(row, columnFamily, key, integerSerializer);
    mutator.execute();
  } catch (Exception e) {
    throw new CassandraDataAccessException("Error while deleting data", e);
  }
}
/**
 * Add a new Column<long,long> to a given row in a given Cassandra column family.
 *
 * @param columnFamily column family name
 * @param row row name
 * @param key long key value of the column
 * @param value long value of the column
 * @param keyspace Cassandra KeySpace
 * @throws CassandraDataAccessException in case of a database access error
 */
public static void addLongContentToRow(
    String columnFamily, String row, long key, long value, Keyspace keyspace)
    throws CassandraDataAccessException {
  if (keyspace == null) {
    throw new CassandraDataAccessException("Can't add data, no keyspace provided");
  }
  if (columnFamily == null || row == null) {
    throw new CassandraDataAccessException(
        "Can't add data with columnFamily = " + columnFamily + " and rowName=" + row
            + " key = " + key);
  }
  try {
    Mutator<String> mutator = HFactory.createMutator(keyspace, stringSerializer);
    // insert() sends the mutation immediately; the execute() below only flushes an
    // already-empty batch and is kept for symmetry with the other helpers
    mutator.insert(
        row, columnFamily, HFactory.createColumn(key, value, longSerializer, longSerializer));
    mutator.execute();
  } catch (Exception e) {
    throw new CassandraDataAccessException("Error while adding long content to a row", e);
  }
}
/**
 * Add a mapping to a given row in a Cassandra column family. Mappings are used as search
 * indexes.
 *
 * @param columnFamily column family name
 * @param row row name
 * @param cKey key name for the column being added
 * @param cValue value for the column being added
 * @param keyspace Cassandra KeySpace
 * @throws CassandraDataAccessException in case of a database access error or data error
 */
public static void addMappingToRaw(
    String columnFamily, String row, String cKey, String cValue, Keyspace keyspace)
    throws CassandraDataAccessException {
  if (keyspace == null) {
    throw new CassandraDataAccessException("Can't add data, no keyspace provided");
  }
  // cValue is included in the check because it is trimmed below; a null would
  // otherwise throw a NullPointerException instead of a meaningful error
  if (columnFamily == null || row == null || cKey == null || cValue == null) {
    throw new CassandraDataAccessException(
        "Can't add data with columnFamily = " + columnFamily + " and rowName=" + row
            + " key = " + cKey + " value = " + cValue);
  }
  try {
    Mutator<String> mutator = HFactory.createMutator(keyspace, stringSerializer);
    mutator.addInsertion(
        row,
        columnFamily,
        HFactory.createColumn(cKey, cValue.trim(), stringSerializer, stringSerializer));
    mutator.execute();
  } catch (Exception e) {
    throw new CassandraDataAccessException("Error while adding a mapping to row", e);
  }
}
public void truncate() {
  Mutator<K> mutator = HFactory.createMutator(keyspace, keySerializer);
  Iterator<K> iterator = new KeyIterator<K>(keyspace, columnFamily, keySerializer).iterator();
  while (iterator.hasNext()) {
    this.removeRowBatch(iterator.next(), mutator);
  }
  this.executeMutator(mutator);
}
public void put(String columnFamily, String key, byte[] value) {
  Mutator<String> mutator = HFactory.createMutator(this._keyspace, StringSerializer.get());
  // insert() executes the mutation immediately, so no explicit execute() is required
  mutator.insert(
      key,
      columnFamily,
      HFactory.createColumn(
          DEFAULT_COLUMN_NAME, value, StringSerializer.get(), BytesArraySerializer.get()));
}
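// Note on put(...) above: Hector's Mutator.insert(...) writes immediately, while
// addInsertion(...) only buffers until execute() is called. A minimal sketch of the
// buffered form, with the row key and column family as illustrative placeholders:
Mutator<String> m = HFactory.createMutator(_keyspace, StringSerializer.get());
m.addInsertion(
    "someKey",
    "someColumnFamily",
    HFactory.createColumn(
        DEFAULT_COLUMN_NAME, value, StringSerializer.get(), BytesArraySerializer.get()));
m.execute(); // nothing is written before this call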
@Override @CacheEvict(value = "user-cache", key = "#user.login") public void deleteUser(User user) { if (log.isDebugEnabled()) { log.debug("Deleting user : " + user); } Mutator<String> mutator = HFactory.createMutator(keyspaceOperator, StringSerializer.get()); mutator.addDeletion(user.getLogin(), USER_CF); mutator.execute(); }
/**
 * Decrement a counter column by the given amount.
 *
 * @param columnName name of the counter column
 * @param columnFamily column family name
 * @param rawID row name
 * @param keyspace keyspace
 * @param decrementBy amount to subtract from the counter
 * @throws CassandraDataAccessException in case of a database access error
 */
public static void decrementCounter(
    String columnName, String columnFamily, String rawID, Keyspace keyspace, long decrementBy)
    throws CassandraDataAccessException {
  try {
    Mutator<String> mutator = HFactory.createMutator(keyspace, StringSerializer.get());
    mutator.decrementCounter(rawID, columnFamily, columnName, decrementBy);
    mutator.execute();
  } catch (Exception e) {
    throw new CassandraDataAccessException("Error while accessing:" + columnFamily, e);
  }
}
void insertData(String dpname, Date d, Timestamp u, double value) {
  try {
    // Timestamp.getTime() returns milliseconds; the original variable name
    // ("timeInMicroSeconds") was misleading
    long timeInMilliseconds = u.getTime();
    // UUID timeUUIDColumnName = TimeUUIDUtils.getTimeUUID(timeInMilliseconds);
    System.out.println(d + "\t" + timeInMilliseconds + "\t" + value);
    DateSerializer ds = new DateSerializer();
    Mutator<Date> mu = HFactory.createMutator(keyspace, ds);
    // insert() executes immediately; no execute() call is needed
    mu.insert(d, dpname, HFactory.createColumn(timeInMilliseconds, value));
  } catch (Exception e) {
    e.printStackTrace();
  }
}
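// The commented-out line above hints at the original intent: time-UUID column
// names avoid collisions when two samples share the same millisecond. A hedged
// sketch of that variant as a continuation of insertData, using Hector's
// TimeUUIDUtils and the UUID/Double serializers (an assumption, not the method's
// confirmed behavior):
UUID columnName = TimeUUIDUtils.getTimeUUID(timeInMilliseconds);
mu.insert(
    d,
    dpname,
    HFactory.createColumn(columnName, value, UUIDSerializer.get(), DoubleSerializer.get()));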
@Override
protected void batchInsert(String cf, List<Node[]> li, String keyspace) {
  if (cf.equals(CF_C_SPO)) {
    super.batchInsert(cf, li, keyspace);
  } else if (cf.equals(CF_PO_S)) {
    Mutator<byte[]> m = HFactory.createMutator(getExistingKeyspace(keyspace), _bs);
    for (Node[] nx : li) {
      // reorder for the key
      Node[] reordered = Util.reorder(nx, _maps.get(cf));
      ByteBuffer rowKey = createKey(new Node[] {reordered[0], reordered[1]});
      String colKey = reordered[2].toN3();
      m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn(colKey, ""));
      m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!p", reordered[0].toN3()));
      m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!o", reordered[1].toN3()));
    }
    m.execute();
  } else {
    Mutator<String> m = HFactory.createMutator(getExistingKeyspace(keyspace), _ss);
    for (Node[] nx : li) {
      // reorder for the key
      Node[] reordered = Util.reorder(nx, _maps.get(cf));
      String rowKey = reordered[0].toN3();
      // for ERS_bridges_* keyspaces, store the object as the column value; it is
      // easier to query afterwards. A few keyspaces such as ERS_graphs cannot be decoded.
      String keyspace_decoded = this.decodeKeyspace(keyspace);
      if (keyspace_decoded != null
          && keyspace_decoded.startsWith(Listener.BRIDGES_KEYSPACE + "_")) {
        String colKey = reordered[1].toN3();
        m.addInsertion(rowKey, cf, HFactory.createStringColumn(colKey, reordered[2].toN3()));
      } else {
        String colKey = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
        m.addInsertion(rowKey, cf, HFactory.createStringColumn(colKey, ""));
      }
    }
    m.execute();
  }
}
@Override
public void insertTatamibotConfiguration(TatamibotConfiguration tatamibotConfiguration) {
  UUID tatamibotConfigurationId = TimeUUIDUtils.getUniqueTimeUUIDinMillis();
  Mutator<String> mutator = HFactory.createMutator(keyspaceOperator, StringSerializer.get());
  mutator.insert(
      tatamibotConfiguration.getDomain(),
      ColumnFamilyKeys.DOMAIN_TATAMIBOT_CF,
      HFactory.createColumn(
          tatamibotConfigurationId, "", UUIDSerializer.get(), StringSerializer.get()));
  tatamibotConfiguration.setTatamibotConfigurationId(tatamibotConfigurationId.toString());
  em.persist(tatamibotConfiguration);
}
@Override @CacheEvict(value = "domain-tags-cache", key = "#domain") public void addTag(String domain, String tag) { HColumn<UUID, String> column = HFactory.createColumn( TimeUUIDUtils.getUniqueTimeUUIDinMillis(), tag, COLUMN_TTL, UUIDSerializer.get(), StringSerializer.get()); Mutator<String> mutator = HFactory.createMutator(keyspaceOperator, StringSerializer.get()); mutator.insert(domain, TRENDS_CF, column); }
public void removeColumnRange(K key, N start, N end, boolean reverse, int count) {
  Mutator<K> mutator = HFactory.createMutator(keyspace, keySerializer);
  // read at most 'count' columns in the range, then delete exactly those
  List<HColumn<N, V>> columns =
      createSliceQuery(keyspace, keySerializer, columnNameSerializer, valueSerializer)
          .setColumnFamily(columnFamily)
          .setKey(key)
          .setRange(start, end, reverse, count)
          .execute()
          .get()
          .getColumns();
  for (HColumn<N, V> column : columns) {
    mutator.addDeletion(key, columnFamily, column.getName(), columnNameSerializer);
  }
  this.executeMutator(mutator);
}
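// Hedged usage note for removeColumnRange above: each call reads and deletes at
// most 'count' columns, so clearing a large range means calling it repeatedly.
// A sketch using Hector's CountQuery to detect leftovers; 'dao' stands in for an
// instance of the class above and is illustrative, not part of the original code.
int batch = 100;
while (HFactory.createCountQuery(keyspace, keySerializer, columnNameSerializer)
        .setColumnFamily(columnFamily)
        .setKey(key)
        .setRange(start, end, batch)
        .execute()
        .get()
    > 0) {
  dao.removeColumnRange(key, start, end, false, batch);
}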
/**
 * Remove the allocated row-column space for a counter.
 *
 * @param cfName column family name
 * @param counterRowName name of the row
 * @param queueColumn name of the column
 * @param keyspace keyspace name
 * @throws CassandraDataAccessException in case of a database access error
 */
public static void removeCounterColumn(
    String cfName, String counterRowName, String queueColumn, Keyspace keyspace)
    throws CassandraDataAccessException {
  try {
    Mutator<String> mutator = HFactory.createMutator(keyspace, StringSerializer.get());
    mutator.deleteCounter(counterRowName, cfName, queueColumn, stringSerializer);
    mutator.execute();
    // note: the query below is assembled but never executed, so it has no effect;
    // it looks like leftover verification code
    CounterQuery<String, String> counter =
        new ThriftCounterColumnQuery<String, String>(keyspace, stringSerializer, stringSerializer);
    counter.setColumnFamily(cfName).setKey(counterRowName).setName(queueColumn);
  } catch (Exception e) {
    throw new CassandraDataAccessException("Error while accessing:" + cfName, e);
  }
}
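// If the verification was intended, a hedged sketch of actually reading the
// counter back (as a continuation inside the try block above) would be:
HCounterColumn<String> remaining = counter.execute().get();
long counterValue = (remaining == null) ? 0L : remaining.getValue();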
public void removeCounterRow(K key) {
  SliceCounterQuery<K, N> query =
      HFactory.createCounterSliceQuery(keyspace, keySerializer, columnNameSerializer)
          .setColumnFamily(columnFamily)
          .setKey(key);
  AchillesCounterSliceIterator<K, N> iterator =
      new AchillesCounterSliceIterator<K, N>(
          policy, columnFamily, query, (N) null, (N) null, false, DEFAULT_LENGTH);
  Mutator<K> mutator = HFactory.createMutator(keyspace, keySerializer);
  while (iterator.hasNext()) {
    HCounterColumn<N> counterCol = iterator.next();
    // decrementing by the current value resets each counter to zero; this is the
    // usual idiom since deleted counters cannot be safely re-incremented
    mutator.decrementCounter(key, columnFamily, counterCol.getName(), counterCol.getValue());
  }
  this.executeMutator(mutator);
}
@Override
public void run() {
  Calendar instance = Calendar.getInstance();
  // Calendar months are zero-based, so the original set(2012, 1, 1, ...) means
  // 1 February 2012; the constant makes that explicit
  instance.set(2012, Calendar.FEBRUARY, 1, 0, 0, 0);
  Date time = instance.getTime();
  Mutator<String> mutator = HFactory.createMutator(keyspace, new AsciiSerializer());
  for (int i = 0; i < 300; i++) {
    HColumn<Date, byte[]> column =
        HFactory.createColumn(
            time, new byte[] {}, DateSerializer.get(), BytesArraySerializer.get());
    String hitCampaignName = getHitCampaignName();
    if (hitCampaignName != null) {
      mutator.addInsertion(customer, hitCampaignName, column);
    }
    mutator.addInsertion(customer, "global", column);
    time = new Date(time.getTime() + 2000);
  }
  mutator.execute();
}
@SuppressWarnings("unchecked") public void setAmountChunksWithContent(String owner_name, String fileName, long amountChunks) { try { Mutator<String> mutator = HFactory.createMutator(keyspaceOperator, stringSerializer); mutator.insert( owner_name, "UserFiles", HFactory.createSuperColumn( fileName, Arrays.asList( HFactory.createStringColumn("chunks_with_content", String.valueOf(amountChunks))), stringSerializer, stringSerializer, stringSerializer)); } catch (HectorException e) { log.error("Data was not inserted"); e.printStackTrace(); } }
@Override
public void save(
    PersistenceManagementEvent persistenceManagementEvent,
    String nodeID,
    PersistenceObject persistenceObject) {
  Mutator<String> mutator = HFactory.createMutator(keyspace, sser);
  mutator.insert(
      persistenceManagementEvent.getRevision(),
      COLUMN_FAMILY_NAME,
      HFactory.createColumn(nodeID, ByteSerializer.OToB(persistenceObject), sser, bser));
  mutator.insert(
      persistenceManagementEvent.getExecutionPlanIdentifier(),
      INDEX_COLUMN_FAMILY_NAME,
      HFactory.createColumn(
          persistenceManagementEvent.getRevision(),
          String.valueOf(System.currentTimeMillis()),
          sser,
          sser));
  mutator.execute();
}
/**
 * Check whether the given tags already exist and add the ones that don't.
 *
 * @param keyspaceOperator keyspace to write to
 * @param columnfamily column family holding the tags
 * @param columnfamilyId row key of the tagged entity
 * @param tags tags to add (compared case-insensitively)
 * @return the number of tags actually added
 */
private int addTags(
    Keyspace keyspaceOperator, String columnfamily, String columnfamilyId, List<String> tags) {
  // Get current user tags
  List<String> currentTags =
      getTags(
          keyspaceOperator, columnfamily, columnfamilyId, TAG_COLUMN_MIN_NAME, TAG_COLUMN_MAX_NAME);
  // Get the number of tags of the user
  int nbColumns = currentTags.size();
  int nbAddedTags = 0;
  Mutator<String> mutator = HFactory.createMutator(keyspaceOperator, StringSerializer.get());
  for (String tag : tags) {
    String tagLowerCase = tag.toLowerCase();
    // Check if the tag already exists
    if (currentTags.contains(tagLowerCase)) {
      // This tag already exists => we don't add it!
      continue;
    }
    // Add the new tag
    nbColumns++;
    String columnName = COLUMN_TAG_PREFIX + nbColumns;
    mutator.insert(
        columnfamilyId,
        columnfamily,
        HFactory.createColumn(
            columnName, tagLowerCase, StringSerializer.get(), StringSerializer.get()));
    nbAddedTags++;
  }
  return nbAddedTags;
}
public void save(Collection<Count> counts) {
  // merge counts that target the same counter before writing, so each counter
  // gets a single increment in the batch
  Map<String, Count> countHolder = new HashMap<String, Count>();
  for (Count count : counts) {
    Count c = countHolder.get(count.getCounterName());
    if (c != null) {
      c.apply(count);
    } else {
      countHolder.put(count.getCounterName(), count);
    }
  }
  Mutator<ByteBuffer> mutator = HFactory.createMutator(keyspace, ByteBufferSerializer.get());
  for (Count count : countHolder.values()) {
    mutator.addCounter(
        count.getKeyNameBytes(),
        count.getTableName(),
        new HCounterColumnImpl(
            count.getColumnName(), count.getValue(), count.getColumnNameSerializer()));
  }
  try {
    mutator.execute();
  } catch (Exception e) {
    // errors here happen a lot on shutdown, don't fill the logs with them
    String error = e.getClass().getCanonicalName();
    if (counterInsertFailures.get(error) == null) {
      log.error("CounterStore insert failed, first instance", e);
      counterInsertFailures.put(error, 1);
    } else {
      int count = counterInsertFailures.get(error) + 1;
      counterInsertFailures.put(error, count);
      if (log.isDebugEnabled()) {
        log.debug(error + " caused CounterStore insert failure, count = " + count, e);
      } else {
        log.error(error + " caused CounterStore insert failure, count = " + count);
      }
    }
  }
}
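// For contrast, the same counter write in plain Hector without the Count wrapper;
// a minimal sketch with the row key, column family, and column name as placeholders:
Mutator<String> cm = HFactory.createMutator(keyspace, StringSerializer.get());
cm.incrementCounter("dailyHits", "Counters", "2012-01-01", 5L);
cm.execute();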
// create a batch of different operations and execute them all together;
// operation codes carried in nx[3]: 0 = ignore, 1 = insert, 11 = insert + back link,
// 2 = delete, 21 = delete + back link, 3 = update (delete + insert),
// 31 = update + back links, 4 = delete the full entity
@Override
protected void batchRun(String cf, List<Node[]> li, String keyspace) {
  if (cf.equals(CF_C_SPO)) {
    // this is not implemented, we don't care about the CSPO CF
    super.batchRun(cf, li, keyspace);
  } else if (cf.equals(CF_PO_S)) {
    Mutator<byte[]> m = HFactory.createMutator(getExistingKeyspace(keyspace), _bs);
    for (Node[] nx : li) {
      Node[] reordered;
      ByteBuffer rowKey;
      String colKey;
      // _log.info("$$$ " + nx[0] + " " + nx[1] + " " + nx[2] + " " + nx[3]);
      switch (Integer.parseInt(nx[3].toString())) {
        case 0: // ignore query
          continue;
        case 1: // insertion
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          colKey = reordered[2].toN3();
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn(colKey, ""));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!p", reordered[0].toN3()));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!o", reordered[1].toN3()));
          break;
        case 11: // insertion with a back link
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          colKey = reordered[2].toN3();
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn(colKey, ""));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!p", reordered[0].toN3()));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!o", reordered[1].toN3()));
          // add the back link as well
          reordered = Util.reorder(nx, _maps_br.get("link"));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          colKey = reordered[2].toN3();
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn(colKey, ""));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!p", reordered[0].toN3()));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!o", reordered[1].toN3()));
          break;
        case 2: // deletion
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          // delete the full row
          m.addDeletion(rowKey.array(), cf);
          break;
        case 21: // deletion with a back link
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          // delete the full row
          m.addDeletion(rowKey.array(), cf);
          // delete the back link as well
          if (nx[2] instanceof Resource) {
            reordered = Util.reorder(nx, _maps_br.get("link"));
            rowKey = createKey(new Node[] {reordered[0], reordered[1]});
            // delete the full row containing the back link
            m.addDeletion(rowKey.array(), cf);
          }
          break;
        case 3: // update, run a delete and an insert
          // deletion; reorder for the key
          reordered = Util.reorder(nx, _maps_br_update_d.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          // delete the full row
          m.addDeletion(rowKey.array(), cf);
          // insertion; reorder for the key
          reordered = Util.reorder(nx, _maps_br_update_i.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          colKey = reordered[2].toN3();
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn(colKey, ""));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!p", reordered[0].toN3()));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!o", reordered[1].toN3()));
          break;
        case 4: // delete the full entity
          Node[] query = new Node[3];
          query[0] = nx[0];
          query[1] = new Variable("p");
          query[2] = new Variable("o");
          try {
            Iterator<Node[]> it = this.query(query, Integer.MAX_VALUE, keyspace);
            // if there is data, delete it
            while (it.hasNext()) {
              Node[] n = it.next();
              // this.deleteData(n, keyspace, 0);
              rowKey = createKey(new Node[] {n[0], n[1]});
              m.addDeletion(rowKey.array(), cf);
            }
          } catch (StoreException ex) {
            _log.severe(ex.getMessage());
          }
          break;
        case 31: // update with link, run a delete and an insert
          // deletion; reorder for the key
          reordered = Util.reorder(nx, _maps_br_update_d.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          // delete the full row
          m.addDeletion(rowKey.array(), cf);
          // delete the back link as well
          if (nx[2] instanceof Resource) {
            reordered = Util.reorder(nx, _maps_br_update_d.get("link"));
            rowKey = createKey(new Node[] {reordered[0], reordered[1]});
            // delete the full row containing the back link
            m.addDeletion(rowKey.array(), cf);
          }
          // insertion; reorder for the key
          reordered = Util.reorder(nx, _maps_br_update_i.get(cf));
          rowKey = createKey(new Node[] {reordered[0], reordered[1]});
          colKey = reordered[2].toN3();
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn(colKey, ""));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!p", reordered[0].toN3()));
          m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!o", reordered[1].toN3()));
          // insert the new back link as well
          if (nx[5] instanceof Resource) {
            reordered = Util.reorder(nx, _maps_br_update_i.get("link"));
            rowKey = createKey(new Node[] {reordered[0], reordered[1]});
            colKey = reordered[2].toN3();
            m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn(colKey, ""));
            m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!p", reordered[0].toN3()));
            m.addInsertion(rowKey.array(), cf, HFactory.createStringColumn("!o", reordered[1].toN3()));
          }
          break;
        default:
          _log.info("OPERATION UNKNOWN, moving to next quad");
          break;
      }
    }
    m.execute();
  } else {
    Mutator<String> m = HFactory.createMutator(getExistingKeyspace(keyspace), _ss);
    for (Node[] nx : li) {
      Node[] reordered;
      String rowKey;
      String colKey;
      // _log.info("$$$ " + nx[0] + " " + nx[1] + " " + nx[2] + " " + nx[3]);
      switch (Integer.parseInt(nx[3].toString())) {
        case 0: // ignore query
          continue;
        case 1: // insertion
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br.get(cf));
          rowKey = reordered[0].toN3();
          colKey = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
          m.addInsertion(rowKey, cf, HFactory.createStringColumn(colKey, ""));
          break;
        case 2: // deletion
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br.get(cf));
          rowKey = reordered[0].toN3();
          colKey = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
          m.addDeletion(rowKey, cf, colKey, _ss);
          break;
        case 3: // update = delete + insert
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br_update_d.get(cf));
          rowKey = reordered[0].toN3();
          colKey = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
          m.addDeletion(rowKey, cf, colKey, _ss);
          // reorder for the key
          reordered = Util.reorder(nx, _maps_br_update_i.get(cf));
          rowKey = reordered[0].toN3();
          colKey = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
          m.addInsertion(rowKey, cf, HFactory.createStringColumn(colKey, ""));
          break;
        case 4: // delete the full entity
          Node[] query = new Node[3];
          query[0] = nx[0];
          query[1] = new Variable("p");
          query[2] = new Variable("o");
          try {
            Iterator<Node[]> it = this.query(query, Integer.MAX_VALUE, keyspace);
            // if there is data, delete it
            while (it.hasNext()) {
              Node[] n = it.next();
              // this.deleteData(n, keyspace, 0);
              rowKey = n[0].toN3();
              m.addDeletion(rowKey, cf);
            }
          } catch (StoreException ex) {
            _log.severe(ex.getMessage());
          }
          break;
        default:
          _log.info("OPERATION UNKNOWN, moving to next quad");
          break;
      }
    }
    m.execute();
  }
}
@Override
public int batchUpdateVersioning(
    String cf, List<Node[]> li, String keyspace, String URN_author, String txID) {
  Hashtable<String, List<Node[]>> versioned_entities = new Hashtable<String, List<Node[]>>();
  Hashtable<String, String> previous_commit_id = new Hashtable<String, String>();
  boolean successful_fetch =
      fetchMostRecentVersions(
          keyspace, cf, li, txID, URN_author, versioned_entities, previous_commit_id);
  if (!successful_fetch) {
    // it means that for one entity there is already a lastCID > txID,
    // so we have to abort the insertion here
    return 2;
  }
  // now apply the property updates to the recent versions just fetched
  for (Iterator<Node[]> it = li.iterator(); it.hasNext(); ) {
    Node[] current = it.next();
    String prop = current[1].toN3();
    String value_old = current[2].toN3();
    Node value_new = current[3];
    List<Node[]> entity_version = versioned_entities.get("<" + current[0].toString() + "-VER>");
    for (Iterator<Node[]> it_version = entity_version.iterator(); it_version.hasNext(); ) {
      Node[] ver_ent = it_version.next();
      String ver_prop = ver_ent[1].toN3();
      String ver_value = ver_ent[2].toN3();
      // check if this is the triple to be replaced
      if (prop.equals(ver_prop) && value_old.equals(ver_value)) {
        ver_ent[2] = value_new;
      }
    }
  }
  // SPO
  // insert the new 's-VER' and 's-URN' versions
  Mutator<String> m = HFactory.createMutator(getExistingKeyspace(keyspace), _ss);
  for (Iterator<String> it = versioned_entities.keySet().iterator(); it.hasNext(); ) {
    String row_entity_key = it.next();
    // the list of properties to be added to the new version of this entity
    List<Node[]> entity_old_version = versioned_entities.get(row_entity_key);
    String old_version_num = previous_commit_id.get(row_entity_key);
    for (Iterator<Node[]> it_old_v = entity_old_version.iterator(); it_old_v.hasNext(); ) {
      Node[] nx = it_old_v.next();
      // reorder for the key
      Node[] reordered = Util.reorder(nx, _maps.get(cf));
      String rowKey = new Resource(reordered[0].toString()).toN3();
      if (!reordered[0].toString().contains("-VER")) {
        rowKey = new Resource(reordered[0].toString() + "-VER").toN3();
      }
      /* this is the approach without using Snowflake:
      int next_ver = old_version_num + 1; */
      String next_ver;
      // a non-null txID means this is called in a transactional context
      if (txID != null) {
        next_ver = txID;
      } else {
        next_ver = String.valueOf(Integer.valueOf(old_version_num) + 1);
      }
      // VER, URN
      Composite colKey = new Composite();
      colKey.addComponent(next_ver, StringSerializer.get());
      colKey.addComponent(URN_author, StringSerializer.get());
      String colKey_s = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
      colKey.addComponent(colKey_s, StringSerializer.get());
      HColumn<Composite, String> hColumnObj_itemID =
          HFactory.createColumn(colKey, "", new CompositeSerializer(), StringSerializer.get());
      m.addInsertion(rowKey, cf, hColumnObj_itemID);
      // URN, VER
      rowKey = new Resource(reordered[0].toString() + "-URN").toN3();
      colKey = new Composite();
      colKey.addComponent(URN_author, StringSerializer.get());
      colKey.addComponent(String.valueOf(next_ver), StringSerializer.get());
      colKey_s = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
      colKey.addComponent(colKey_s, StringSerializer.get());
      hColumnObj_itemID =
          HFactory.createColumn(colKey, "", new CompositeSerializer(), StringSerializer.get());
      m.addInsertion(rowKey, cf, hColumnObj_itemID);
    }
    // flush the buffered mutations for this entity
    m.execute();
  }
  // now try to write all CID/prevCID pairs; if check-my-writes is enabled, this can
  // abort in case there are conflicts
  return commitOrAbort(keyspace, txID, URN_author, versioned_entities, previous_commit_id);
}
@Override
protected int batchInsertVersioning(
    String cf, List<Node[]> li, String keyspace, String URN_author, String txID) {
  Hashtable<String, List<Node[]>> versioned_entities = new Hashtable<String, List<Node[]>>();
  Hashtable<String, String> previous_commit_id = new Hashtable<String, String>();
  boolean successful_fetch =
      fetchMostRecentVersions(
          keyspace, cf, li, txID, URN_author, versioned_entities, previous_commit_id);
  if (!successful_fetch) {
    // it means that for one entity there is already a lastCID > txID,
    // so we have to abort the insertion here
    return 2;
  }
  /*
  // SIMULATE SOME DELAY HERE !!
  try {
    Thread.sleep(System.currentTimeMillis() % 10000);
  } catch (Exception ex) {
  }
  // END [THIS MUST BE ERASED IN PRODUCTION!!]
  */
  // add the new triples to the previous versions
  for (Iterator<Node[]> it_triples = li.iterator(); it_triples.hasNext(); ) {
    Node[] triple = it_triples.next();
    String rowKey = new Resource(triple[0].toString() + "-VER").toN3();
    List<Node[]> prev_version = versioned_entities.get(rowKey);
    prev_version.add(triple);
    versioned_entities.put(rowKey, prev_version);
  }
  // SPO
  // insert the new 's-VER' and 's-URN' versions
  Mutator<String> m = HFactory.createMutator(getExistingKeyspace(keyspace), _ss);
  for (Iterator<String> it = versioned_entities.keySet().iterator(); it.hasNext(); ) {
    String row_entity_key = it.next();
    // the list of properties to be added to the new version of this entity
    List<Node[]> entity_old_version = versioned_entities.get(row_entity_key);
    String old_version_num = previous_commit_id.get(row_entity_key);
    for (Iterator<Node[]> it_old_v = entity_old_version.iterator(); it_old_v.hasNext(); ) {
      Node[] nx = it_old_v.next();
      // reorder for the key
      Node[] reordered = Util.reorder(nx, _maps.get(cf));
      String rowKey = new Resource(reordered[0].toString()).toN3();
      if (!reordered[0].toString().contains("-VER")) {
        rowKey = new Resource(reordered[0].toString() + "-VER").toN3();
      }
      /* this is the approach without using Snowflake:
      int next_ver = old_version_num + 1; */
      String next_ver;
      // a non-null txID means this is called in a transactional context
      if (txID != null) {
        next_ver = txID;
      } else {
        next_ver = String.valueOf(Integer.valueOf(old_version_num) + 1);
      }
      // VER, URN
      Composite colKey = new Composite();
      colKey.addComponent(next_ver, StringSerializer.get());
      colKey.addComponent(URN_author, StringSerializer.get());
      String colKey_s = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
      colKey.addComponent(colKey_s, StringSerializer.get());
      HColumn<Composite, String> hColumnObj_itemID =
          HFactory.createColumn(colKey, "", new CompositeSerializer(), StringSerializer.get());
      m.addInsertion(rowKey, cf, hColumnObj_itemID);
      // URN, VER
      rowKey = new Resource(reordered[0].toString() + "-URN").toN3();
      colKey = new Composite();
      colKey.addComponent(URN_author, StringSerializer.get());
      colKey.addComponent(String.valueOf(next_ver), StringSerializer.get());
      colKey_s = Nodes.toN3(new Node[] {reordered[1], reordered[2]});
      colKey.addComponent(colKey_s, StringSerializer.get());
      hColumnObj_itemID =
          HFactory.createColumn(colKey, "", new CompositeSerializer(), StringSerializer.get());
      m.addInsertion(rowKey, cf, hColumnObj_itemID);
    }
    // flush the buffered mutations for this entity
    m.execute();
  }
  // now try to write all CID/prevCID pairs; if check-my-writes is enabled, this can
  // abort in case there are conflicts
  return commitOrAbort(keyspace, txID, URN_author, versioned_entities, previous_commit_id);
}
/** Inserts a new row on the UserFiles SuperColumn Family. */
@SuppressWarnings("unchecked")
public void insertRow(
    String owner_name,
    String fileName,
    String fileID,
    String size,
    String chunks,
    String version,
    int defaultChunkSize) {
  try {
    Mutator<String> mutator = HFactory.createMutator(keyspaceOperator, stringSerializer);
    // each insert() below issues a separate, immediate write of one sub-column
    mutator.insert(
        owner_name,
        "UserFiles",
        HFactory.createSuperColumn(
            fileName,
            Arrays.asList(HFactory.createStringColumn("file_id", fileID)),
            stringSerializer, stringSerializer, stringSerializer));
    mutator.insert(
        owner_name,
        "UserFiles",
        HFactory.createSuperColumn(
            fileName,
            Arrays.asList(HFactory.createStringColumn("size", size)),
            stringSerializer, stringSerializer, stringSerializer));
    mutator.insert(
        owner_name,
        "UserFiles",
        HFactory.createSuperColumn(
            fileName,
            Arrays.asList(HFactory.createStringColumn("chunks", chunks)),
            stringSerializer, stringSerializer, stringSerializer));
    mutator.insert(
        owner_name,
        "UserFiles",
        HFactory.createSuperColumn(
            fileName,
            Arrays.asList(HFactory.createStringColumn("version", version)),
            stringSerializer, stringSerializer, stringSerializer));
    mutator.insert(
        owner_name,
        "UserFiles",
        HFactory.createSuperColumn(
            fileName,
            Arrays.asList(
                HFactory.createStringColumn(
                    "default_chunk_size", String.valueOf(defaultChunkSize))),
            stringSerializer, stringSerializer, stringSerializer));
  } catch (HectorException e) {
    log.error("Data was not inserted");
    e.printStackTrace();
  }
}
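// The five insert(...) calls above each trigger an immediate write. A hedged
// alternative that buffers the same data as one super column and sends it in a
// single batch (same column family and serializers as in insertRow):
mutator.addInsertion(
    owner_name,
    "UserFiles",
    HFactory.createSuperColumn(
        fileName,
        Arrays.asList(
            HFactory.createStringColumn("file_id", fileID),
            HFactory.createStringColumn("size", size),
            HFactory.createStringColumn("chunks", chunks),
            HFactory.createStringColumn("version", version),
            HFactory.createStringColumn("default_chunk_size", String.valueOf(defaultChunkSize))),
        stringSerializer,
        stringSerializer,
        stringSerializer));
mutator.execute();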
public CassandraWriteWork(Keyspace keyspace, CassandraWorkStatus callback) {
  this.callback = callback;
  mutator = HFactory.createMutator(keyspace, ByteBufferSerializer.get());
}
public Mutator<K> buildMutator() {
  return HFactory.createMutator(this.keyspace, this.keySerializer);
}
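// Hedged usage of buildMutator() above, assuming a DAO instance parameterized with
// String keys; 'dao' and the key/column family names are illustrative, not part of
// the original class.
Mutator<String> batch = dao.buildMutator();
batch.addDeletion("row-1", "MyColumnFamily");
batch.addDeletion("row-2", "MyColumnFamily");
batch.execute();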
/**
 * @param outputTransportMessageConfiguration topic/column family configuration to publish to
 * @param message the event to write, expected as a Map<String, Object> of column name to value
 * @param outputTransportAdaptorConfiguration adaptor configuration holding the Cassandra
 *     connection properties
 * @param tenantId tenant to publish for
 */
public void publish(
    OutputTransportAdaptorMessageConfiguration outputTransportMessageConfiguration,
    Object message,
    OutputTransportAdaptorConfiguration outputTransportAdaptorConfiguration,
    int tenantId) {
  if (message instanceof Map) {
    // look up (or lazily create) the per-tenant cluster cache
    ConcurrentHashMap<String, TransportAdaptorInfo> cassandraClusterCache =
        tenantedCassandraClusterCache.get(tenantId);
    if (null == cassandraClusterCache) {
      cassandraClusterCache = new ConcurrentHashMap<String, TransportAdaptorInfo>();
      if (null != tenantedCassandraClusterCache.putIfAbsent(tenantId, cassandraClusterCache)) {
        cassandraClusterCache = tenantedCassandraClusterCache.get(tenantId);
      }
    }
    TransportAdaptorInfo transportAdaptorInfo =
        cassandraClusterCache.get(outputTransportAdaptorConfiguration.getName());
    if (null == transportAdaptorInfo) {
      // first use of this adaptor for the tenant: connect to the cluster
      Map<String, String> properties = outputTransportAdaptorConfiguration.getOutputProperties();
      Map<String, String> credentials = new HashMap<String, String>();
      credentials.put(
          "username",
          properties.get(CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_USER_NAME));
      credentials.put(
          "password",
          properties.get(CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_PASSWORD));
      Cluster cluster =
          HFactory.createCluster(
              properties.get(CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_CLUSTER_NAME),
              new CassandraHostConfigurator(
                  properties.get(CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_HOSTNAME)
                      + ":"
                      + properties.get(
                          CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_PORT)),
              credentials);
      String indexAllColumnsString =
          properties.get(CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_INDEX_ALL_COLUMNS);
      boolean indexAllColumns =
          indexAllColumnsString != null && indexAllColumnsString.equals("true");
      transportAdaptorInfo = new TransportAdaptorInfo(cluster, indexAllColumns);
      if (null
          != cassandraClusterCache.putIfAbsent(
              outputTransportAdaptorConfiguration.getName(), transportAdaptorInfo)) {
        transportAdaptorInfo =
            cassandraClusterCache.get(outputTransportAdaptorConfiguration.getName());
      } else {
        log.info("Initiated Cassandra Writer " + outputTransportAdaptorConfiguration.getName());
      }
    }
    String keySpaceName =
        outputTransportMessageConfiguration
            .getOutputMessageProperties()
            .get(CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_KEY_SPACE_NAME);
    String columnFamilyName =
        outputTransportMessageConfiguration
            .getOutputMessageProperties()
            .get(CassandraTransportAdaptorConstants.TRANSPORT_CASSANDRA_COLUMN_FAMILY_NAME);
    MessageInfo messageInfo =
        transportAdaptorInfo.getMessageInfoMap().get(outputTransportMessageConfiguration);
    if (null == messageInfo) {
      Keyspace keyspace = HFactory.createKeyspace(keySpaceName, transportAdaptorInfo.getCluster());
      messageInfo = new MessageInfo(keyspace);
      if (null
          != transportAdaptorInfo
              .getMessageInfoMap()
              .putIfAbsent(outputTransportMessageConfiguration, messageInfo)) {
        messageInfo =
            transportAdaptorInfo.getMessageInfoMap().get(outputTransportMessageConfiguration);
      }
    }
    // create the keyspace and column family on first use, otherwise pick up the
    // existing column family definition
    if (transportAdaptorInfo.getCluster().describeKeyspace(keySpaceName) == null) {
      BasicColumnFamilyDefinition columnFamilyDefinition = new BasicColumnFamilyDefinition();
      columnFamilyDefinition.setKeyspaceName(keySpaceName);
      columnFamilyDefinition.setName(columnFamilyName);
      columnFamilyDefinition.setComparatorType(ComparatorType.UTF8TYPE);
      columnFamilyDefinition.setDefaultValidationClass(ComparatorType.UTF8TYPE.getClassName());
      columnFamilyDefinition.setKeyValidationClass(ComparatorType.UTF8TYPE.getClassName());
      ColumnFamilyDefinition cfDef = new ThriftCfDef(columnFamilyDefinition);
      KeyspaceDefinition keyspaceDefinition =
          HFactory.createKeyspaceDefinition(
              keySpaceName,
              "org.apache.cassandra.locator.SimpleStrategy",
              1,
              Arrays.asList(cfDef));
      transportAdaptorInfo.getCluster().addKeyspace(keyspaceDefinition);
      KeyspaceDefinition fromCluster =
          transportAdaptorInfo.getCluster().describeKeyspace(keySpaceName);
      messageInfo.setColumnFamilyDefinition(
          new BasicColumnFamilyDefinition(fromCluster.getCfDefs().get(0)));
    } else {
      KeyspaceDefinition fromCluster =
          transportAdaptorInfo.getCluster().describeKeyspace(keySpaceName);
      for (ColumnFamilyDefinition columnFamilyDefinition : fromCluster.getCfDefs()) {
        if (columnFamilyDefinition.getName().equals(columnFamilyName)) {
          messageInfo.setColumnFamilyDefinition(
              new BasicColumnFamilyDefinition(columnFamilyDefinition));
          break;
        }
      }
    }
    Mutator<String> mutator = HFactory.createMutator(messageInfo.getKeyspace(), sser);
    String uuid = UUID.randomUUID().toString();
    for (Map.Entry<String, Object> entry : ((Map<String, Object>) message).entrySet()) {
      // when indexing all columns, add a secondary index for any column seen for the first time
      if (transportAdaptorInfo.isIndexAllColumns()
          && !messageInfo.getColumnNames().contains(entry.getKey())) {
        BasicColumnFamilyDefinition columnFamilyDefinition =
            messageInfo.getColumnFamilyDefinition();
        BasicColumnDefinition columnDefinition = new BasicColumnDefinition();
        columnDefinition.setName(StringSerializer.get().toByteBuffer(entry.getKey()));
        columnDefinition.setIndexType(ColumnIndexType.KEYS);
        columnDefinition.setIndexName(
            keySpaceName + "_" + columnFamilyName + "_" + entry.getKey() + "_Index");
        columnDefinition.setValidationClass(ComparatorType.UTF8TYPE.getClassName());
        columnFamilyDefinition.addColumnDefinition(columnDefinition);
        transportAdaptorInfo
            .getCluster()
            .updateColumnFamily(new ThriftCfDef(columnFamilyDefinition));
        messageInfo.getColumnNames().add(entry.getKey());
      }
      mutator.insert(
          uuid,
          columnFamilyName,
          HFactory.createStringColumn(entry.getKey(), entry.getValue().toString()));
    }
    mutator.execute();
  }
}