/**
 * Executes the given query over the cached key range.
 *
 * <p>Null start/end keys default to the first/last cached key. If the cache
 * entry list is empty there is no valid range, so an empty result is returned
 * explicitly — the previous implementation let {@code subSet(null, ...)} throw
 * a {@link NullPointerException} and caught it, which used exceptions for
 * ordinary control flow.
 *
 * @param query the query to execute; its field list is normalized via
 *              {@code getFieldsToQuery} before execution
 * @return a {@code JCacheResult} over the matching key range, or an empty
 *         result when the cache holds no entries
 */
@Override
public Result<K, T> execute(Query<K, T> query) {
  K startKey = query.getStartKey();
  K endKey = query.getEndKey();
  if (startKey == null && !cacheEntryList.isEmpty()) {
    startKey = (K) cacheEntryList.first();
  }
  if (endKey == null && !cacheEntryList.isEmpty()) {
    endKey = (K) cacheEntryList.last();
  }
  query.setFields(getFieldsToQuery(query.getFields()));
  // A still-null bound means the cache is empty (or the caller passed no bound
  // and none could be derived). Return an empty result instead of letting
  // subSet(...) throw NPE.
  if (startKey == null || endKey == null) {
    LOG.warn("Cache entry list is empty. Hence returning empty entry set.");
    return new JCacheResult<>(this, query, new ConcurrentSkipListSet<K>());
  }
  ConcurrentSkipListSet<K> cacheEntrySubList =
      (ConcurrentSkipListSet<K>) cacheEntryList.subSet(startKey, true, endKey, true);
  return new JCacheResult<>(this, query, cacheEntrySubList);
}
/**
 * Splits the query into one partition per Hazelcast cluster member, bounded by
 * the smallest and largest cache keys that member owns.
 *
 * <p>Fix: a member that owns no cache entries previously caused
 * {@code memberOwnedCacheEntries.first()} to throw
 * {@code NoSuchElementException}, which the catch block turned into a
 * {@code null} return for the whole partitioning. Such members are now skipped.
 *
 * @param query the query to partition; it is re-executed once per member to
 *              enumerate the candidate keys
 * @return the list of per-member partition queries, or {@code null} if
 *         partitioning failed (preserved for caller compatibility)
 * @throws IOException declared by the interface contract
 */
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  try {
    Member[] clusterMembers = new Member[hazelcastInstance.getCluster().getMembers().size()];
    this.hazelcastInstance.getCluster().getMembers().toArray(clusterMembers);
    for (Member member : clusterMembers) {
      // The result is consumed below, so the query must be re-executed for
      // each member.
      JCacheResult<K, T> result = ((JCacheResult<K, T>) query.execute());
      ConcurrentSkipListSet<K> memberOwnedCacheEntries = new ConcurrentSkipListSet<>();
      while (result.next()) {
        K key = result.getKey();
        Partition partition = hazelcastInstance.getPartitionService().getPartition(key);
        if (partition.getOwner().getUuid().equals(member.getUuid())) {
          memberOwnedCacheEntries.add(key);
        }
      }
      // Skip members that own no keys; first()/last() on an empty set would
      // throw and abort the entire partitioning.
      if (memberOwnedCacheEntries.isEmpty()) {
        continue;
      }
      PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>(query,
          memberOwnedCacheEntries.first(),
          memberOwnedCacheEntries.last(),
          member.getSocketAddress().getHostString());
      partition.setConf(this.getConf());
      partitions.add(partition);
    }
  } catch (Exception ex) {
    LOG.error("Exception occurred while partitioning the query based on Hazelcast partitions.", ex);
    return null;
  }
  LOG.info("Query is partitioned to {} number of partitions.", partitions.size());
  return partitions;
}
@Override public Result<K, T> execute(Query<K, T> query) { K startKey = query.getStartKey(); K endKey = query.getEndKey(); if (startKey == null) { startKey = map.firstKey(); } if (endKey == null) { endKey = map.lastKey(); } // check if query.fields is null query.setFields(getFieldsToQuery(query.getFields())); NavigableMap<K, T> submap = map.subMap(startKey, true, endKey, true); return new MemResult<K, T>(this, query, submap); }
/**
 * Deletes every row matched by the query.
 *
 * <p>Fix: the success path previously returned the constant {@code 0} even
 * though {@code deletedRows} was counted; it now returns the actual count.
 *
 * @param query selects the rows to delete
 * @return the number of rows deleted, or 0 if the query failed (best-effort
 *         contract preserved from the original)
 */
@Override
public long deleteByQuery(Query<K, T> query) {
  try {
    long deletedRows = 0;
    Result<K, T> result = query.execute();
    while (result.next()) {
      if (delete(result.getKey())) {
        deletedRows++;
      }
    }
    return deletedRows;
  } catch (Exception ignored) {
    // Best-effort delete: failures are deliberately reported as 0 deleted
    // rows rather than propagated. NOTE(review): consider logging the
    // exception — no logger is visibly in scope for this class.
    return 0;
  }
}
/**
 * Deletes matched rows from the JCache-backed store.
 *
 * <p>When the query covers every persistent field the row is deleted outright.
 * When it covers a subset, a clone retaining only the fields NOT covered by
 * the query is re-inserted after the delete, so the unqueried fields survive.
 *
 * <p>Fixes: the excluded-field list is loop-invariant (it depends only on the
 * query, not the row) and is now computed once instead of per row; the success
 * log message typo "deleled" is corrected.
 *
 * @param query selects the rows (and optionally fields) to delete
 * @return the number of rows affected, or 0 on failure
 */
@Override
public long deleteByQuery(Query<K, T> query) {
  try {
    long deletedRows = 0;
    Result<K, T> result = query.execute();
    String[] fields = getFieldsToQuery(query.getFields());
    boolean isAllFields = Arrays.equals(fields, getFields());
    // Hoisted out of the row loop: fields to preserve on partial deletes.
    String[] excludedFields = null;
    if (!isAllFields) {
      List<String> queriedFields = Arrays.asList(fields);
      ArrayList<String> excluded = new ArrayList<>();
      for (String field : getFields()) {
        if (!queriedFields.contains(field)) {
          excluded.add(field);
        }
      }
      excludedFields = excluded.toArray(new String[excluded.size()]);
    }
    while (result.next()) {
      if (isAllFields) {
        if (delete(result.getKey())) {
          deletedRows++;
        }
      } else {
        // Clone the row keeping only the excluded (unqueried) fields, then
        // delete and re-insert so those fields are preserved.
        T newClonedObj = getPersistent(result.get(), excludedFields);
        if (delete(result.getKey())) {
          put(result.getKey(), newClonedObj);
          deletedRows++;
        }
      }
    }
    LOG.info("JCache Gora datastore deleted {} rows from Persistent datastore.", deletedRows);
    return deletedRows;
  } catch (Exception e) {
    LOG.error("Exception occurred while deleting entries from JCache Gora datastore. Hence returning 0.", e);
    return 0;
  }
}