/**
 * {@inheritDoc}
 *
 * <p>Merges the state of the given object into the data source: the object's previously
 * persisted triples are removed and replaced with a freshly generated RDF view of the object,
 * cascading the merge to related objects.
 */
@SuppressWarnings("unchecked")
public <T> T merge(final T theT) {
    assertStateOk(theT);

    // Figure out which triples the object currently has in the data source so
    // they can be removed before the merged state is written.
    Graph aExistingData = null;
    if (theT instanceof EmpireGenerated) {
        // Instrumented instances remember the triples they were materialized from.
        aExistingData = ((EmpireGenerated) theT).getInstanceTriples();
    }

    if (aExistingData == null || aExistingData.isEmpty()) {
        try {
            // Fall back to asking the data source to describe the object.
            aExistingData = assertContainsAndDescribe(theT);
        } catch (IllegalArgumentException e) {
            // It doesn't exist, so really, nothing to delete.
            aExistingData = new GraphImpl();
        }
    }

    try {
        preUpdate(theT);

        // The new RDF state of the object.
        Graph aData = RdfGenerator.asRdf(theT);

        // Remember whether we started this operation; nested cascades re-enter
        // this method and must not finish the batch themselves.
        boolean isTopOperation = (mOp == null);

        DataSourceOperation aOp = new DataSourceOperation();

        if (doesSupportNamedGraphs() && EmpireUtil.hasNamedGraphSpecified(theT)) {
            // Scope the remove/add to the object's named graph.
            java.net.URI aGraphURI = EmpireUtil.getNamedGraph(theT);

            aOp.remove(aGraphURI, aExistingData);
            aOp.add(aGraphURI, aData);
        } else {
            aOp.remove(aExistingData);
            aOp.add(aData);
        }

        joinCurrentDataSourceOperation(aOp);

        // cascade the merge to related objects
        cascadeOperation(theT, new IsMergeCascade(), new MergeCascade());

        // NOTE(review): presumably this flushes the batched operation only when
        // isTopOperation is true — confirm against finishCurrentDataSourceOperation.
        finishCurrentDataSourceOperation(isTopOperation);

        postUpdate(theT);

        return theT;
    } catch (DataSourceException ex) {
        throw new PersistenceException(ex);
    } catch (InvalidRdfException ex) {
        // The object could not be rendered as RDF; this is a programming error.
        throw new IllegalStateException(ex);
    }
}
/**
 * Add this graph to the set of data to be added, to the specified named graph, when this
 * operation is executed
 *
 * @param theGraphURI the named graph the data should be added to
 * @param theGraph the data to add
 */
public void add(java.net.URI theGraphURI, Graph theGraph) {
    // Reuse the graph already queued for this named graph, or start a new one.
    Graph aExisting = mAdd.get(theGraphURI);
    final Graph aAccumulated = (aExisting != null) ? aExisting : new ExtGraph();

    // Fold the new statements into whatever is already queued.
    aAccumulated.addAll(theGraph);

    mAdd.put(theGraphURI, aAccumulated);
}
/**
 * Add this graph to the set of data to be removed, from the specified named graph, when this
 * operation is executed
 *
 * @param theGraphURI the named graph the data should be removed from
 * @param theGraph the data to remove
 */
public void remove(java.net.URI theGraphURI, Graph theGraph) {
    // Reuse the graph already queued for removal from this named graph, or start a new one.
    Graph aExisting = mRemove.get(theGraphURI);
    final Graph aAccumulated = (aExisting != null) ? aExisting : new ExtGraph();

    // Fold the statements slated for removal into the queued set.
    aAccumulated.addAll(theGraph);

    mRemove.put(theGraphURI, aAccumulated);
}
/**
 * Call this method if you need the RDF Graph that represents the R2RML mappings.
 *
 * @return an RDF Graph holding the statements produced for every mapping axiom
 */
@Deprecated
public Graph getGraph() {
    final OBDAMappingTransformer aTransformer = new OBDAMappingTransformer();
    aTransformer.setOntology(ontology);

    // Collect the statements generated for each mapping axiom.
    final List<Statement> aStatements = new ArrayList<Statement>();
    for (OBDAMappingAxiom aAxiom : this.mappings) {
        aStatements.addAll(aTransformer.getStatements(aAxiom, prefixmng));
    }

    @SuppressWarnings("deprecation")
    final Graph aResult = new GraphImpl();
    aResult.addAll(aStatements);
    return aResult;
}
/**
 * @param g Where to assemble the description.
 * @param tripleStore The KB instance to be described.
 * @param serviceURI The SPARQL service end point(s).
 * @param aDataset The data set identifier that will be used on the description (the bigdata
 *     namespace of the dataset is obtained from the <i>tripleStore</i>).
 */
public VoID(
    final Graph g,
    final AbstractTripleStore tripleStore,
    final String[] serviceURI,
    final Resource aDataset) {

    // Fail fast, naming the offending argument (the original threw bare
    // IllegalArgumentException with no message, giving no hint which one was bad).
    if (g == null) throw new IllegalArgumentException("g may not be null");

    if (tripleStore == null) throw new IllegalArgumentException("tripleStore may not be null");

    if (serviceURI == null) throw new IllegalArgumentException("serviceURI may not be null");

    if (serviceURI.length == 0) throw new IllegalArgumentException("serviceURI may not be empty");

    for (String s : serviceURI)
        if (s == null) throw new IllegalArgumentException("serviceURI elements may not be null");

    if (aDataset == null) throw new IllegalArgumentException("aDataset may not be null");

    this.g = g;
    this.tripleStore = tripleStore;
    this.serviceURI = serviceURI;
    this.f = g.getValueFactory();
    this.aDataset = aDataset;

    // Well-known blank node used to describe the default graph of the data set.
    this.aDefaultGraph = f.createBNode("defaultGraph");
}
/** * Describe the vocabularies which are in use in the KB based on the predicate partition * statistics. * * @param predicateParitionCounts The predicate partition statistics. */ protected void describeVocabularies(final IVCount[] predicatePartitionCounts) { // Find the distinct vocabularies in use. final Set<String> namespaces = new LinkedHashSet<String>(); { // property partitions. for (IVCount tmp : predicatePartitionCounts) { final URI p = (URI) tmp.getValue(); String namespace = p.getNamespace(); if (namespace.endsWith("#")) { // Strip trailing '#' per VoID specification. namespace = namespace.substring(0, namespace.length() - 1); } namespaces.add(namespace); } } // Sort into dictionary order. final String[] a = namespaces.toArray(new String[namespaces.size()]); Arrays.sort(a); for (String namespace : a) { g.add(aDataset, VoidVocabularyDecl.vocabulary, f.createURI(namespace)); } }
/** * Describe a named or default graph. * * @param graph The named graph. * @param predicatePartitionCounts The predicate partition statistics for that graph. * @param classPartitionCounts The class partition statistics for that graph. */ protected void describeGraph( final Resource graph, final IVCount[] predicatePartitionCounts, final IVCount[] classPartitionCounts) { // The graph is a Graph. g.add(graph, RDF.TYPE, SD.Graph); // #of triples in the default graph g.add(graph, VoidVocabularyDecl.triples, f.createLiteral(tripleStore.getStatementCount())); // #of entities in the default graph. g.add(graph, VoidVocabularyDecl.entities, f.createLiteral(tripleStore.getURICount())); // #of distinct predicates in the default graph. g.add(graph, VoidVocabularyDecl.properties, f.createLiteral(predicatePartitionCounts.length)); // #of distinct classes in the default graph. g.add(graph, VoidVocabularyDecl.classes, f.createLiteral(classPartitionCounts.length)); // property partition statistics. for (IVCount tmp : predicatePartitionCounts) { final BNode propertyPartition = f.createBNode(); final URI p = (URI) tmp.getValue(); g.add(graph, VoidVocabularyDecl.propertyPartition, propertyPartition); g.add(propertyPartition, VoidVocabularyDecl.property, p); g.add(propertyPartition, VoidVocabularyDecl.triples, f.createLiteral(tmp.count)); } // class partition statistics. { // per class partition statistics. for (IVCount tmp : classPartitionCounts) { final BNode classPartition = f.createBNode(); final BigdataValue cls = tmp.getValue(); g.add(graph, VoidVocabularyDecl.classPartition, classPartition); g.add(classPartition, VoidVocabularyDecl.class_, cls); g.add(classPartition, VoidVocabularyDecl.triples, f.createLiteral(tmp.count)); } } // end class partition statistics. }
/**
 * Describe the default data set (the one identified by the namespace associated with the {@link
 * AbstractTripleStore}).
 *
 * @param describeStatistics When <code>true</code>, the VoID description will include the {@link
 *     VoidVocabularyDecl#vocabulary} declarations, the property partition statistics, and the
 *     class partition statistics.
 * @param describeNamedGraphs When <code>true</code>, each named graph will also be described in
 *     the same level of detail as the default graph. Otherwise only the default graph will be
 *     described.
 */
public void describeDataSet(final boolean describeStatistics, final boolean describeNamedGraphs) {

    final String namespace = tripleStore.getNamespace();

    // This is a VoID data set.
    g.add(aDataset, RDF.TYPE, VoidVocabularyDecl.Dataset);

    // The namespace is used as a title for the data set.
    g.add(aDataset, DCTermsVocabularyDecl.title, f.createLiteral(namespace));

    // Also present the namespace in an unambiguous manner.
    g.add(aDataset, SD.KB_NAMESPACE, f.createLiteral(namespace));

    /*
     * Service end point for this namespace.
     *
     * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > Missing URL encoding
     * in RemoteRepositoryManager </a>
     */
    for (String uri : serviceURI) {
        g.add(
            aDataset,
            VoidVocabularyDecl.sparqlEndpoint,
            f.createURI(uri + "/" + ConnectOptions.urlEncode(namespace) + "/sparql"));
    }

    // any URI is considered to be an entity.
    g.add(aDataset, VoidVocabularyDecl.uriRegexPattern, f.createLiteral("^.*"));

    if (!describeStatistics) {
        // No statistics were requested; stop after the basic description.
        return;
    }

    // Frequency count of the predicates in the default graph.
    final IVCount[] predicatePartitionCounts = predicateUsage(tripleStore);

    // Frequency count of the classes in the default graph.
    final IVCount[] classPartitionCounts = classUsage(tripleStore);

    // Describe vocabularies based on the predicate partitions.
    describeVocabularies(predicatePartitionCounts);

    // defaultGraph description.
    {
        // Default graph in the default data set.
        g.add(aDataset, SD.defaultGraph, aDefaultGraph);

        // Describe the default graph using statistics.
        describeGraph(aDefaultGraph, predicatePartitionCounts, classPartitionCounts);
    } // end defaultGraph

    // (historical debug output retained for reference)
    // sb.append("termCount\t = " + tripleStore.getTermCount() + "\n");
    //
    // sb.append("uriCount\t = " + tripleStore.getURICount() + "\n");
    //
    // sb.append("literalCount\t = " + tripleStore.getLiteralCount() + "\n");
    //
    // /*
    //  * Note: The blank node count is only available when using the told
    //  * bnodes mode.
    //  */
    // sb
    //     .append("bnodeCount\t = "
    //         + (tripleStore.getLexiconRelation()
    //             .isStoreBlankNodes() ? ""
    //             + tripleStore.getBNodeCount() : "N/A")
    //         + "\n");

    /*
     * Report for each named graph.
     */
    if (describeNamedGraphs && tripleStore.isQuads()) {

        final SPORelation r = tripleStore.getSPORelation();

        // the index to use for distinct term scan.
        final SPOKeyOrder keyOrder = SPOKeyOrder.CSPO;

        // visit distinct IVs for context position on that index.
        @SuppressWarnings("rawtypes")
        final IChunkedIterator<IV> itr = r.distinctTermScan(keyOrder);

        // resolve IVs to terms efficiently during iteration.
        final BigdataValueIterator itr2 =
            new BigdataValueIteratorImpl(tripleStore /* resolveTerms */, itr);

        try {
            while (itr2.hasNext()) {

                /*
                 * Describe this named graph.
                 *
                 * Note: This is using the predicate and class partition
                 * statistics from the default graph (RDF merge) to identify
                 * the set of all possible predicates and classes within
                 * each named graph. It then tests each predicate and class
                 * partition against the named graph and ignores those which
                 * are not present in a given named graph. This is being
                 * done because we do not have a CPxx index.
                 */
                final BigdataResource graph = (BigdataResource) itr2.next();

                final IVCount[] predicatePartitionCounts2 =
                    predicateUsage(tripleStore, graph.getIV(), predicatePartitionCounts);

                final IVCount[] classPartitionCounts2 =
                    classUsage(tripleStore, graph.getIV(), classPartitionCounts);

                final BNode aNamedGraph = f.createBNode();

                // Named graph in the default data set.
                g.add(aDataset, SD.namedGraph, aNamedGraph);

                // The name of that named graph.
                g.add(aNamedGraph, SD.name, graph);

                // Describe the named graph.
                describeGraph(aNamedGraph, predicatePartitionCounts2, classPartitionCounts2);
            }
        } finally {
            // Always release the iterator's resources, even on failure.
            itr2.close();
        }
    }
}
/**
 * Implementation using the json.org API.
 *
 * <p>Parses an RDF/JSON document of the shape
 * <code>{ subject: { predicate: [ {value, type, lang?, datatype?, graphs?} ] } }</code>
 * into a Sesame Graph.
 *
 * @param json The RDF/JSON string to be parsed and converted into a Sesame Graph.
 * @return A Sesame Graph if successful, otherwise null.
 */
public static Graph rdfJsonToGraph(String json) {
    Graph result = new GraphImpl();
    ValueFactory vf = result.getValueFactory();

    try {
        JSONObject input = new JSONObject(json);
        // Top-level keys are the subjects.
        Iterator<String> subjects = input.keys();
        while (subjects.hasNext()) {
            String subjStr = subjects.next();
            Resource subject = null;
            // A "_:" prefix marks a blank node id; anything else is taken as a URI.
            subject =
                subjStr.startsWith("_:")
                    ? vf.createBNode(subjStr.substring(2))
                    : vf.createURI(subjStr);
            JSONObject pObj = input.getJSONObject(subjStr);
            // Second-level keys are the predicates for this subject.
            Iterator<String> predicates = pObj.keys();
            while (predicates.hasNext()) {
                String predStr = predicates.next();
                URI predicate = vf.createURI(predStr);
                JSONArray predArr = pObj.getJSONArray(predStr);
                for (int i = 0; i < predArr.length(); i++) {
                    Value object = null;
                    JSONObject obj = predArr.getJSONObject(i);
                    // Silently skip value objects lacking the mandatory "value"/"type" keys.
                    if (!obj.has("value")) {
                        continue;
                    }
                    String value = obj.getString("value");
                    if (!obj.has("type")) {
                        continue;
                    }
                    String type = obj.getString("type");
                    String lang = null;
                    if (obj.has("lang")) {
                        lang = obj.getString("lang");
                    }
                    String datatype = null;
                    if (obj.has("datatype")) {
                        datatype = obj.getString("datatype");
                    }
                    if ("literal".equals(type)) {
                        // A language tag takes precedence over a datatype when both appear.
                        if (lang != null) {
                            object = vf.createLiteral(value, lang);
                        } else if (datatype != null) {
                            object = vf.createLiteral(value, vf.createURI(datatype));
                        } else {
                            object = vf.createLiteral(value);
                        }
                    } else if ("bnode".equals(type)) {
                        // NOTE(review): assumes the value carries a "_:" prefix — confirm
                        // against the producers of this format.
                        object = vf.createBNode(value.substring(2));
                    } else if ("uri".equals(type)) {
                        object = vf.createURI(value);
                    }
                    if (obj.has("graphs")) {
                        // The triple is asserted once per listed context.
                        JSONArray a = obj.getJSONArray("graphs");
                        // System.out.println("a.length() = " + a.length());
                        for (int j = 0; j < a.length(); j++) {
                            // Note: any nulls here will result in statements in the default context.
                            String s = a.getString(j);
                            Resource context = s.equals("null") ? null : vf.createURI(s);
                            // System.out.println("context = " + context);
                            result.add(subject, predicate, object, context);
                        }
                    } else {
                        // No contexts listed: assert in the default context.
                        result.add(subject, predicate, object);
                    }
                }
            }
        }
    } catch (JSONException e) {
        // Malformed input: log and signal failure with null per the contract above.
        log.error(e.getMessage(), e);
        return null;
    }
    return result;
}
/**
 * Implementation using the org.json API.
 *
 * <p>Serializes a Sesame Graph as an RDF/JSON document keyed by subject, then predicate, with
 * one value object per distinct (subject, predicate, object) combination.
 *
 * @param graph A Sesame Graph.
 * @return An RDF/JSON string if successful, otherwise null.
 */
public static String graphToRdfJson(Graph graph) {
    JSONObject result = new JSONObject();
    try {
        // Collect the distinct subjects first; the output object is keyed by subject.
        Set<Resource> subjects = new HashSet<Resource>();
        for (Statement s1 : graph) {
            subjects.add(s1.getSubject());
        }
        for (Resource subject : subjects) {
            JSONObject predicateObj = new JSONObject();
            // Distinct predicates used with this subject.
            Set<URI> predicates = new HashSet<URI>();
            Iterator<Statement> s2 = graph.match(subject, null, null);
            while (s2.hasNext()) {
                predicates.add(s2.next().getPredicate());
            }
            for (URI predicate : predicates) {
                JSONArray valueArray = new JSONArray();
                // Distinct objects for this (subject, predicate) pair.
                Iterator<Statement> stmnts = graph.match(subject, predicate, null);
                Set<Value> objects = new HashSet<Value>();
                while (stmnts.hasNext()) {
                    objects.add(stmnts.next().getObject());
                }
                for (Value object : objects) {
                    // Gather every context in which this exact triple occurs.
                    Iterator<Statement> stmnts2 = graph.match(subject, predicate, object);
                    JSONArray contexts = new JSONArray();
                    int i = 0;
                    // Only emit a "graphs" key when at least one context is non-default.
                    boolean nonDefaultContext = false;
                    while (stmnts2.hasNext()) {
                        Resource context = stmnts2.next().getContext();
                        // A null context is serialized as a JSON null entry.
                        contexts.put(i, null == context ? null : context.toString());
                        if (null != context) {
                            nonDefaultContext = true;
                        }
                        i++;
                    }
                    JSONObject valueObj = new JSONObject();
                    valueObj.put("value", object.stringValue());
                    if (object instanceof Literal) {
                        valueObj.put("type", "literal");
                        Literal l = (Literal) object;
                        // "lang" and "datatype" are mutually exclusive in the output;
                        // the language tag wins when both are present on the literal.
                        if (l.getLanguage() != null) {
                            valueObj.put("lang", l.getLanguage());
                        } else if (l.getDatatype() != null) {
                            valueObj.put("datatype", l.getDatatype().stringValue());
                        }
                    } else if (object instanceof BNode) {
                        valueObj.put("type", "bnode");
                    } else if (object instanceof URI) {
                        valueObj.put("type", "uri");
                    }
                    if (nonDefaultContext) {
                        valueObj.put("graphs", contexts);
                    }
                    valueArray.put(valueObj);
                }
                predicateObj.put(predicate.stringValue(), valueArray);
            }
            result.put(subject.stringValue(), predicateObj);
        }
        // Pretty-print with a 2-space indent.
        return result.toString(2);
    } catch (JSONException e) {
        // Serialization failure: log and signal with null per the contract above.
        log.error(e.getMessage(), e);
    }
    return null;
}