private void upload(URI graphURI, Resource context) throws Exception {
  RepositoryConnection con = dataRep.getConnection();
  con.setAutoCommit(false);
  try {
    RDFFormat rdfFormat = Rio.getParserFormatForFileName(graphURI.toString(), RDFFormat.TURTLE);
    RDFParser rdfParser = Rio.createParser(rdfFormat, dataRep.getValueFactory());
    rdfParser.setVerifyData(false);
    rdfParser.setDatatypeHandling(DatatypeHandling.IGNORE);
    // rdfParser.setPreserveBNodeIDs(true);
    RDFInserter rdfInserter = new RDFInserter(con);
    rdfInserter.enforceContext(context);
    rdfParser.setRDFHandler(rdfInserter);
    URL graphURL = new URL(graphURI.toString());
    InputStream in = graphURL.openStream();
    try {
      rdfParser.parse(in, graphURI.toString());
    } finally {
      in.close();
    }
    con.setAutoCommit(true);
  } finally {
    con.close();
  }
}
public void setupInMemory() throws Exception {
  // create a Sesame in-memory repository
  String owlfile = "src/test/resources/onto2.owl";
  repo = new SesameClassicInMemoryRepo("my_name", owlfile, false, "TreeWitness");
  repo.initialize();
  con = (RepositoryConnection) repo.getConnection();
}
protected void load(final String modelPathWithoutExtension)
    throws RepositoryException, RDFParseException, IOException {
  final File modelFile = new File(modelPathWithoutExtension + getPostfix());
  repository.initialize();
  connection = repository.getConnection();
  connection.add(modelFile, RDFConstants.BASE_PREFIX, RDFFormat.TURTLE);
}
/**
 * Creates a local repository, the standard way to get a Sesame RDF repository.
 *
 * @param tempDir Warning! If not <code>null</code>, the store will reuse the data left in this
 *     directory from the previous session.
 * @return an initialized repository backed by a {@link MemoryStore} with RDFS inferencing
 * @throws RepositoryException
 */
public static Repository createLocalRepository(File tempDir) throws RepositoryException {
  // NB! MemoryStore(dir) will restore its contents!
  Repository myRepository =
      new SailRepository(new ForwardChainingRDFSInferencer(new MemoryStore(tempDir)));
  myRepository.initialize();
  return myRepository;
}
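// Hypothetical usage sketch (not part of the original sources): creates the
// repository in a scratch directory, adds one statement, and shuts it down.
// The directory name and the example URIs are assumptions for illustration.
public static void localRepositoryExample() throws Exception {
  Repository repo = createLocalRepository(new File("/tmp/sesame-store"));
  RepositoryConnection con = repo.getConnection();
  try {
    ValueFactory vf = con.getValueFactory();
    con.add(
        vf.createURI("http://example.org/s"),
        vf.createURI("http://example.org/p"),
        vf.createLiteral("o"));
  } finally {
    con.close();
    repo.shutDown();
  }
}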
public static LoadStatementsResult loadStatements(String file, String tempDir, PrintStream... out)
    throws Exception {
  LoadStatementsResult result = new LoadStatementsResult();
  // loading
  Repository rdf = Helper.createLocalRepository();
  RepositoryConnection con = rdf.getConnection();
  try {
    importRDFXMLFile(rdf, "http://localhost/namespace", new File(file));
    RepositoryResult<Statement> statements = con.getStatements(null, null, null, false);
    try {
      while (statements.hasNext()) {
        Statement st = statements.next();
        if (!(st instanceof MemStatement)) {
          throw new Exception("Expected a MemStatement but got: " + st.getClass().getName());
        }
        if (((MemStatement) st).isExplicit()) {
          String property = st.getPredicate().toString();
          result.subjects.add(st.getSubject().toString());
          result.properties.add(property);
          String[] subject = st.getSubject().toString().split("#");
          boolean isResource = st.getObject() instanceof Resource;
          if (isResource) {
            result.values.add("R:" + st.getObject().stringValue());
          } else {
            result.values.add(
                "L" + ((Literal) st.getObject()).getLanguage() + ":" + st.getObject().stringValue());
          }
          if (subject.length == 2) {
            result.statements.add(
                formatLine(subject[0] + "#" + subject[1], property, isResource, st.getObject().toString()));
          } else {
            if (subject.length > 2) {
              throw new Exception("Multiple # in resource " + st.getSubject());
            }
            result.statements.add(
                formatLine(subject[0], property, isResource, st.getObject().toString()));
          }
        }
      }
    } finally {
      statements.close();
    }
  } finally {
    con.close();
  }
  return result;
}
/** Creates a local repository, the standard way to get a Sesame RDF repository. */
public static Repository createLocalRepository() throws RepositoryException {
  // initializing an in-memory RDF repository with RDFS and direct-type inferencing
  Repository repository =
      new SailRepository(
          new DirectTypeHierarchyInferencer(
              new ForwardChainingRDFSInferencer(new MemoryStore())));
  repository.initialize();
  return repository;
}
public TestSuite createTestSuite() throws Exception {
  // Create test suite
  TestSuite suite = new TestSuite(N3ParserTestCase.class.getName());

  // Add the manifest to a repository and query it
  Repository repository = new SailRepository(new MemoryStore());
  repository.initialize();
  RepositoryConnection con = repository.getConnection();

  URL url = url(MANIFEST_URL);
  con.add(url, base(MANIFEST_URL), RDFFormat.TURTLE);

  // Add all positive parser tests to the test suite
  String query =
      "SELECT testURI, inputURL, outputURL "
          + "FROM {testURI} rdf:type {n3test:PositiveParserTest}; "
          + "     n3test:inputDocument {inputURL}; "
          + "     n3test:outputDocument {outputURL} "
          + "USING NAMESPACE n3test = <http://www.w3.org/2004/11/n3test#>";
  TupleQueryResult queryResult = con.prepareTupleQuery(QueryLanguage.SERQL, query).evaluate();
  while (queryResult.hasNext()) {
    BindingSet bindingSet = queryResult.next();
    String testURI = bindingSet.getValue("testURI").toString();
    String inputURL = bindingSet.getValue("inputURL").toString();
    String outputURL = bindingSet.getValue("outputURL").toString();
    suite.addTest(new PositiveParserTest(testURI, inputURL, outputURL));
  }
  queryResult.close();

  // Add all negative parser tests to the test suite
  query =
      "SELECT testURI, inputURL "
          + "FROM {testURI} rdf:type {n3test:NegativeParserTest}; "
          + "     n3test:inputDocument {inputURL} "
          + "USING NAMESPACE n3test = <http://www.w3.org/2004/11/n3test#>";
  queryResult = con.prepareTupleQuery(QueryLanguage.SERQL, query).evaluate();
  while (queryResult.hasNext()) {
    BindingSet bindingSet = queryResult.next();
    String testURI = bindingSet.getValue("testURI").toString();
    String inputURL = bindingSet.getValue("inputURL").toString();
    suite.addTest(new NegativeParserTest(testURI, inputURL));
  }
  queryResult.close();

  con.close();
  repository.shutDown();

  return suite;
}
protected Repository createRepository() throws Exception {
  Repository repo = newRepository();
  repo.initialize();
  RepositoryConnection con = repo.getConnection();
  try {
    con.clear();
    con.clearNamespaces();
  } finally {
    con.close();
  }
  return repo;
}
@Override
public boolean toRdf(final Repository myRepository, final int modelVersion, final URI... keyToUse)
    throws OpenRDFException {
  super.toRdf(myRepository, modelVersion, keyToUse);

  final RepositoryConnection con = myRepository.getConnection();
  try {
    if (SpinInferencingRuleImpl.DEBUG) {
      SpinInferencingRuleImpl.log.debug("SpinInferencingRuleImpl.toRdf: keyToUse=" + keyToUse);
    }
    final URI keyUri = this.getKey();
    con.setAutoCommit(false);
    con.add(keyUri, RDF.TYPE, SpinInferencingRuleSchema.getSpinInferencingRuleTypeUri(), keyToUse);
    // If everything went as planned, we can commit the result
    con.commit();
    return true;
  } catch (final RepositoryException re) {
    // Something went wrong during the transaction, so we roll it back
    con.rollback();
    SpinInferencingRuleImpl.log.error("RepositoryException: " + re.getMessage());
  } finally {
    con.close();
  }
  return false;
}
/**
 * Execute a SELECT SPARQL query against the graphs
 *
 * @param qs SELECT SPARQL query
 * @return list of solutions, each containing a hashmap of bindings, or <code>null</code> if the
 *     query fails
 */
public List<HashMap<String, Value>> runSPARQL(String qs) {
  try {
    RepositoryConnection con = currentRepository.getConnection();
    try {
      TupleQuery query = con.prepareTupleQuery(org.openrdf.query.QueryLanguage.SPARQL, qs);
      TupleQueryResult qres = query.evaluate();
      try {
        ArrayList<HashMap<String, Value>> reslist = new ArrayList<HashMap<String, Value>>();
        while (qres.hasNext()) {
          BindingSet b = qres.next();
          Set<String> names = b.getBindingNames();
          HashMap<String, Value> hm = new HashMap<String, Value>();
          for (String n : names) {
            hm.put(n, b.getValue(n));
          }
          reslist.add(hm);
        }
        return reslist;
      } finally {
        qres.close();
      }
    } finally {
      con.close();
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  return null;
}
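// Hypothetical usage sketch (not part of the original sources): runs a simple
// SELECT over the dataset and prints each binding. The query text and the
// variable name are assumptions for illustration.
public void runSPARQLExample() {
  List<HashMap<String, Value>> rows = runSPARQL("SELECT ?s WHERE { ?s ?p ?o } LIMIT 10");
  if (rows != null) {
    for (HashMap<String, Value> row : rows) {
      System.out.println("s = " + row.get("s"));
    }
  }
}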
public ResponseHandler(String resource, Endpoint endpoint) throws RepositoryException {
  this.resource = resource;
  this.endpoint = endpoint;
  triples = new SailRepository(new MemoryStore());
  triples.initialize();
}
@Override
public void reInitialize() throws Exception {
  log.info("Reinitializing repository and connection due to error in past results.");

  if (conn.isOpen()) {
    try {
      log.debug("Trying to close connection, interrupt time is 10000");
      Utils.closeConnectionTimeout(conn, 10000);
    } catch (Exception e) {
      // ignore
    }
  }

  if (sailRepo != null) {
    // timeout of 20 min for shutdown
    boolean shutDown = Utils.shutdownRepositoryTimeout(sailRepo, 20 * 60 * 1000);
    sailRepo = null;
    if (shutDown) log.debug("Repository shut down.");
    else log.debug("Failed to shut down repository within 20 minutes!");
  }

  // log.info("Waiting for 2 minutes to give server time for reinitialization");
  // Thread.sleep(120000);

  log.debug("loading repositories from scratch.");
  System.gc();
  sailRepo = SesameRepositoryLoader.loadRepositories(new VoidReportStream());
  conn = sailRepo.getConnection();
  log.debug("reinitialize done.");
}
@Override
protected void tearDown() throws Exception {
  if (dataRep != null) {
    dataRep.shutDown();
    dataRep = null;
  }
}
private void saveLog(DefaultQueryLog log) {
  try {
    RepositoryConnection conn = null;
    try {
      conn = repository.getConnection();
      ValueFactory vf = conn.getValueFactory();
      URI queryURI = vf.createURI(NS_QUERY + queryId);
      URI logURI = vf.createURI(NS_QUERY + log.timestamp());
      conn.add(queryURI, RDF.TYPE, vf.createURI(NS_QUERY + "Query"));
      conn.add(queryURI, vf.createURI(NS_QUERY + "log"), logURI);
      conn.add(logURI, RDF.TYPE, vf.createURI(NS_QUERY + "QueryLog"));
      conn.add(logURI, vf.createURI(NS_QUERY + "timestamp"), vf.createLiteral(log.timestamp()));
      for (QueryResultBindingSet set : log.result()) {
        BNode setNode = vf.createBNode();
        conn.add(logURI, vf.createURI(NS_QUERY + "bindingSet"), setNode);
        for (QueryResultBinding binding : set) {
          BNode bindingNode = vf.createBNode();
          conn.add(setNode, vf.createURI(NS_QUERY + "binding"), bindingNode);
          conn.add(bindingNode, vf.createURI(NS_QUERY + "name"), vf.createLiteral(binding.name()));
          conn.add(bindingNode, vf.createURI(NS_QUERY + "value"), vf.createLiteral(binding.value()));
        }
      }
    } finally {
      if (conn != null) {
        conn.close();
      }
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
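// Hypothetical verification sketch (not part of the original sources): dumps
// the statements written by saveLog() for one query URI. It assumes the same
// NS_QUERY namespace constant; the repository and queryId are passed in here
// so the snippet is self-contained.
private static void dumpLog(Repository repository, String nsQuery, String queryId)
    throws RepositoryException {
  RepositoryConnection conn = repository.getConnection();
  try {
    URI queryURI = conn.getValueFactory().createURI(nsQuery + queryId);
    RepositoryResult<Statement> stmts = conn.getStatements(queryURI, null, null, false);
    try {
      while (stmts.hasNext()) {
        System.out.println(stmts.next());
      }
    } finally {
      stmts.close();
    }
  } finally {
    conn.close();
  }
}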
private String serialize(final Repository repo) throws RDFHandlerException, RepositoryException {
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  RDFWriter w = Rio.createWriter(format, bos);
  RepositoryConnection rc = repo.getConnection();
  try {
    rc.export(w);
  } finally {
    rc.close();
  }
  String s = new String(bos.toByteArray());
  if (RDFFormat.RDFXML == format || RDFFormat.TRIX == format) {
    // Chop off the XML declaration.
    int i = s.indexOf('\n');
    s = s.substring(i + 1);
  }
  s = s.replaceAll("\n", "");
  s = "<" + TAGNAME + " xmlns='" + NAMESPACE + "'>" + s + "</" + TAGNAME + ">";
  return s;
}
public void loadDataFromURL(String stringURL) {
  RepositoryConnection con;
  try {
    con = currentRepository.getConnection();
    try {
      // upload a URL
      URL url = new URL(stringURL);
      con.add(url, null, RDFFormat.RDFXML);
    } catch (RDFParseException e) {
      e.printStackTrace();
    } catch (RepositoryException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } finally {
      try {
        con.close();
      } catch (RepositoryException e) {
        e.printStackTrace();
      }
    }
  } catch (RepositoryException e1) {
    e1.printStackTrace();
  }
}
/**
 * Insert Triple/Statement into graph
 *
 * @param s subject uriref
 * @param p predicate uriref
 * @param o value object (URIref or Literal)
 * @param contexts varArgs context objects (use default graph if null)
 */
public void add(Resource s, URI p, Value o, Resource... contexts) {
  if (log.isDebugEnabled())
    log.debug(
        "[SesameDataSet:add] Add triple ("
            + s.stringValue() + ", " + p.stringValue() + ", " + o.stringValue() + ").");
  try {
    RepositoryConnection con = currentRepository.getConnection();
    try {
      ValueFactory myFactory = con.getValueFactory();
      Statement st = myFactory.createStatement(s, p, o);
      con.add(st, contexts);
      con.commit();
    } catch (Exception e) {
      e.printStackTrace();
    } finally {
      con.close();
    }
  } catch (Exception e) {
    // handle exception
  }
}
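// Hypothetical usage sketch (not part of the original sources): inserts one
// triple into the default graph via the add() method above. The dataset
// variable and the example URIs are assumptions for illustration.
public void addExample(SesameDataSet dataset) {
  ValueFactory vf = new ValueFactoryImpl();
  dataset.add(
      vf.createURI("http://example.org/book/1"),
      vf.createURI("http://purl.org/dc/elements/1.1/title"),
      vf.createLiteral("A Semantic Web Primer"));
}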
/**
 * Load data in specified graph (use default graph if contexts is null)
 *
 * @param filePath
 * @param format
 * @param contexts
 */
public void loadDataFromFile(String filePath, RDFFormat format, Resource... contexts) {
  RepositoryConnection con;
  try {
    con = currentRepository.getConnection();
    try {
      // upload a file
      File f = new File(filePath);
      con.add(f, null, format, contexts);
    } catch (RDFParseException e) {
      e.printStackTrace();
    } catch (RepositoryException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } finally {
      try {
        con.close();
      } catch (RepositoryException e) {
        e.printStackTrace();
      }
    }
  } catch (RepositoryException e1) {
    e1.printStackTrace();
  }
}
private void calculateGraphsAndDataSets() {
  try {
    RepositoryConnection conn = repository.getConnection();
    try {
      TupleQuery q = conn.prepareTupleQuery(QueryLanguage.SPARQL, SparqlUtils.PREFIXES + QUERY);
      TupleQueryResult results = q.evaluate();
      graphs = new LinkedList<DataCubeGraph>();
      datasets = new LinkedList<DataSet>();
      String lastG = null;
      String lastDSD = null;
      SparqlDCGraph dcGraph;
      SparqlStructure dcStructure = null;
      Collection<DataSet> graphDataSets = null;
      Collection<Structure> graphStructures = null;
      Collection<DataSet> structDataSets = null;
      while (results.hasNext()) {
        BindingSet set = results.next();
        String g = set.getValue("g").stringValue();
        String ds = set.getValue("ds").stringValue();
        String dsd = set.getValue("dsd").stringValue();
        if (!g.equals(lastG)) {
          // new graph
          dcGraph = new SparqlDCGraph(repository, g);
          graphDataSets = new LinkedList<DataSet>();
          graphStructures = new LinkedList<Structure>();
          dcGraph.setDatasets(graphDataSets);
          dcGraph.setStructures(graphStructures);
          graphs.add(dcGraph);
          // new structure
          dcStructure = new SparqlStructure(repository, dsd, g);
          structDataSets = new LinkedList<DataSet>();
          dcStructure.setDatasets(structDataSets);
          graphStructures.add(dcStructure);
        } else if (!dsd.equals(lastDSD)) {
          // new structure
          dcStructure = new SparqlStructure(repository, dsd, g);
          structDataSets = new LinkedList<DataSet>();
          dcStructure.setDatasets(structDataSets);
          graphStructures.add(dcStructure);
        }
        SparqlDataSet dcDataSet = new SparqlDataSet(repository, ds, g);
        dcDataSet.setStructure(dcStructure);
        graphDataSets.add(dcDataSet);
        structDataSets.add(dcDataSet);
        datasets.add(dcDataSet);
        lastG = g;
        lastDSD = dsd;
      }
      results.close();
    } finally {
      conn.close();
    }
  } catch (RepositoryException ex) {
    Logger.getLogger(SparqlDCRepository.class.getName()).log(Level.SEVERE, null, ex);
  } catch (MalformedQueryException ex) {
    Logger.getLogger(SparqlDCRepository.class.getName()).log(Level.SEVERE, null, ex);
  } catch (QueryEvaluationException ex) {
    Logger.getLogger(SparqlDCRepository.class.getName()).log(Level.SEVERE, null, ex);
  }
}
/**
 * This test verifies whether the transparent caching works for the three resources provided by
 * our dummy provider.
 *
 * @throws Exception
 */
@Test
public void testCachedResources() throws Exception {
  String uri1 = "http://localhost/resource1";
  String uri2 = "http://localhost/resource2";
  String uri3 = "http://localhost/resource3";

  RepositoryConnection con = repository.getConnection();
  try {
    con.begin();
    List<Statement> list1 =
        Iterations.asList(con.getStatements(con.getValueFactory().createURI(uri1), null, null, true));
    Assert.assertEquals(3, list1.size());
    Assert.assertThat(
        list1,
        allOf(
            CoreMatchers.<Statement>hasItem(hasProperty("object", hasToString("\"Value 1\""))),
            CoreMatchers.<Statement>hasItem(hasProperty("object", hasToString("\"Value X\"")))));
    con.commit();

    con.begin();
    List<Statement> list2 =
        Iterations.asList(con.getStatements(con.getValueFactory().createURI(uri2), null, null, true));
    Assert.assertEquals(2, list2.size());
    Assert.assertThat(
        list2,
        allOf(CoreMatchers.<Statement>hasItem(hasProperty("object", hasToString("\"Value 2\"")))));
    con.commit();

    con.begin();
    List<Statement> list3 =
        Iterations.asList(con.getStatements(con.getValueFactory().createURI(uri3), null, null, true));
    Assert.assertEquals(2, list3.size());
    Assert.assertThat(
        list3,
        allOf(
            CoreMatchers.<Statement>hasItem(hasProperty("object", hasToString("\"Value 3\""))),
            CoreMatchers.<Statement>hasItem(hasProperty("object", hasToString("\"Value 4\"")))));
    con.commit();
  } catch (RepositoryException ex) {
    con.rollback();
  } finally {
    con.close();
  }
}
public SesameDataSet(String sesameServer, String repositoryID) {
  currentRepository = new HTTPRepository(sesameServer, repositoryID);
  try {
    currentRepository.initialize();
  } catch (RepositoryException e) {
    e.printStackTrace();
  }
}
public ValueLoader(String host, String port, String user, String pwd) {
  String connectionString = "jdbc:virtuoso://" + host + ':' + port;
  repository = new VirtuosoRepository(connectionString, user, pwd, true);
  try {
    // open and immediately close a connection to verify that the repository is reachable
    repository.getConnection().close();
  } catch (RepositoryException e) {
    throw new IllegalArgumentException(e.getMessage(), e);
  }
}
@Override
public void initialize() throws Exception {
  log.info("Performing Sesame Initialization...");
  sailRepo = SesameRepositoryLoader.loadRepositories(report);
  if (!Config.getConfig().isFill()) conn = sailRepo.getConnection();
  log.info("Sesame Repository successfully initialized.");
}
@Before
public void setUp() throws Exception {
  repository = new SailRepository(new MemoryStore());
  repository.initialize();
  ObjectRepositoryFactory factory = new ObjectRepositoryFactory();
  ObjectRepository objectRepository = factory.createRepository(repository);
  connection = objectRepository.getConnection();
}
/**
 * Compares the models of the default context of two repositories and returns true if rep1 is a
 * subset of rep2. Note that the method pulls the entire default context of both repositories into
 * main memory. Use with caution.
 */
public static boolean isSubset(Repository rep1, Repository rep2) throws RepositoryException {
  Set<Statement> model1, model2;

  RepositoryConnection con1 = rep1.getConnection();
  try {
    model1 = Iterations.asSet(con1.getStatements(null, null, null, true));
  } finally {
    con1.close();
  }

  RepositoryConnection con2 = rep2.getConnection();
  try {
    model2 = Iterations.asSet(con2.getStatements(null, null, null, true));
  } finally {
    con2.close();
  }

  return ModelUtil.isSubset(model1, model2);
}
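// Hypothetical usage sketch (not part of the original sources): builds two
// in-memory repositories where the first holds a strict subset of the second,
// then checks both directions. The example URIs are assumptions.
public static void isSubsetExample() throws Exception {
  Repository rep1 = new SailRepository(new MemoryStore());
  Repository rep2 = new SailRepository(new MemoryStore());
  rep1.initialize();
  rep2.initialize();
  ValueFactory vf = rep1.getValueFactory();
  URI s = vf.createURI("http://example.org/s");
  URI p = vf.createURI("http://example.org/p");
  RepositoryConnection con1 = rep1.getConnection();
  RepositoryConnection con2 = rep2.getConnection();
  try {
    con1.add(s, p, vf.createLiteral("a"));
    con2.add(s, p, vf.createLiteral("a"));
    con2.add(s, p, vf.createLiteral("b"));
  } finally {
    con1.close();
    con2.close();
  }
  System.out.println(isSubset(rep1, rep2)); // true
  System.out.println(isSubset(rep2, rep1)); // false
}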
/**
 * Evaluates the SPARQL SELECT query from the specified query pair on the repository.
 *
 * @param query Query pair.
 * @return Query result set.
 * @throws RepositoryException If no connection could be established, or connection fails.
 * @throws MalformedQueryException On query-related errors.
 * @throws QueryEvaluationException On query-related errors.
 */
public SparqlResultSet query(QueryPair query)
    throws RepositoryException, MalformedQueryException, QueryEvaluationException {
  RepositoryConnection conn = repo.getConnection();
  TupleQuery tq = conn.prepareTupleQuery(QueryLanguage.SPARQL, query.getSparqlQuery());
  SparqlResultSet ret =
      new SparqlResultSet(tq.evaluate(), query.getName(), query.getSparqlEntityIdVariables());
  // Note: this assumes SparqlResultSet materializes the result in its constructor;
  // otherwise closing the connection here would invalidate the lazy TupleQueryResult.
  conn.close();
  return ret;
}
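// Hypothetical alternative sketch (not part of the original sources): if
// SparqlResultSet were lazy, the safe pattern would be to consume the result
// fully before closing the connection, as below. The method name is
// illustrative only.
public List<BindingSet> queryEagerly(String sparql)
    throws RepositoryException, MalformedQueryException, QueryEvaluationException {
  RepositoryConnection conn = repo.getConnection();
  try {
    TupleQueryResult result = conn.prepareTupleQuery(QueryLanguage.SPARQL, sparql).evaluate();
    try {
      List<BindingSet> rows = new ArrayList<BindingSet>();
      while (result.hasNext()) {
        rows.add(result.next());
      }
      return rows;
    } finally {
      result.close();
    }
  } finally {
    conn.close();
  }
}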
public RepositoryConnection getConnection() {
  if (this.connection.get() == null) {
    try {
      this.connection.set(repository.getConnection());
      this.connection.get().setAutoCommit(false);
    } catch (RepositoryException e) {
      throw new IllegalStateException(e);
    }
  }
  return connection.get();
}
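// Hypothetical usage sketch (not part of the original sources): since
// getConnection() disables auto-commit, callers are responsible for
// committing. The example URIs below are assumptions for illustration.
public void addAndCommitExample() throws RepositoryException {
  RepositoryConnection con = getConnection();
  ValueFactory vf = con.getValueFactory();
  con.add(
      vf.createURI("http://example.org/s"),
      vf.createURI("http://example.org/p"),
      vf.createLiteral("o"));
  con.commit(); // nothing is visible to other connections until this point
}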
/**
 * Compares two models defined by the default context of two repositories and returns the
 * difference between the first and the second model (that is, all statements that are present in
 * rep1 but not in rep2). Blank node IDs are not relevant for model equality; they are mapped from
 * one model to the other by using the attached properties. Note that the method pulls the entire
 * default context of both repositories into main memory. Use with caution.
 *
 * <p><b>NOTE: this algorithm is currently broken; it doesn't actually map blank nodes between the
 * two models.</b>
 *
 * @return The collection of statements that is the difference between rep1 and rep2.
 */
public static Collection<? extends Statement> difference(Repository rep1, Repository rep2)
    throws RepositoryException {
  Collection<Statement> model1 = new HashSet<Statement>();
  Collection<Statement> model2 = new HashSet<Statement>();

  RepositoryConnection con1 = rep1.getConnection();
  try {
    Iterations.addAll(con1.getStatements(null, null, null, false), model1);
  } finally {
    con1.close();
  }

  RepositoryConnection con2 = rep2.getConnection();
  try {
    Iterations.addAll(con2.getStatements(null, null, null, false), model2);
  } finally {
    con2.close();
  }

  return difference(model1, model2);
}
/**
 * Import RDF data from a file
 *
 * @param filepath path to the file (/path/file) with RDF data
 * @param format RDF format of the file (used to select parser)
 */
public void addFile(String filepath, RDFFormat format) {
  try {
    RepositoryConnection con = currentRepository.getConnection();
    try {
      con.add(new File(filepath), "", format);
    } finally {
      con.close();
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
/**
 * Runs a SPARQL UPDATE query as specified.
 *
 * @param sparqlString Query string.
 * @throws RepositoryException
 * @throws MalformedQueryException
 * @throws QueryEvaluationException
 * @throws UpdateExecutionException
 */
public void plainSparqlUpdate(String sparqlString)
    throws RepositoryException, MalformedQueryException, QueryEvaluationException,
        UpdateExecutionException {
  RepositoryConnection conn = repo.getConnection();
  try {
    Update query = conn.prepareUpdate(QueryLanguage.SPARQL, sparqlString);
    query.execute();
  } finally {
    conn.close();
  }
}
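// Hypothetical usage sketch (not part of the original sources): inserts one
// triple via SPARQL UPDATE. The example URIs are assumptions for illustration.
public void sparqlUpdateExample() throws Exception {
  plainSparqlUpdate("INSERT DATA { <http://example.org/s> <http://example.org/p> \"o\" }");
}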