public boolean canLoad(OWLOntologyDocumentSource documentSource) {
   if (documentSource.isReaderAvailable()) {
     return true;
   }
   if (documentSource.isInputStreamAvailable()) {
     return true;
   }
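   // Assumption: parsableSchemes is a set of IRI schemes (e.g. http, https, file) that
   // are considered loadable without probing the document itself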
   if (parsableSchemes.contains(documentSource.getDocumentIRI().getScheme())) {
     return true;
   }
   // If we can open an input stream then we can attempt to parse the ontology
   // TODO: Take into consideration the request type!
   try (InputStream is = documentSource.getDocumentIRI().toURI().toURL().openStream()) {
     // We only need to know that the stream can be opened; it is closed automatically
     return true;
   } catch (UnknownHostException e) {
     logger.info("Unknown host: " + e.getMessage());
   } catch (MalformedURLException e) {
     logger.info("Malformed URL: " + e.getMessage());
   } catch (FileNotFoundException e) {
     logger.info("File not found: " + e.getMessage());
   } catch (IOException e) {
     logger.info("IO Exception: " + e.getMessage());
   }
   return false;
 }
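  /**
   * Loads an ontology from the given document source by trying each available parser
   * in turn.  The first parser that succeeds determines the ontology format; if no
   * parser succeeds, an UnparsableOntologyException recording each parser's failure
   * is thrown.
   */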
  public OWLOntology loadOWLOntology(
      OWLOntologyDocumentSource documentSource, final OWLOntologyCreationHandler mediator)
      throws OWLOntologyCreationException {
     // Attempt to parse the ontology by looping through the parsers.  If a parser
     // succeeds we return the parsed ontology straight away.  This is more reliable
     // than selecting a single parser based on, say, the file extension (although the
     // parser list could be ordered so that the most likely parser, as suggested by
     // the file extension, is tried first).
    Map<OWLParser, OWLParserException> exceptions =
        new LinkedHashMap<OWLParser, OWLParserException>();
     // The ontology must be created via the super method (called below), because
     // an exception is thrown if someone tries to create an ontology directly.

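     // Note whether the manager already contains an ontology with this document IRI;
     // this is used below to decide whether a partially parsed ontology should be
     // cleared out or removed on failure.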
    OWLOntology existingOntology = null;
    IRI iri = documentSource.getDocumentIRI();
    if (getOWLOntologyManager().contains(iri)) {
      existingOntology = getOWLOntologyManager().getOntology(iri);
    }
    OWLOntologyID ontologyID = new OWLOntologyID();
    OWLOntology ont =
        super.createOWLOntology(ontologyID, documentSource.getDocumentIRI(), mediator);
    // Now parse the input into the empty ontology that we created
    for (final OWLParser parser : getParsers()) {
      try {
        if (existingOntology == null && !ont.isEmpty()) {
           // A failed attempt by a previous parser left partial content in the ontology,
           // so discard it and create a fresh ontology before trying the next parser.
          getOWLOntologyManager().removeOntology(ont);
          ont = super.createOWLOntology(ontologyID, documentSource.getDocumentIRI(), mediator);
        }
        OWLOntologyFormat format = parser.parse(documentSource, ont);
        mediator.setOntologyFormat(ont, format);
        return ont;
      } catch (IOException e) {
         // An I/O problem will affect every parser, so there is no point in trying
         // the others.  Clean up and rethrow as an ontology creation exception.
        getOWLOntologyManager().removeOntology(ont);
        throw new OWLOntologyCreationIOException(e);
      } catch (UnloadableImportException e) {
        // First clean up
        getOWLOntologyManager().removeOntology(ont);
        throw e;
      } catch (OWLParserException e) {
         // Record this attempt and continue trying with the remaining parsers.
        exceptions.put(parser, e);
      } catch (RuntimeException e) {
        // Clean up and rethrow
        getOWLOntologyManager().removeOntology(ont);
        throw e;
      }
    }
    if (existingOntology == null) {
      getOWLOntologyManager().removeOntology(ont);
    }
    // We haven't found a parser that could parse the ontology properly.  Throw an
    // exception whose message contains the stack traces from all of the parsers
    // that we have tried.
    throw new UnparsableOntologyException(documentSource.getDocumentIRI(), exceptions);
  }
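
  // Usage sketch (an assumption about the surrounding API, not shown in this class):
  // callers normally load ontologies through an OWLOntologyManager rather than by
  // calling loadOWLOntology directly.  The manager asks each registered factory
  // whether it canLoad the document source and delegates to one that accepts it.
  // With an OWL API 3.x style manager this might look like the following (the file
  // path is hypothetical):
  //
  //   OWLOntologyManager manager = OWLManager.createOWLOntologyManager();
  //   OWLOntology ontology = manager.loadOntologyFromOntologyDocument(
  //       new FileDocumentSource(new File("/path/to/ontology.owl")));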